-rw-r--r--Documentation/devicetree/bindings/arm/msm/imem.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,osm.txt104
-rw-r--r--Documentation/devicetree/bindings/batterydata/batterydata.txt221
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gpucc.txt23
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmcc.txt9
-rw-r--r--Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt31
-rw-r--r--Documentation/devicetree/bindings/gpu/adreno-iommu.txt2
-rw-r--r--Documentation/devicetree/bindings/gpu/adreno.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt4
-rw-r--r--Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt10
-rw-r--r--Documentation/devicetree/bindings/media/video/msm-cci.txt4
-rw-r--r--Documentation/devicetree/bindings/media/video/msm-ir-cut.txt26
-rw-r--r--Documentation/devicetree/bindings/media/video/msm-ir-led.txt26
-rw-r--r--Documentation/devicetree/bindings/pci/msm_pcie.txt4
-rw-r--r--Documentation/devicetree/bindings/platform/msm/ipa.txt3
-rw-r--r--Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt4
-rw-r--r--Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt121
-rw-r--r--Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt20
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-qpnp.txt1
-rw-r--r--Documentation/devicetree/bindings/scheduler/sched_hmp.txt35
-rw-r--r--Documentation/devicetree/bindings/thermal/tsens.txt1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/scheduler/sched-hmp.txt25
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi80
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi54
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi50
-rw-r--r--arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi38
-rw-r--r--arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi159
-rw-r--r--arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi377
-rw-r--r--arch/arm/boot/dts/qcom/msm-pmfalcon.dtsi169
-rw-r--r--arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi39
-rw-r--r--arch/arm/boot/dts/qcom/msm8996.dtsi13
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi43
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-qrd.dtsi356
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi153
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi16
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi9
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mdss-pll.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi12
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi60
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi50
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi67
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi524
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi44
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-v2-camera.dtsi27
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi380
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-vidc.dtsi10
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt.dtsi198
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi20
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi322
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi16
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi23
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon.dtsi146
-rw-r--r--arch/arm/boot/dts/qcom/msmtriton-ion.dtsi52
-rw-r--r--arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi136
-rw-r--r--arch/arm/boot/dts/qcom/msmtriton.dtsi312
-rw-r--r--arch/arm/configs/msmcortex_defconfig3
-rw-r--r--arch/arm/configs/msmfalcon_defconfig15
-rw-r--r--arch/arm/kernel/irq.c11
-rw-r--r--arch/arm/kernel/topology.c34
-rw-r--r--arch/arm/mach-qcom/Kconfig5
-rw-r--r--arch/arm64/Kconfig.platforms8
-rw-r--r--arch/arm64/configs/msm-perf_defconfig5
-rw-r--r--arch/arm64/configs/msm_defconfig5
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig11
-rw-r--r--arch/arm64/configs/msmcortex_defconfig11
-rw-r--r--arch/arm64/configs/msmfalcon-perf_defconfig5
-rw-r--r--arch/arm64/configs/msmfalcon_defconfig6
-rw-r--r--arch/arm64/kernel/cpuinfo.c3
-rw-r--r--arch/arm64/kernel/process.c2
-rw-r--r--arch/arm64/mm/dma-mapping.c265
-rw-r--r--block/blk-mq-cpumap.c16
-rw-r--r--block/blk-mq.c106
-rw-r--r--block/blk-mq.h2
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/base/cpu.c58
-rw-r--r--drivers/base/regmap/regmap-swr.c14
-rw-r--r--drivers/char/adsprpc.c26
-rw-r--r--drivers/char/diag/diag_dci.c4
-rw-r--r--drivers/char/diag/diagfwd_cntl.c4
-rw-r--r--drivers/char/diag/diagfwd_glink.c23
-rw-r--r--drivers/char/diag/diagfwd_peripheral.c4
-rw-r--r--drivers/clk/msm/clock-gcc-cobalt.c2
-rw-r--r--drivers/clk/msm/clock-gpu-cobalt.c3
-rw-r--r--drivers/clk/msm/clock-mmss-cobalt.c22
-rw-r--r--drivers/clk/msm/clock-osm.c238
-rw-r--r--drivers/clk/msm/clock.h4
-rw-r--r--drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c83
-rw-r--r--drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h1
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c4
-rw-r--r--drivers/clk/msm/mdss/mdss-hdmi-pll-cobalt.c4
-rw-r--r--drivers/clk/qcom/Kconfig22
-rw-r--r--drivers/clk/qcom/Makefile4
-rw-r--r--drivers/clk/qcom/clk-rcg.h9
-rw-r--r--drivers/clk/qcom/clk-rcg2.c136
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c397
-rw-r--r--drivers/clk/qcom/gcc-msmfalcon.c2830
-rw-r--r--drivers/clk/qcom/gdsc-regulator.c745
-rw-r--r--drivers/clk/qcom/gpucc-msmfalcon.c482
-rw-r--r--drivers/clk/qcom/vdd-level-falcon.h140
-rw-r--r--drivers/cpuidle/cpuidle.c9
-rw-r--r--drivers/cpuidle/lpm-levels-of.c210
-rw-r--r--drivers/cpuidle/lpm-levels.c660
-rw-r--r--drivers/cpuidle/lpm-levels.h26
-rw-r--r--drivers/devfreq/bimc-bwmon.c2
-rw-r--r--drivers/gpu/msm/Makefile2
-rw-r--r--drivers/gpu/msm/a4xx_reg.h1
-rw-r--r--drivers/gpu/msm/a5xx_reg.h1
-rw-r--r--drivers/gpu/msm/adreno-gpulist.h22
-rw-r--r--drivers/gpu/msm/adreno.c178
-rw-r--r--drivers/gpu/msm/adreno.h75
-rw-r--r--drivers/gpu/msm/adreno_a4xx.c266
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c286
-rw-r--r--drivers/gpu/msm/adreno_a5xx_preempt.c2
-rw-r--r--drivers/gpu/msm/adreno_debugfs.c76
-rw-r--r--drivers/gpu/msm/adreno_dispatch.c1244
-rw-r--r--drivers/gpu/msm/adreno_dispatch.h38
-rw-r--r--drivers/gpu/msm/adreno_drawctxt.c73
-rw-r--r--drivers/gpu/msm/adreno_drawctxt.h27
-rw-r--r--drivers/gpu/msm/adreno_perfcounter.c27
-rw-r--r--drivers/gpu/msm/adreno_ringbuffer.c178
-rw-r--r--drivers/gpu/msm/adreno_ringbuffer.h6
-rw-r--r--drivers/gpu/msm/adreno_snapshot.c154
-rw-r--r--drivers/gpu/msm/adreno_trace.h64
-rw-r--r--drivers/gpu/msm/kgsl.c280
-rw-r--r--drivers/gpu/msm/kgsl.h34
-rw-r--r--drivers/gpu/msm/kgsl_cffdump.c4
-rw-r--r--drivers/gpu/msm/kgsl_cffdump.h6
-rw-r--r--drivers/gpu/msm/kgsl_cmdbatch.h168
-rw-r--r--drivers/gpu/msm/kgsl_compat.h8
-rw-r--r--drivers/gpu/msm/kgsl_device.h32
-rw-r--r--drivers/gpu/msm/kgsl_drawobj.c (renamed from drivers/gpu/msm/kgsl_cmdbatch.c)642
-rw-r--r--drivers/gpu/msm/kgsl_drawobj.h198
-rw-r--r--drivers/gpu/msm/kgsl_iommu.c13
-rw-r--r--drivers/gpu/msm/kgsl_mmu.c7
-rw-r--r--drivers/gpu/msm/kgsl_mmu.h2
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.c377
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.h2
-rw-r--r--drivers/gpu/msm/kgsl_pwrscale.c8
-rw-r--r--drivers/gpu/msm/kgsl_sharedmem.c12
-rw-r--r--drivers/gpu/msm/kgsl_snapshot.c77
-rw-r--r--drivers/gpu/msm/kgsl_snapshot.h30
-rw-r--r--drivers/gpu/msm/kgsl_trace.h44
-rw-r--r--drivers/hid/hid-apple.c3
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hwmon/qpnp-adc-common.c42
-rw-r--r--drivers/hwtracing/coresight/coresight-csr.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c4
-rw-r--r--drivers/iio/adc/qcom-rradc.c112
-rw-r--r--drivers/input/misc/Kconfig2
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ots_pat9125/Kconfig14
-rw-r--r--drivers/input/misc/ots_pat9125/Makefile7
-rw-r--r--drivers/input/misc/ots_pat9125/pat9125_linux_driver.c296
-rw-r--r--drivers/input/misc/ots_pat9125/pixart_ots.c77
-rw-r--r--drivers/input/misc/ots_pat9125/pixart_ots.h45
-rw-r--r--drivers/input/misc/ots_pat9125/pixart_platform.h17
-rw-r--r--drivers/input/touchscreen/gt9xx/goodix_tool.c417
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx.c194
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx.h80
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx_firmware.h6
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx_update.c907
-rw-r--r--drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c20
-rw-r--r--drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c21
-rw-r--r--drivers/iommu/arm-smmu.c39
-rw-r--r--drivers/iommu/dma-mapping-fast.c6
-rw-r--r--drivers/iommu/io-pgtable-arm.c13
-rw-r--r--drivers/iommu/iommu-debug.c22
-rw-r--r--drivers/iommu/msm_dma_iommu_mapping.c13
-rw-r--r--drivers/media/dvb-core/demux.h216
-rw-r--r--drivers/media/dvb-core/dmxdev.c3941
-rw-r--r--drivers/media/dvb-core/dmxdev.h137
-rw-r--r--drivers/media/dvb-core/dvb_demux.c2272
-rw-r--r--drivers/media/dvb-core/dvb_demux.h272
-rw-r--r--drivers/media/dvb-core/dvb_net.c6
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c71
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.h30
-rw-r--r--drivers/media/platform/msm/Kconfig2
-rw-r--r--drivers/media/platform/msm/Makefile2
-rw-r--r--drivers/media/platform/msm/broadcast/Kconfig14
-rw-r--r--drivers/media/platform/msm/broadcast/Makefile4
-rw-r--r--drivers/media/platform/msm/broadcast/tspp.c3094
-rw-r--r--drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c15
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c128
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.h2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp48.c3
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c94
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c28
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c31
-rw-r--r--drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c131
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/Makefile2
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c14
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c75
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h4
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c6
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c80
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c6
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/ir_cut/Makefile4
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/ir_cut/msm_ir_cut.c665
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/ir_cut/msm_ir_cut.h72
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/ir_led/Makefile4
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/ir_led/msm_ir_led.c462
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/ir_led/msm_ir_led.h71
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c1
-rw-r--r--drivers/media/platform/msm/dvb/Kconfig10
-rw-r--r--drivers/media/platform/msm/dvb/Makefile2
-rw-r--r--drivers/media/platform/msm/dvb/adapter/Makefile6
-rw-r--r--drivers/media/platform/msm/dvb/adapter/mpq_adapter.c211
-rw-r--r--drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c827
-rw-r--r--drivers/media/platform/msm/dvb/demux/Kconfig46
-rw-r--r--drivers/media/platform/msm/dvb/demux/Makefile14
-rw-r--r--drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c5195
-rw-r--r--drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h1027
-rw-r--r--drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c280
-rw-r--r--drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c1968
-rw-r--r--drivers/media/platform/msm/dvb/demux/mpq_sdmx.c1023
-rw-r--r--drivers/media/platform/msm/dvb/demux/mpq_sdmx.h368
-rw-r--r--drivers/media/platform/msm/dvb/include/mpq_adapter.h199
-rw-r--r--drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h41
-rw-r--r--drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h462
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.c114
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.h2
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c34
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c47
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h7
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c8
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_util.c7
-rw-r--r--drivers/media/platform/msm/vidc/msm_vdec.c72
-rw-r--r--drivers/media/platform/msm/vidc/msm_venc.c155
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc.c4
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c55
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_dcvs.c16
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_debug.c8
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_internal.h2
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_res_parse.c9
-rw-r--r--drivers/media/platform/msm/vidc/venus_boot.c13
-rw-r--r--drivers/media/platform/msm/vidc/venus_hfi.c18
-rw-r--r--drivers/mfd/wcd934x-regmap.c4
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_utils.c1
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_utils_aio.c9
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_utils_aio.h3
-rw-r--r--drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c5
-rw-r--r--drivers/net/usb/cdc_ncm.c20
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c237
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c53
-rw-r--r--drivers/net/wireless/ath/wil6210/ftm.c903
-rw-r--r--drivers/net/wireless/ath/wil6210/ftm.h512
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h14
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c92
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c60
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c43
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c9
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c9
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h29
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c44
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h932
-rw-r--r--drivers/net/wireless/cnss/cnss_common.c71
-rw-r--r--drivers/net/wireless/cnss/cnss_common.h1
-rw-r--r--drivers/net/wireless/cnss/cnss_pci.c67
-rw-r--r--drivers/pci/host/pci-msm.c55
-rw-r--r--drivers/platform/msm/gsi/gsi.c215
-rw-r--r--drivers/platform/msm/gsi/gsi.h10
-rw-r--r--drivers/platform/msm/gsi/gsi_dbg.c53
-rw-r--r--drivers/platform/msm/gsi/gsi_reg.h217
-rw-r--r--drivers/platform/msm/ipa/ipa_api.c12
-rw-r--r--drivers/platform/msm/ipa/ipa_clients/ipa_usb.c16
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa.c42
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_dp.c26
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_i.h4
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c26
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c155
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c213
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dp.c24
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_flt.c10
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h15
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c21
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_rt.c20
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c62
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c173
-rw-r--r--drivers/platform/msm/msm_11ad/msm_11ad.c12
-rw-r--r--drivers/platform/msm/qpnp-revid.c9
-rw-r--r--drivers/platform/msm/sps/bam.c6
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/power/qcom-charger/Makefile4
-rw-r--r--drivers/power/qcom-charger/battery_current_limit.c77
-rw-r--r--drivers/power/qcom-charger/fg-core.h98
-rw-r--r--drivers/power/qcom-charger/fg-reg.h1
-rw-r--r--drivers/power/qcom-charger/fg-util.c176
-rw-r--r--drivers/power/qcom-charger/qpnp-fg-gen3.c1776
-rw-r--r--drivers/power/qcom-charger/qpnp-smb2.c340
-rw-r--r--drivers/power/qcom-charger/smb-lib.c479
-rw-r--r--drivers/power/qcom-charger/smb-lib.h55
-rw-r--r--drivers/power/qcom-charger/smb-reg.h19
-rw-r--r--drivers/power/qcom-charger/smb138x-charger.c214
-rw-r--r--drivers/power/qcom-charger/storm-watch.c57
-rw-r--r--drivers/power/qcom-charger/storm-watch.h36
-rw-r--r--drivers/power/qcom/msm-core.c10
-rw-r--r--drivers/power/reset/msm-poweroff.c197
-rw-r--r--drivers/pwm/pwm-qpnp.c11
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/cpr3-mmss-regulator.c13
-rw-r--r--drivers/regulator/cpr3-regulator.c156
-rw-r--r--drivers/regulator/cpr3-regulator.h5
-rw-r--r--drivers/regulator/cpr3-util.c101
-rw-r--r--drivers/regulator/cprh-kbss-regulator.c194
-rw-r--r--drivers/scsi/ufs/ufs-debugfs.c4
-rw-r--r--drivers/scsi/ufs/ufs_test.c20
-rw-r--r--drivers/soc/qcom/Kconfig41
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/common_log.c2
-rw-r--r--drivers/soc/qcom/core_ctl_helper.c99
-rw-r--r--drivers/soc/qcom/glink.c4
-rw-r--r--drivers/soc/qcom/glink_ssr.c4
-rw-r--r--drivers/soc/qcom/icnss.c418
-rw-r--r--drivers/soc/qcom/jtag-fuse.c4
-rw-r--r--drivers/soc/qcom/pil-msa.c10
-rw-r--r--drivers/soc/qcom/qdsp6v2/Makefile3
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr.c202
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_v2.c20
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_v3.c18
-rw-r--r--drivers/soc/qcom/qdsp6v2/audio_notifier.c635
-rw-r--r--drivers/soc/qcom/qdsp6v2/audio_pdr.c148
-rw-r--r--drivers/soc/qcom/qdsp6v2/audio_ssr.c66
-rw-r--r--drivers/soc/qcom/qdsp6v2/msm_audio_ion.c5
-rw-r--r--drivers/soc/qcom/qsee_ipc_irq_bridge.c12
-rw-r--r--drivers/soc/qcom/rpm-smd-debug.c2
-rw-r--r--drivers/soc/qcom/rpm-smd.c8
-rw-r--r--drivers/soc/qcom/secure_buffer.c2
-rw-r--r--drivers/soc/qcom/service-notifier.c5
-rw-r--r--drivers/soc/qcom/smcinvoke.c24
-rw-r--r--drivers/soc/qcom/subsys-pil-tz.c18
-rw-r--r--drivers/soc/qcom/system_stats.c2
-rw-r--r--drivers/soc/qcom/watchdog_v2.c2
-rw-r--r--drivers/soc/qcom/wcd-dsp-glink.c227
-rwxr-xr-xdrivers/staging/android/ion/ion.c4
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c6
-rw-r--r--drivers/staging/android/ion/ion_cma_secure_heap.c12
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c13
-rw-r--r--drivers/staging/android/ion/msm/msm_ion.c11
-rw-r--r--drivers/staging/android/uapi/msm_ion.h32
-rw-r--r--drivers/thermal/lmh_lite.c15
-rw-r--r--drivers/thermal/msm-tsens.c109
-rw-r--r--drivers/thermal/msm_thermal.c18
-rw-r--r--drivers/thermal/qpnp-adc-tm.c2
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/dwc3/core.c9
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c58
-rw-r--r--drivers/usb/dwc3/gadget.c22
-rw-r--r--drivers/usb/gadget/Kconfig11
-rw-r--r--drivers/usb/gadget/function/Makefile2
-rw-r--r--drivers/usb/gadget/function/f_cdev.c23
-rw-r--r--drivers/usb/gadget/function/f_gsi.h15
-rw-r--r--drivers/usb/gadget/function/f_mtp.c3
-rw-r--r--drivers/usb/gadget/function/f_ncm.c82
-rw-r--r--drivers/usb/gadget/function/f_qc_rndis.c592
-rw-r--r--drivers/usb/gadget/function/rndis.c28
-rw-r--r--drivers/usb/gadget/function/rndis.h6
-rw-r--r--drivers/usb/gadget/function/u_data_ipa.c656
-rw-r--r--drivers/usb/gadget/function/u_data_ipa.h71
-rw-r--r--drivers/usb/host/xhci.c21
-rw-r--r--drivers/usb/pd/policy_engine.c228
-rw-r--r--drivers/video/fbdev/msm/mdss.h1
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.c158
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.h4
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_aux.c172
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.c106
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.h61
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi.c23
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi.h1
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_panel.c49
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_status.c10
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.c3
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_edid.c170
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_util.c39
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_util.h2
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.h11
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c11
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_hwio.h4
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c171
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c30
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_layer.c10
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_overlay.c107
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c14
-rw-r--r--drivers/video/fbdev/msm/mdss_panel.h13
-rw-r--r--drivers/video/fbdev/msm/mdss_smmu.c18
-rw-r--r--drivers/video/fbdev/msm/msm_ext_display.c18
-rw-r--r--fs/ecryptfs/kthread.c13
-rw-r--r--fs/ext4/inode.c7
-rw-r--r--fs/fuse/passthrough.c4
-rw-r--r--fs/proc/root.c7
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msmfalcon.h396
-rw-r--r--include/dt-bindings/clock/qcom,gpu-msmfalcon.h47
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h105
-rw-r--r--include/linux/cgroup_subsys.h4
-rw-r--r--include/linux/cpumask.h8
-rw-r--r--include/linux/device.h1
-rw-r--r--include/linux/diagchar.h18
-rw-r--r--include/linux/dma-mapping-fast.h2
-rw-r--r--include/linux/hrtimer.h6
-rw-r--r--include/linux/iommu.h5
-rw-r--r--include/linux/ipa.h1
-rw-r--r--include/linux/msm_dma_iommu_mapping.h2
-rw-r--r--include/linux/msm_ext_display.h1
-rw-r--r--include/linux/msm_gsi.h10
-rw-r--r--include/linux/percpu-rwsem.h84
-rw-r--r--include/linux/power_supply.h1
-rw-r--r--include/linux/qcom_tspp.h99
-rw-r--r--include/linux/qdsp6v2/apr.h4
-rw-r--r--include/linux/qdsp6v2/audio_notifier.h105
-rw-r--r--include/linux/qdsp6v2/audio_pdr.h101
-rw-r--r--include/linux/qdsp6v2/audio_ssr.h78
-rw-r--r--include/linux/qpnp/qpnp-revid.h1
-rw-r--r--include/linux/rcu_sync.h1
-rw-r--r--include/linux/sched.h43
-rw-r--r--include/linux/sched/sysctl.h17
-rw-r--r--include/linux/tick.h12
-rw-r--r--include/linux/timer.h3
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/media/msm_cam_sensor.h17
-rw-r--r--include/net/cnss.h2
-rw-r--r--include/soc/qcom/core_ctl.h35
-rw-r--r--include/soc/qcom/icnss.h3
-rw-r--r--include/soc/qcom/secure_buffer.h1
-rw-r--r--include/soc/qcom/smem.h2
-rw-r--r--include/sound/apr_audio-v2.h137
-rw-r--r--include/sound/q6afe-v2.h2
-rw-r--r--include/sound/q6asm-v2.h64
-rw-r--r--include/sound/wcd-dsp-mgr.h28
-rw-r--r--include/trace/events/power.h43
-rw-r--r--include/trace/events/sched.h78
-rw-r--r--include/trace/events/trace_msm_low_power.h108
-rw-r--r--include/uapi/linux/dvb/dmx.h725
-rw-r--r--include/uapi/linux/msm_vidc_dec.h6
-rw-r--r--include/uapi/media/msm_cam_sensor.h18
-rw-r--r--include/uapi/media/msm_camsensor_sdk.h31
-rw-r--r--include/uapi/media/msmb_camera.h4
-rw-r--r--include/uapi/sound/wcd-dsp-glink.h2
-rw-r--r--init/Kconfig53
-rw-r--r--kernel/cgroup.c6
-rw-r--r--kernel/cpu.c17
-rw-r--r--kernel/events/core.c3
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/irq/cpuhotplug.c14
-rw-r--r--kernel/locking/percpu-rwsem.c229
-rw-r--r--kernel/power/qos.c7
-rw-r--r--kernel/rcu/sync.c12
-rw-r--r--kernel/sched/Makefile2
-rw-r--r--kernel/sched/core.c356
-rw-r--r--kernel/sched/core_ctl.c1055
-rw-r--r--kernel/sched/core_ctl.h24
-rw-r--r--kernel/sched/fair.c186
-rw-r--r--kernel/sched/hmp.c113
-rw-r--r--kernel/sched/rt.c13
-rw-r--r--kernel/sched/sched.h13
-rw-r--r--kernel/sched/sched_avg.c19
-rw-r--r--kernel/sched/tune.c241
-rw-r--r--kernel/smp.c4
-rw-r--r--kernel/sysctl.c47
-rw-r--r--kernel/time/hrtimer.c104
-rw-r--r--kernel/time/timer.c68
-rw-r--r--kernel/trace/power-traces.c2
-rw-r--r--kernel/watchdog.c22
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/asn1_decoder.c16
-rw-r--r--mm/vmstat.c7
-rw-r--r--net/core/dev.c3
-rw-r--r--net/ipc_router/ipc_router_socket.c10
-rw-r--r--net/ipv4/netfilter/arp_tables.c41
-rw-r--r--net/ipv4/netfilter/ip_tables.c46
-rw-r--r--net/ipv4/tcp_input.c15
-rw-r--r--net/ipv6/netfilter/ip6_tables.c46
-rw-r--r--net/rmnet_data/rmnet_data_vnd.c2
-rw-r--r--net/wireless/db.txt2
-rw-r--r--security/keys/key.c2
-rw-r--r--sound/soc/codecs/Makefile8
-rw-r--r--sound/soc/codecs/msm_hdmi_codec_rx.c7
-rw-r--r--sound/soc/codecs/wcd-dsp-mgr.c420
-rw-r--r--sound/soc/codecs/wcd-mbhc-v2.c63
-rw-r--r--sound/soc/codecs/wcd-mbhc-v2.h12
-rw-r--r--sound/soc/codecs/wcd-spi.c118
-rw-r--r--sound/soc/codecs/wcd9330.c12
-rw-r--r--sound/soc/codecs/wcd9335.c103
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsd.c36
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c309
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h19
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-mbhc.c29
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-mbhc.h1
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-routing.h4
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x.c671
-rw-r--r--sound/soc/codecs/wcd9xxx-mbhc.c4
-rw-r--r--sound/soc/codecs/wcd9xxx-resmgr-v2.c2
-rw-r--r--sound/soc/codecs/wcd_cpe_core.c2
-rw-r--r--sound/soc/codecs/wsa881x.c5
-rw-r--r--sound/soc/msm/Kconfig13
-rw-r--r--sound/soc/msm/msm-cpe-lsm.c14
-rw-r--r--sound/soc/msm/msm-dai-fe.c68
-rw-r--r--sound/soc/msm/msmcobalt.c960
-rw-r--r--sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c2
-rw-r--r--sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c14
-rw-r--r--sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c95
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c26
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c62
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h4
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c320
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h14
-rw-r--r--sound/soc/msm/qdsp6v2/msm-qti-pp-config.c6
-rw-r--r--sound/soc/msm/qdsp6v2/msm-qti-pp-config.h37
-rw-r--r--sound/soc/msm/qdsp6v2/q6adm.c109
-rw-r--r--sound/soc/msm/qdsp6v2/q6afe.c29
-rw-r--r--sound/soc/msm/qdsp6v2/q6asm.c505
-rw-r--r--sound/soc/soc-dapm.c3
528 files changed, 56662 insertions, 9201 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/imem.txt b/Documentation/devicetree/bindings/arm/msm/imem.txt
index d1f8ce1e5ac8..a9d2a2456cfd 100644
--- a/Documentation/devicetree/bindings/arm/msm/imem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/imem.txt
@@ -46,6 +46,12 @@ Required properties:
-compatible: "qcom,msm-imem-restart_reason
-reg: start address and size of restart_reason region in imem
+Download Mode Type:
+-------------------
+Required properties:
+-compatible: "qcom,msm-imem-dload-type"
+-reg: start address and size of dload type region in imem
+
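+A minimal node sketch (the 0x1c offset and 0x4 size are placeholders; the
+actual values depend on the SoC's imem layout):
+
+	dload_type@1c {
+		compatible = "qcom,msm-imem-dload-type";
+		reg = <0x1c 0x4>;
+	};
+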
Download Mode:
--------------
Required properties:
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index 518cc6f85f95..c4d651e36d02 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -56,15 +56,17 @@ Properties:
Usage: required
Value type: <prop-encoded-array>
Definition: Array which defines the frequency in Hertz, frequency,
- PLL override data, and ACC level used by the OSM hardware
- for each supported DCVS setpoint of the Power cluster.
+ PLL override data, ACC level, and virtual corner used
+ by the OSM hardware for each supported DCVS setpoint
+ of the Power cluster.
- qcom,perfcl-speedbinX-v0
Usage: required
Value type: <prop-encoded-array>
Definition: Array which defines the frequency in Hertz, frequency,
- PLL override data, and ACC level used by the OSM hardware
- for each supported DCVS setpoint of the Performance cluster.
+                         PLL override data, ACC level, and virtual corner used
+ by the OSM hardware for each supported DCVS setpoint
+ of the Performance cluster.
- qcom,osm-no-tz
Usage: optional
@@ -317,55 +319,55 @@ Example:
interrupt-names = "pwrcl-irq", "perfcl-irq";
qcom,pwrcl-speedbin0-v0 =
- < 300000000 0x0004000f 0x031e001e 0x1>,
- < 345600000 0x05040012 0x04200020 0x1>,
- < 422400000 0x05040016 0x04200020 0x1>,
- < 499200000 0x0504001a 0x05200020 0x1>,
- < 576000000 0x0504001e 0x06200020 0x1>,
- < 633600000 0x04040021 0x07200020 0x1>,
- < 710400000 0x04040025 0x07200020 0x1>,
- < 806400000 0x0404002a 0x08220022 0x2>,
- < 883200000 0x0404002e 0x09250025 0x2>,
- < 960000000 0x04040032 0x0a280028 0x2>,
- < 1036800000 0x04040036 0x0b2b002b 0x3>,
- < 1113600000 0x0404003a 0x0c2e002e 0x3>,
- < 1190400000 0x0404003e 0x0c320032 0x3>,
- < 1248000000 0x04040041 0x0d340034 0x3>,
- < 1324800000 0x04040045 0x0e370037 0x3>,
- < 1401600000 0x04040049 0x0f3a003a 0x3>,
- < 1478400000 0x0404004d 0x103e003e 0x3>,
- < 1574400000 0x04040052 0x10420042 0x4>,
- < 1651200000 0x04040056 0x11450045 0x4>,
- < 1728000000 0x0404005a 0x12480048 0x4>,
- < 1804800000 0x0404005e 0x134b004b 0x4>,
- < 1881600000 0x04040062 0x144e004e 0x4>;
+ < 300000000 0x0004000f 0x031e001e 0x1 1 >,
+ < 345600000 0x05040012 0x04200020 0x1 2 >,
+ < 422400000 0x05040016 0x04200020 0x1 3 >,
+ < 499200000 0x0504001a 0x05200020 0x1 4 >,
+ < 576000000 0x0504001e 0x06200020 0x1 5 >,
+ < 633600000 0x04040021 0x07200020 0x1 6 >,
+ < 710400000 0x04040025 0x07200020 0x1 7 >,
+ < 806400000 0x0404002a 0x08220022 0x2 8 >,
+ < 883200000 0x0404002e 0x09250025 0x2 9 >,
+ < 960000000 0x04040032 0x0a280028 0x2 10 >,
+ < 1036800000 0x04040036 0x0b2b002b 0x3 11 >,
+ < 1113600000 0x0404003a 0x0c2e002e 0x3 12 >,
+ < 1190400000 0x0404003e 0x0c320032 0x3 13 >,
+ < 1248000000 0x04040041 0x0d340034 0x3 14 >,
+ < 1324800000 0x04040045 0x0e370037 0x3 15 >,
+ < 1401600000 0x04040049 0x0f3a003a 0x3 16 >,
+ < 1478400000 0x0404004d 0x103e003e 0x3 17 >,
+ < 1574400000 0x04040052 0x10420042 0x4 18 >,
+ < 1651200000 0x04040056 0x11450045 0x4 19 >,
+			<  1728000000 0x0404005a 0x12480048 0x4 20 >,
+ < 1804800000 0x0404005e 0x134b004b 0x4 21 >,
+ < 1881600000 0x04040062 0x144e004e 0x4 22 >;
qcom,perfcl-speedbin0-v0 =
- < 300000000 0x0004000f 0x03200020 0x1>,
- < 345600000 0x05040012 0x04200020 0x1>,
- < 422400000 0x05040016 0x04200020 0x1>,
- < 480000000 0x05040019 0x05200020 0x1>,
- < 556800000 0x0504001d 0x06200020 0x1>,
- < 633600000 0x04040021 0x07200020 0x1>,
- < 710400000 0x04040025 0x07200020 0x1>,
- < 787200000 0x04040029 0x08210021 0x1>,
- < 844800000 0x0404002c 0x09240024 0x2>,
- < 902400000 0x0404002f 0x09260026 0x2>,
- < 979200000 0x04040033 0x0a290029 0x2>,
- < 1056000000 0x04040037 0x0b2c002c 0x2>,
- < 1171200000 0x0404003d 0x0c300030 0x3>,
- < 1248000000 0x04040041 0x0d340034 0x3>,
- < 1324800000 0x04040045 0x0e370037 0x3>,
- < 1401600000 0x04040049 0x0f3b003b 0x3>,
- < 1478400000 0x0404004d 0x0f3e003e 0x3>,
- < 1536000000 0x04040050 0x10400040 0x3>,
- < 1632000000 0x04040055 0x11440044 0x4>,
- < 1708800000 0x04040059 0x12480048 0x4>,
- < 1785600000 0x0404005d 0x134a004a 0x4>,
- < 1862400000 0x04040061 0x134e004e 0x4>,
- < 1939200000 0x04040065 0x14510051 0x4>,
- < 2016000000 0x04040069 0x15540054 0x4>,
- < 2092800000 0x0404006d 0x16570057 0x4>;
+ < 300000000 0x0004000f 0x03200020 0x1 1 >,
+ < 345600000 0x05040012 0x04200020 0x1 2 >,
+ < 422400000 0x05040016 0x04200020 0x1 3 >,
+ < 480000000 0x05040019 0x05200020 0x1 4 >,
+ < 556800000 0x0504001d 0x06200020 0x1 5 >,
+ < 633600000 0x04040021 0x07200020 0x1 6 >,
+ < 710400000 0x04040025 0x07200020 0x1 7 >,
+ < 787200000 0x04040029 0x08210021 0x1 8 >,
+ < 844800000 0x0404002c 0x09240024 0x2 9 >,
+ < 902400000 0x0404002f 0x09260026 0x2 10 >,
+ < 979200000 0x04040033 0x0a290029 0x2 11 >,
+ < 1056000000 0x04040037 0x0b2c002c 0x2 12 >,
+ < 1171200000 0x0404003d 0x0c300030 0x3 13 >,
+ < 1248000000 0x04040041 0x0d340034 0x3 14 >,
+ < 1324800000 0x04040045 0x0e370037 0x3 15 >,
+ < 1401600000 0x04040049 0x0f3b003b 0x3 16 >,
+ < 1478400000 0x0404004d 0x0f3e003e 0x3 17 >,
+ < 1536000000 0x04040050 0x10400040 0x3 18 >,
+ < 1632000000 0x04040055 0x11440044 0x4 19 >,
+ < 1708800000 0x04040059 0x12480048 0x4 20 >,
+ < 1785600000 0x0404005d 0x134a004a 0x4 21 >,
+ < 1862400000 0x04040061 0x134e004e 0x4 22 >,
+ < 1939200000 0x04040065 0x14510051 0x4 23 >,
+ < 2016000000 0x04040069 0x15540054 0x4 24 >,
+ < 2092800000 0x0404006d 0x16570057 0x4 25 >;
qcom,osm-no-tz;
qcom,osm-pll-setup;
diff --git a/Documentation/devicetree/bindings/batterydata/batterydata.txt b/Documentation/devicetree/bindings/batterydata/batterydata.txt
new file mode 100644
index 000000000000..39f9375a6c48
--- /dev/null
+++ b/Documentation/devicetree/bindings/batterydata/batterydata.txt
@@ -0,0 +1,221 @@
+Battery Profile Data
+
+Battery Data is a collection of battery profile data made available to
+the QPNP Charger and BMS drivers via device tree.
+
+qcom,battery-data node required properties:
+- qcom,rpull-up-kohm : The vadc pullup resistor's resistance value in kOhms.
+- qcom,vref-batt-therm-uv : The vadc voltage used to make readings.
+ For Qualcomm Technologies, Inc. VADCs, this should be
+ 1800000uV.
+
+qcom,battery-data node optional properties:
+- qcom,batt-id-range-pct : The allowed variation between the upper and lower
+			bound within which a given battery ID resistance is
+			valid. This value is expressed as a percentage of the
+			kohm resistance provided by qcom,batt-id-kohm.
+
+qcom,battery-data can also include any number of child nodes. These child
+nodes will be treated as battery profile data nodes.
+
+Profile data node required properties:
+- qcom,fcc-mah : Full charge capacity (FCC) of the battery in milliamp-hours
+- qcom,default-rbatt-mohm : The nominal battery resistance value
+- qcom,rbatt-capacitive-mohm : The capacitive resistance of the battery.
+- qcom,flat-ocv-threshold-uv : The threshold under which the battery can be
+ considered to be in the flat portion of the discharge
+ curve.
+- qcom,max-voltage-uv : The maximum rated voltage of the battery
+- qcom,v-cutoff-uv : The cutoff voltage of the battery at which the device
+ should shutdown gracefully.
+- qcom,chg-term-ua : The termination charging current of the battery.
+- qcom,batt-id-kohm : The battery id resistance of the battery. It can be
+		specified as an array to support multiple IDs for one battery
+		module when the ID resistance of some battery modules spans
+		several ranges.
+- qcom,battery-type : A string indicating the type of battery.
+- qcom,fg-profile-data : An array of hexadecimal values used to configure more
+		complex fuel gauge peripherals which have a large number
+		of coefficients used in hardware state machines that
+		influence the final output of the state of charge read
+		by software.
+
+Profile data node optional properties:
+- qcom,chg-rslow-comp-c1 : A constant for rslow compensation in the fuel gauge.
+ This will be provided by the profiling tool for
+ additional fuel gauge accuracy during charging.
+- qcom,chg-rslow-comp-c2 : A constant for rslow compensation in the fuel gauge.
+ This will be provided by the profiling tool for
+ additional fuel gauge accuracy during charging.
+- qcom,chg-rslow-comp-thr : A constant for rslow compensation in the fuel gauge.
+ This will be provided by the profiling tool for
+ additional fuel gauge accuracy during charging.
+- qcom,chg-rs-to-rslow: A constant for rslow compensation in the fuel gauge.
+ This will be provided by the profiling tool for
+ additional fuel gauge accuracy during charging.
+- qcom,fastchg-current-ma: Specifies the maximum fastcharge current.
+- qcom,fg-cc-cv-threshold-mv: Voltage threshold in mV for transition from constant
+			current (CC) to constant voltage (CV). This value should
+ be 10 mV less than the float voltage.
+ This property should only be specified if
+ "qcom,autoadjust-vfloat" property is specified in the
+ charger driver to ensure a proper operation.
+- qcom,thermal-coefficients: Byte array of thermal coefficients for reading
+ battery thermistor. This should be exactly 6 bytes
+ in length.
+ Example: [01 02 03 04 05 06]
+
+Profile data node required subnodes:
+- qcom,fcc-temp-lut : A 1-dimensional lookup table node that encodes a
+			temperature to FCC lookup. The units for this lookup
+			table should be degrees Celsius to milliamp-hours.
+- qcom,pc-temp-ocv-lut : A 2-dimensional lookup table node that encodes
+			temperature and percent charge to open circuit voltage
+			lookup. The units for this lookup table should be
+			degrees Celsius and percent to millivolts.
+- qcom,rbatt-sf-lut : A 2-dimensional lookup table node that encodes
+			temperature and percent charge to battery internal
+			resistance lookup. The units for this lookup table
+			should be degrees Celsius and percent to milliohms.
+
+Profile data node optional subnodes:
+- qcom,ibat-acc-lut: A 2-dimensional lookup table that encodes temperature
+			and battery current to battery ACC (apparent charge
+			capacity). The units for this lookup table should be
+			temperature in degrees Celsius, ibat in milliamps
+			and ACC in milliamp-hours.
+
+Lookup table required properties:
+- qcom,lut-col-legend : An array that encodes the legend of the lookup table's
+ columns. The length of this array will determine the
+ lookup table's width.
+- qcom,lut-data : An array that encodes the lookup table's data. The size of
+			this array should be equal to the size of
+			qcom,lut-col-legend multiplied by 1 if it's a
+			1-dimensional table, or by the size of
+			qcom,lut-row-legend if it's a 2-dimensional table.
+			The data should be in a flattened row-major
+			representation (see the worked sketch after this list).
+
+Lookup table optional properties:
+- qcom,lut-row-legend : An array that encodes the legend of the lookup table's rows.
+ If this property exists, then it is assumed that the
+ lookup table is a 2-dimensional table.
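+
+As an illustration of the sizing rule above (all values below are made up):
+a 2-dimensional table with 3 columns and 2 rows needs 3 x 2 = 6 data
+entries, flattened row-major:
+
+qcom,example-lut {
+	qcom,lut-col-legend = <0 25 60>;
+	qcom,lut-row-legend = <100 50>;
+	/* first triple is the row for legend 100, second for 50 */
+	qcom,lut-data = <10 11 12>,
+			<20 21 22>;
+};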
+
+Example:
+
+In msm8974-mtp.dtsi:
+
+mtp_batterydata: qcom,battery-data {
+ qcom,rpull-up-kohm = <100>;
+ qcom,vref-batt-therm-uv = <1800000>;
+
+ /include/ "batterydata-palladium.dtsi"
+ /include/ "batterydata-mtp-3000mah.dtsi"
+};
+
+&pm8941_bms {
+ qcom,battery-data = <&mtp_batterydata>;
+};
+
+In batterydata-palladium.dtsi:
+
+qcom,palladium-batterydata {
+ qcom,fcc-mah = <1500>;
+ qcom,default-rbatt-mohm = <236>;
+ qcom,rbatt-capacitive-mohm = <50>;
+ qcom,flat-ocv-threshold-uv = <3800000>;
+ qcom,max-voltage-uv = <4200000>;
+ qcom,v-cutoff-uv = <3400000>;
+ qcom,chg-term-ua = <100000>;
+ qcom,batt-id-kohm = <75>;
+ qcom,battery-type = "palladium_1500mah";
+
+ qcom,fcc-temp-lut {
+ qcom,lut-col-legend = <(-20) 0 25 40 65>;
+ qcom,lut-data = <1492 1492 1493 1483 1502>;
+ };
+
+ qcom,pc-temp-ocv-lut {
+ qcom,lut-col-legend = <(-20) 0 25 40 65>;
+ qcom,lut-row-legend = <100 95 90 85 80 75 70>,
+ <65 60 55 50 45 40 35>,
+ <30 25 20 15 10 9 8>,
+ <7 6 5 4 3 2 1 0>;
+ qcom,lut-data = <4173 4167 4163 4156 4154>,
+ <4104 4107 4108 4102 4104>,
+ <4057 4072 4069 4061 4060>,
+ <3973 4009 4019 4016 4020>,
+ <3932 3959 3981 3982 3983>,
+ <3899 3928 3954 3950 3950>,
+ <3868 3895 3925 3921 3920>,
+ <3837 3866 3898 3894 3892>,
+ <3812 3841 3853 3856 3862>,
+ <3794 3818 3825 3823 3822>,
+ <3780 3799 3804 3804 3803>,
+ <3768 3787 3790 3788 3788>,
+ <3757 3779 3778 3775 3776>,
+ <3747 3772 3771 3766 3765>,
+ <3736 3763 3766 3760 3746>,
+ <3725 3749 3756 3747 3729>,
+ <3714 3718 3734 3724 3706>,
+ <3701 3703 3696 3689 3668>,
+ <3675 3695 3682 3675 3662>,
+ <3670 3691 3680 3673 3661>,
+ <3661 3686 3679 3672 3656>,
+ <3649 3680 3676 3669 3641>,
+ <3633 3669 3667 3655 3606>,
+ <3610 3647 3640 3620 3560>,
+ <3580 3607 3596 3572 3501>,
+ <3533 3548 3537 3512 3425>,
+ <3457 3468 3459 3429 3324>,
+ <3328 3348 3340 3297 3172>,
+ <3000 3000 3000 3000 3000>;
+ };
+
+ qcom,rbatt-sf-lut {
+ qcom,lut-col-legend = <(-20) 0 25 40 65>;
+ qcom,lut-row-legend = <100 95 90 85 80 75 70>,
+ <65 60 55 50 45 40 35>,
+ <30 25 20 15 10 9 8>,
+ <7 6 5 4 3 2 1 0>;
+ qcom,lut-data = <357 187 100 91 91>,
+ <400 208 105 94 94>,
+ <390 204 106 95 96>,
+ <391 201 108 98 98>,
+ <391 202 110 98 100>,
+ <390 200 110 99 102>,
+ <389 200 110 99 102>,
+ <393 202 101 93 100>,
+ <407 205 99 89 94>,
+ <428 208 100 91 96>,
+ <455 212 102 92 98>,
+ <495 220 104 93 101>,
+ <561 232 107 95 102>,
+ <634 245 112 98 98>,
+ <714 258 114 98 98>,
+ <791 266 114 97 100>,
+ <871 289 108 95 97>,
+ <973 340 124 108 105>,
+ <489 241 109 96 99>,
+ <511 246 110 96 99>,
+ <534 252 111 95 98>,
+ <579 263 112 96 96>,
+ <636 276 111 95 97>,
+ <730 294 109 96 99>,
+ <868 328 112 98 104>,
+ <1089 374 119 101 115>,
+ <1559 457 128 105 213>,
+ <12886 1026 637 422 3269>,
+ <170899 127211 98968 88907 77102>;
+ };
+
+ qcom,ibat-acc-lut {
+ qcom,lut-col-legend = <(-20) 0 25>;
+ qcom,lut-row-legend = <0 250 500 1000>;
+ qcom,lut-data = <1470 1470 1473>,
+ <1406 1406 1430>,
+ <1247 1247 1414>,
+ <764 764 1338>;
+ };
+};
+
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 72f82f444091..1330d2bdc18d 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -14,6 +14,7 @@ Required properties :
"qcom,gcc-msm8974pro"
"qcom,gcc-msm8974pro-ac"
"qcom,gcc-msm8996"
+ "qcom,gcc-msmfalcon"
- reg : shall contain base register location and length
- #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
new file mode 100644
index 000000000000..9f8ea0d6ef8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
@@ -0,0 +1,23 @@
+Qualcomm Technologies, Inc Graphics Clock & Reset Controller Binding
+--------------------------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+
+ "qcom,gpucc-msmfalcon"
+
+- reg : shall contain base register location and length
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Optional properties :
+- #power-domain-cells : shall contain 1
+
+Example:
+	clock-controller@5065000 {
+		compatible = "qcom,gpucc-msmfalcon";
+		reg = <0x5065000 0x10000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
index 87d3714b956a..f825a44e5911 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
@@ -12,6 +12,8 @@ Required properties :
"qcom,rpmcc-msm8916", "qcom,rpmcc"
"qcom,rpmcc-apq8064", "qcom,rpmcc"
+ "qcom,rpmcc-msm8996", "qcom,rpmcc"
+ "qcom,rpmcc-msmfalcon", "qcom,rpmcc"
- #clock-cells : shall contain 1
@@ -35,3 +37,10 @@ Example:
};
};
};
+
+	The example below applies to both MSM8996 and MSMFalcon.
+
+ rpmcc: clock-controller {
+ compatible = "qcom,rpmcc-msm8996", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 90abf0305319..68b8f09238e0 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -249,6 +249,35 @@ Optional properties:
60 = 60 frames per second (default)
- qcom,mdss-dsi-panel-clockrate: A 64 bit value specifies the panel clock speed in Hz.
0 = default value.
+- qcom,mdss-mdp-kickoff-threshold: This property can be used to define a region
+					(in terms of scanlines) where the hardware
+					is allowed to trigger a data transfer from
+					MDP to DSI.
+					If this property is used, the region must be
+					defined by setting two values, the low and
+					the high thresholds:
+						<low_threshold high_threshold>
+					where the following condition must be met:
+						low_threshold < high_threshold
+					These values will be used by the driver in
+					such a way that if the driver receives a
+					request to kick off a transfer (MDP to DSI),
+					the transfer will be triggered only if the
+					following condition is satisfied:
+						low_threshold < scanline < high_threshold
+					If the condition is not met, then the driver
+					will delay the transfer by the time defined
+					in the property "qcom,mdss-mdp-kickoff-delay".
+					So in order to use this property, the delay
+					property must be defined as well and be
+					greater than 0.
+- qcom,mdss-mdp-kickoff-delay: This property defines the delay in microseconds
+					that the driver will wait before triggering
+					an MDP transfer if the thresholds defined by
+					"qcom,mdss-mdp-kickoff-threshold" are not met.
+					So in order to use this property, the
+					threshold property must be defined as well.
+					Note that this delay cannot be zero and
+					should not be greater than the fps window,
+					i.e. for 60 fps the value should not exceed
+					16666 us.
- qcom,mdss-mdp-transfer-time-us: Specifies the dsi transfer time for command mode
panels in microseconds. Driver uses this number to adjust
the clock rate according to the expected transfer time.
@@ -568,6 +597,8 @@ Example:
qcom,mdss-dsi-dma-trigger = <0>;
qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-panel-clockrate = <424000000>;
+ qcom,mdss-mdp-kickoff-threshold = <11 2430>;
+ qcom,mdss-mdp-kickoff-delay = <1000>;
qcom,mdss-mdp-transfer-time-us = <12500>;
qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
22 27 1e 03 04 00];
diff --git a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
index de88a6eba7a5..b399145ea8a2 100644
--- a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
@@ -36,8 +36,6 @@ Optional properties:
for secure buffer allocation
- qcom,secure_align_mask: A mask for determining how secure buffers need to
be aligned
-- qcom,coherent-htw: A boolean specifying if coherent hardware table walks should
- be enabled.
- List of sub nodes, one for each of the translation context banks supported.
The driver uses the names of these nodes to determine how they are used,
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index fffb8cc39d0f..ca58f0da07ef 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -139,6 +139,10 @@ Optional Properties:
baseAddr - base address of the gpu channels in the qdss stm memory region
size - size of the gpu stm region
+- qcom,tsens-name:
+	Specify the name of the GPU temperature sensor. This name will be
+	used to get the temperature from the thermal driver API.
+
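+	Example (a sketch; the sensor name is board-specific and
+	"tsens_tz_sensor12" is only a placeholder):
+
+		qcom,tsens-name = "tsens_tz_sensor12";
+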
GPU Quirks:
- qcom,gpu-quirk-two-pass-use-wfi:
Signal the GPU to set the TWOPASSUSEWFI bit in
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
index 721a4f72563e..1ab49edfe30c 100644
--- a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
@@ -41,6 +41,10 @@ The channel list supported by the RRADC driver is available in the enum rradc_ch
located at drivers/iio/adc/qcom-rradc.c. Clients can use this index from the enum
as the channel number while requesting ADC reads.
+Optional property:
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+ PMIC fabrication ID for applying the appropriate temperature
+ compensation parameters.
Example:
/* RRADC node */
diff --git a/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt b/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt
new file mode 100644
index 000000000000..02f21835f870
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt
@@ -0,0 +1,10 @@
+PixArt pat9125 rotating switch
+
+PixArt's PAT9125 controller is connected to the host processor via I2C.
+It detects rotation when the user rotates the switch and generates an
+interrupt to the host processor. The host processor reads the direction
+and number of steps over I2C and passes the data to the rest of the system.
+
+Required properties:
+
+ - compatible : should be "pixart,pat9125".
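+
+Example (a minimal sketch; the parent I2C bus label, the 0x75 slave address
+and the interrupt GPIO below are board-specific placeholders):
+
+&i2c_5 {
+	pat9125@75 {
+		compatible = "pixart,pat9125";
+		reg = <0x75>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <96 0x2008>;
+	};
+};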
diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt
index fb1ca0261f9c..991c6d4ec255 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt
@@ -168,6 +168,10 @@ Optional properties:
property should contain phandle of respective eeprom nodes
- qcom,ois-src : if optical image stabilization is supported by this sensor,
this property should contain phandle of respective ois node
+- qcom,ir-led-src : if ir led is supported by this sensor, this property
+ should contain phandle of respective ir-led node
+- qcom,ir-cut-src : if ir cut is supported by this sensor, this property
+ should contain phandle of respective ir-cut node
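+  Example (assuming ir-led and ir-cut nodes labeled ir_led0 and ir_cut0 are
+  defined elsewhere in the device tree):
+	qcom,ir-led-src = <&ir_led0>;
+	qcom,ir-cut-src = <&ir_cut0>;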
* Qualcomm Technologies, Inc. MSM ACTUATOR
diff --git a/Documentation/devicetree/bindings/media/video/msm-ir-cut.txt b/Documentation/devicetree/bindings/media/video/msm-ir-cut.txt
new file mode 100644
index 000000000000..96cb28d692c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-ir-cut.txt
@@ -0,0 +1,26 @@
+* QTI MSM IR CUT
+
+Required properties:
+- cell-index : ir cut filter hardware core index
+- compatible :
+ - "qcom,ir-cut"
+
+Optional properties:
+- gpios : should specify the gpios to be used for the ir cut filter.
+- qcom,gpio-req-tbl-num : should contain index to gpios specific to ir cut filter
+- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
+- qcom,gpio-req-tbl-label : should contain name of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
+- label : should contain a unique ir cut filter name
+
+Example:
+
+qcom,ir-cut@60 {
+ cell-index = <0>;
+ compatible = "qcom,ir-cut";
+ label = "led-ir-label";
+ gpios = <&tlmm 60 0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "LED_IR_EN";
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-ir-led.txt b/Documentation/devicetree/bindings/media/video/msm-ir-led.txt
new file mode 100644
index 000000000000..7e66fa0cef5b
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-ir-led.txt
@@ -0,0 +1,26 @@
+* QTI MSM IR LED
+
+Required properties:
+- cell-index : ir led hardware core index
+- compatible :
+ - "qcom,ir-led"
+
+Optional properties:
+- gpios : should specify the gpios to be used for the ir led.
+- qcom,gpio-req-tbl-num : should contain index to gpios specific to ir led
+- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
+- qcom,gpio-req-tbl-label : should contain name of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
+- label : should contain a unique ir led name
+
+Example:
+
+qcom,ir-led {
+ cell-index = <0>;
+ compatible = "qcom,ir-led";
+ label = "led-ir-label";
+ gpios = <&tlmm 60 0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "LED_IR_EN";
+};
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt
index 8885f4ae80ad..21b6d99424c0 100644
--- a/Documentation/devicetree/bindings/pci/msm_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt
@@ -98,6 +98,10 @@ Optional Properties:
from the endpoint.
- linux,pci-domain: For details of pci-domains properties, please refer to:
"Documentation/devicetree/bindings/pci/pci.txt"
+ - qcom,perst-delay-us-min: The minimum allowed time (unit: us) to sleep after
+ asserting or de-asserting PERST GPIO.
+ - qcom,perst-delay-us-max: The maximum allowed time (unit: us) to sleep after
+ asserting or de-asserting PERST GPIO.
- qcom,tlp-rd-size: The max TLP read size (Calculation: 128 times 2 to the
tlp-rd-size power).
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index f3166d33f9e4..80f2d8f43e35 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -36,7 +36,6 @@ Optional:
compatible "qcom,ipa-smmu-wlan-cb"
- ipa_smmu_uc: uc SMMU device
compatible "qcom,ipa-smmu-uc-cb"
-- qcom,smmu-disable-htw: boolean value to turn off SMMU page table caching
- qcom,use-a2-service: determine if A2 service will be used
- qcom,use-ipa-tethering-bridge: determine if tethering bridge will be used
- qcom,use-ipa-bamdma-a2-bridge: determine if a2/ipa hw bridge will be used
@@ -80,6 +79,8 @@ memory allocation over a PCIe bridge
- qcom,rx-polling-sleep-ms: Receive Polling Timeout in millisecond,
default is 1 millisecond.
- qcom,ipa-polling-iteration: IPA Polling Iteration Count,default is 40.
+- qcom,ipa-tz-unlock-reg: Register start addresses and ranges which
+ need to be unlocked by TZ.
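+	Example (a sketch; the <address size> pairs are placeholders):
+	qcom,ipa-tz-unlock-reg = <0x10000000 0x1000>,
+				 <0x10010000 0x2000>;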
IPA pipe sub nodes (A2 static pipes configurations):
diff --git a/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt b/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt
index 93312df2a43b..babc4523a29a 100644
--- a/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt
+++ b/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt
@@ -6,6 +6,10 @@ Required properties:
- compatible : should be "qcom,qpnp-revid"
- reg : offset and length of the PMIC peripheral register map.
+Optional property:
+- qcom,fab-id-valid: Use this property when support for reading the Fab
+		identification from the REV ID peripheral is available.
+
Example:
qcom,revid@100 {
compatible = "qcom,qpnp-revid";
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
index bd236df6c056..caabcd347a72 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
@@ -78,14 +78,17 @@ First Level Node - FG Gen3 device
Definition: Battery current (in mA) at which the fuel gauge will try to
scale towards 100%. When the charge current goes above this
the SOC should be at 100%. If this property is not
- specified, then the default value used will be 125mA.
+		specified, then the default value used will be -125mA.
+		Because charging current is reported as a negative value,
+		this threshold must be specified as a negative number.
- qcom,fg-delta-soc-thr
Usage: optional
Value type: <u32>
Definition: Percentage of monotonic SOC increase upon which the delta
SOC interrupt will be triggered. If this property is not
- specified, then the default value will be 1.
+ specified, then the default value will be 1. Possible
+ values are in the range of 0 to 12.
- qcom,fg-recharge-soc-thr
Usage: optional
@@ -138,6 +141,117 @@ First Level Node - FG Gen3 device
asleep and the battery is discharging. This option requires
qcom,fg-esr-timer-awake to be defined.
+- qcom,cycle-counter-en
+ Usage: optional
+ Value type: <bool>
+ Definition: Enables the cycle counter feature.
+
+- qcom,fg-force-load-profile
+ Usage: optional
+ Value type: <bool>
+	Definition: If set, the battery profile will be force-loaded if the
+		profile loaded earlier by the bootloader doesn't match
+		the profile available in the device tree.
+
+- qcom,cl-start-capacity
+ Usage: optional
+ Value type: <u32>
+ Definition: Battery SOC threshold to start the capacity learning.
+ If this is not specified, then the default value used
+ will be 15.
+
+- qcom,cl-min-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Lower limit of battery temperature to start the capacity
+ learning. If this is not specified, then the default value
+ used will be 150. Unit is in decidegC.
+		used will be 150 (15C). Unit is in decidegC.
+- qcom,cl-max-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Upper limit of battery temperature to start the capacity
+ learning. If this is not specified, then the default value
+ used will be 450 (45C). Unit is in decidegC.
+
+- qcom,cl-max-increment
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum capacity increment allowed per capacity learning
+ cycle. If this is not specified, then the default value
+ used will be 5 (0.5%). Unit is in decipercentage.
+
+- qcom,cl-max-decrement
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum capacity decrement allowed per capacity learning
+ cycle. If this is not specified, then the default value
+ used will be 100 (10%). Unit is in decipercentage.
+
+- qcom,cl-min-limit
+ Usage: optional
+ Value type: <u32>
+ Definition: Minimum limit that the capacity cannot go below in a
+ capacity learning cycle. If this is not specified, then
+ the default value is 0. Unit is in decipercentage.
+
+- qcom,cl-max-limit
+ Usage: optional
+ Value type: <u32>
+	Definition: Upper bound above which the capacity cannot rise in a
+		    capacity learning cycle. If this is not specified, then
+		    the default value is 0. Unit is in decipercentage.
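+
+		    As an illustrative sketch, a learning window that simply
+		    spells out the documented defaults:
+		    qcom,cl-start-capacity = <15>;
+		    qcom,cl-min-temp = <150>;
+		    qcom,cl-max-temp = <450>;
+		    qcom,cl-max-increment = <5>;
+		    qcom,cl-max-decrement = <100>;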
+
+- qcom,fg-jeita-hyst-temp
+ Usage: optional
+ Value type: <u32>
+	Definition: Hysteresis applied to the JEITA temperature comparison.
+		    Possible values are:
+		    0 - No hysteresis
+		    1, 2, 3 - Hysteresis in degrees Celsius.
+
+- qcom,fg-batt-temp-delta
+ Usage: optional
+ Value type: <u32>
+ Definition: Battery temperature delta interrupt threshold. Possible
+ values are: 2, 4, 6 and 10. Unit is in Kelvin.
+
+- qcom,hold-soc-while-full
+	Usage:      optional
+	Value type: <bool>
+	Definition: A boolean property that, when defined, holds SOC at 100%
+		    when the battery is full.
+
+- qcom,ki-coeff-soc-dischg
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array of monotonic SOC threshold values below which the ki
+		    coefficients for medium and high discharge current are
+		    changed during discharge. The thresholds should be defined
+		    in ascending order and in the range of 0-100. The array is
+		    limited to 3 entries.
+
+- qcom,ki-coeff-med-dischg
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array of ki coefficient values for medium discharge current
+		    during discharge. These values are applied when the
+		    monotonic SOC goes below the corresponding threshold
+		    specified under qcom,ki-coeff-soc-dischg, which must also
+		    be specified for this property to be fully functional.
+		    The array is limited to 3 entries. Values are unitless;
+		    the allowed range is 0 to 62200 in micro units.
+
+- qcom,ki-coeff-hi-dischg
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array of ki coefficient values for high discharge current
+		    during discharge. These values are applied when the
+		    monotonic SOC goes below the corresponding threshold
+		    specified under qcom,ki-coeff-soc-dischg, which must also
+		    be specified for this property to be fully functional.
+		    The array is limited to 3 entries. Values are unitless;
+		    the allowed range is 0 to 62200 in micro units.
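+
+	As a worked reading of the example below (values illustrative; the
+	index-wise pairing is our assumption, not stated by the binding):
+	a monotonic SOC below 30 selects coefficients 800 (medium) and
+	1200 (high), one between 30 and 60 selects 1000 and 1500, and one
+	between 60 and 90 selects 1400 and 2100.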
+
==========================================================
Second Level Nodes - Peripherals managed by FG Gen3 driver
==========================================================
@@ -168,6 +282,9 @@ pmicobalt_fg: qpnp,fg {
qcom,pmic-revid = <&pmicobalt_revid>;
io-channels = <&pmicobalt_rradc 3>;
io-channel-names = "rradc_batt_id";
+ qcom,ki-coeff-soc-dischg = <30 60 90>;
+ qcom,ki-coeff-med-dischg = <800 1000 1400>;
+ qcom,ki-coeff-hi-dischg = <1200 1500 2100>;
status = "okay";
qcom,fg-batt-soc@4000 {
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
index 510a824fda79..12ac75a8608c 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
@@ -21,11 +21,19 @@ Charger specific properties:
Value type: <string>
Definition: "qcom,qpnp-smb2".
-- qcom,suspend-input
+- qcom,pmic-revid
+ Usage: required
+ Value type: phandle
+ Definition: Should specify the phandle of PMI's revid module. This is used to
+ identify the PMI subtype.
+
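+	       For example, on PMI "cobalt" boards this is wired up as
+	       (phandle name per the pmicobalt DT in this series):
+	       qcom,pmic-revid = <&pmicobalt_revid>;
+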
+- qcom,batteryless-platform
Usage: optional
Value type: <empty>
- Definition: Boolean flag which indicates that the charger should not draw
- current from any of its input sources (USB, DC).
+  Definition: Boolean flag which indicates that the platform does not have
+	       a battery, and therefore charging should be disabled. In
+	       addition, battery properties will be faked so that the device
+	       assumes normal operation.
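+
+	       A battery-less bring-up platform would simply add the flag
+	       (illustrative):
+	       qcom,batteryless-platform;
+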
- qcom,fcc-max-ua
Usage: optional
@@ -45,6 +53,12 @@ Charger specific properties:
Definition: Specifies the USB input current limit in micro-amps.
If the value is not present, 1.5Amps is used as default.
+- qcom,usb-ocl-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the OTG output current limit in micro-amps.
+              If the value is not present, 1.5Amps is used as default.
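+              For example, the 1.5 A default written out explicitly in
+              micro-amps (illustrative):
+              qcom,usb-ocl-ua = <1500000>;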
+
- qcom,dc-icl-ua
Usage: optional
Value type: <u32>
diff --git a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
index c784a01d6411..8cb513b5605f 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
@@ -15,6 +15,7 @@ Required device bindings:
- reg-names: Name for the above register.
"qpnp-lpg-channel-base" = physical base address of the
controller's LPG channel register.
+- qcom,lpg-lut-size: Size of the LPG LUT (look-up table), e.g. <0x7e>.
- qcom,channel-id: channel Id for the PWM.
- qcom,supported-sizes: Supported PWM sizes.
Following three pwm sizes lists are supported by PWM/LPG controllers.
diff --git a/Documentation/devicetree/bindings/scheduler/sched_hmp.txt b/Documentation/devicetree/bindings/scheduler/sched_hmp.txt
new file mode 100644
index 000000000000..ba1d4db9e407
--- /dev/null
+++ b/Documentation/devicetree/bindings/scheduler/sched_hmp.txt
@@ -0,0 +1,35 @@
+* HMP scheduler
+
+This file describes the bindings for an optional HMP scheduler
+node (/sched-hmp).
+
+Required properties:
+
+Optional properties:
+
+- boost-policy: The HMP scheduler has two types of task placement boost
+policies.
+
+(1) The boost-on-big policy makes use of all big CPUs up to their full
+capacity before using the little CPUs. This improves performance on true
+big.LITTLE systems, where the big CPUs have higher efficiency than the
+little CPUs.
+
+(2) The boost-on-all policy places tasks on the CPU with the highest
+spare capacity. This policy is optimal for SMP-like systems.
+
+The scheduler defaults to the boost-on-big policy on systems that have
+CPUs of different efficiencies. However, CPUs of the same microarchitecture
+can have slightly different efficiencies due to other factors such as cache
+size, and selecting the boost-on-big policy based on that relative
+difference is not optimal on such systems. The boost-policy device tree
+property specifies the required boost type explicitly and overrides the
+scheduler's default selection.
+
+The possible values for this property are "boost-on-big" and "boost-on-all".
+
+Example:
+
+sched-hmp {
+	boost-policy = "boost-on-all";
+};
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
index 684bea131405..7189edbf8c5c 100644
--- a/Documentation/devicetree/bindings/thermal/tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -32,6 +32,7 @@ Required properties:
should be "qcom,msmcobalt-tsens" for cobalt TSENS driver.
should be "qcom,msmhamster-tsens" for hamster TSENS driver.
should be "qcom,msmfalcon-tsens" for falcon TSENS driver.
+ should be "qcom,msmtriton-tsens" for triton TSENS driver.
The compatible property is used to identify the respective fusemap to use
for the corresponding SoC.
- reg : offset and length of the TSENS registers with associated property in reg-names
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 1d7e54f68ee4..91412a10bf65 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -175,6 +175,7 @@ parade Parade Technologies Inc.
pericom Pericom Technology Inc.
phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd
+pixart PixArt Imaging Inc
plathome Plat'Home Co., Ltd.
plda PLDA
pixcir PIXCIR MICROELECTRONICS Co., Ltd
diff --git a/Documentation/scheduler/sched-hmp.txt b/Documentation/scheduler/sched-hmp.txt
index 22449aec5558..298064bc44d7 100644
--- a/Documentation/scheduler/sched-hmp.txt
+++ b/Documentation/scheduler/sched-hmp.txt
@@ -43,6 +43,7 @@ CONTENTS
8.8 sched_get_busy
8.9 sched_freq_alert
8.10 sched_set_boost
+9. Device Tree bindings
===============
1. INTRODUCTION
@@ -1220,6 +1221,23 @@ This tunable is a percentage. Configure the minimum demand of big sync waker
task. Scheduler places small wakee tasks woken up by big sync waker on the
waker's cluster.
+*** 7.19 sched_prefer_sync_wakee_to_waker
+
+Appears at: /proc/sys/kernel/sched_prefer_sync_wakee_to_waker
+
+Default value: 0
+
+The default sync wakee policy prefers an idle CPU in the waker's cluster
+over the waker's CPU when the waker is running only one task. By selecting
+an idle CPU, it eliminates the chance of the waker migrating to a different
+CPU after the wakee preempts it. This policy is also not susceptible to
+incorrect "sync" usage, i.e. when the waker does not go to sleep after
+waking up the wakee.
+
+However, the LPM exit latency associated with an idle CPU can outweigh the
+above benefits on some targets. When this knob is turned on, the waker CPU
+is selected if it has only one runnable task.
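+
+For example, to enable the knob at runtime:
+
+  echo 1 > /proc/sys/kernel/sched_prefer_sync_wakee_to_waker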
+
=========================
8. HMP SCHEDULER TRACE POINTS
=========================
@@ -1430,3 +1448,10 @@ Logged when boost settings are being changed
<task>-0 [004] d.h4 12700.711489: sched_set_boost: ref_count=1
- ref_count: A non-zero value indicates boost is in effect
+
+========================
+9. Device Tree bindings
+========================
+
+The device tree bindings for the HMP scheduler are defined in
+Documentation/devicetree/bindings/scheduler/sched_hmp.txt
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi
new file mode 100644
index 000000000000..6c17bca64a86
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi
@@ -0,0 +1,80 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_dual_jdi_a407_cmd: qcom,mdss_dsi_jdi_a407_wqhd_cmd {
+ qcom,mdss-dsi-panel-name = "JDI a407 wqhd cmd mode dsi panel";
+ qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <720>;
+ qcom,mdss-dsi-panel-height = <2560>;
+ qcom,mdss-dsi-h-front-porch = <16>;
+ qcom,mdss-dsi-h-back-porch = <40>;
+ qcom,mdss-dsi-h-pulse-width = <4>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <20>;
+ qcom,mdss-dsi-v-front-porch = <7>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
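+		/*
+		 * Command entry layout, as we read the mdss-dsi-panel
+		 * binding (assumed, not verified for this panel):
+		 * <dtype> <last> <vc> <ack> <wait_ms> <dlen:2 bytes> <payload...>
+		 * e.g. "05 01 00 00 78 00 02 11 00" sends DCS exit_sleep_mode
+		 * (0x11) and then waits 0x78 = 120 ms.
+		 */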
+ qcom,mdss-dsi-on-command = [
+ 15 01 00 00 00 00 02 35 00
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
+ qcom,mdss-dsi-off-command = [
+ 05 01 00 00 32 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-lane-map = "lane_map_0123";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+
+ qcom,mdss-dsi-te-using-te-pin;
+ qcom,mdss-dsi-te-pin-select = <1>;
+ qcom,mdss-dsi-te-v-sync-rd-ptr-irq-line = <0x2c>;
+ qcom,mdss-dsi-te-dcs-command = <1>;
+
+ qcom,mdss-dsi-lp11-init;
+ qcom,adjust-timer-wakeup-ms = <1>;
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 10>, <1 20>;
+
+ qcom,config-select = <&dsi_dual_jdi_a407_cmd_config0>;
+
+ dsi_dual_jdi_a407_cmd_config0: config0 {
+ qcom,split-mode = "dualctl-split";
+ };
+
+ dsi_dual_jdi_a407_cmd_config1: config1 {
+ qcom,split-mode = "pingpong-split";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
index 95a8e80ccdbd..9ad9e4adce00 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
@@ -43,7 +43,7 @@
qcom,mdss-dsi-t-clk-pre = <0x24>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-te-pin-select = <1>;
qcom,mdss-dsi-wr-mem-start = <0x2c>;
qcom,mdss-dsi-wr-mem-continue = <0x3c>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
index fd11be721dbb..6b549a4af6eb 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
@@ -76,7 +76,7 @@
qcom,mdss-dsi-t-clk-pre = <0x24>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,compression-mode = "dsc";
qcom,config-select = <&dsi_nt35597_dsc_video_config0>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
index b6f19b78ea70..1e42d0846acf 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -59,33 +59,33 @@
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
qcom,ulps-enabled;
- qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 ba 03
- 15 01 00 00 10 00 02 e5 01
- 15 01 00 00 10 00 02 35 00
- 15 01 00 00 10 00 02 bb 10
- 15 01 00 00 10 00 02 b0 03
- 15 01 00 00 10 00 02 ff e0
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 6b 3d
- 15 01 00 00 10 00 02 6c 3d
- 15 01 00 00 10 00 02 6d 3d
- 15 01 00 00 10 00 02 6e 3d
- 15 01 00 00 10 00 02 6f 3d
- 15 01 00 00 10 00 02 35 02
- 15 01 00 00 10 00 02 36 72
- 15 01 00 00 10 00 02 37 10
- 15 01 00 00 10 00 02 08 c0
- 15 01 00 00 10 00 02 ff 24
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 c6 06
- 15 01 00 00 10 00 02 ff 10
- 05 01 00 00 a0 00 02 11 00
- 05 01 00 00 a0 00 02 29 00];
+ qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 ba 03
+ 15 01 00 00 00 00 02 e5 01
+ 15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 bb 10
+ 15 01 00 00 00 00 02 b0 03
+ 15 01 00 00 00 00 02 ff e0
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 6b 3d
+ 15 01 00 00 00 00 02 6c 3d
+ 15 01 00 00 00 00 02 6d 3d
+ 15 01 00 00 00 00 02 6e 3d
+ 15 01 00 00 00 00 02 6f 3d
+ 15 01 00 00 00 00 02 35 02
+ 15 01 00 00 00 00 02 36 72
+ 15 01 00 00 00 00 02 37 10
+ 15 01 00 00 00 00 02 08 c0
+ 15 01 00 00 00 00 02 ff 24
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 c6 06
+ 15 01 00 00 00 00 02 ff 10
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00
+ 05 01 00 00 3c 00 02 10 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
index 367384a8c3e5..82413bfbca89 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
@@ -29,30 +29,30 @@
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 ba 03
- 15 01 00 00 10 00 02 e5 01
- 15 01 00 00 10 00 02 35 00
- 15 01 00 00 10 00 02 bb 03
- 15 01 00 00 10 00 02 b0 03
- 39 01 00 00 10 00 06 3b 03 08 08 64 9a
- 15 01 00 00 10 00 02 ff e0
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 6b 3d
- 15 01 00 00 10 00 02 6c 3d
- 15 01 00 00 10 00 02 6d 3d
- 15 01 00 00 10 00 02 6e 3d
- 15 01 00 00 10 00 02 6f 3d
- 15 01 00 00 10 00 02 35 02
- 15 01 00 00 10 00 02 36 72
- 15 01 00 00 10 00 02 37 10
- 15 01 00 00 10 00 02 08 c0
- 15 01 00 00 10 00 02 ff 10
- 05 01 00 00 a0 00 02 11 00
- 05 01 00 00 a0 00 02 29 00];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 ba 03
+ 15 01 00 00 00 00 02 e5 01
+ 15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 bb 03
+ 15 01 00 00 00 00 02 b0 03
+ 39 01 00 00 00 00 06 3b 03 08 08 64 9a
+ 15 01 00 00 00 00 02 ff e0
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 6b 3d
+ 15 01 00 00 00 00 02 6c 3d
+ 15 01 00 00 00 00 02 6d 3d
+ 15 01 00 00 00 00 02 6e 3d
+ 15 01 00 00 00 00 02 6f 3d
+ 15 01 00 00 00 00 02 35 02
+ 15 01 00 00 00 00 02 36 72
+ 15 01 00 00 00 00 02 37 10
+ 15 01 00 00 00 00 02 08 c0
+ 15 01 00 00 00 00 02 ff 10
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00
+ 05 01 00 00 3c 00 02 10 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
@@ -69,7 +69,7 @@
qcom,mdss-dsi-t-clk-pre = <0x2d>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-min-refresh-rate = <55>;
qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
index 90df1d0c1ac0..69067f5f1cc7 100644
--- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
@@ -13,6 +13,7 @@
qcom,ascent_3450mah {
/* #Ascent_860_82209_0000_3450mAh_averaged_MasterSlave_Jul20th2016*/
qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
qcom,nom-batt-capacity-mah = <3450>;
qcom,batt-id-kohm = <60>;
qcom,battery-beta = <3435>;
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
index 2c1edde56d6a..c3f23b75fa9c 100644
--- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
@@ -13,6 +13,7 @@
qcom,itech_3000mah {
/* #Itech_B00826LF_3000mAh_ver1660_averaged_MasterSlave_Jul20th2016*/
qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
qcom,nom-batt-capacity-mah = <3000>;
qcom,batt-id-kohm = <100>;
qcom,battery-beta = <3450>;
diff --git a/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi
index 6360b54b1013..ba27e3912ee6 100644
--- a/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -370,4 +370,42 @@
compatible = "qcom,adsp-loader";
qcom,adsp-state = <0>;
};
+
+ qcom,msm-dai-tdm-tert-rx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37152>;
+ qcom,msm-cpudai-tdm-group-num-ports = <1>;
+ qcom,msm-cpudai-tdm-group-port-id = <36896>;
+ qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+ pinctrl-names = "default", "sleep";
+ dai_tert_tdm_rx_0: qcom,msm-dai-q6-tdm-tert-rx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36896>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <1>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
+ qcom,msm-dai-tdm-tert-tx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37153>;
+ qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36897>;
+ qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+ pinctrl-names = "default", "sleep";
+ dai_tert_tdm_tx_0: qcom,msm-dai-q6-tdm-tert-tx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36897>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <1>;
+ qcom,msm-cpudai-tdm-data-delay = <1>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi b/arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi
new file mode 100644
index 000000000000..6550ddcad86c
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ /* GCC GDSCs */
+ gdsc_usb30: qcom,gdsc@10f004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_usb30";
+ reg = <0x10f004 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_ufs: qcom,gdsc@175004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_ufs";
+ reg = <0x175004 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_hlos1_vote_lpass_adsp: qcom,gdsc@17d034 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_hlos1_vote_lpass_adsp";
+ reg = <0x17d034 0x4>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ gdsc_hlos1_vote_turing_adsp: qcom,gdsc@17d04c {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_hlos1_vote_turing_adsp";
+ reg = <0x17d04c 0x4>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ gdsc_hlos2_vote_turing_adsp: qcom,gdsc@17e04c {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_hlos2_vote_turing_adsp";
+ reg = <0x17e04c 0x4>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ /* MMSS GDSCs */
+ bimc_smmu_hw_ctrl: syscon@c8ce024 {
+ compatible = "syscon";
+ reg = <0xc8ce024 0x4>;
+ };
+
+ gdsc_bimc_smmu: qcom,gdsc@c8ce020 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_bimc_smmu";
+ reg = <0xc8ce020 0x4>;
+ hw-ctrl-addr = <&bimc_smmu_hw_ctrl>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ gdsc_venus: qcom,gdsc@c8c1024 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus";
+ reg = <0xc8c1024 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_venus_core0: qcom,gdsc@c8c1040 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus_core0";
+ reg = <0xc8c1040 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_camss_top: qcom,gdsc@c8c34a0 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_camss_top";
+ reg = <0xc8c34a0 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_vfe0: qcom,gdsc@c8c3664 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_vfe0";
+ reg = <0xc8c3664 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_vfe1: qcom,gdsc@c8c3674 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_vfe1";
+ reg = <0xc8c3674 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_cpp: qcom,gdsc@c8c36d4 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_cpp";
+ reg = <0xc8c36d4 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_mdss: qcom,gdsc@c8c2304 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_mdss";
+ reg = <0xc8c2304 0x4>;
+ status = "disabled";
+ };
+
+ /* GPU GDSCs */
+ gpu_cx_hw_ctrl: syscon@5066008 {
+ compatible = "syscon";
+ reg = <0x5066008 0x4>;
+ };
+
+ gdsc_gpu_cx: qcom,gdsc@5066004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_gpu_cx";
+ reg = <0x5066004 0x4>;
+ hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <2000>;
+ status = "disabled";
+ };
+
+ /* GPU GX GDSCs */
+ gpu_gx_domain_addr: syscon@5065130 {
+ compatible = "syscon";
+ reg = <0x5065130 0x4>;
+ };
+
+ gpu_gx_sw_reset: syscon@5066090 {
+ compatible = "syscon";
+ reg = <0x5066090 0x4>;
+ };
+
+ gdsc_gpu_gx: qcom,gdsc@5066094 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_gpu_gx";
+ reg = <0x5066094 0x4>;
+ domain-addr = <&gpu_gx_domain_addr>;
+ sw-reset = <&gpu_gx_sw_reset>;
+ qcom,retain-periph;
+ qcom,reset-aon-logic;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi b/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi
new file mode 100644
index 000000000000..399892f52b6f
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi
@@ -0,0 +1,377 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/msm/power-on.h>
+
+&spmi_bus {
+ qcom,pm2falcon@2 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x2 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ pm2falcon_revid: qcom,revid@100 {
+ compatible = "qcom,qpnp-revid";
+ reg = <0x100 0x100>;
+ };
+
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ qcom,secondary-pon-reset;
+ qcom,hard-reset-poweroff-type =
+ <PON_POWER_OFF_SHUTDOWN>;
+ };
+
+ pm2falcon_gpios: gpios {
+ compatible = "qcom,qpnp-pin";
+ gpio-controller;
+ #gpio-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ label = "pm2falcon-gpio";
+
+ gpio@c000 {
+ reg = <0xc000 0x100>;
+ qcom,pin-num = <1>;
+ status = "disabled";
+ };
+
+ gpio@c100 {
+ reg = <0xc100 0x100>;
+ qcom,pin-num = <2>;
+ status = "disabled";
+ };
+
+ gpio@c200 {
+ reg = <0xc200 0x100>;
+ qcom,pin-num = <3>;
+ status = "disabled";
+ };
+
+ gpio@c300 {
+ reg = <0xc300 0x100>;
+ qcom,pin-num = <4>;
+ status = "disabled";
+ };
+
+ gpio@c400 {
+ reg = <0xc400 0x100>;
+ qcom,pin-num = <5>;
+ status = "disabled";
+ };
+
+ gpio@c500 {
+ reg = <0xc500 0x100>;
+ qcom,pin-num = <6>;
+ status = "disabled";
+ };
+
+ gpio@c600 {
+ reg = <0xc600 0x100>;
+ qcom,pin-num = <7>;
+ status = "disabled";
+ };
+
+ gpio@c700 {
+ reg = <0xc700 0x100>;
+ qcom,pin-num = <8>;
+ status = "disabled";
+ };
+
+ gpio@c800 {
+ reg = <0xc800 0x100>;
+ qcom,pin-num = <9>;
+ status = "disabled";
+ };
+
+ gpio@c900 {
+ reg = <0xc900 0x100>;
+ qcom,pin-num = <10>;
+ status = "disabled";
+ };
+
+ gpio@ca00 {
+ reg = <0xca00 0x100>;
+ qcom,pin-num = <11>;
+ status = "disabled";
+ };
+
+ gpio@cb00 {
+ reg = <0xcb00 0x100>;
+ qcom,pin-num = <12>;
+ status = "disabled";
+ };
+	};
+ };
+
+ qcom,pm2falcon@3 {
+		compatible = "qcom,spmi-pmic";
+ reg = <0x3 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ pm2falcon_pwm_1: pwm@b100 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb100 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base",
+ "qpnp-lpg-lut-base";
+ qcom,channel-id = <1>;
+ qcom,supported-sizes = <6>, <9>;
+ qcom,ramp-index = <0>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pm2falcon_pwm_2: pwm@b200 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb200 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base",
+ "qpnp-lpg-lut-base";
+ qcom,channel-id = <2>;
+ qcom,supported-sizes = <6>, <9>;
+ qcom,ramp-index = <1>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pm2falcon_pwm_3: pwm@b300 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb300 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base",
+ "qpnp-lpg-lut-base";
+ qcom,channel-id = <3>;
+ qcom,supported-sizes = <6>, <9>;
+ qcom,ramp-index = <2>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pm2falcon_pwm_4: pwm@b400 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb400 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base",
+ "qpnp-lpg-lut-base";
+ qcom,channel-id = <4>;
+ qcom,supported-sizes = <6>, <9>;
+ qcom,ramp-index = <3>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ qcom,leds@d000 {
+ compatible = "qcom,leds-qpnp";
+ reg = <0xd000 0x100>;
+ label = "rgb";
+ status = "disabled";
+
+ red_led: qcom,rgb_0 {
+ label = "rgb";
+ qcom,id = <3>;
+ qcom,mode = "pwm";
+ pwms = <&pm2falcon_pwm_3 0 0>;
+ qcom,pwm-us = <1000>;
+ qcom,max-current = <12>;
+ qcom,default-state = "off";
+ linux,name = "red";
+ linux,default-trigger =
+ "battery-charging";
+ };
+
+ green_led: qcom,rgb_1 {
+ label = "rgb";
+ qcom,id = <4>;
+ qcom,mode = "pwm";
+ pwms = <&pm2falcon_pwm_2 0 0>;
+ qcom,pwm-us = <1000>;
+ qcom,max-current = <12>;
+ qcom,default-state = "off";
+ linux,name = "green";
+ linux,default-trigger = "battery-full";
+ };
+
+ blue_led: qcom,rgb_2 {
+ label = "rgb";
+ qcom,id = <5>;
+ qcom,mode = "pwm";
+ pwms = <&pm2falcon_pwm_1 0 0>;
+ qcom,pwm-us = <1000>;
+ qcom,max-current = <12>;
+ qcom,default-state = "off";
+ linux,name = "blue";
+ linux,default-trigger = "boot-indication";
+ };
+ };
+
+ pm2falcon_wled: qcom,leds@d800 {
+ compatible = "qcom,qpnp-wled";
+ reg = <0xd800 0x100>,
+ <0xd900 0x100>,
+ <0xdc00 0x100>,
+ <0xde00 0x100>;
+ reg-names = "qpnp-wled-ctrl-base",
+ "qpnp-wled-sink-base",
+ "qpnp-wled-ibb-base",
+ "qpnp-wled-lab-base";
+ interrupts = <0x3 0xd8 0x2>;
+ interrupt-names = "sc-irq";
+ linux,name = "wled";
+ linux,default-trigger = "bkl-trigger";
+ qcom,fdbk-output = "auto";
+ qcom,vref-mv = <350>;
+ qcom,switch-freq-khz = <800>;
+ qcom,ovp-mv = <29500>;
+ qcom,ilim-ma = <980>;
+ qcom,boost-duty-ns = <26>;
+ qcom,mod-freq-khz = <9600>;
+ qcom,dim-mode = "hybrid";
+ qcom,hyb-thres = <625>;
+ qcom,sync-dly-us = <800>;
+ qcom,fs-curr-ua = <25000>;
+ qcom,cons-sync-write-delay-us = <1000>;
+ qcom,en-phase-stag;
+ qcom,led-strings-list = [00 01 02];
+ qcom,en-ext-pfet-sc-pro;
+ status = "ok";
+ };
+
+ flash_led: qcom,leds@d300 {
+ compatible = "qcom,qpnp-flash-led-v2";
+ reg = <0xd300 0x100>;
+ label = "flash";
+ interrupts = <0x3 0xd3 0x0 IRQ_TYPE_EDGE_RISING>,
+ <0x3 0xd3 0x3 IRQ_TYPE_EDGE_RISING>,
+ <0x3 0xd3 0x4 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "led-fault-irq",
+ "all-ramp-down-done-irq",
+ "all-ramp-up-done-irq";
+ qcom,hdrm-auto-mode;
+ qcom,short-circuit-det;
+ qcom,open-circuit-det;
+ qcom,vph-droop-det;
+ qcom,thermal-derate-en;
+ qcom,thermal-derate-current = <200 500 1000>;
+ qcom,isc-delay = <192>;
+ status = "disabled";
+
+ pm2falcon_flash0: qcom,flash_0 {
+ label = "flash";
+ qcom,led-name = "led:flash_0";
+ qcom,max-current = <1500>;
+ qcom,default-led-trigger = "flash0_trigger";
+ qcom,id = <0>;
+ qcom,current-ma = <1000>;
+ qcom,duration-ms = <1280>;
+ qcom,ires-ua = <12500>;
+ qcom,hdrm-voltage-mv = <325>;
+ qcom,hdrm-vol-hi-lo-win-mv = <100>;
+ };
+
+ pm2falcon_flash1: qcom,flash_1 {
+ label = "flash";
+ qcom,led-name = "led:flash_1";
+ qcom,max-current = <1500>;
+ qcom,default-led-trigger = "flash1_trigger";
+ qcom,id = <1>;
+ qcom,current-ma = <1000>;
+ qcom,duration-ms = <1280>;
+ qcom,ires-ua = <12500>;
+ qcom,hdrm-voltage-mv = <325>;
+ qcom,hdrm-vol-hi-lo-win-mv = <100>;
+ };
+
+ pm2falcon_flash2: qcom,flash_2 {
+ label = "flash";
+ qcom,led-name = "led:flash_2";
+ qcom,max-current = <750>;
+ qcom,default-led-trigger = "flash2_trigger";
+ qcom,id = <2>;
+ qcom,current-ma = <500>;
+ qcom,duration-ms = <1280>;
+ qcom,ires-ua = <12500>;
+ qcom,hdrm-voltage-mv = <325>;
+ qcom,hdrm-vol-hi-lo-win-mv = <100>;
+ pinctrl-names = "led_enable","led_disable";
+ pinctrl-0 = <&led_enable>;
+ pinctrl-1 = <&led_disable>;
+ };
+
+ pm2falcon_torch0: qcom,torch_0 {
+ label = "torch";
+ qcom,led-name = "led:torch_0";
+ qcom,max-current = <500>;
+ qcom,default-led-trigger = "torch0_trigger";
+ qcom,id = <0>;
+ qcom,current-ma = <300>;
+ qcom,ires-ua = <12500>;
+ qcom,hdrm-voltage-mv = <325>;
+ qcom,hdrm-vol-hi-lo-win-mv = <100>;
+ };
+
+ pm2falcon_torch1: qcom,torch_1 {
+ label = "torch";
+ qcom,led-name = "led:torch_1";
+ qcom,max-current = <500>;
+ qcom,default-led-trigger = "torch1_trigger";
+ qcom,id = <1>;
+ qcom,current-ma = <300>;
+ qcom,ires-ua = <12500>;
+ qcom,hdrm-voltage-mv = <325>;
+ qcom,hdrm-vol-hi-lo-win-mv = <100>;
+ };
+
+ pm2falcon_torch2: qcom,torch_2 {
+ label = "torch";
+ qcom,led-name = "led:torch_2";
+ qcom,max-current = <500>;
+ qcom,default-led-trigger = "torch2_trigger";
+ qcom,id = <2>;
+ qcom,current-ma = <300>;
+ qcom,ires-ua = <12500>;
+ qcom,hdrm-voltage-mv = <325>;
+ qcom,hdrm-vol-hi-lo-win-mv = <100>;
+ pinctrl-names = "led_enable","led_disable";
+ pinctrl-0 = <&led_enable>;
+ pinctrl-1 = <&led_disable>;
+ };
+
+ pm2falcon_switch0: qcom,led_switch_0 {
+ label = "switch";
+ qcom,led-name = "led:switch_0";
+ qcom,led-mask = <3>;
+ qcom,default-led-trigger = "switch0_trigger";
+ reg0 {
+ regulator-name = "pmfalcon_bob";
+ max-voltage-uv = <3600000>;
+ };
+ };
+
+ pm2falcon_switch1: qcom,led_switch_1 {
+ label = "switch";
+ qcom,led-name = "led:switch_1";
+ qcom,led-mask = <4>;
+ qcom,default-led-trigger = "switch1_trigger";
+ reg0 {
+ regulator-name = "pmfalcon_bob";
+ max-voltage-uv = <3600000>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm-pmfalcon.dtsi b/arch/arm/boot/dts/qcom/msm-pmfalcon.dtsi
new file mode 100644
index 000000000000..dec37881249c
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm-pmfalcon.dtsi
@@ -0,0 +1,169 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&spmi_bus {
+ qcom,pmfalcon@0 {
+		compatible = "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ pmfalcon_revid: qcom,revid@100 {
+ compatible = "qcom,qpnp-revid";
+ reg = <0x100 0x100>;
+ };
+
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ interrupts = <0x0 0x8 0x0 IRQ_TYPE_NONE>,
+ <0x0 0x8 0x1 IRQ_TYPE_NONE>,
+ <0x0 0x8 0x4 IRQ_TYPE_NONE>,
+ <0x0 0x8 0x5 IRQ_TYPE_NONE>;
+ interrupt-names = "kpdpwr", "resin",
+ "resin-bark", "kpdpwr-resin-bark";
+ qcom,pon-dbc-delay = <15625>;
+ qcom,system-reset;
+ qcom,store-hard-reset-reason;
+
+ qcom,pon_1 {
+ qcom,pon-type = <0>;
+ qcom,pull-up = <1>;
+ linux,code = <116>;
+ };
+
+ qcom,pon_2 {
+ qcom,pon-type = <1>;
+ qcom,pull-up = <1>;
+ linux,code = <114>;
+ };
+ };
+
+ pmfalcon_gpios: gpios {
+ compatible = "qcom,qpnp-pin";
+ gpio-controller;
+ #gpio-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ label = "pmfalcon-gpio";
+
+ gpio@c000 {
+ reg = <0xc000 0x100>;
+ qcom,pin-num = <1>;
+ status = "disabled";
+ };
+
+ gpio@c100 {
+ reg = <0xc100 0x100>;
+ qcom,pin-num = <2>;
+ status = "disabled";
+ };
+
+ gpio@c200 {
+ reg = <0xc200 0x100>;
+ qcom,pin-num = <3>;
+ status = "disabled";
+ };
+
+ gpio@c300 {
+ reg = <0xc300 0x100>;
+ qcom,pin-num = <4>;
+ status = "disabled";
+ };
+
+ gpio@c400 {
+ reg = <0xc400 0x100>;
+ qcom,pin-num = <5>;
+ status = "disabled";
+ };
+
+ gpio@c500 {
+ reg = <0xc500 0x100>;
+ qcom,pin-num = <6>;
+ status = "disabled";
+ };
+
+ gpio@c600 {
+ reg = <0xc600 0x100>;
+ qcom,pin-num = <7>;
+ status = "disabled";
+ };
+
+ gpio@c700 {
+ reg = <0xc700 0x100>;
+ qcom,pin-num = <8>;
+ status = "disabled";
+ };
+
+ gpio@c800 {
+ reg = <0xc800 0x100>;
+ qcom,pin-num = <9>;
+ status = "disabled";
+ };
+
+ gpio@c900 {
+ reg = <0xc900 0x100>;
+ qcom,pin-num = <10>;
+ status = "disabled";
+ };
+
+ gpio@ca00 {
+ reg = <0xca00 0x100>;
+ qcom,pin-num = <11>;
+ status = "disabled";
+ };
+
+ gpio@cb00 {
+ reg = <0xcb00 0x100>;
+ qcom,pin-num = <12>;
+ status = "disabled";
+ };
+
+ gpio@cc00 {
+ reg = <0xcc00 0x100>;
+ qcom,pin-num = <13>;
+ status = "disabled";
+ };
+ };
+
+ pmfalcon_coincell: qcom,coincell@2800 {
+ compatible = "qcom,qpnp-coincell";
+ reg = <0x2800 0x100>;
+ };
+
+ pmfalcon_rtc: qcom,pmfalcon_rtc {
+ compatible = "qcom,qpnp-rtc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ qcom,qpnp-rtc-write = <0>;
+ qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+ qcom,pmfalcon_rtc_rw@6000 {
+ reg = <0x6000 0x100>;
+ };
+ qcom,pmfalcon_rtc_alarm@6100 {
+ reg = <0x6100 0x100>;
+ interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
+ };
+ };
+ };
+
+ qcom,pmfalcon@1 {
+		compatible = "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
index 3f1ffd497f2c..28d230dfb6bf 100644
--- a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
@@ -23,6 +23,7 @@
pmicobalt_revid: qcom,revid@100 {
compatible = "qcom,qpnp-revid";
reg = <0x100 0x100>;
+ qcom,fab-id-valid;
};
qcom,power-on@800 {
@@ -310,6 +311,7 @@
#address-cells = <1>;
#size-cells = <0>;
#io-channel-cells = <1>;
+ qcom,pmic-revid = <&pmicobalt_revid>;
};
pmicobalt_fg: qpnp,fg {
@@ -321,6 +323,7 @@
io-channel-names = "rradc_batt_id";
qcom,fg-esr-timer-awake = <64>;
qcom,fg-esr-timer-asleep = <256>;
+ qcom,cycle-counter-en;
status = "okay";
qcom,fg-batt-soc@4000 {
@@ -329,23 +332,45 @@
interrupts = <0x2 0x40 0x0 IRQ_TYPE_EDGE_BOTH>,
<0x2 0x40 0x1 IRQ_TYPE_EDGE_BOTH>,
<0x2 0x40 0x2 IRQ_TYPE_EDGE_BOTH>,
- <0x2 0x40 0x3 IRQ_TYPE_EDGE_BOTH>;
+ <0x2 0x40 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x40 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x40 0x5 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x40 0x6 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x40 0x7 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "soc-update",
"soc-ready",
"bsoc-delta",
- "msoc-delta";
+ "msoc-delta",
+ "msoc-low",
+ "msoc-empty",
+ "msoc-high",
+ "msoc-full";
};
qcom,fg-batt-info@4100 {
status = "okay";
reg = <0x4100 0x100>;
- interrupts = <0x2 0x41 0x3 IRQ_TYPE_EDGE_BOTH>;
- interrupt-names = "batt-missing";
+ interrupts = <0x2 0x41 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x41 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x41 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x41 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x41 0x6 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "vbatt-pred-delta",
+ "vbatt-low",
+ "esr-delta",
+ "batt-missing",
+ "batt-temp-delta";
};
qcom,fg-memif@4400 {
status = "okay";
reg = <0x4400 0x100>;
+ interrupts = <0x2 0x44 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x44 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x44 0x2 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "ima-rdy",
+ "mem-xcp",
+ "dma-grant";
};
};
};
@@ -362,6 +387,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <1>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <0>;
@@ -375,6 +401,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <2>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <1>;
@@ -388,6 +415,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <3>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <2>;
@@ -400,6 +428,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <4>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <3>;
@@ -412,6 +441,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <5>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <4>;
@@ -424,6 +454,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <6>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <5>;
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index dc1bbcd13c36..7e88f524367f 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -1355,7 +1355,7 @@
gdsc-vdd-supply = <&gdsc_pcie_0>;
vreg-1.8-supply = <&pm8994_l12>;
vreg-0.9-supply = <&pm8994_l28>;
- vreg-cx-supply = <&pm8994_s1_corner_ao>;
+ vreg-cx-supply = <&pm8994_s1_corner>;
qcom,vreg-0.9-voltage-level = <925000 925000 24000>;
qcom,vreg-cx-voltage-level = <7 4 0>;
@@ -1510,7 +1510,7 @@
gdsc-vdd-supply = <&gdsc_pcie_1>;
vreg-1.8-supply = <&pm8994_l12>;
vreg-0.9-supply = <&pm8994_l28>;
- vreg-cx-supply = <&pm8994_s1_corner_ao>;
+ vreg-cx-supply = <&pm8994_s1_corner>;
qcom,vreg-0.9-voltage-level = <925000 925000 24000>;
qcom,vreg-cx-voltage-level = <7 5 0>;
@@ -1663,10 +1663,10 @@
gdsc-vdd-supply = <&gdsc_pcie_2>;
vreg-1.8-supply = <&pm8994_l12>;
vreg-0.9-supply = <&pm8994_l28>;
- vreg-cx-supply = <&pm8994_s1_corner_ao>;
+ vreg-cx-supply = <&pm8994_s1_corner>;
qcom,vreg-0.9-voltage-level = <925000 925000 24000>;
- qcom,vreg-cx-voltage-level = <7 4 0>;
+ qcom,vreg-cx-voltage-level = <7 5 0>;
qcom,l1-supported;
qcom,l1ss-supported;
@@ -1815,6 +1815,11 @@
reg = <0x10 8>;
};
+ dload_type@18 {
+ compatible = "qcom,msm-imem-dload-type";
+ reg = <0x18 4>;
+ };
+
restart_reason@65c {
compatible = "qcom,msm-imem-restart_reason";
reg = <0x65c 4>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
index 1db3c2482935..a1d80075abe0 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
@@ -35,6 +35,19 @@
compatible = "qcom,msmcobalt-asoc-snd-tasha";
qcom,model = "msmcobalt-tasha-snd-card";
qcom,ext-disp-audio-rx;
+ qcom,wcn-btfm;
+ qcom,mi2s-audio-intf;
+ qcom,auxpcm-audio-intf;
+ qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
+
+ reg = <0x1711a000 0x4>,
+ <0x1711b000 0x4>,
+ <0x1711c000 0x4>,
+ <0x1711d000 0x4>;
+ reg-names = "lpaif_pri_mode_muxsel",
+ "lpaif_sec_mode_muxsel",
+ "lpaif_tert_mode_muxsel",
+ "lpaif_quat_mode_muxsel";
qcom,audio-routing =
"AIF4 VI", "MCLK",
@@ -93,7 +106,8 @@
<&incall_record_tx>, <&incall_music_rx>,
<&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>,
<&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>,
- <&usb_audio_rx>, <&usb_audio_tx>;
+ <&usb_audio_rx>, <&usb_audio_tx>,
+ <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>;
asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608",
"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
@@ -111,7 +125,8 @@
"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
"msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398",
"msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401",
- "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673";
+ "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+ "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897";
asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
asoc-codec-names = "msm-stub-codec.1",
"msm-ext-disp-audio-codec-rx";
@@ -126,9 +141,23 @@
compatible = "qcom,msmcobalt-asoc-snd-tavil";
qcom,model = "msmcobalt-tavil-snd-card";
qcom,ext-disp-audio-rx;
+ qcom,wcn-btfm;
+ qcom,mi2s-audio-intf;
+ qcom,auxpcm-audio-intf;
+ qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
+
+ reg = <0x1711a000 0x4>,
+ <0x1711b000 0x4>,
+ <0x1711c000 0x4>,
+ <0x1711d000 0x4>;
+ reg-names = "lpaif_pri_mode_muxsel",
+ "lpaif_sec_mode_muxsel",
+ "lpaif_tert_mode_muxsel",
+ "lpaif_quat_mode_muxsel";
qcom,audio-routing =
"RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
"AMIC2", "MIC BIAS2",
"MIC BIAS2", "Headset Mic",
"AMIC3", "MIC BIAS2",
@@ -180,8 +209,10 @@
<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
<&afe_proxy_tx>, <&incall_record_rx>,
<&incall_record_tx>, <&incall_music_rx>,
- <&incall_music_2_rx>, <&sb_5_rx>,
- <&usb_audio_rx>, <&usb_audio_tx>, <&sb_6_rx>;
+ <&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>,
+ <&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>,
+ <&usb_audio_rx>, <&usb_audio_tx>,
+ <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>;
asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608",
"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
@@ -197,8 +228,10 @@
"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
+ "msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398",
+ "msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401",
"msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
- "msm-dai-q6-dev.16396";
+ "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897";
asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
asoc-codec-names = "msm-stub-codec.1",
"msm-ext-disp-audio-codec-rx";
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi
index ed29dd9e1508..ed8eb8459e51 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi
@@ -87,7 +87,7 @@
cam_vdig-supply = <&pmcobalt_s3>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
- qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1352000>;
qcom,cam-vreg-op-mode = <0 80000 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
@@ -132,7 +132,7 @@
cam_vana-supply = <&pmicobalt_bob>;
qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
qcom,cam-vreg-min-voltage = <0 0 3312000>;
- qcom,cam-vreg-max-voltage = <0 0 3312000>;
+ qcom,cam-vreg-max-voltage = <0 0 3600000>;
qcom,cam-vreg-op-mode = <0 0 80000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
@@ -215,7 +215,7 @@
cam_vdig-supply = <&pmcobalt_s3>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
- qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1352000>;
qcom,cam-vreg-op-mode = <0 80000 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
@@ -259,7 +259,7 @@
cam_vana-supply = <&pmicobalt_bob>;
qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
qcom,cam-vreg-min-voltage = <0 0 3312000>;
- qcom,cam-vreg-max-voltage = <0 0 3312000>;
+ qcom,cam-vreg-max-voltage = <0 0 3600000>;
qcom,cam-vreg-op-mode = <0 0 80000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi
index 485bc560eef5..2be67ab52ba7 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi
@@ -87,7 +87,7 @@
cam_vdig-supply = <&pmcobalt_s3>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
- qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1352000>;
qcom,cam-vreg-op-mode = <0 80000 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
@@ -132,7 +132,7 @@
cam_vana-supply = <&pmicobalt_bob>;
qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
qcom,cam-vreg-min-voltage = <0 0 3312000>;
- qcom,cam-vreg-max-voltage = <0 0 3312000>;
+ qcom,cam-vreg-max-voltage = <0 0 3600000>;
qcom,cam-vreg-op-mode = <0 0 80000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
@@ -215,7 +215,7 @@
cam_vdig-supply = <&pmcobalt_s3>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
- qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1352000>;
qcom,cam-vreg-op-mode = <0 80000 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
@@ -259,7 +259,7 @@
cam_vana-supply = <&pmicobalt_bob>;
qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
qcom,cam-vreg-min-voltage = <0 0 3312000>;
- qcom,cam-vreg-max-voltage = <0 0 3312000>;
+ qcom,cam-vreg-max-voltage = <0 0 3600000>;
qcom,cam-vreg-op-mode = <0 0 80000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-qrd.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-qrd.dtsi
new file mode 100644
index 000000000000..4b435aee73b0
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-qrd.dtsi
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash0: qcom,camera-flash@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmicobalt_flash0 &pmicobalt_flash1>;
+ qcom,torch-source = <&pmicobalt_torch0 &pmicobalt_torch1>;
+ qcom,switch-source = <&pmicobalt_switch0>;
+ status = "ok";
+ };
+
+ led_flash1: qcom,camera-flash@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmicobalt_flash2>;
+ qcom,torch-source = <&pmicobalt_torch2>;
+ qcom,switch-source = <&pmicobalt_switch1>;
+ status = "ok";
+ };
+};
+
+&cci {
+ actuator0: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ gpios = <&tlmm 27 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ };
+
+ actuator1: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <1>;
+ gpios = <&tlmm 27 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ };
+
+ ois0: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ qcom,cci-master = <0>;
+ gpios = <&tlmm 27 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ status = "disabled";
+ };
+
+ eeprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmicobalt_bob>;
+ cam_vdig-supply = <&pmcobalt_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active
+ &cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend
+ &cam_actuator_vaf_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>,
+ <&pmcobalt_gpios 20 0>,
+ <&tlmm 29 0>,
+ <&tlmm 27 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-vaf = <4>;
+ qcom,gpio-req-tbl-num = <0 1 2 3 4>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA",
+ "CAM_VAF";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk0_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ eeprom1: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vdig-supply = <&pmcobalt_lvs1>;
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmicobalt_bob>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+ qcom,cam-vreg-min-voltage = <0 0 3312000>;
+ qcom,cam-vreg-max-voltage = <0 0 3312000>;
+ qcom,cam-vreg-op-mode = <0 0 80000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk2_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ eeprom2: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmcobalt_l22>;
+ cam_vdig-supply = <&pmcobalt_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage =
+ <0 2864000 1352000>;
+ qcom,cam-vreg-max-voltage =
+ <0 2864000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&pmcobalt_gpios 9 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VDIG";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk1_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk1_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera";
+ reg = <0x0>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <90>;
+ qcom,led-flash-src = <&led_flash0>;
+ qcom,actuator-src = <&actuator0>;
+ qcom,ois-src = <&ois0>;
+ qcom,eeprom-src = <&eeprom0>;
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmicobalt_bob>;
+ cam_vdig-supply = <&pmcobalt_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>,
+ <&pmcobalt_gpios 20 0>,
+ <&tlmm 29 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk0_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera";
+ reg = <0x1>;
+ qcom,csiphy-sd-index = <1>;
+ qcom,csid-sd-index = <1>;
+ qcom,mount-angle = <90>;
+ qcom,eeprom-src = <&eeprom1>;
+ cam_vdig-supply = <&pmcobalt_lvs1>;
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmicobalt_bob>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+ qcom,cam-vreg-min-voltage = <0 0 3312000>;
+ qcom,cam-vreg-max-voltage = <0 0 3312000>;
+ qcom,cam-vreg-op-mode = <0 0 80000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk2_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@2 {
+ cell-index = <2>;
+ compatible = "qcom,camera";
+ reg = <0x02>;
+ qcom,csiphy-sd-index = <2>;
+ qcom,csid-sd-index = <2>;
+ qcom,mount-angle = <270>;
+ qcom,eeprom-src = <&eeprom2>;
+ qcom,led-flash-src = <&led_flash1>;
+ qcom,actuator-src = <&actuator1>;
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmcobalt_l22>;
+ cam_vdig-supply = <&pmcobalt_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage =
+ <0 2864000 1352000>;
+ qcom,cam-vreg-max-voltage =
+ <0 2864000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&pmcobalt_gpios 9 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VDIG";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk1_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk1_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+};
+
+&pmcobalt_gpios {
+ gpio@c800 { /* GPIO 9 - CAMERA SENSOR 2 VDIG */
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN0 GPIO_LV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* No Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+
+ gpio@d300 { /* GPIO 20 - CAMERA SENSOR 0 VDIG */
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* No Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi
index 154bc5b092df..a37fa26b1055 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi
@@ -28,7 +28,10 @@
reg-names = "csiphy";
interrupts = <0 78 0>;
interrupt-names = "csiphy";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ gdscr-supply = <&gdsc_camss_top>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -42,7 +45,7 @@
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csiphy_clk_src>,
<&clock_mmss clk_mmss_camss_csiphy0_clk>;
- clock-names = "mnoc_maxi", "mnoc_ahb",
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
"bmic_smmu_ahb", "bmic_smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
"csi_src_clk", "csi_clk", "cphy_csid_clk",
@@ -60,7 +63,10 @@
reg-names = "csiphy";
interrupts = <0 79 0>;
interrupt-names = "csiphy";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ gdscr-supply = <&gdsc_camss_top>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -74,7 +80,7 @@
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csiphy_clk_src>,
<&clock_mmss clk_mmss_camss_csiphy1_clk>;
- clock-names = "mnoc_maxi", "mnoc_ahb",
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
"bmic_smmu_ahb", "bmic_smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
"csi_src_clk", "csi_clk", "cphy_csid_clk",
@@ -92,7 +98,10 @@
reg-names = "csiphy";
interrupts = <0 80 0>;
interrupt-names = "csiphy";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ gdscr-supply = <&gdsc_camss_top>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -106,7 +115,7 @@
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csiphy_clk_src>,
<&clock_mmss clk_mmss_camss_csiphy2_clk>;
- clock-names = "mnoc_maxi", "mnoc_ahb",
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
"bmic_smmu_ahb", "bmic_smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
"csi_src_clk", "csi_clk", "cphy_csid_clk",
@@ -128,8 +137,9 @@
qcom,mipi-csi-vdd-supply = <&pmcobalt_l2>;
gdscr-supply = <&gdsc_camss_top>;
vdd_sec-supply = <&pmcobalt_l1>;
- qcom,cam-vreg-name = "vdd_sec", "gdscr";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -137,21 +147,20 @@
<&clock_mmss clk_mmss_camss_top_ahb_clk>,
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csi0_clk_src>,
+ <&clock_mmss clk_csiphy_clk_src>,
<&clock_mmss clk_mmss_camss_csi0_clk>,
- <&clock_mmss clk_mmss_camss_csiphy0_clk>,
<&clock_mmss clk_mmss_camss_csi0_ahb_clk>,
<&clock_mmss clk_mmss_camss_csi0rdi_clk>,
<&clock_mmss clk_mmss_camss_csi0pix_clk>,
- <&clock_mmss clk_mmss_camss_cphy_csid0_clk>,
- <&clock_mmss clk_csiphy_clk_src>;
- clock-names = "mnoc_maxi", "mnoc_ahb",
+ <&clock_mmss clk_mmss_camss_cphy_csid0_clk>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
"bmic_smmu_ahb", "bmic_smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
- "ispif_ahb_clk", "csi_src_clk", "csi_clk",
- "csi_phy_clk", "csi_ahb_clk", "csi_rdi_clk",
- "csi_pix_clk", "cphy_csid_clk", "cphy_clk_src";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 0 0 0 0 0 0
- 256000000>;
+ "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+ "csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+ "csi_pix_clk", "cphy_csid_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ 0 0 0 0 0>;
status = "ok";
};
@@ -166,8 +175,9 @@
qcom,mipi-csi-vdd-supply = <&pmcobalt_l2>;
gdscr-supply = <&gdsc_camss_top>;
vdd_sec-supply = <&pmcobalt_l1>;
- qcom,cam-vreg-name = "vdd_sec", "gdscr";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -175,21 +185,20 @@
<&clock_mmss clk_mmss_camss_top_ahb_clk>,
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csi1_clk_src>,
+ <&clock_mmss clk_csiphy_clk_src>,
<&clock_mmss clk_mmss_camss_csi1_clk>,
- <&clock_mmss clk_mmss_camss_csiphy1_clk>,
<&clock_mmss clk_mmss_camss_csi1_ahb_clk>,
<&clock_mmss clk_mmss_camss_csi1rdi_clk>,
<&clock_mmss clk_mmss_camss_csi1pix_clk>,
- <&clock_mmss clk_mmss_camss_cphy_csid1_clk>,
- <&clock_mmss clk_csiphy_clk_src>;
- clock-names = "mnoc_maxi", "mnoc_ahb",
+ <&clock_mmss clk_mmss_camss_cphy_csid1_clk>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
"bmic_smmu_ahb", "bmic_smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
- "ispif_ahb_clk", "csi_src_clk", "csi_clk",
- "csi_phy_clk", "csi_ahb_clk", "csi_rdi_clk",
- "csi_pix_clk", "cphy_csid_clk", "cphy_clk_src";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 0 0 0 0 0 0
- 256000000>;
+ "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+ "csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+ "csi_pix_clk", "cphy_csid_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ 0 0 0 0 0>;
status = "ok";
};
@@ -204,8 +213,9 @@
qcom,mipi-csi-vdd-supply = <&pmcobalt_l2>;
gdscr-supply = <&gdsc_camss_top>;
vdd_sec-supply = <&pmcobalt_l1>;
- qcom,cam-vreg-name = "vdd_sec", "gdscr";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -213,21 +223,20 @@
<&clock_mmss clk_mmss_camss_top_ahb_clk>,
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csi2_clk_src>,
+ <&clock_mmss clk_csiphy_clk_src>,
<&clock_mmss clk_mmss_camss_csi2_clk>,
- <&clock_mmss clk_mmss_camss_csiphy2_clk>,
<&clock_mmss clk_mmss_camss_csi2_ahb_clk>,
<&clock_mmss clk_mmss_camss_csi2rdi_clk>,
<&clock_mmss clk_mmss_camss_csi2pix_clk>,
- <&clock_mmss clk_mmss_camss_cphy_csid2_clk>,
- <&clock_mmss clk_csiphy_clk_src>;
- clock-names = "mnoc_maxi", "mnoc_ahb",
+ <&clock_mmss clk_mmss_camss_cphy_csid2_clk>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
"bmic_smmu_ahb", "bmic_smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
- "ispif_ahb_clk", "csi_src_clk", "csi_clk",
- "csi_phy_clk", "csi_ahb_clk", "csi_rdi_clk",
- "csi_pix_clk", "cphy_csid_clk", "cphy_clk_src";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 0 0 0 0 0 0
- 256000000>;
+ "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+ "csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+ "csi_pix_clk", "cphy_csid_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ 0 0 0 0 0>;
status = "ok";
};
@@ -242,8 +251,9 @@
qcom,mipi-csi-vdd-supply = <&pmcobalt_l2>;
gdscr-supply = <&gdsc_camss_top>;
vdd_sec-supply = <&pmcobalt_l1>;
- qcom,cam-vreg-name = "vdd_sec", "gdscr";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -251,20 +261,20 @@
<&clock_mmss clk_mmss_camss_top_ahb_clk>,
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csi3_clk_src>,
+ <&clock_mmss clk_csiphy_clk_src>,
<&clock_mmss clk_mmss_camss_csi3_clk>,
<&clock_mmss clk_mmss_camss_csi3_ahb_clk>,
<&clock_mmss clk_mmss_camss_csi3rdi_clk>,
<&clock_mmss clk_mmss_camss_csi3pix_clk>,
- <&clock_mmss clk_mmss_camss_cphy_csid1_clk>,
- <&clock_mmss clk_csiphy_clk_src>;
- clock-names = "mnoc_maxi", "mnoc_ahb",
+ <&clock_mmss clk_mmss_camss_cphy_csid3_clk>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
"bmic_smmu_ahb", "bmic_smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
- "ispif_ahb_clk", "csi_src_clk", "csi_clk",
- "csi_ahb_clk", "csi_rdi_clk",
- "csi_pix_clk", "cphy_csid_clk", "cphy_clk_src";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 0 0 0 0 0
- 256000000>;
+ "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+ "csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+ "csi_pix_clk", "cphy_csid_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ 0 0 0 0 0>;
status = "ok";
};
@@ -320,7 +330,7 @@
camss-vdd-supply = <&gdsc_camss_top>;
vdd-supply = <&gdsc_cpp>;
qcom,vdd-names = "smmu-vdd", "camss-vdd", "vdd";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -331,7 +341,7 @@
<&clock_mmss clk_mmss_fd_ahb_clk>,
<&clock_mmss clk_mmss_camss_cpp_axi_clk>,
<&clock_mmss clk_mmss_camss_cpp_vbif_ahb_clk>;
- clock-names = "mmss_mnoc_maxi_clk",
+ clock-names = "mmssnoc_axi",
"mmss_mnoc_ahb_clk",
"mmss_bimc_smmu_ahb_clk",
"mmss_bimc_smmu_axi_clk",
@@ -342,14 +352,18 @@
"mmss_fd_ahb_clk",
"mmss_camss_cpp_axi_clk",
"mmss_camss_cpp_vbif_ahb_clk";
- qcom,clock-rates = <0 0 0 0 0 0 200000000 0 0 0 0>;
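+ /* one rate set per bus use case (qcom,msm-bus,num-cases = <4>) */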
+ qcom,clock-rates =
+ <0 0 0 0 0 0 404000000 0 0 0 0>,
+ <0 0 0 0 0 0 100000000 0 0 0 0>,
+ <0 0 0 0 0 0 404000000 0 0 0 0>,
+ <0 0 0 0 0 0 404000000 0 0 0 0>;
qcom,msm-bus,name = "msm_camera_fd";
qcom,msm-bus,num-cases = <4>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <106 512 0 0>,
- <106 512 13000000 13000000>,
- <106 512 45000000 45000000>,
- <106 512 90000000 90000000>;
+ <106 512 1625 0>,
+ <106 512 2995 0>,
+ <106 512 7200 0>;
qcom,fd-vbif-reg-settings = <0x20 0x10000000 0x30000000>,
<0x24 0x10000000 0x30000000>,
<0x28 0x10000000 0x30000000>,
@@ -374,7 +388,6 @@
vdd-supply = <&gdsc_cpp>;
qcom,vdd-names = "smmu-vdd", "camss-vdd", "vdd";
clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
- <&clock_mmss clk_mmss_mnoc_maxi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_camss_ahb_clk>,
<&clock_mmss clk_mmss_camss_top_ahb_clk>,
@@ -385,12 +398,12 @@
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
<&clock_mmss clk_mmss_camss_cpp_vbif_ahb_clk>;
clock-names = "mmssnoc_axi_clk",
- "mnoc_maxi_clk", "mnoc_ahb_clk",
+ "mnoc_ahb_clk",
"camss_ahb_clk", "camss_top_ahb_clk",
"cpp_core_clk", "camss_cpp_ahb_clk",
"camss_cpp_axi_clk", "micro_iface_clk",
"mmss_smmu_axi_clk", "cpp_vbif_ahb_clk";
- qcom,clock-rates = <0 0 0 0 0 200000000 0 0 0 0 0>;
+ qcom,clock-rates = <0 0 0 0 200000000 0 0 0 0 0>;
qcom,min-clock-rate = <200000000>;
qcom,bus-master = <1>;
qcom,vbif-qos-setting = <0x20 0x10000000>,
@@ -442,7 +455,7 @@
qcom,vdd-names = "camss-vdd", "vfe0-vdd",
"vfe1-vdd";
qcom,clock-cntl-support;
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_camss_ahb_clk>,
<&clock_mmss clk_mmss_camss_top_ahb_clk>,
@@ -469,7 +482,7 @@
<&clock_mmss clk_mmss_camss_vfe1_clk>,
<&clock_mmss clk_vfe1_clk_src>,
<&clock_mmss clk_mmss_camss_csi_vfe1_clk>;
- clock-names = "mnoc_maxi_clk", "mnoc_ahb_clk",
+ clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
"camss_ahb_clk",
"camss_top_ahb_clk", "ispif_ahb_clk",
"csi0_src_clk", "csi1_src_clk",
@@ -521,7 +534,7 @@
camss-vdd-supply = <&gdsc_camss_top>;
smmu-vdd-supply = <&gdsc_bimc_smmu>;
qcom,vdd-names = "vdd", "camss-vdd", "smmu-vdd";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -534,7 +547,7 @@
<&clock_mmss clk_mmss_camss_vfe_vbif_axi_clk>,
<&clock_mmss clk_vfe0_clk_src>,
<&clock_mmss clk_mmss_camss_csi_vfe0_clk>;
- clock-names = "mnoc_maxi_clk", "mnoc_ahb_clk",
+ clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
"bimc_smmu_ahb_clk", "bimc_smmu_axi_clk",
"camss_ahb_clk", "camss_top_ahb_clk",
"camss_vfe_clk", "camss_vfe_stream_clk",
@@ -601,7 +614,7 @@
camss-vdd-supply = <&gdsc_camss_top>;
smmu-vdd-supply = <&gdsc_bimc_smmu>;
qcom,vdd-names = "vdd", "camss-vdd", "smmu-vdd";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -614,7 +627,7 @@
<&clock_mmss clk_mmss_camss_vfe_vbif_axi_clk>,
<&clock_mmss clk_vfe1_clk_src>,
<&clock_mmss clk_mmss_camss_csi_vfe1_clk>;
- clock-names = "mnoc_maxi_clk", "mnoc_ahb_clk",
+ clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
"bimc_smmu_ahb_clk", "bimc_smmu_axi_clk",
"camss_ahb_clk", "camss_top_ahb_clk",
"camss_vfe_clk", "camss_vfe_stream_clk",
@@ -687,7 +700,7 @@
mmagic-supply = <&gdsc_bimc_smmu>;
gdscr-supply = <&gdsc_camss_top>;
qcom,cam-vreg-name = "mmagic", "gdscr";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -696,7 +709,7 @@
<&clock_mmss clk_cci_clk_src>,
<&clock_mmss clk_mmss_camss_cci_ahb_clk>,
<&clock_mmss clk_mmss_camss_cci_clk>;
- clock-names = "mnoc_axi", "mnoc_ahb", "smmu_ahb", "smmu_axi",
+ clock-names = "mmssnoc_axi", "mnoc_ahb", "smmu_ahb", "smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
"cci_src_clk", "cci_ahb_clk", "camss_cci_clk";
qcom,clock-rates = <0 0 0 0 0 0 19200000 0 0>,
@@ -739,7 +752,7 @@
smmu-vdd-supply = <&gdsc_bimc_smmu>;
camss-vdd-supply = <&gdsc_camss_top>;
qcom,vdd-names = "smmu-vdd", "camss-vdd";
- clock-names = "mmss_mnoc_maxi_clk",
+ clock-names = "mmssnoc_axi",
"mmss_mnoc_ahb_clk",
"mmss_bimc_smmu_ahb_clk",
"mmss_bimc_smmu_axi_clk",
@@ -748,7 +761,7 @@
"core_clk",
"mmss_camss_jpeg_ahb_clk",
"mmss_camss_jpeg_axi_clk";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -783,7 +796,7 @@
smmu-vdd-supply = <&gdsc_bimc_smmu>;
camss-vdd-supply = <&gdsc_camss_top>;
qcom,vdd-names = "smmu-vdd", "camss-vdd";
- clock-names = "mmss_mnoc_maxi_clk",
+ clock-names = "mmssnoc_axi",
"mmss_mnoc_ahb_clk",
"mmss_bimc_smmu_ahb_clk",
"mmss_bimc_smmu_axi_clk",
@@ -792,7 +805,7 @@
"core_clk",
"mmss_camss_jpeg_ahb_clk",
"mmss_camss_jpeg_axi_clk";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
index f59899fba039..fcceac6e2469 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
@@ -21,6 +21,8 @@
qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>;
qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>;
qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>;
+ clocks = <&clock_gcc clk_rf_clk2>;
+ clock-names = "rf_clk2";
qca,bt-vdd-io-voltage-level = <1352000 1352000>;
qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
@@ -31,9 +33,9 @@
qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
- qca,bt-vdd-core-current-level = <0>; /* LPM/PFM */
- qca,bt-vdd-pa-current-level = <0>; /* LPM/PFM */
- qca,bt-vdd-ldo-current-level = <0>; /* LPM/PFM */
+ qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
};
};
@@ -376,6 +378,8 @@
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,5v-boost-gpio = <&tlmm 51 0>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,partial-update-enabled;
+ qcom,panel-roi-alignment = <4 2 4 2 20 20>;
};
&dsi_sharp_1080_cmd {
@@ -425,7 +429,7 @@
};
&pmicobalt_charger {
- qcom,suspend-input;
+ qcom,batteryless-platform;
};
&pmicobalt_haptics {
@@ -539,10 +543,6 @@
};
&soc {
- sound-9335 {
- qcom,wcn-btfm;
- };
-
gpio_keys {
compatible = "gpio-keys";
input-name = "gpio-keys";
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi
index 1267e578f9b4..e140074465ef 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi
@@ -75,6 +75,8 @@
qcom,gpu-qdss-stm = <0x161c0000 0x40000>; // base addr, size
+ qcom,tsens-name = "tsens_tz_sensor12";
+
clocks = <&clock_gfx clk_gpucc_gfx3d_clk>,
<&clock_gcc clk_gcc_gpu_cfg_ahb_clk>,
<&clock_gpu clk_gpucc_rbbmtimer_clk>,
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
index a4ba9a61cded..0278cbde90ce 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
@@ -25,6 +25,7 @@
#include "dsi-panel-sharp-1080p-cmd.dtsi"
#include "dsi-panel-jdi-1080p-video.dtsi"
#include "dsi-panel-sharp-dualmipi-1080p-120hz.dtsi"
+#include "dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -81,6 +82,7 @@
qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,cmd-sync-wait-broadcast;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
@@ -89,6 +91,7 @@
qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,cmd-sync-wait-broadcast;
};
&dsi_dual_nt35597_truly_video {
@@ -156,3 +159,9 @@
qcom,mdss-dsi-t-clk-post = <0x7>;
qcom,mdss-dsi-t-clk-pre = <0x26>;
};
+
+&dsi_dual_jdi_a407_cmd {
+ qcom,mdss-dsi-panel-timings = [00 16 05 05 09 0e 05 05 04 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x06>;
+ qcom,mdss-dsi-t-clk-pre = <0x22>;
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss-pll.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss-pll.dtsi
index 11571415c02e..af0eb60818fb 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss-pll.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss-pll.dtsi
@@ -96,8 +96,10 @@
gdsc-supply = <&gdsc_mdss>;
- clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>;
- clock-names = "iface_clk";
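+ /* PLL reference: ln_bb_clk1 routed through the USB3 clkref buffer (pad shared with USB) */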
+ clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>,
+ <&clock_gcc clk_ln_bb_clk1>,
+ <&clock_gcc clk_gcc_usb3_clkref_clk>;
+ clock-names = "iface_clk", "ref_clk_src", "ref_clk";
clock-rate = <0>;
qcom,platform-supply-entries {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
index 4fe694069011..ec38e46b1d89 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
@@ -477,15 +477,17 @@
<&clock_mmss clk_mmss_mdss_mdp_clk>,
<&clock_mmss clk_mmss_mdss_hdmi_dp_ahb_clk>,
<&clock_mmss clk_mmss_mdss_dp_aux_clk>,
+ <&clock_gcc clk_ln_bb_clk1>,
+ <&clock_gcc clk_gcc_usb3_clkref_clk>,
<&clock_mmss clk_mmss_mdss_dp_link_clk>,
<&clock_mmss clk_mmss_mdss_dp_link_intf_clk>,
<&clock_mmss clk_mmss_mdss_dp_crypto_clk>,
<&clock_mmss clk_mmss_mdss_dp_pixel_clk>;
clock-names = "core_mnoc_clk", "core_iface_clk", "core_bus_clk",
"core_mdp_core_clk", "core_alt_iface_clk",
- "core_aux_clk", "ctrl_link_clk",
- "ctrl_link_iface_clk", "ctrl_crypto_clk",
- "ctrl_pixel_clk";
+ "core_aux_clk", "core_ref_clk_src", "core_ref_clk",
+ "ctrl_link_clk", "ctrl_link_iface_clk",
+ "ctrl_crypto_clk", "ctrl_pixel_clk";
qcom,dp-usbpd-detection = <&pmicobalt_pdphy>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
index 8697ba4cb889..f9bb6e512d33 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
@@ -22,6 +22,8 @@
qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>;
qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>;
qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>;
+ clocks = <&clock_gcc clk_rf_clk2>;
+ clock-names = "rf_clk2";
qca,bt-vdd-io-voltage-level = <1352000 1352000>;
qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
@@ -32,9 +34,9 @@
qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
- qca,bt-vdd-core-current-level = <0>; /* LPM/PFM */
- qca,bt-vdd-pa-current-level = <0>; /* LPM/PFM */
- qca,bt-vdd-ldo-current-level = <0>; /* LPM/PFM */
+ qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
};
};
@@ -562,10 +564,6 @@
};
&soc {
- sound-9335 {
- qcom,wcn-btfm;
- };
-
gpio_keys {
compatible = "gpio-keys";
input-name = "gpio-keys";
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
index f4f47bc461fc..3975bc5d16f5 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
@@ -1779,6 +1779,66 @@
};
};
+ tsif0_signals_active: tsif0_signals_active {
+ tsif1_clk {
+ pins = "gpio89"; /* TSIF0 CLK */
+ function = "tsif1_clk";
+ };
+ tsif1_en {
+ pins = "gpio90"; /* TSIF0 Enable */
+ function = "tsif1_en";
+ };
+ tsif1_data {
+ pins = "gpio91"; /* TSIF0 DATA */
+ function = "tsif1_data";
+ };
+ signals_cfg {
+ pins = "gpio89", "gpio90", "gpio91";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+
+ /* the sync signal is used only when the interface is configured in mode-2 */
+ tsif0_sync_active: tsif0_sync_active {
+ tsif1_sync {
+ pins = "gpio9"; /* TSIF0 SYNC */
+ function = "tsif1_sync";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+
+ tsif1_signals_active: tsif1_signals_active {
+ tsif2_clk {
+ pins = "gpio93"; /* TSIF1 CLK */
+ function = "tsif2_clk";
+ };
+ tsif2_en {
+ pins = "gpio94"; /* TSIF1 Enable */
+ function = "tsif2_en";
+ };
+ tsif2_data {
+ pins = "gpio95"; /* TSIF1 DATA */
+ function = "tsif2_data";
+ };
+ signals_cfg {
+ pins = "gpio93", "gpio94", "gpio95";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+
+ /* the sync signal is used only when the interface is configured in mode-2 */
+ tsif1_sync_active: tsif1_sync_active {
+ tsif2_sync {
+ pins = "gpio96"; /* TSIF1 SYNC */
+ function = "tsif2_sync";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+
pri_aux_pcm_clk {
pri_aux_pcm_clk_sleep: pri_aux_pcm_clk_sleep {
mux {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
index 6018124caf68..c6d7defbf35c 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
@@ -24,7 +24,7 @@
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
qcom,saw2-avs-ctl = <0x1010031>;
- qcom,saw2-avs-limit = <0x4000208>;
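+ /* each half-word 0x458 likely encodes a 1112 mV AVS limit, matching the APC rail maximum */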
+ qcom,saw2-avs-limit = <0x4580458>;
qcom,pfm-port = <0x2>;
};
@@ -40,7 +40,7 @@
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
qcom,saw2-avs-ctl = <0x1010031>;
- qcom,saw2-avs-limit = <0x4000208>;
+ qcom,saw2-avs-limit = <0x4580458>;
qcom,pfm-port = <0x2>;
};
@@ -279,6 +279,52 @@
qcom,sleep-stats-version = <2>;
};
+ qcom,rpm-rail-stats@200000 {
+ compatible = "qcom,rpm-rail-stats";
+ reg = <0x200000 0x100>,
+ <0x29000c 0x4>;
+ reg-names = "phys_addr_base",
+ "offset_addr";
+ };
+
+ qcom,rpm-log@200000 {
+ compatible = "qcom,rpm-log";
+ reg = <0x200000 0x4000>,
+ <0x290018 0x4>;
+ qcom,rpm-addr-phys = <0x200000>;
+ qcom,offset-version = <4>;
+ qcom,offset-page-buffer-addr = <36>;
+ qcom,offset-log-len = <40>;
+ qcom,offset-log-len-mask = <44>;
+ qcom,offset-page-indices = <56>;
+ };
+
+ qcom,rpm-master-stats@778150 {
+ compatible = "qcom,rpm-master-stats";
+ reg = <0x778150 0x5000>;
+ qcom,masters = "APSS", "MPSS", "ADSP", "SLPI";
+ qcom,master-stats-version = <2>;
+ qcom,master-offset = <4096>;
+ };
+
+ rpm_msg_ram: memory@200000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x200000 0x1000>,
+ <0x290000 0x1000>;
+ };
+
+ rpm_code_ram: rpm-memory@778000 {
+ compatible = "qcom,rpm-code-ram";
+ reg = <0x778000 0x5000>;
+ };
+
+ qcom,system-stats {
+ compatible = "qcom,system-stats";
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+ qcom,rpm-code-ram = <&rpm_code_ram>;
+ qcom,masters = "APSS", "MPSS", "ADSP", "SLPI";
+ };
+
qcom,mpm@7781b8 {
compatible = "qcom,mpm-v2";
reg = <0x7781b8 0x1000>, /* MSM_RPM_MPM_BASE 4K */
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi
index 1ae0ab804eac..5f89985db0a3 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi
@@ -126,3 +126,70 @@
qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
};
};
+
+&pmx_mdss {
+ mdss_dsi_active: mdss_dsi_active {
+ mux {
+ pins = "gpio94";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio94";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* no pull */
+ output-high;
+ };
+ };
+
+ mdss_dsi_suspend: mdss_dsi_suspend {
+ mux {
+ pins = "gpio94";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio94";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
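+/* split_dsi: the two DSI controllers each drive one half of the dual-MIPI panel */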
+&mdss_dsi {
+ hw-config = "split_dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_dual_jdi_a407_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-reset-gpio = <&tlmm 94 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&mdss_dsi1 {
+ qcom,dsi-pref-prim-pan = <&dsi_dual_jdi_a407_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-reset-gpio = <&tlmm 94 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&labibb {
+ status = "ok";
+ qpnp,qpnp-labibb-mode = "lcd";
+};
+
+&dsi_dual_jdi_a407_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi
index e0ae9a8873a7..51e1154beaa9 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi
@@ -10,7 +10,529 @@
* GNU General Public License for more details.
*/
-#include "msmcobalt-mtp.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "msmcobalt-pinctrl.dtsi"
+#include "msmcobalt-camera-sensor-qrd.dtsi"
+/ {
+ bluetooth: bt_wcn3990 {
+ compatible = "qca,wcn3990";
+ qca,bt-vdd-io-supply = <&pmcobalt_s3>;
+ qca,bt-vdd-xtal-supply = <&pmcobalt_s5>;
+ qca,bt-vdd-core-supply = <&pmcobalt_l7_pin_ctrl>;
+ qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>;
+ qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>;
+ qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>;
+
+ qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+ qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+ qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+ qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+ qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+ qca,bt-chip-pwd-voltage-level = <3600000 3600000>;
+
+ qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-core-current-level = <0>; /* LPM/PFM */
+ qca,bt-vdd-pa-current-level = <0>; /* LPM/PFM */
+ qca,bt-vdd-ldo-current-level = <0>; /* LPM/PFM */
+ };
+};
+
+&blsp1_uart3_hs {
+ status = "ok";
+};
+
+&ufsphy1 {
+ vdda-phy-supply = <&pmcobalt_l1>;
+ vdda-pll-supply = <&pmcobalt_l2>;
+ vddp-ref-clk-supply = <&pmcobalt_l26>;
+ vdda-phy-max-microamp = <51400>;
+ vdda-pll-max-microamp = <14600>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+ status = "ok";
+};
+
+&ufs1 {
+ vdd-hba-supply = <&gdsc_ufs>;
+ vdd-hba-fixed-regulator;
+ vcc-supply = <&pmcobalt_l20>;
+ vccq-supply = <&pmcobalt_l26>;
+ vccq2-supply = <&pmcobalt_s4>;
+ vcc-max-microamp = <750000>;
+ vccq-max-microamp = <560000>;
+ vccq2-max-microamp = <750000>;
+ status = "ok";
+};
+
+&ufs_ice {
+ status = "ok";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pmcobalt_l21>;
+ qcom,vdd-voltage-level = <2950000 2960000>;
+ qcom,vdd-current-level = <200 800000>;
+
+ vdd-io-supply = <&pmcobalt_l13>;
+ qcom,vdd-io-voltage-level = <1808000 2960000>;
+ qcom,vdd-io-current-level = <200 22000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+ cd-gpios = <&tlmm 95 0x1>;
+
+ status = "ok";
+};
+
+&uartblsp2dm1 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console_active>;
+};
+
+&pmcobalt_gpios {
+ /* GPIO 6 for Vol+ Key */
+ gpio@c500 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+
+ /* GPIO 7 for Snapshot Key */
+ gpio@c600 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+
+ /* GPIO 8 for Focus Key */
+ gpio@c700 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+
+ gpio@cc00 { /* GPIO 13 */
+ qcom,mode = <1>;
+ qcom,output-type = <0>;
+ qcom,pull = <5>;
+ qcom,vin-sel = <0>;
+ qcom,out-strength = <1>;
+ qcom,src-sel = <3>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
+
+ /* GPIO 21 (NFC_CLK_REQ) */
+ gpio@d400 {
+ qcom,mode = <0>;
+ qcom,vin-sel = <1>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
+
+ /* GPIO 18 SMB138X */
+ gpio@d100 {
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
+};
+
+&i2c_5 {
+ status = "okay";
+ synaptics@20 {
+ compatible = "synaptics,dsx";
+ reg = <0x20>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <125 0x2008>;
+ vdd-supply = <&pmcobalt_l6>;
+ avdd-supply = <&pmcobalt_l28>;
+ synaptics,vdd-voltage = <1808000 1808000>;
+ synaptics,avdd-voltage = <3008000 3008000>;
+ synaptics,vdd-current = <40000>;
+ synaptics,avdd-current = <20000>;
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+ pinctrl-0 = <&ts_active>;
+ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+ synaptics,display-coords = <0 0 1439 2559>;
+ synaptics,panel-coords = <0 0 1439 2559>;
+ synaptics,reset-gpio = <&tlmm 89 0x00>;
+ synaptics,irq-gpio = <&tlmm 125 0x2008>;
+ synaptics,disable-gpios;
+ synaptics,fw-name = "PR1702898-s3528t_60QHD_00400001.img";
+ };
+};
+
+&i2c_6 { /* BLSP1 QUP6 (NFC) */
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 92 0x00>;
+ qcom,nq-ven = <&tlmm 12 0x00>;
+ qcom,nq-firm = <&tlmm 93 0x00>;
+ qcom,nq-clkreq = <&pmcobalt_gpios 21 0x00>;
+ qcom,nq-esepwr = <&tlmm 116 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <92 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_gcc clk_ln_bb_clk3_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
+&mdss_hdmi_tx {
+ pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", "hdmi_cec_active",
+ "hdmi_active", "hdmi_sleep";
+ pinctrl-0 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>;
+ pinctrl-1 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_active &mdss_hdmi_cec_suspend>;
+ pinctrl-2 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_cec_active &mdss_hdmi_ddc_suspend>;
+ pinctrl-3 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_active &mdss_hdmi_cec_active>;
+ pinctrl-4 = <&mdss_hdmi_5v_suspend &mdss_hdmi_hpd_suspend
+ &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>;
+};
+
+&mdss_dp_ctrl {
+ pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+ pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>;
+ pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>;
+ qcom,aux-en-gpio = <&tlmm 77 0>;
+ qcom,aux-sel-gpio = <&tlmm 78 0>;
+ qcom,usbplug-cc-gpio = <&tlmm 38 0>;
+};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+ hw-config = "split_dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_video>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-reset-gpio = <&tlmm 94 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,panel-mode-gpio = <&tlmm 91 0>;
+};
+
+&mdss_dsi1 {
+ qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_video>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-reset-gpio = <&tlmm 94 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,panel-mode-gpio = <&tlmm 91 0>;
+};
+
+&labibb {
+ status = "ok";
+ qpnp,qpnp-labibb-mode = "lcd";
+};
+
+&dsi_dual_nt35597_video {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_dual_nt35597_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_dual_nt35597_truly_video {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_dual_nt35597_truly_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_nt35597_dsc_video {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_nt35597_dsc_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_dual_jdi_video {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,5v-boost-gpio = <&tlmm 51 0>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_dual_jdi_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,5v-boost-gpio = <&tlmm 51 0>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_sharp_1080_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_jdi_1080_vid {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_no_labibb>;
+ qcom,5v-boost-gpio = <&tlmm 51 0>;
+};
+
+&i2c_7 {
+ status = "okay";
+ qcom,smb138x@8 {
+ compatible = "qcom,i2c-pmic";
+ reg = <0x8>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "smb138x";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>;
+
+ smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ compatible = "qcom,smb138x-parallel-slave";
+ reg = <0x1000 0x700>;
+ };
+ };
+};
+
+&pmicobalt_haptics {
+ status = "okay";
+};
+
+&pmcobalt_vadc {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@85 {
+ label = "vcoin";
+ reg = <0x85>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4d {
+ label = "msm_therm";
+ reg = <0x4d>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@51 {
+ label = "quiet_therm";
+ reg = <0x51>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+};
+
+&pmcobalt_adc_tm {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,btm-channel-number = <0x60>;
+ };
+
+ chan@4d {
+ label = "msm_therm";
+ reg = <0x4d>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x68>;
+ qcom,thermal-node;
+ };
+
+ chan@51 {
+ label = "quiet_therm";
+ reg = <0x51>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x70>;
+ qcom,thermal-node;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x78>;
+ qcom,thermal-node;
+ };
+};
+
+&wil6210 {
+ status = "ok";
+};
+
+&soc {
+ sound-9335 {
+ qcom,wcn-btfm;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ input-name = "gpio-keys";
+ status = "okay";
+
+ vol_up {
+ label = "volume_up";
+ gpios = <&pmcobalt_gpios 6 0x1>;
+ linux,input-type = <1>;
+ linux,code = <115>; /* KEY_VOLUMEUP */
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+
+ cam_snapshot {
+ label = "cam_snapshot";
+ gpios = <&pmcobalt_gpios 7 0x1>;
+ linux,input-type = <1>;
+ linux,code = <766>; /* KEY_CAMERA_SNAPSHOT (vendor keycode) */
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+
+ cam_focus {
+ label = "cam_focus";
+ gpios = <&pmcobalt_gpios 8 0x1>;
+ linux,input-type = <1>;
+ linux,code = <528>; /* KEY_CAMERA_FOCUS */
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+ };
+};
+
+/ {
+ mtp_batterydata: qcom,battery-data {
+ qcom,batt-id-range-pct = <15>;
+ #include "fg-gen3-batterydata-itech-3000mah.dtsi"
+ #include "fg-gen3-batterydata-ascent-3450mah.dtsi"
+ };
+};
&mdss_mdp {
qcom,mdss-pref-prim-intf = "dsi";
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
index 86bc048adeb5..2a61cccad273 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
@@ -533,10 +533,32 @@
regulator-name = "pm8005_s1";
status = "okay";
regulator-min-microvolt = <524000>;
- regulator-max-microvolt = <1032000>;
+ regulator-max-microvolt = <1100000>;
qcom,enable-time = <500>;
};
};
+
+ qcom,pmcobalt@1 {
+ pmcobalt_s10: regulator@2f00 {
+ compatible = "qcom,qpnp-regulator";
+ reg = <0x2f00 0x100>;
+ regulator-name = "pmcobalt_s10";
+ regulator-min-microvolt = <572000>;
+ regulator-max-microvolt = <1112000>;
+ qcom,enable-time = <500>;
+ regulator-always-on;
+ };
+
+ pmcobalt_s13: regulator@3800 {
+ compatible = "qcom,qpnp-regulator";
+ reg = <0x3800 0x100>;
+ regulator-name = "pmcobalt_s13";
+ regulator-min-microvolt = <572000>;
+ regulator-max-microvolt = <1112000>;
+ qcom,enable-time = <500>;
+ regulator-always-on;
+ };
+ };
};
/* Stub regulators */
@@ -590,6 +612,9 @@
qcom,cpr-panic-reg-name-list =
"PWR_CPRH_STATUS", "APCLUS0_L2_SAW4_PMIC_STS";
+ qcom,cpr-aging-ref-voltage = <1112000>;
+ vdd-supply = <&pmcobalt_s10>;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
@@ -712,6 +737,13 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <22>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
};
@@ -752,6 +784,9 @@
qcom,cpr-panic-reg-name-list =
"PERF_CPRH_STATUS", "APCLUS1_L2_SAW4_PMIC_STS";
+ qcom,cpr-aging-ref-voltage = <1112000>;
+ vdd-supply = <&pmcobalt_s13>;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
@@ -894,6 +929,13 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <25>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2-camera.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2-camera.dtsi
index 99d80a3b3848..fcc4d6d8ee2d 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-v2-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2-camera.dtsi
@@ -19,7 +19,10 @@
reg-names = "csiphy";
interrupts = <0 78 0>;
interrupt-names = "csiphy";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ gdscr-supply = <&gdsc_camss_top>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -33,13 +36,13 @@
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csiphy_clk_src>,
<&clock_mmss clk_mmss_camss_csiphy0_clk>;
- clock-names = "mnoc_maxi", "mnoc_ahb",
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
"bmic_smmu_ahb", "bmic_smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
"csi_src_clk", "csi_clk", "cphy_csid_clk",
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
- qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 269333333 0
+ qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0
0 256000000 0>;
status = "ok";
};
@@ -51,7 +54,10 @@
reg-names = "csiphy";
interrupts = <0 79 0>;
interrupt-names = "csiphy";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ gdscr-supply = <&gdsc_camss_top>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -65,13 +71,13 @@
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csiphy_clk_src>,
<&clock_mmss clk_mmss_camss_csiphy1_clk>;
- clock-names = "mnoc_maxi", "mnoc_ahb",
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
"bmic_smmu_ahb", "bmic_smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
"csi_src_clk", "csi_clk", "cphy_csid_clk",
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
- qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 269333333 0
+ qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0
0 256000000 0>;
status = "ok";
};
@@ -83,7 +89,10 @@
reg-names = "csiphy";
interrupts = <0 80 0>;
interrupt-names = "csiphy";
- clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>,
+ gdscr-supply = <&gdsc_camss_top>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
<&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
@@ -97,13 +106,13 @@
<&clock_mmss clk_mmss_camss_ispif_ahb_clk>,
<&clock_mmss clk_csiphy_clk_src>,
<&clock_mmss clk_mmss_camss_csiphy2_clk>;
- clock-names = "mnoc_maxi", "mnoc_ahb",
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
"bmic_smmu_ahb", "bmic_smmu_axi",
"camss_ahb_clk", "camss_top_ahb_clk",
"csi_src_clk", "csi_clk", "cphy_csid_clk",
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
- qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 269333333 0
+ qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0
0 256000000 0>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
index db50038b297e..8016a3822a7f 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
@@ -28,88 +28,89 @@
compatible = "qcom,cpu-clock-osm-msmcobalt-v2";
/delete-property/ qcom,llm-sw-overr;
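+ /* the fifth per-row value appears to be a 1-based voltage-corner index; distinct
+ * frequencies may share a corner (see the last two speed-bin-1 rows)
+ */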
qcom,pwrcl-speedbin0-v0 =
- < 300000000 0x0004000f 0x01200020 0x1 >,
- < 364800000 0x05040013 0x01200020 0x1 >,
- < 441600000 0x05040017 0x02200020 0x1 >,
- < 518400000 0x0504001b 0x02200020 0x1 >,
- < 595200000 0x0504001f 0x02200020 0x1 >,
- < 672000000 0x05040023 0x03200020 0x1 >,
- < 748800000 0x05040027 0x03200020 0x1 >,
- < 825600000 0x0404002b 0x03220022 0x1 >,
- < 883200000 0x0404002e 0x04250025 0x1 >,
- < 960000000 0x04040032 0x04280028 0x1 >,
- < 1036800000 0x04040036 0x042b002b 0x1 >,
- < 1094400000 0x04040039 0x052e002e 0x2 >,
- < 1171200000 0x0404003d 0x05310031 0x2 >,
- < 1248000000 0x04040041 0x05340034 0x2 >,
- < 1324800000 0x04040045 0x06370037 0x2 >,
- < 1401600000 0x04040049 0x063a003a 0x2 >,
- < 1478400000 0x0404004d 0x073e003e 0x2 >,
- < 1555200000 0x04040051 0x07410041 0x2 >,
- < 1670400000 0x04040057 0x08460046 0x2 >,
- < 1747200000 0x0404005b 0x08490049 0x2 >,
- < 1824000000 0x0404005f 0x084c004c 0x3 >,
- < 1900800000 0x04040063 0x094f004f 0x3 >;
+ < 300000000 0x0004000f 0x01200020 0x1 1 >,
+ < 364800000 0x05040013 0x01200020 0x1 2 >,
+ < 441600000 0x05040017 0x02200020 0x1 3 >,
+ < 518400000 0x0504001b 0x02200020 0x1 4 >,
+ < 595200000 0x0504001f 0x02200020 0x1 5 >,
+ < 672000000 0x05040023 0x03200020 0x1 6 >,
+ < 748800000 0x05040027 0x03200020 0x1 7 >,
+ < 825600000 0x0404002b 0x03220022 0x1 8 >,
+ < 883200000 0x0404002e 0x04250025 0x1 9 >,
+ < 960000000 0x04040032 0x04280028 0x1 10 >,
+ < 1036800000 0x04040036 0x042b002b 0x1 11 >,
+ < 1094400000 0x04040039 0x052e002e 0x2 12 >,
+ < 1171200000 0x0404003d 0x05310031 0x2 13 >,
+ < 1248000000 0x04040041 0x05340034 0x2 14 >,
+ < 1324800000 0x04040045 0x06370037 0x2 15 >,
+ < 1401600000 0x04040049 0x063a003a 0x2 16 >,
+ < 1478400000 0x0404004d 0x073e003e 0x2 17 >,
+ < 1555200000 0x04040051 0x07410041 0x2 18 >,
+ < 1670400000 0x04040057 0x08460046 0x2 19 >,
+ < 1747200000 0x0404005b 0x08490049 0x2 20 >,
+ < 1824000000 0x0404005f 0x084c004c 0x3 21 >,
+ < 1900800000 0x04040063 0x094f004f 0x3 22 >;
qcom,perfcl-speedbin0-v0 =
- < 300000000 0x0004000f 0x01200020 0x1 >,
- < 345600000 0x05040012 0x01200020 0x1 >,
- < 422400000 0x05040016 0x02200020 0x1 >,
- < 499200000 0x0504001a 0x02200020 0x1 >,
- < 576000000 0x0504001e 0x02200020 0x1 >,
- < 652800000 0x05040022 0x03200020 0x1 >,
- < 729600000 0x05040026 0x03200020 0x1 >,
- < 806400000 0x0504002a 0x03220022 0x1 >,
- < 902400000 0x0404002f 0x04260026 0x1 >,
- < 979200000 0x04040033 0x04290029 0x1 >,
- < 1056000000 0x04040037 0x052c002c 0x1 >,
- < 1132800000 0x0404003b 0x052f002f 0x1 >,
- < 1190400000 0x0404003e 0x05320032 0x2 >,
- < 1267200000 0x04040042 0x06350035 0x2 >,
- < 1344000000 0x04040046 0x06380038 0x2 >,
- < 1420800000 0x0404004a 0x063b003b 0x2 >,
- < 1497600000 0x0404004e 0x073e003e 0x2 >,
- < 1574400000 0x04040052 0x07420042 0x2 >,
- < 1651200000 0x04040056 0x07450045 0x2 >,
- < 1728000000 0x0404005a 0x08480048 0x2 >,
- < 1804800000 0x0404005e 0x084b004b 0x2 >,
- < 1881600000 0x04040062 0x094e004e 0x2 >,
- < 1958400000 0x04040066 0x09520052 0x2 >,
- < 2035200000 0x0404006a 0x09550055 0x3 >,
- < 2112000000 0x0404006e 0x0a580058 0x3 >,
- < 2188800000 0x04040072 0x0a5b005b 0x3 >,
- < 2265600000 0x04040076 0x0a5e005e 0x3 >,
- < 2342400000 0x0404007a 0x0a620062 0x3 >,
- < 2419200000 0x0404007e 0x0a650065 0x3 >,
- < 2496000000 0x04040082 0x0a680068 0x3 >;
+ < 300000000 0x0004000f 0x01200020 0x1 1 >,
+ < 345600000 0x05040012 0x01200020 0x1 2 >,
+ < 422400000 0x05040016 0x02200020 0x1 3 >,
+ < 499200000 0x0504001a 0x02200020 0x1 4 >,
+ < 576000000 0x0504001e 0x02200020 0x1 5 >,
+ < 652800000 0x05040022 0x03200020 0x1 6 >,
+ < 729600000 0x05040026 0x03200020 0x1 7 >,
+ < 806400000 0x0504002a 0x03220022 0x1 8 >,
+ < 902400000 0x0404002f 0x04260026 0x1 9 >,
+ < 979200000 0x04040033 0x04290029 0x1 10 >,
+ < 1056000000 0x04040037 0x052c002c 0x1 11 >,
+ < 1132800000 0x0404003b 0x052f002f 0x1 12 >,
+ < 1190400000 0x0404003e 0x05320032 0x2 13 >,
+ < 1267200000 0x04040042 0x06350035 0x2 14 >,
+ < 1344000000 0x04040046 0x06380038 0x2 15 >,
+ < 1420800000 0x0404004a 0x063b003b 0x2 16 >,
+ < 1497600000 0x0404004e 0x073e003e 0x2 17 >,
+ < 1574400000 0x04040052 0x07420042 0x2 18 >,
+ < 1651200000 0x04040056 0x07450045 0x2 19 >,
+ < 1728000000 0x0404005a 0x08480048 0x2 20 >,
+ < 1804800000 0x0404005e 0x084b004b 0x2 21 >,
+ < 1881600000 0x04040062 0x094e004e 0x2 22 >,
+ < 1958400000 0x04040066 0x09520052 0x2 23 >,
+ < 2035200000 0x0404006a 0x09550055 0x3 24 >,
+ < 2112000000 0x0404006e 0x0a580058 0x3 25 >,
+ < 2188800000 0x04040072 0x0a5b005b 0x3 26 >,
+ < 2265600000 0x04040076 0x0a5e005e 0x3 27 >,
+ < 2342400000 0x0404007a 0x0a620062 0x3 28 >,
+ < 2419200000 0x0404007e 0x0a650065 0x3 29 >,
+ < 2496000000 0x04040082 0x0a680068 0x3 30 >;
qcom,perfcl-speedbin1-v0 =
- < 300000000 0x0004000f 0x01200020 0x1 >,
- < 345600000 0x05040012 0x01200020 0x1 >,
- < 422400000 0x05040016 0x02200020 0x1 >,
- < 499200000 0x0504001a 0x02200020 0x1 >,
- < 576000000 0x0504001e 0x02200020 0x1 >,
- < 652800000 0x05040022 0x03200020 0x1 >,
- < 729600000 0x05040026 0x03200020 0x1 >,
- < 806400000 0x0504002a 0x03220022 0x1 >,
- < 902400000 0x0404002f 0x04260026 0x1 >,
- < 979200000 0x04040033 0x04290029 0x1 >,
- < 1056000000 0x04040037 0x052c002c 0x1 >,
- < 1132800000 0x0404003b 0x052f002f 0x1 >,
- < 1190400000 0x0404003e 0x05320032 0x2 >,
- < 1267200000 0x04040042 0x06350035 0x2 >,
- < 1344000000 0x04040046 0x06380038 0x2 >,
- < 1420800000 0x0404004a 0x063b003b 0x2 >,
- < 1497600000 0x0404004e 0x073e003e 0x2 >,
- < 1574400000 0x04040052 0x07420042 0x2 >,
- < 1651200000 0x04040056 0x07450045 0x2 >,
- < 1728000000 0x0404005a 0x08480048 0x2 >,
- < 1804800000 0x0404005e 0x084b004b 0x2 >,
- < 1881600000 0x04040062 0x094e004e 0x2 >,
- < 1958400000 0x04040066 0x09520052 0x2 >,
- < 2035200000 0x0404006a 0x09550055 0x3 >,
- < 2112000000 0x0404006e 0x0a580058 0x3 >,
- < 2208000000 0x04040073 0x0a5c005c 0x3 >;
+ < 300000000 0x0004000f 0x01200020 0x1 1 >,
+ < 345600000 0x05040012 0x01200020 0x1 2 >,
+ < 422400000 0x05040016 0x02200020 0x1 3 >,
+ < 499200000 0x0504001a 0x02200020 0x1 4 >,
+ < 576000000 0x0504001e 0x02200020 0x1 5 >,
+ < 652800000 0x05040022 0x03200020 0x1 6 >,
+ < 729600000 0x05040026 0x03200020 0x1 7 >,
+ < 806400000 0x0504002a 0x03220022 0x1 8 >,
+ < 902400000 0x0404002f 0x04260026 0x1 9 >,
+ < 979200000 0x04040033 0x04290029 0x1 10 >,
+ < 1056000000 0x04040037 0x052c002c 0x1 11 >,
+ < 1132800000 0x0404003b 0x052f002f 0x1 12 >,
+ < 1190400000 0x0404003e 0x05320032 0x2 13 >,
+ < 1267200000 0x04040042 0x06350035 0x2 14 >,
+ < 1344000000 0x04040046 0x06380038 0x2 15 >,
+ < 1420800000 0x0404004a 0x063b003b 0x2 16 >,
+ < 1497600000 0x0404004e 0x073e003e 0x2 17 >,
+ < 1574400000 0x04040052 0x07420042 0x2 18 >,
+ < 1651200000 0x04040056 0x07450045 0x2 19 >,
+ < 1728000000 0x0404005a 0x08480048 0x2 20 >,
+ < 1804800000 0x0404005e 0x084b004b 0x2 21 >,
+ < 1881600000 0x04040062 0x094e004e 0x2 22 >,
+ < 1958400000 0x04040066 0x09520052 0x2 23 >,
+ < 2035200000 0x0404006a 0x09550055 0x3 24 >,
+ < 2112000000 0x0404006e 0x0a580058 0x3 25 >,
+ < 2208000000 0x04040073 0x0a5c005c 0x3 26 >,
+ < 2304000000 0x04010078 0x0a5c005c 0x3 26 >;
};
&msm_cpufreq {
@@ -170,12 +171,17 @@
< 2496000 >;
};
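+/* v2 uses the v4 BIMC bandwidth-monitor IP, sampled against the 19.2 MHz always-on timer */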
+&bwmon {
+ compatible = "qcom,bimc-bwmon4";
+ qcom,hw-timer-hz = <19200000>;
+};
+
&devfreq_cpufreq {
mincpubw-cpufreq {
cpu-to-dev-map-0 =
< 1900800 1525 >;
cpu-to-dev-map-4 =
- < 2419200 1525 >,
+ < 2112000 1525 >,
< 2496000 5195 >;
};
};
@@ -202,8 +208,7 @@
< 414000000 4 RPM_SMD_REGULATOR_LEVEL_SVS >,
< 515000000 5 RPM_SMD_REGULATOR_LEVEL_NOM >,
< 596000000 6 RPM_SMD_REGULATOR_LEVEL_NOM >,
- < 670000000 7 RPM_SMD_REGULATOR_LEVEL_TURBO >,
- < 710000000 8 RPM_SMD_REGULATOR_LEVEL_TURBO >;
+ < 670000000 7 RPM_SMD_REGULATOR_LEVEL_TURBO >;
qcom,gfxfreq-mx-speedbin0 =
< 0 0 >,
< 180000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
@@ -212,8 +217,7 @@
< 414000000 RPM_SMD_REGULATOR_LEVEL_SVS >,
< 515000000 RPM_SMD_REGULATOR_LEVEL_NOM >,
< 596000000 RPM_SMD_REGULATOR_LEVEL_NOM >,
- < 670000000 RPM_SMD_REGULATOR_LEVEL_TURBO >,
- < 710000000 RPM_SMD_REGULATOR_LEVEL_TURBO >;
+ < 670000000 RPM_SMD_REGULATOR_LEVEL_TURBO >;
};
&mdss_mdp {
@@ -223,9 +227,20 @@
qcom,max-bandwidth-per-pipe-kbps = <4700000>;
};
+&pmcobalt_s10 {
+ regulator-min-microvolt = <568000>;
+ regulator-max-microvolt = <1056000>;
+};
+
+&pmcobalt_s13 {
+ regulator-min-microvolt = <568000>;
+ regulator-max-microvolt = <1056000>;
+};
+
&apc0_cpr {
compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator";
qcom,cpr-corner-switch-delay-time = <1042>;
+ qcom,cpr-aging-ref-voltage = <1056000>;
};
&apc0_pwrcl_vreg {
@@ -250,14 +265,14 @@
/* Speed bin 0 */
<828000 828000 828000 828000 828000
828000 828000 828000 828000 828000
- 828000 828000 828000 828000 828000
- 828000 828000 828000 952000 952000
+ 828000 900000 900000 900000 900000
+ 900000 900000 900000 952000 952000
1056000 1056000>,
/* Speed bin 1 */
<828000 828000 828000 828000 828000
828000 828000 828000 828000 828000
- 828000 828000 828000 828000 828000
- 828000 828000 828000 952000 952000
+ 828000 900000 900000 900000 900000
+ 900000 900000 900000 952000 952000
1056000 1056000>;
qcom,cpr-voltage-floor =
@@ -326,52 +341,57 @@
qcom,cpr-open-loop-voltage-fuse-adjustment =
/* Speed bin 0 */
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
/* Speed bin 1 */
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>,
- <40000 24000 0 0>;
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>,
+ <40000 24000 0 30000>;
qcom,cpr-closed-loop-voltage-fuse-adjustment =
/* Speed bin 0 */
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
/* Speed bin 1 */
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>,
- <20000 26000 0 0>;
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>,
+ <20000 26000 0 30000>;
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-ref-corner = <22 22>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
};
&apc1_cpr {
compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator";
qcom,cpr-corner-switch-delay-time = <1042>;
+ qcom,cpr-aging-ref-voltage = <1056000>;
};
&apc1_perfcl_vreg {
@@ -396,15 +416,15 @@
/* Speed bin 0 */
<828000 828000 828000 828000 828000
828000 828000 828000 828000 828000
- 828000 828000 828000 828000 828000
- 828000 828000 828000 828000 828000
+ 828000 828000 900000 900000 900000
+ 900000 900000 900000 900000 900000
952000 952000 952000 1056000 1056000
1056000 1056000 1056000 1056000 1056000>,
/* Speed bin 1 */
<828000 828000 828000 828000 828000
828000 828000 828000 828000 828000
- 828000 828000 828000 828000 828000
- 828000 828000 828000 828000 828000
+ 828000 828000 900000 900000 900000
+ 900000 900000 900000 900000 900000
952000 952000 952000 1056000 1056000
1056000>;
@@ -523,6 +543,15 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-ref-corner = <30 26>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
+};
+
+&pm8005_s1 {
+ regulator-min-microvolt = <516000>;
+ regulator-max-microvolt = <1088000>;
};
&gfx_cpr {
@@ -540,12 +569,17 @@
qcom,cpr-corner-fmax-map = <1 3 5 8>;
qcom,cpr-voltage-ceiling =
- <656000 716000 772000 880000 908000
- 948000 1016000 1088000>;
+ <656000 716000 772000 880000 908000 948000 1016000 1088000>,
+ <660000 724000 772000 832000 916000 968000 1024000 1024000>,
+ <660000 724000 772000 832000 916000 968000 1024000 1024000>,
+ <660000 724000 772000 832000 916000 968000 1024000 1024000>,
+ <660000 724000 772000 832000 916000 968000 1024000 1024000>,
+ <660000 724000 772000 832000 916000 968000 1024000 1024000>,
+ <660000 724000 772000 832000 916000 968000 1024000 1024000>,
+ <660000 724000 772000 832000 916000 968000 1024000 1024000>;
qcom,cpr-voltage-floor =
- <516000 516000 532000 584000 632000
- 672000 712000 756000>;
+ <516000 516000 532000 584000 632000 672000 712000 756000>;
qcom,mem-acc-voltage = <1 1 1 2 2 2 2 2>;
@@ -590,11 +624,11 @@
0 0 3487 0 3280 1896 1874 0>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
- < 72000 0 0 0>;
+ < 100000 0 0 0>;
qcom,cpr-closed-loop-voltage-adjustment =
- < 65000 26000 8000 0
- 0 0 0 0>;
+ < 96000 18000 4000 0
+ 0 13000 9000 0>;
qcom,cpr-floor-to-ceiling-max-range =
<50000 50000 50000 50000 50000 50000 70000 70000>;
@@ -635,11 +669,10 @@
qcom,load-freq-tbl =
/* Encoders */
<1105920 533000000 0x55555555>, /* 4kx2304@30 */ /*TURBO*/
- < 979200 444000000 0x55555555>, /* 1080p@120,1440p@60,
+ <1036800 444000000 0x55555555>, /* 720p@240, 1080p@120, 1440p@60,
* UHD@30 */ /*NOMINAL*/
- < 939700 355200000 0x55555555>, /* 4kx2304@24 */ /*SVSL1*/
- < 489600 269330000 0x55555555>, /* 1080p@60, 2560x1440@30 */
- /* SVS */
+ < 829440 355200000 0x55555555>, /* UHD/4096x2160@30 SVSL1 */
+ < 489600 269330000 0x55555555>, /* 1080p@60 SVS */
< 432000 200000000 0x55555555>, /* 720p@120, 1080p@30 */
/* SVS2 */
@@ -651,22 +684,51 @@
<1675472 355200000 0xffffffff>, /* 4kx2304@44 */ /*SVSL1*/
<1105920 269330000 0xffffffff>, /* UHD/4k2304@30, 1080p@120 */
/* SVS */
- < 864000 200000000 0xffffffff>; /* 720p@240, 1080p@60 */
+ < 829440 200000000 0xffffffff>; /* 720p@120, 1080p@60 */
/* SVS2 */
qcom,imem-ab-tbl =
- <200000000 1752000>,
- <269330000 1752000>,
- <355200000 2500000>,
- <444000000 6000000>,
- <533000000 6000000>;
+ <200000000 1560000>,/* imem @ svs2 freq 75 MHz */
+ <269330000 3570000>,/* imem @ svs freq 171 MHz */
+ <355200000 3570000>,/* imem @ svs freq 171 MHz */
+ <444000000 6750000>,/* imem @ nom freq 323 MHz */
+ <533000000 8490000>;/* imem @ turbo freq 406 MHz */
+
+ qcom,dcvs-tbl = /* minLoad LoadLow LoadHigh CodecCheck */
+ /* Decode */
+ /* Load > Nominal, Nominal <-> Turbo Eg. 3840x2160@60 */
+ <1728000 1728000 2211840 0x3f00000c>,
+ /* Encoder */
+ /* Load > Nominal, Nominal <-> Turbo Eg. 4kx2304@30 */
+ <1036800 1036800 1105920 0x04000004>,
+ /* Load > SVSL1, SVSL1<-> Nominal Eg. 3840x2160@30 */
+ < 829440 829440 1036800 0x04000004>,
+ /* Load > SVS , SVS <-> SVSL1 Eg. 4kx2304@24 */
+ < 489600 489600 829440 0x04000004>;
+
+ qcom,dcvs-limit = /* Min Frame size, Min MBs/sec */
+ <32400 30>, /* Encoder 3840x2160@30 */
+ <32400 60>; /* Decoder 3840x2160@60 */
+
+};
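+
+/*
+ * A hedged sketch (not from this patch) of how a video-driver DCVS
+ * governor might consume the qcom,dcvs-tbl 4-tuples above; the struct
+ * and helper names are illustrative assumptions.
+ */
+struct dcvs_entry {
+	u32 min_load;	/* row is ignored below this load (MBs/sec) */
+	u32 load_low;	/* vote the clock down below this load */
+	u32 load_high;	/* vote the clock up above this load */
+	u32 codec_mask;	/* session/codec bits this row applies to */
+};
+
+/* Returns +1 to step the clock up, -1 to step down, 0 to stay put. */
+static int dcvs_decide(const struct dcvs_entry *e, int n,
+		       u32 codec_bit, u32 load)
+{
+	int i;
+
+	for (i = 0; i < n; i++) {
+		if (!(e[i].codec_mask & codec_bit) || load < e[i].min_load)
+			continue;
+		if (load > e[i].load_high)
+			return 1;
+		if (load < e[i].load_low)
+			return -1;
+		return 0;
+	}
+	return 0;
+}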
+
+&soc {
+ /* Gold L2 SAW */
+ qcom,spm@178120000 {
+ qcom,saw2-avs-limit = <0x4200420>;
+ };
+
+ /* Silver L2 SAW */
+ qcom,spm@179120000 {
+ qcom,saw2-avs-limit = <0x4200420>;
+ };
};
/* GPU overrides */
&msm_gpu {
/* Updated chip ID */
qcom,chipid = <0x05040001>;
- qcom,initial-pwrlevel = <6>;
+ qcom,initial-pwrlevel = <5>;
qcom,gpu-pwrlevels {
#address-cells = <1>;
@@ -676,69 +738,61 @@
qcom,gpu-pwrlevel@0 {
reg = <0>;
- qcom,gpu-freq = <710000000>;
- qcom,bus-freq = <12>;
- qcom,bus-min = <12>;
- qcom,bus-max = <12>;
- };
-
- qcom,gpu-pwrlevel@1 {
- reg = <1>;
qcom,gpu-freq = <670000000>;
qcom,bus-freq = <12>;
qcom,bus-min = <11>;
qcom,bus-max = <12>;
};
- qcom,gpu-pwrlevel@2 {
- reg = <2>;
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
qcom,gpu-freq = <596000000>;
qcom,bus-freq = <11>;
qcom,bus-min = <9>;
qcom,bus-max = <12>;
};
- qcom,gpu-pwrlevel@3 {
- reg = <3>;
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
qcom,gpu-freq = <515000000>;
qcom,bus-freq = <11>;
qcom,bus-min = <9>;
qcom,bus-max = <12>;
};
- qcom,gpu-pwrlevel@4 {
- reg = <4>;
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
qcom,gpu-freq = <414000000>;
qcom,bus-freq = <9>;
qcom,bus-min = <8>;
qcom,bus-max = <11>;
};
- qcom,gpu-pwrlevel@5 {
- reg = <5>;
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
qcom,gpu-freq = <342000000>;
qcom,bus-freq = <8>;
qcom,bus-min = <5>;
qcom,bus-max = <9>;
};
- qcom,gpu-pwrlevel@6 {
- reg = <6>;
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
qcom,gpu-freq = <257000000>;
qcom,bus-freq = <5>;
qcom,bus-min = <3>;
qcom,bus-max = <8>;
};
- qcom,gpu-pwrlevel@7 {
- reg = <7>;
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
qcom,gpu-freq = <180000000>;
qcom,bus-freq = <3>;
qcom,bus-min = <1>;
qcom,bus-max = <5>;
};
- qcom,gpu-pwrlevel@8 {
- reg = <8>;
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
qcom,gpu-freq = <27000000>;
qcom,bus-freq = <0>;
qcom,bus-min = <0>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-vidc.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-vidc.dtsi
index c44e8f976710..f17be7570742 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-vidc.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-vidc.dtsi
@@ -66,10 +66,10 @@
* corresponding video core frequency.
*/
qcom,imem-ab-tbl =
- <100000000 1752000>, /* imem @ svs2 freq 75 Mhz */
- <186000000 1752000>, /* imem @ svs2 freq 75 Mhz */
- <360000000 2500000>, /* imem @ svs freq 171 Mhz */
- <465000000 6000000>; /* imem @ noimal freq 320 Mhz */
+ <100000000 1560000>, /* imem @ svs2 freq 75 MHz */
+ <186000000 3570000>, /* imem @ svs freq 171 MHz */
+ <360000000 6750000>, /* imem @ nom freq 323 MHz */
+ <465000000 8490000>; /* imem @ turbo freq 406 MHz */
/* Regulators */
smmu-vdd-supply = <&gdsc_bimc_smmu>;
@@ -124,7 +124,7 @@
qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0_OCMEM>;
qcom,bus-slave = <MSM_BUS_SLAVE_VMEM>;
qcom,bus-governor = "msm-vidc-vmem+";
- qcom,bus-range-kbps = <1000 6776000>;
+ qcom,bus-range-kbps = <1000 8490000>;
};
arm9_bus_ddr {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index 7f5f81eff9e5..26822da607de 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -541,14 +541,13 @@
< 13763 /* 1804 MHz */ >;
};
- qcom,cpu-bwmon {
- compatible = "qcom,bimc-bwmon4";
+ bwmon: qcom,cpu-bwmon {
+ compatible = "qcom,bimc-bwmon3";
reg = <0x01008000 0x300>, <0x01001000 0x200>;
reg-names = "base", "global_base";
interrupts = <0 183 4>;
qcom,mport = <0>;
qcom,target-dev = <&cpubw>;
- qcom,hw-timer-hz = <19200000>;
};
mincpubw: qcom,mincpubw {
@@ -823,55 +822,55 @@
interrupt-names = "pwrcl-irq", "perfcl-irq";
qcom,pwrcl-speedbin0-v0 =
- < 300000000 0x0004000f 0x01200020 0x1 >,
- < 345600000 0x05040012 0x02200020 0x1 >,
- < 422400000 0x05040016 0x02200020 0x1 >,
- < 499200000 0x0504001a 0x02200020 0x1 >,
- < 576000000 0x0504001e 0x03200020 0x1 >,
- < 633600000 0x05040021 0x03200020 0x1 >,
- < 710400000 0x05040025 0x03200020 0x1 >,
- < 806400000 0x0504002a 0x04200020 0x1 >,
- < 883200000 0x0404002e 0x04250025 0x1 >,
- < 960000000 0x04040032 0x05280028 0x1 >,
- < 1036800000 0x04040036 0x052b002b 0x2 >,
- < 1113600000 0x0404003a 0x052e002e 0x2 >,
- < 1190400000 0x0404003e 0x06320032 0x2 >,
- < 1248000000 0x04040041 0x06340034 0x2 >,
- < 1324800000 0x04040045 0x06370037 0x2 >,
- < 1401600000 0x04040049 0x073a003a 0x2 >,
- < 1478400000 0x0404004d 0x073e003e 0x2 >,
- < 1574400000 0x04040052 0x08420042 0x2 >,
- < 1651200000 0x04040056 0x08450045 0x2 >,
- < 1728000000 0x0404005a 0x08480048 0x2 >,
- < 1804800000 0x0404005e 0x094b004b 0x3 >,
- < 1881600000 0x04040062 0x094e004e 0x3 >;
+ < 300000000 0x0004000f 0x01200020 0x1 1 >,
+ < 345600000 0x05040012 0x02200020 0x1 2 >,
+ < 422400000 0x05040016 0x02200020 0x1 3 >,
+ < 499200000 0x0504001a 0x02200020 0x1 4 >,
+ < 576000000 0x0504001e 0x03200020 0x1 5 >,
+ < 633600000 0x05040021 0x03200020 0x1 6 >,
+ < 710400000 0x05040025 0x03200020 0x1 7 >,
+ < 806400000 0x0504002a 0x04200020 0x1 8 >,
+ < 883200000 0x0404002e 0x04250025 0x1 9 >,
+ < 960000000 0x04040032 0x05280028 0x1 10 >,
+ < 1036800000 0x04040036 0x052b002b 0x2 11 >,
+ < 1113600000 0x0404003a 0x052e002e 0x2 12 >,
+ < 1190400000 0x0404003e 0x06320032 0x2 13 >,
+ < 1248000000 0x04040041 0x06340034 0x2 14 >,
+ < 1324800000 0x04040045 0x06370037 0x2 15 >,
+ < 1401600000 0x04040049 0x073a003a 0x2 16 >,
+ < 1478400000 0x0404004d 0x073e003e 0x2 17 >,
+ < 1574400000 0x04040052 0x08420042 0x2 18 >,
+ < 1651200000 0x04040056 0x08450045 0x2 19 >,
+ < 1728000000 0x0404005a 0x08480048 0x2 20 >,
+ < 1804800000 0x0404005e 0x094b004b 0x3 21 >,
+ < 1881600000 0x04040062 0x094e004e 0x3 22 >;
qcom,perfcl-speedbin0-v0 =
- < 300000000 0x0004000f 0x01200020 0x1 >,
- < 345600000 0x05040012 0x02200020 0x1 >,
- < 422400000 0x05040016 0x02200020 0x1 >,
- < 480000000 0x05040019 0x02200020 0x1 >,
- < 556800000 0x0504001d 0x03200020 0x1 >,
- < 633600000 0x05040021 0x03200020 0x1 >,
- < 710400000 0x05040025 0x03200020 0x1 >,
- < 787200000 0x05040029 0x04200020 0x1 >,
- < 844800000 0x0404002c 0x04230023 0x1 >,
- < 902400000 0x0404002f 0x04260026 0x1 >,
- < 979200000 0x04040033 0x05290029 0x1 >,
- < 1056000000 0x04040037 0x052c002c 0x1 >,
- < 1171200000 0x0404003d 0x06310031 0x2 >,
- < 1248000000 0x04040041 0x06340034 0x2 >,
- < 1324800000 0x04040045 0x06370037 0x2 >,
- < 1401600000 0x04040049 0x073a003a 0x2 >,
- < 1478400000 0x0404004d 0x073e003e 0x2 >,
- < 1536000000 0x04040050 0x07400040 0x2 >,
- < 1632000000 0x04040055 0x08440044 0x2 >,
- < 1708800000 0x04040059 0x08470047 0x2 >,
- < 1785600000 0x0404005d 0x094a004a 0x2 >,
- < 1862400000 0x04040061 0x094e004e 0x2 >,
- < 1939200000 0x04040065 0x09510051 0x3 >,
- < 2016000000 0x04040069 0x0a540054 0x3 >,
- < 2092800000 0x0404006d 0x0a570057 0x3 >;
+ < 300000000 0x0004000f 0x01200020 0x1 1 >,
+ < 345600000 0x05040012 0x02200020 0x1 2 >,
+ < 422400000 0x05040016 0x02200020 0x1 3 >,
+ < 480000000 0x05040019 0x02200020 0x1 4 >,
+ < 556800000 0x0504001d 0x03200020 0x1 5 >,
+ < 633600000 0x05040021 0x03200020 0x1 6 >,
+ < 710400000 0x05040025 0x03200020 0x1 7 >,
+ < 787200000 0x05040029 0x04200020 0x1 8 >,
+ < 844800000 0x0404002c 0x04230023 0x1 9 >,
+ < 902400000 0x0404002f 0x04260026 0x1 10 >,
+ < 979200000 0x04040033 0x05290029 0x1 11 >,
+ < 1056000000 0x04040037 0x052c002c 0x1 12 >,
+ < 1171200000 0x0404003d 0x06310031 0x2 13 >,
+ < 1248000000 0x04040041 0x06340034 0x2 14 >,
+ < 1324800000 0x04040045 0x06370037 0x2 15 >,
+ < 1401600000 0x04040049 0x073a003a 0x2 16 >,
+ < 1478400000 0x0404004d 0x073e003e 0x2 17 >,
+ < 1536000000 0x04040050 0x07400040 0x2 18 >,
+ < 1632000000 0x04040055 0x08440044 0x2 19 >,
+ < 1708800000 0x04040059 0x08470047 0x2 20 >,
+ < 1785600000 0x0404005d 0x094a004a 0x2 21 >,
+ < 1862400000 0x04040061 0x094e004e 0x2 22 >,
+ < 1939200000 0x04040065 0x09510051 0x3 23 >,
+ < 2016000000 0x04040069 0x0a540054 0x3 24 >,
+ < 2092800000 0x0404006d 0x0a570057 0x3 25 >;
qcom,up-timer =
<1000 1000>;
@@ -1193,41 +1192,49 @@
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 2>;
qcom,secure-context-bank;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 8>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb2 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 9>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb3 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 10>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb4 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 11>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb6 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 5>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb7 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 6>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb8 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 7>;
+ dma-coherent;
};
};
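/*
 * A hedged sketch (not from this patch): the "dma-coherent" flag added
 * to the FastRPC context banks above is consumed by generic OF/DMA
 * code; a driver can observe the result like this.
 */
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/device.h>

static bool fastrpc_cb_is_coherent(struct device *dev)
{
	/* True when the node carries "dma-coherent" */
	return dev->of_node && of_dma_is_coherent(dev->of_node);
}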
@@ -2191,7 +2198,9 @@
reg-names = "tsens_physical";
interrupts = <0 458 0>, <0 445 0>;
interrupt-names = "tsens-upper-lower", "tsens-critical";
- qcom,sensors = <14>;
+ qcom,client-id = <0 1 2 3 4 7 8 9 10 11 12 13>;
+ qcom,sensor-id = <0 1 2 3 4 7 8 9 10 11 12 13>;
+ qcom,sensors = <12>;
};
tsens1: tsens@10ad000 {
@@ -2232,16 +2241,6 @@
qcom,sensor-name = "tsens_tz_sensor4";
qcom,scaling-factor = <10>;
};
- sensor_information5: qcom,sensor-information-5 {
- qcom,sensor-type = "tsens";
- qcom,sensor-name = "tsens_tz_sensor5";
- qcom,scaling-factor = <10>;
- };
- sensor_information6: qcom,sensor-information-6 {
- qcom,sensor-type = "tsens";
- qcom,sensor-name = "tsens_tz_sensor6";
- qcom,scaling-factor = <10>;
- };
sensor_information7: qcom,sensor-information-7 {
qcom,sensor-type = "tsens";
qcom,sensor-name = "tsens_tz_sensor7";
@@ -2628,6 +2627,7 @@
"iface_clk", "noc_axi_clk", "bus_clk", "maxi_clk";
qcom,pas-id = <9>;
+ qcom,msm-bus,name = "pil-venus";
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
@@ -2702,6 +2702,11 @@
reg = <0x10 8>;
};
+ dload_type@18 {
+ compatible = "qcom,msm-imem-dload-type";
+ reg = <0x18 4>;
+ };
+
restart_reason@65c {
compatible = "qcom,msm-imem-restart_reason";
reg = <0x65c 4>;
@@ -2884,15 +2889,61 @@
vdd-3.3-ch0-supply = <&pmcobalt_l25_pin_ctrl>;
qcom,vdd-0.8-cx-mx-config = <800000 800000>;
qcom,vdd-3.3-ch0-config = <3104000 3312000>;
- qcom,msm-bus,name = "msm-icnss";
- qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,num-paths = <1>;
- qcom,msm-bus,vectors-KBps = <81 10065 0 0>,
- <81 10065 0 16000>;
qcom,icnss-vadc = <&pmcobalt_vadc>;
qcom,icnss-adc_tm = <&pmcobalt_adc_tm>;
};
+ tspp: msm_tspp@0c1e7000 {
+ compatible = "qcom,msm_tspp";
+ reg = <0x0c1e7000 0x200>, /* MSM_TSIF0_PHYS */
+ <0x0c1e8000 0x200>, /* MSM_TSIF1_PHYS */
+ <0x0c1e9000 0x1000>, /* MSM_TSPP_PHYS */
+ <0x0c1c4000 0x23000>; /* MSM_TSPP_BAM_PHYS */
+ reg-names = "MSM_TSIF0_PHYS",
+ "MSM_TSIF1_PHYS",
+ "MSM_TSPP_PHYS",
+ "MSM_TSPP_BAM_PHYS";
+ interrupts = <0 121 0>, /* TSIF_TSPP_IRQ */
+ <0 119 0>, /* TSIF0_IRQ */
+ <0 120 0>, /* TSIF1_IRQ */
+ <0 122 0>; /* TSIF_BAM_IRQ */
+ interrupt-names = "TSIF_TSPP_IRQ",
+ "TSIF0_IRQ",
+ "TSIF1_IRQ",
+ "TSIF_BAM_IRQ";
+
+ clock-names = "iface_clk", "ref_clk";
+ clocks = <&clock_gcc clk_gcc_tsif_ahb_clk>,
+ <&clock_gcc clk_gcc_tsif_ref_clk>;
+
+ qcom,msm-bus,name = "tsif";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <82 512 0 0>, /* No vote */
+ <82 512 12288 24576>;
+ /* Max. bandwidth, 2xTSIF, each max of 96Mbps */
+
+ pinctrl-names = "disabled",
+ "tsif0-mode1", "tsif0-mode2",
+ "tsif1-mode1", "tsif1-mode2",
+ "dual-tsif-mode1", "dual-tsif-mode2";
+
+ pinctrl-0 = <>; /* disabled */
+ pinctrl-1 = <&tsif0_signals_active>; /* tsif0-mode1 */
+ pinctrl-2 = <&tsif0_signals_active
+ &tsif0_sync_active>; /* tsif0-mode2 */
+ pinctrl-3 = <&tsif1_signals_active>; /* tsif1-mode1 */
+ pinctrl-4 = <&tsif1_signals_active
+ &tsif1_sync_active>; /* tsif1-mode2 */
+ pinctrl-5 = <&tsif0_signals_active
+ &tsif1_signals_active>; /* dual-tsif-mode1 */
+ pinctrl-6 = <&tsif0_signals_active
+ &tsif0_sync_active
+ &tsif1_signals_active
+ &tsif1_sync_active>; /* dual-tsif-mode2 */
+ };
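+
+/*
+ * A hedged sketch (not from this patch): switching between the tspp
+ * pinctrl states named above uses the standard consumer API; the
+ * helper name is illustrative.
+ */
+#include <linux/pinctrl/consumer.h>
+
+static int tspp_select_mode(struct device *dev, const char *mode)
+{
+	struct pinctrl *p = devm_pinctrl_get(dev);
+	struct pinctrl_state *s;
+
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+
+	s = pinctrl_lookup_state(p, mode);	/* e.g. "tsif0-mode1" */
+	if (IS_ERR(s))
+		return PTR_ERR(s);
+
+	return pinctrl_select_state(p, s);
+}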
+
wil6210: qcom,wil6210 {
compatible = "qcom,wil6210";
qcom,pcie-parent = <&pcie0>;
@@ -2940,16 +2991,10 @@
};
&gdsc_usb30 {
- clock-names = "core_clk";
- clocks = <&clock_gcc clk_gcc_usb30_master_clk>;
status = "ok";
};
&gdsc_pcie_0 {
- clock-names = "master_bus_clk", "slave_bus_clk", "core_clk";
- clocks = <&clock_gcc clk_gcc_pcie_0_mstr_axi_clk>,
- <&clock_gcc clk_gcc_pcie_0_slv_axi_clk>,
- <&clock_gcc clk_gcc_pcie_0_pipe_clk>;
status = "ok";
};
@@ -3007,19 +3052,14 @@
};
&gdsc_mdss {
- clock-names = "bus_clk", "rot_clk";
- clocks = <&clock_mmss clk_mmss_mdss_axi_clk>,
- <&clock_mmss clk_mmss_mdss_rot_clk>;
proxy-supply = <&gdsc_mdss>;
qcom,proxy-consumer-enable;
status = "ok";
};
&gdsc_gpu_gx {
- clock-names = "bimc_core_clk", "core_clk", "core_root_clk";
- clocks = <&clock_gcc clk_gcc_gpu_bimc_gfx_clk>,
- <&clock_gfx clk_gpucc_gfx3d_clk>,
- <&clock_gfx clk_gfx3d_clk_src>;
+ clock-names = "core_root_clk";
+ clocks = <&clock_gfx clk_gfx3d_clk_src>;
qcom,force-enable-root-clk;
parent-supply = <&gfx_vreg>;
status = "ok";
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi
index 11f602d842bc..cb5fce378b6c 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi
@@ -39,8 +39,8 @@
qcom,qos-off = <4096>;
qcom,base-offset = <16384>;
clock-names = "bus_clk", "bus_a_clk";
- clocks = <&clock_gcc RPM_AGGRE2_NOC_CLK>,
- <&clock_gcc RPM_AGGRE2_NOC_A_CLK>;
+ clocks = <&clock_rpmcc RPM_AGGR2_NOC_CLK>,
+ <&clock_rpmcc RPM_AGGR2_NOC_A_CLK>;
};
fab_bimc: fab-bimc {
@@ -52,8 +52,8 @@
qcom,bypass-qos-prg;
qcom,util-fact = <153>;
clock-names = "bus_clk", "bus_a_clk";
- clocks = <&clock_gcc RPM_BIMC_MSMBUS_CLK>,
- <&clock_gcc RPM_BIMC_MSMBUS_A_CLK>;
+ clocks = <&clock_rpmcc BIMC_MSMBUS_CLK>,
+ <&clock_rpmcc BIMC_MSMBUS_A_CLK>;
};
fab_cnoc: fab-cnoc {
@@ -64,8 +64,8 @@
qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clock-names = "bus_clk", "bus_a_clk";
- clocks = <&clock_gcc RPM_CNOC_MSMBUS_CLK>,
- <&clock_gcc RPM_CNOC_MSMBUS_A_CLK>;
+ clocks = <&clock_rpmcc CNOC_MSMBUS_CLK>,
+ <&clock_rpmcc CNOC_MSMBUS_A_CLK>;
};
fab_gnoc: fab-gnoc {
@@ -87,8 +87,8 @@
qcom,base-offset = <20480>;
qcom,util-fact = <154>;
clock-names = "bus_clk", "bus_a_clk";
- clocks = <&clock_gcc RPM_MMSSNOC_AXI_CLK>,
- <&clock_gcc RPM_MMSSNOC_AXI_A_CLK>;
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_rpmcc MMSSNOC_AXI_A_CLK>;
};
fab_snoc: fab-snoc {
@@ -101,8 +101,8 @@
qcom,qos-off = <4096>;
qcom,base-offset = <24576>;
clock-names = "bus_clk", "bus_a_clk";
- clocks = <&clock_gcc RPM_SNOC_MSMBUS_CLK>,
- <&clock_gcc RPM_SNOC_MSMBUS_A_CLK>;
+ clocks = <&clock_rpmcc SNOC_MSMBUS_CLK>,
+ <&clock_rpmcc SNOC_MSMBUS_A_CLK>;
};
fab_mnoc_ahb: fab-mnoc-ahb {
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
index 352856965373..3826b00bf09e 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
@@ -26,10 +26,12 @@
arm,buffer-size = <0x400000>;
arm,sg-enable;
+ coresight-ctis = <&cti0 &cti8>;
+
coresight-name = "coresight-tmc-etr";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
port{
@@ -76,8 +78,10 @@
coresight-name = "coresight-tmc-etf";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ coresight-ctis = <&cti0 &cti8>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
ports{
@@ -111,8 +115,8 @@
coresight-name = "coresight-funnel-merg";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
ports {
@@ -146,8 +150,8 @@
coresight-name = "coresight-funnel-in0";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
ports {
@@ -161,6 +165,14 @@
<&funnel_merg_in_funnel_in0>;
};
};
+ port@3 {
+ reg = <6>;
+ funnel_in0_in_funnel_qatb: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_qatb_out_funnel_in0>;
+ };
+ };
port@4 {
reg = <7>;
funnel_in0_in_stm: endpoint {
@@ -181,8 +193,8 @@
coresight-name = "coresight-stm";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
port{
@@ -191,4 +203,294 @@
};
};
};
+
+ cti0: cti@6010000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x6010000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti0";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti1: cti@6011000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x6011000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti1";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti2: cti@6012000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x6012000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti2";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti3: cti@6013000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x6013000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti3";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti4: cti@6014000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x6014000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti4";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti5: cti@6015000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x6015000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti5";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti6: cti@6016000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x6016000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti6";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti7: cti@6017000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x6017000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti7";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti8: cti@6018000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x6018000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti8";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti9: cti@6019000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x6019000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti9";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti10: cti@601a000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x601a000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti10";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti11: cti@601b000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x601b000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti11";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti12: cti@601c000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x601c000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti12";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti13: cti@601d000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x601d000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti13";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti14: cti@601e000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x601e000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti14";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ cti15: cti@601f000 {
+ compatible = "arm,coresight-cti";
+ reg = <0x601f000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti15";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ funnel_qatb: funnel@6005000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6005000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-qatb";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_qatb_out_funnel_in0: endpoint {
+ remote-endpoint =
+ <&funnel_in0_in_funnel_qatb>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ funnel_qatb_in_tpda: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_out_funnel_qatb>;
+ };
+ };
+ };
+ };
+
+ tpda: tpda@6004000 {
+ compatible = "qcom,coresight-tpda";
+ reg = <0x6004000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda";
+
+ qcom,tpda-atid = <65>;
+ qcom,bc-elem-size = <7 32>,
+ <9 32>;
+ qcom,tc-elem-size = <3 32>,
+ <6 32>,
+ <9 32>;
+ qcom,dsb-elem-size = <7 32>,
+ <9 32>;
+ qcom,cmb-elem-size = <3 32>,
+ <4 32>,
+ <5 32>,
+ <9 64>;
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_out_funnel_qatb: endpoint {
+ remote-endpoint =
+ <&funnel_qatb_in_tpda>;
+ };
+ };
+ port@2 {
+ reg = <5>;
+ tpda_in_tpdm_dcc: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_dcc_out_tpda>;
+ };
+ };
+ };
+ };
+
+ tpdm_dcc: tpdm@7054000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x7054000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-dcc";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port{
+ tpdm_dcc_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_dcc>;
+ };
+ };
+ };
};
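/*
 * A hedged sketch (not from this patch): the remote-endpoint pairs in
 * the TPDM -> TPDA -> funnel-qatb -> funnel-in0 path above resolve
 * through the standard OF graph helpers.
 */
#include <linux/of_graph.h>

static void walk_trace_links(struct device_node *component)
{
	struct device_node *ep, *peer;

	for_each_endpoint_of_node(component, ep) {
		peer = of_graph_get_remote_port_parent(ep);
		/* peer is the connected CoreSight component, if any */
		of_node_put(peer);
	}
}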
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi
index 6a000e4d4fd0..d28d09c2a527 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi
@@ -32,5 +32,21 @@
bias-disable;
};
};
+
+ led_enable: led_enable {
+ mux {
+ pins = "gpio40";
+ drive-strength = <16>;
+ output-high;
+ };
+ };
+
+ led_disable: led_disable {
+ mux {
+ pins = "gpio40";
+ drive-strength = <2>;
+ output-low;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
index bdbcd9d7b6f9..e5db2766c553 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
@@ -172,4 +172,27 @@
compatible = "qcom,smp2pgpio_test_smp2p_5_out";
gpios = <&smp2pgpio_smp2p_5_out 0 0>;
};
+
+ /* ssr - inbound entry from lpass */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon.dtsi b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
index f5cabe910c93..2b2a201db8bc 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
@@ -14,6 +14,7 @@
#include <dt-bindings/clock/qcom,gcc-msmfalcon.h>
#include <dt-bindings/clock/qcom,gpu-msmfalcon.h>
#include <dt-bindings/clock/qcom,mmcc-msmfalcon.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
@@ -135,6 +136,22 @@
};
};
+ clocks {
+ xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
soc: soc { };
reserved-memory {
@@ -231,6 +248,39 @@
clock-frequency = <19200000>;
};
+ spmi_bus: qcom,spmi@800f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x800f000 0x1000>,
+ <0x8400000 0x1000000>,
+ <0x9400000 0x1000000>,
+ <0xa400000 0x220000>,
+ <0x800a000 0x3000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 326 IRQ_TYPE_NONE>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ qcom,not-wakeup; /* Needed until the full boot chain is enabled */
+ status = "ok";
+ };
+
+ wdog: qcom,wdt@17817000 {
+ status = "disabled";
+ compatible = "qcom,msm-watchdog";
+ reg = <0x17817000 0x1000>;
+ reg-names = "wdt-base";
+ interrupts = <0 3 0>, <0 4 0>;
+ qcom,bark-time = <11000>;
+ qcom,pet-time = <10000>;
+ qcom,ipi-ping;
+ qcom,wakeup-enable;
+ };
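+
+/*
+ * A hedged sketch (not from this patch): the bark/pet pairing above
+ * only works because the pet interval (10 s) is shorter than the bark
+ * time (11 s). A pet loop of roughly this shape keeps the bark IRQ at
+ * bay; register writes are omitted and names are illustrative.
+ */
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+
+#define WDOG_PET_TIME_MS	10000	/* qcom,pet-time */
+
+static struct timer_list wdog_pet_timer;
+
+static void wdog_pet(unsigned long data)
+{
+	/* write the pet/reset register at wdt-base here (omitted) */
+	mod_timer(&wdog_pet_timer,
+		  jiffies + msecs_to_jiffies(WDOG_PET_TIME_MS));
+}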
+
qcom,sps {
compatible = "qcom,msm_sps_4k";
qcom,pipe-attr-ee;
@@ -327,19 +377,31 @@
};
};
- clock_gcc: qcom,dummycc {
+ clock_rpmcc: qcom,dummycc {
compatible = "qcom,dummycc";
+ clock-output-names = "rpmcc_clocks";
#clock-cells = <1>;
};
- clock_mmss: qcom,dummycc {
+ clock_gcc: clock-controller@100000 {
compatible = "qcom,dummycc";
+ clock-output-names = "gcc_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
};
- clock_gfx: qcom,dummycc {
+ clock_mmss: clock-controller@c8c0000 {
compatible = "qcom,dummycc";
+ clock-output-names = "mmss_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ clock_gfx: clock-controller@5065000 {
+ compatible = "qcom,dummycc";
+ clock-output-names = "gfx_clocks";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
};
qcom,ipc-spinlock@1f40000 {
@@ -359,6 +421,16 @@
qcom,mpu-enabled;
};
+ dcc: dcc@10b3000 {
+ compatible = "qcom,dcc";
+ reg = <0x10b3000 0x1000>,
+ <0x10b4000 0x800>;
+ reg-names = "dcc-base", "dcc-ram-base";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>;
+ clock-names = "dcc_clk";
+ };
+
qcom,glink-smem-native-xprt-modem@86000000 {
compatible = "qcom,glink-smem-native-xprt";
reg = <0x86000000 0x200000>,
@@ -401,7 +473,7 @@
label = "cdsp";
};
- qcom,glink-smem-native-xprt-rpm@68000 {
+ qcom,glink-smem-native-xprt-rpm@778000 {
compatible = "qcom,glink-rpm-native-xprt";
reg = <0x778000 0x7000>,
<0x17911008 0x4>;
@@ -577,12 +649,65 @@
memory-region = <&venus_fw_mem>;
status = "ok";
};
+
+ qcom,icnss@18800000 {
+ status = "disabled";
+ compatible = "qcom,icnss";
+ reg = <0x18800000 0x800000>,
+ <0x10ac000 0x20>;
+ reg-names = "membase", "mpm_config";
+ interrupts = <0 413 0>, /* CE0 */
+ <0 414 0>, /* CE1 */
+ <0 415 0>, /* CE2 */
+ <0 416 0>, /* CE3 */
+ <0 417 0>, /* CE4 */
+ <0 418 0>, /* CE5 */
+ <0 420 0>, /* CE6 */
+ <0 421 0>, /* CE7 */
+ <0 422 0>, /* CE8 */
+ <0 423 0>, /* CE9 */
+ <0 424 0>, /* CE10 */
+ <0 425 0>; /* CE11 */
+ qcom,wlan-msa-memory = <0x100000>;
+ };
+
+ qcom,lpass@15700000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x15700000 0x00100>;
+ interrupts = <0 162 1>;
+
+ vdd_cx-supply = <&pmfalcon_s3b_level>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+
+ clocks = <&clock_rpmcc CXO_PIL_LPASS_CLK>;
+ clock-names = "xo";
+ qcom,proxy-clock-names = "xo";
+
+ qcom,pas-id = <1>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <423>;
+ qcom,sysmon-id = <1>;
+ qcom,ssctl-instance-id = <0x14>;
+ qcom,firmware-name = "adsp";
+ memory-region = <&adsp_fw_mem>;
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+ status = "ok";
+ };
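+
+/*
+ * A hedged sketch (not from this patch): each qcom,gpio-* entry above
+ * is an smp2p-backed GPIO, and PIL-style consumers typically turn the
+ * inbound ones into interrupts. The helper is illustrative.
+ */
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+
+static int lpass_arm_err_fatal(struct device *dev, irq_handler_t handler)
+{
+	int gpio, irq;
+
+	gpio = of_get_named_gpio(dev->of_node, "qcom,gpio-err-fatal", 0);
+	if (gpio < 0)
+		return gpio;
+
+	irq = gpio_to_irq(gpio);
+	if (irq < 0)
+		return irq;
+
+	return devm_request_irq(dev, irq, handler, IRQF_TRIGGER_RISING,
+				"lpass-err-fatal", NULL);
+}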
};
#include "msmfalcon-ion.dtsi"
#include "msmfalcon-bus.dtsi"
#include "msmfalcon-regulator.dtsi"
-#include "msm-gdsc-cobalt.dtsi"
+#include "msm-gdsc-falcon.dtsi"
&gdsc_usb30 {
clock-names = "core_clk";
@@ -606,6 +731,14 @@
status = "ok";
};
+&gdsc_hlos1_vote_turing_adsp {
+ status = "ok";
+};
+
+&gdsc_hlos2_vote_turing_adsp {
+ status = "ok";
+};
+
&gdsc_venus {
status = "ok";
};
@@ -656,3 +789,6 @@
&gdsc_gpu_cx {
status = "ok";
};
+
+#include "msm-pmfalcon.dtsi"
+#include "msm-pm2falcon.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi b/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi
new file mode 100644
index 000000000000..f6deef335844
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi
@@ -0,0 +1,52 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ system_heap: qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ system_contig_heap: qcom,ion-heap@21 {
+ reg = <21>;
+ qcom,ion-heap-type = "SYSTEM_CONTIG";
+ };
+
+ qcom,ion-heap@22 { /* ADSP HEAP */
+ reg = <22>;
+ memory-region = <&adsp_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@27 { /* QSEECOM HEAP */
+ reg = <27>;
+ memory-region = <&qseecom_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+ reg = <10>;
+ memory-region = <&secure_display_memory>;
+ qcom,ion-heap-type = "HYP_CMA";
+ };
+
+ qcom,ion-heap@9 {
+ reg = <9>;
+ qcom,ion-heap-type = "SYSTEM_SECURE";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi b/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi
new file mode 100644
index 000000000000..695a4f3b63c7
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi
@@ -0,0 +1,136 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+&soc {
+ qcom,smp2p-modem@17911008 {
+ compatible = "qcom,smp2p";
+ reg = <0x17911008 0x4>;
+ qcom,remote-pid = <1>;
+ qcom,irq-bitmask = <0x4000>;
+ interrupts = <0 451 1>;
+ };
+
+ qcom,smp2p-adsp@17911008 {
+ compatible = "qcom,smp2p";
+ reg = <0x17911008 0x4>;
+ qcom,remote-pid = <2>;
+ qcom,irq-bitmask = <0x400>;
+ interrupts = <0 158 1>;
+ };
+
+ smp2pgpio_smp2p_15_in: qcom,smp2pgpio-smp2p-15-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <15>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_15_in {
+ compatible = "qcom,smp2pgpio_test_smp2p_15_in";
+ gpios = <&smp2pgpio_smp2p_15_in 0 0>;
+ };
+
+ smp2pgpio_smp2p_15_out: qcom,smp2pgpio-smp2p-15-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <15>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_15_out {
+ compatible = "qcom,smp2pgpio_test_smp2p_15_out";
+ gpios = <&smp2pgpio_smp2p_15_out 0 0>;
+ };
+
+ smp2pgpio_smp2p_1_in: qcom,smp2pgpio-smp2p-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_1_in {
+ compatible = "qcom,smp2pgpio_test_smp2p_1_in";
+ gpios = <&smp2pgpio_smp2p_1_in 0 0>;
+ };
+
+ smp2pgpio_smp2p_1_out: qcom,smp2pgpio-smp2p-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_1_out {
+ compatible = "qcom,smp2pgpio_test_smp2p_1_out";
+ gpios = <&smp2pgpio_smp2p_1_out 0 0>;
+ };
+
+ smp2pgpio_smp2p_2_in: qcom,smp2pgpio-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_2_in {
+ compatible = "qcom,smp2pgpio_test_smp2p_2_in";
+ gpios = <&smp2pgpio_smp2p_2_in 0 0>;
+ };
+
+ smp2pgpio_smp2p_2_out: qcom,smp2pgpio-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_2_out {
+ compatible = "qcom,smp2pgpio_test_smp2p_2_out";
+ gpios = <&smp2pgpio_smp2p_2_out 0 0>;
+ };
+
+ smp2pgpio_sleepstate_2_out: qcom,smp2pgpio-sleepstate-gpio-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "sleepstate";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio-sleepstate-2-out {
+ compatible = "qcom,smp2pgpio-sleepstate-out";
+ gpios = <&smp2pgpio_sleepstate_2_out 0 0>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmtriton.dtsi b/arch/arm/boot/dts/qcom/msmtriton.dtsi
index 3e27fe7df6fa..09bb5f081602 100644
--- a/arch/arm/boot/dts/qcom/msmtriton.dtsi
+++ b/arch/arm/boot/dts/qcom/msmtriton.dtsi
@@ -14,7 +14,9 @@
#include <dt-bindings/clock/qcom,gcc-msmfalcon.h>
#include <dt-bindings/clock/qcom,gpu-msmfalcon.h>
#include <dt-bindings/clock/qcom,mmcc-msmfalcon.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
/ {
model = "Qualcomm Technologies, Inc. MSMTRITON";
@@ -134,6 +136,22 @@
};
};
+ clocks {
+ xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
soc: soc { };
reserved-memory {
@@ -185,6 +203,7 @@
};
};
+#include "msmtriton-smp2p.dtsi"
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -219,6 +238,15 @@
qcom,pipe-attr-ee;
};
+ tsens: tsens@10ad000 {
+ compatible = "qcom,msmtriton-tsens";
+ reg = <0x10ad000 0x2000>;
+ reg-names = "tsens_physical";
+ interrupts = <0 184 0>, <0 430 0>;
+ interrupt-names = "tsens-upper-lower", "tsens-critical";
+ qcom,sensors = <12>;
+ };
+
uartblsp1dm1: serial@0c170000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0xc170000 0x1000>;
@@ -298,18 +326,296 @@
};
};
- clock_gcc: qcom,dummycc {
+ clock_rpmcc: qcom,dummycc {
+ compatible = "qcom,dummycc";
+ clock-output-names = "rpmcc_clocks";
+ #clock-cells = <1>;
+ };
+
+ clock_gcc: clock-controller@100000 {
compatible = "qcom,dummycc";
+ clock-output-names = "gcc_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
};
- clock_mmss: qcom,dummycc {
+ clock_mmss: clock-controller@c8c0000 {
compatible = "qcom,dummycc";
+ clock-output-names = "mmss_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
};
- clock_gfx: qcom,dummycc {
+ clock_gfx: clock-controller@5065000 {
compatible = "qcom,dummycc";
+ clock-output-names = "gfx_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ qcom,ipc-spinlock@1f40000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0x1f40000 0x8000>;
+ qcom,num-locks = <8>;
+ };
+
+ qcom,smem@86000000 {
+ compatible = "qcom,smem";
+ reg = <0x86000000 0x200000>,
+ <0x17911008 0x4>,
+ <0x778000 0x7000>,
+ <0x1fd4000 0x8>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1",
+ "smem_targ_info_reg";
+ qcom,mpu-enabled;
+ };
+
+ qcom,glink-smem-native-xprt-modem@86000000 {
+ compatible = "qcom,glink-smem-native-xprt";
+ reg = <0x86000000 0x200000>,
+ <0x17911008 0x4>;
+ reg-names = "smem", "irq-reg-base";
+ qcom,irq-mask = <0x8000>;
+ interrupts = <0 452 1>;
+ label = "mpss";
+ };
+
+ qcom,glink-smem-native-xprt-adsp@86000000 {
+ compatible = "qcom,glink-smem-native-xprt";
+ reg = <0x86000000 0x200000>,
+ <0x17911008 0x4>;
+ reg-names = "smem", "irq-reg-base";
+ qcom,irq-mask = <0x200>;
+ interrupts = <0 157 1>;
+ label = "lpass";
+ qcom,qos-config = <&glink_qos_adsp>;
+ qcom,ramp-time = <0xaf>;
+ };
+
+ glink_qos_adsp: qcom,glink-qos-config-adsp {
+ compatible = "qcom,glink-qos-config";
+ qcom,flow-info = <0x3c 0x0>,
+ <0x3c 0x0>,
+ <0x3c 0x0>,
+ <0x3c 0x0>;
+ qcom,mtu-size = <0x800>;
+ qcom,tput-stats-cycle = <0xa>;
};
+
+ qcom,glink-smem-native-xprt-rpm@778000 {
+ compatible = "qcom,glink-rpm-native-xprt";
+ reg = <0x778000 0x7000>,
+ <0x17911008 0x4>;
+ reg-names = "msgram", "irq-reg-base";
+ qcom,irq-mask = <0x1>;
+ interrupts = <0 168 1>;
+ label = "rpm";
+ };
+
+ qcom,glink_pkt {
+ compatible = "qcom,glinkpkt";
+
+ qcom,glinkpkt-at-mdm0 {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DS";
+ qcom,glinkpkt-dev-name = "at_mdm0";
+ };
+
+ qcom,glinkpkt-loopback_cntl {
+ qcom,glinkpkt-transport = "lloop";
+ qcom,glinkpkt-edge = "local";
+ qcom,glinkpkt-ch-name = "LOCAL_LOOPBACK_CLNT";
+ qcom,glinkpkt-dev-name = "glink_pkt_loopback_ctrl";
+ };
+
+ qcom,glinkpkt-loopback_data {
+ qcom,glinkpkt-transport = "lloop";
+ qcom,glinkpkt-edge = "local";
+ qcom,glinkpkt-ch-name = "glink_pkt_lloop_CLNT";
+ qcom,glinkpkt-dev-name = "glink_pkt_loopback";
+ };
+
+ qcom,glinkpkt-apr-apps2 {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "adsp";
+ qcom,glinkpkt-ch-name = "apr_apps2";
+ qcom,glinkpkt-dev-name = "apr_apps2";
+ };
+
+ qcom,glinkpkt-data40-cntl {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DATA40_CNTL";
+ qcom,glinkpkt-dev-name = "smdcntl8";
+ };
+
+ qcom,glinkpkt-data1 {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DATA1";
+ qcom,glinkpkt-dev-name = "smd7";
+ };
+
+ qcom,glinkpkt-data4 {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DATA4";
+ qcom,glinkpkt-dev-name = "smd8";
+ };
+
+ qcom,glinkpkt-data11 {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DATA11";
+ qcom,glinkpkt-dev-name = "smd11";
+ };
+ };
+
+ glink_mpss: qcom,glink-ssr-modem {
+ compatible = "qcom,glink_ssr";
+ label = "modem";
+ qcom,edge = "mpss";
+ qcom,notify-edges = <&glink_lpass>, <&glink_rpm>;
+ qcom,xprt = "smem";
+ };
+
+ glink_lpass: qcom,glink-ssr-adsp {
+ compatible = "qcom,glink_ssr";
+ label = "adsp";
+ qcom,edge = "lpass";
+ qcom,notify-edges = <&glink_mpss>, <&glink_rpm>;
+ qcom,xprt = "smem";
+ };
+
+ glink_rpm: qcom,glink-ssr-rpm {
+ compatible = "qcom,glink_ssr";
+ label = "rpm";
+ qcom,edge = "rpm";
+ qcom,notify-edges = <&glink_lpass>, <&glink_mpss>;
+ qcom,xprt = "smem";
+ };
+
+ qcom,ipc_router {
+ compatible = "qcom,ipc_router";
+ qcom,node-id = <1>;
+ };
+
+ qcom,ipc_router_modem_xprt {
+ compatible = "qcom,ipc_router_glink_xprt";
+ qcom,ch-name = "IPCRTR";
+ qcom,xprt-remote = "mpss";
+ qcom,glink-xprt = "smem";
+ qcom,xprt-linkid = <1>;
+ qcom,xprt-version = <1>;
+ qcom,fragmented-data;
+ };
+
+ qcom,ipc_router_q6_xprt {
+ compatible = "qcom,ipc_router_glink_xprt";
+ qcom,ch-name = "IPCRTR";
+ qcom,xprt-remote = "lpass";
+ qcom,glink-xprt = "smem";
+ qcom,xprt-linkid = <1>;
+ qcom,xprt-version = <1>;
+ qcom,fragmented-data;
+ };
+
+ qcom,icnss@18800000 {
+ status = "disabled";
+ compatible = "qcom,icnss";
+ reg = <0x18800000 0x800000>,
+ <0x10ac000 0x20>;
+ reg-names = "membase", "mpm_config";
+ interrupts = <0 413 0>, /* CE0 */
+ <0 414 0>, /* CE1 */
+ <0 415 0>, /* CE2 */
+ <0 416 0>, /* CE3 */
+ <0 417 0>, /* CE4 */
+ <0 418 0>, /* CE5 */
+ <0 420 0>, /* CE6 */
+ <0 421 0>, /* CE7 */
+ <0 422 0>, /* CE8 */
+ <0 423 0>, /* CE9 */
+ <0 424 0>, /* CE10 */
+ <0 425 0>; /* CE11 */
+ qcom,wlan-msa-memory = <0x100000>;
+ };
+};
+
+#include "msmtriton-ion.dtsi"
+#include "msmfalcon-regulator.dtsi"
+#include "msm-gdsc-falcon.dtsi"
+
+&gdsc_usb30 {
+ clock-names = "core_clk";
+ clocks = <&clock_gcc GCC_USB30_MASTER_CLK>;
+ status = "ok";
+};
+
+&gdsc_ufs {
+ status = "ok";
+};
+
+&gdsc_bimc_smmu {
+ clock-names = "bus_clk";
+ clocks = <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>;
+ proxy-supply = <&gdsc_bimc_smmu>;
+ qcom,proxy-consumer-enable;
+ status = "ok";
+};
+
+&gdsc_hlos1_vote_lpass_adsp {
+ status = "ok";
+};
+
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_venus_core0 {
+ qcom,support-hw-trigger;
+ status = "ok";
+};
+
+&gdsc_camss_top {
+ status = "ok";
+};
+
+&gdsc_vfe0 {
+ parent-supply = <&gdsc_camss_top>;
+ status = "ok";
+};
+
+&gdsc_vfe1 {
+ parent-supply = <&gdsc_camss_top>;
+ status = "ok";
+};
+
+&gdsc_cpp {
+ parent-supply = <&gdsc_camss_top>;
+ status = "ok";
+};
+
+&gdsc_mdss {
+ clock-names = "bus_clk", "rot_clk";
+ clocks = <&clock_mmss MMSS_MDSS_AXI_CLK>,
+ <&clock_mmss MMSS_MDSS_ROT_CLK>;
+ proxy-supply = <&gdsc_mdss>;
+ qcom,proxy-consumer-enable;
+ status = "ok";
+};
+
+&gdsc_gpu_gx {
+ clock-names = "bimc_core_clk", "core_clk", "core_root_clk";
+ clocks = <&clock_gcc GCC_GPU_BIMC_GFX_CLK>,
+ <&clock_gfx GPUCC_GFX3D_CLK>,
+ <&clock_gfx GFX3D_CLK_SRC>;
+ qcom,force-enable-root-clk;
+ parent-supply = <&gfx_vreg_corner>;
+ status = "ok";
+};
+
+&gdsc_gpu_cx {
+ status = "ok";
};
diff --git a/arch/arm/configs/msmcortex_defconfig b/arch/arm/configs/msmcortex_defconfig
index 0a20c52bd3b2..48507eebe9f3 100644
--- a/arch/arm/configs/msmcortex_defconfig
+++ b/arch/arm/configs/msmcortex_defconfig
@@ -261,8 +261,8 @@ CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_KEYCHORD=y
@@ -459,7 +459,6 @@ CONFIG_TRACER_PKT=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MSM_MPM_OF=y
CONFIG_MSM_EVENT_TIMER=y
-CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
diff --git a/arch/arm/configs/msmfalcon_defconfig b/arch/arm/configs/msmfalcon_defconfig
index 0a20c52bd3b2..64da50bb55b2 100644
--- a/arch/arm/configs/msmfalcon_defconfig
+++ b/arch/arm/configs/msmfalcon_defconfig
@@ -222,6 +222,8 @@ CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
CONFIG_UID_CPUTIME=y
CONFIG_MSM_ULTRASOUND=y
CONFIG_SCSI=y
@@ -261,7 +263,6 @@ CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
@@ -331,6 +332,12 @@ CONFIG_MSM_SDE_ROTATOR=y
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
CONFIG_FB_VIRTUAL=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_DP_PANEL=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -400,7 +407,6 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_SYNC=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_QPNP_REVID=y
@@ -414,8 +420,12 @@ CONFIG_IPA3=y
CONFIG_RMNET_IPA3=y
CONFIG_GPIO_USB_DETECT=y
CONFIG_USB_BAM=y
+CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_COMMON_LOG=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
@@ -459,7 +469,6 @@ CONFIG_TRACER_PKT=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MSM_MPM_OF=y
CONFIG_MSM_EVENT_TIMER=y
-CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 1d45320ee125..f56a831de043 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -37,6 +37,7 @@
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
+#include <linux/cpumask.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-uniphier.h>
@@ -127,6 +128,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
const struct cpumask *affinity = irq_data_get_affinity_mask(d);
struct irq_chip *c;
bool ret = false;
+ struct cpumask available_cpus;
/*
* If this is a per-CPU interrupt, or the affinity does not
@@ -135,8 +137,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
return false;
+ cpumask_copy(&available_cpus, affinity);
+ cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask);
+ affinity = &available_cpus;
+
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
+ cpumask_andnot(&available_cpus, cpu_online_mask,
+ cpu_isolated_mask);
+ if (cpumask_empty(affinity))
+ affinity = cpu_online_mask;
ret = true;
}
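/*
 * A hedged restatement (not part of the patch) of the selection order
 * the hunk above implements: prefer the requested affinity minus
 * isolated CPUs; else online minus isolated; else plain online.
 * cpu_isolated_mask is this tree's CPU-isolation mask.
 */
#include <linux/cpumask.h>

static const struct cpumask *pick_migration_mask(
		const struct cpumask *affinity, struct cpumask *scratch)
{
	cpumask_andnot(scratch, affinity, cpu_isolated_mask);
	if (cpumask_any_and(scratch, cpu_online_mask) < nr_cpu_ids)
		return scratch;

	cpumask_andnot(scratch, cpu_online_mask, cpu_isolated_mask);
	if (!cpumask_empty(scratch))
		return scratch;

	return cpu_online_mask;
}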
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 598323a1842e..e683d147816c 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -190,6 +190,13 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
return 0;
}
+static DEFINE_PER_CPU(unsigned long, cpu_efficiency) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_get_cpu_efficiency(int cpu)
+{
+ return per_cpu(cpu_efficiency, cpu);
+}
+
#ifdef CONFIG_OF
struct cpu_efficiency {
const char *compatible;
@@ -266,6 +273,7 @@ static int __init parse_dt_topology(void)
for_each_possible_cpu(cpu) {
const u32 *rate;
int len;
+ u32 efficiency;
/* too early to use cpu->of_node */
cn = of_get_cpu_node(cpu, NULL);
@@ -274,12 +282,26 @@ static int __init parse_dt_topology(void)
continue;
}
- for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
- if (of_device_is_compatible(cn, cpu_eff->compatible))
- break;
+ /*
+ * The CPU efficiency value passed from the device tree
+ * overrides the value defined in the table_efficiency[]
+ */
+ if (of_property_read_u32(cn, "efficiency", &efficiency) < 0) {
+
+ for (cpu_eff = table_efficiency;
+ cpu_eff->compatible; cpu_eff++)
- if (cpu_eff->compatible == NULL)
- continue;
+ if (of_device_is_compatible(cn,
+ cpu_eff->compatible))
+ break;
+
+ if (cpu_eff->compatible == NULL)
+ continue;
+
+ efficiency = cpu_eff->efficiency;
+ }
+
+ per_cpu(cpu_efficiency, cpu) = efficiency;
rate = of_get_property(cn, "clock-frequency", &len);
if (!rate || len != 4) {
@@ -288,7 +310,7 @@ static int __init parse_dt_topology(void)
continue;
}
- capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+ capacity = ((be32_to_cpup(rate)) >> 20) * efficiency;
/* Save min capacity of the system */
if (capacity < min_capacity)
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index d6ed9ac56bf1..d4d355531169 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -16,7 +16,9 @@ config ARCH_MSMFALCON
select MULTI_IRQ_HANDLER
select HAVE_ARM_ARCH_TIMER
select MAY_HAVE_SPARSE_IRQ
- select COMMON_CLK_MSM
+ select COMMON_CLK
+ select COMMON_CLK_QCOM
+ select QCOM_GDSC
select PINCTRL_MSM_TLMM
select USE_PINCTRL_IRQ
select MSM_PM if PM
@@ -31,6 +33,7 @@ config ARCH_MSMFALCON
select MSM_QDSP6V2_CODECS
select MSM_AUDIO_QDSP6V2 if SND_SOC
select MSM_RPM_SMD
+ select GENERIC_IRQ_MIGRATION
select MSM_JTAGV8 if CORESIGHT_ETMV4
help
This enables support for the MSMFALCON chipset. If you do not
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index ee4efe58d0c8..94c0bf30c284 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -89,7 +89,9 @@ config ARCH_MSMHAMSTER
config ARCH_MSMFALCON
bool "Enable Support for Qualcomm Technologies Inc MSMFALCON"
depends on ARCH_QCOM
- select COMMON_CLK_MSM
+ select COMMON_CLK
+ select COMMON_CLK_QCOM
+ select QCOM_GDSC
help
This enables support for the MSMFALCON chipset.
If you do not wish to build a kernel that runs
@@ -98,7 +100,9 @@ config ARCH_MSMFALCON
config ARCH_MSMTRITON
bool "Enable Support for Qualcomm Technologies Inc MSMTRITON"
depends on ARCH_QCOM
- select COMMON_CLK_MSM
+ select COMMON_CLK
+ select COMMON_CLK_QCOM
+ select QCOM_GDSC
help
This enables support for the MSMTRITON chipset.
If you do not wish to build a kernel that runs
diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig
index 84bb603c3142..5f8b02904d49 100644
--- a/arch/arm64/configs/msm-perf_defconfig
+++ b/arch/arm64/configs/msm-perf_defconfig
@@ -282,7 +282,6 @@ CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_TOUCHSCREEN_ATMEL_MAXTOUCH_TS=y
@@ -525,7 +524,6 @@ CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_TRACER_PKT=y
CONFIG_MSM_MPM_OF=y
CONFIG_MSM_AVTIMER=y
-CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
@@ -549,6 +547,9 @@ CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig
index 6119ff12d46d..c1c0ae9da001 100644
--- a/arch/arm64/configs/msm_defconfig
+++ b/arch/arm64/configs/msm_defconfig
@@ -269,7 +269,6 @@ CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_TOUCHSCREEN_ATMEL_MAXTOUCH_TS=y
@@ -527,7 +526,6 @@ CONFIG_TRACER_PKT=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MSM_MPM_OF=y
CONFIG_MSM_AVTIMER=y
-CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
@@ -556,6 +554,9 @@ CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 08288e1b5c25..799e43f09a11 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -13,13 +13,16 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
@@ -43,7 +46,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSMCOBALT=y
CONFIG_ARCH_MSMHAMSTER=y
-CONFIG_ARCH_MSMFALCON=y
CONFIG_PCI=y
CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
@@ -272,8 +274,8 @@ CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_UINPUT=y
@@ -435,6 +437,7 @@ CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
@@ -525,7 +528,6 @@ CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MSM_MPM_OF=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_AVTIMER=y
-CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
@@ -556,6 +558,9 @@ CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 9e2727c4fe1e..dfd658d815fe 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -13,13 +13,16 @@ CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
+CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
@@ -44,7 +47,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSMCOBALT=y
CONFIG_ARCH_MSMHAMSTER=y
-CONFIG_ARCH_MSMFALCON=y
CONFIG_PCI=y
CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
@@ -273,8 +275,8 @@ CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_KEYCHORD=y
@@ -437,6 +439,7 @@ CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
@@ -544,7 +547,6 @@ CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MSM_MPM_OF=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_AVTIMER=y
-CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
@@ -576,6 +578,9 @@ CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
diff --git a/arch/arm64/configs/msmfalcon-perf_defconfig b/arch/arm64/configs/msmfalcon-perf_defconfig
index 39c2d3f71c5a..1bc352704893 100644
--- a/arch/arm64/configs/msmfalcon-perf_defconfig
+++ b/arch/arm64/configs/msmfalcon-perf_defconfig
@@ -41,8 +41,6 @@ CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSMCOBALT=y
-CONFIG_ARCH_MSMHAMSTER=y
CONFIG_ARCH_MSMFALCON=y
CONFIG_ARCH_MSMTRITON=y
CONFIG_PCI=y
@@ -272,7 +270,6 @@ CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
@@ -476,7 +473,6 @@ CONFIG_RMNET_IPA3=y
CONFIG_GPIO_USB_DETECT=y
CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
-CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
@@ -522,7 +518,6 @@ CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MSM_MPM_OF=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_AVTIMER=y
-CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
diff --git a/arch/arm64/configs/msmfalcon_defconfig b/arch/arm64/configs/msmfalcon_defconfig
index a277038b3fc3..348c34a94119 100644
--- a/arch/arm64/configs/msmfalcon_defconfig
+++ b/arch/arm64/configs/msmfalcon_defconfig
@@ -42,8 +42,6 @@ CONFIG_MODULE_SIG_SHA512=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSMCOBALT=y
-CONFIG_ARCH_MSMHAMSTER=y
CONFIG_ARCH_MSMFALCON=y
CONFIG_ARCH_MSMTRITON=y
CONFIG_PCI=y
@@ -273,7 +271,6 @@ CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
@@ -486,7 +483,6 @@ CONFIG_RMNET_IPA3=y
CONFIG_GPIO_USB_DETECT=y
CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
-CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
@@ -523,6 +519,7 @@ CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_IRQ_HELPER=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
CONFIG_MSM_GLADIATOR_ERP_V2=y
CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
@@ -541,7 +538,6 @@ CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MSM_MPM_OF=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_AVTIMER=y
-CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 3691553f218e..c3c6557eb083 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -19,6 +19,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/elf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -107,6 +108,8 @@ static int c_show(struct seq_file *m, void *v)
{
int i, j;
+ seq_printf(m, "Processor\t: AArch64 Processor rev %d (%s)\n",
+ read_cpuid_id() & 15, ELF_PLATFORM);
for_each_present_cpu(i) {
struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
u32 midr = cpuinfo->reg_midr;
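The restored line mirrors the 32-bit ABI's /proc/cpuinfo header; the mask picks out the MIDR revision field. A hedged illustration, with made-up values:

/* MIDR_EL1 bits [3:0] hold the part revision, hence "& 15".
 * Example rendering (rev and platform values are illustrative):
 *   Processor	: AArch64 Processor rev 4 (aarch64)
 */
static unsigned int midr_revision(u32 midr)
{
	return midr & 15;
}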
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 129fb3f8c322..7c4563cbccf3 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -259,7 +259,7 @@ void __show_regs(struct pt_regs *regs)
printk("\n");
}
if (!user_mode(regs))
- show_extra_register_data(regs, 256);
+ show_extra_register_data(regs, 64);
printk("\n");
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index e5389bc981ee..eff70892dada 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -49,6 +49,17 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
return prot;
}
+static int __get_iommu_pgprot(struct dma_attrs *attrs, int prot,
+ bool coherent)
+{
+ if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
+ prot |= IOMMU_NOEXEC;
+ if (coherent)
+ prot |= IOMMU_CACHE;
+
+ return prot;
+}
+
static struct gen_pool *atomic_pool;
#define NO_KERNEL_MAPPING_DUMMY 0x2222
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
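The hunks below convert every map site to this helper; a condensed sketch of the resulting call pattern, with an illustrative wrapper name:

static int map_one_page(struct dma_iommu_mapping *mapping, struct device *dev,
			dma_addr_t iova, struct page *page, size_t len,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int prot = __dma_direction_to_prot(dir);

	/* Fold in IOMMU_NOEXEC and, for coherent masters, IOMMU_CACHE. */
	prot = __get_iommu_pgprot(attrs, prot, is_device_dma_coherent(dev));
	return iommu_map(mapping->domain, iova, page_to_phys(page), len, prot);
}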
@@ -1153,7 +1164,7 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
/* IOMMU */
static void __dma_clear_buffer(struct page *page, size_t size,
- struct dma_attrs *attrs)
+ struct dma_attrs *attrs, bool is_coherent)
{
/*
* Ensure that the allocated pages are zeroed, and that any data
@@ -1162,7 +1173,8 @@ static void __dma_clear_buffer(struct page *page, size_t size,
void *ptr = page_address(page);
if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
memset(ptr, 0, size);
- dmac_flush_range(ptr, ptr + size);
+ if (!is_coherent)
+ dmac_flush_range(ptr, ptr + size);
}
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
@@ -1212,6 +1224,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
size_t count = size >> PAGE_SHIFT;
size_t array_size = count * sizeof(struct page *);
int i = 0;
+ bool is_coherent = is_device_dma_coherent(dev);
if (array_size <= PAGE_SIZE)
pages = kzalloc(array_size, gfp);
@@ -1228,7 +1241,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
if (!page)
goto error;
- __dma_clear_buffer(page, size, attrs);
+ __dma_clear_buffer(page, size, attrs, is_coherent);
for (i = 0; i < count; i++)
pages[i] = page + i;
@@ -1257,7 +1270,8 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
pages[i + j] = pages[i] + j;
}
- __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs,
+ is_coherent);
i += 1 << order;
count -= 1 << order;
}
@@ -1322,9 +1336,8 @@ static dma_addr_t __iommu_create_mapping(struct device *dev,
dma_addr = __alloc_iova(mapping, size);
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
-
- if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
- prot |= IOMMU_NOEXEC;
+ prot = __get_iommu_pgprot(attrs, prot,
+ is_device_dma_coherent(dev));
iova = dma_addr;
for (i = 0; i < count; ) {
@@ -1404,6 +1417,7 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size,
size_t array_size = count * sizeof(struct page *);
int i;
void *addr;
+ bool coherent = is_device_dma_coherent(dev);
if (array_size <= PAGE_SIZE)
pages = kzalloc(array_size, gfp);
@@ -1413,7 +1427,13 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size,
if (!pages)
return NULL;
- addr = __alloc_from_pool(size, &page, gfp);
+ if (coherent) {
+ page = alloc_pages(gfp, get_order(size));
+ addr = page ? page_address(page) : NULL;
+ } else {
+ addr = __alloc_from_pool(size, &page, gfp);
+ }
+
if (!addr)
goto err_free;
@@ -1428,7 +1448,10 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size,
return addr;
err_mapping:
- __free_from_pool(addr, size);
+ if (coherent)
+ __free_pages(page, get_order(size));
+ else
+ __free_from_pool(addr, size);
err_free:
kvfree(pages);
return NULL;
@@ -1444,7 +1467,8 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
+ bool coherent = is_device_dma_coherent(dev);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
struct page **pages;
void *addr = NULL;
@@ -1495,8 +1519,10 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+ bool coherent = is_device_dma_coherent(dev);
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+ coherent);
if (!pages)
return -ENXIO;
@@ -1577,121 +1603,6 @@ static int __dma_direction_to_prot(enum dma_data_direction dir)
return prot;
}
-/*
- * Map a part of the scatter-gather list into contiguous io address space
- */
-static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
- size_t size, dma_addr_t *handle,
- enum dma_data_direction dir, struct dma_attrs *attrs,
- bool is_coherent)
-{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
- dma_addr_t iova, iova_base;
- int ret = 0;
- unsigned int count;
- struct scatterlist *s;
- int prot;
-
- size = PAGE_ALIGN(size);
- *handle = DMA_ERROR_CODE;
-
- iova_base = iova = __alloc_iova(mapping, size);
- if (iova == DMA_ERROR_CODE)
- return -ENOMEM;
-
- for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
- phys_addr_t phys = page_to_phys(sg_page(s));
- unsigned int len = PAGE_ALIGN(s->offset + s->length);
-
- if (!is_coherent &&
- !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
- __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length,
- dir);
-
- prot = __dma_direction_to_prot(dir);
- if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
- prot |= IOMMU_NOEXEC;
-
- ret = iommu_map(mapping->domain, iova, phys, len, prot);
- if (ret < 0)
- goto fail;
- count += len >> PAGE_SHIFT;
- iova += len;
- }
- *handle = iova_base;
-
- return 0;
-fail:
- iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
- __free_iova(mapping, iova_base, size);
- return ret;
-}
-
-static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs,
- bool is_coherent)
-{
- struct scatterlist *s = sg, *dma = sg, *start = sg;
- int i, count = 0;
- unsigned int offset = s->offset;
- unsigned int size = s->offset + s->length;
- unsigned int max = dma_get_max_seg_size(dev);
-
- for (i = 1; i < nents; i++) {
- s = sg_next(s);
-
- s->dma_address = DMA_ERROR_CODE;
- s->dma_length = 0;
-
- if (s->offset || (size & ~PAGE_MASK)
- || size + s->length > max) {
- if (__map_sg_chunk(dev, start, size, &dma->dma_address,
- dir, attrs, is_coherent) < 0)
- goto bad_mapping;
-
- dma->dma_address += offset;
- dma->dma_length = size - offset;
-
- size = offset = s->offset;
- start = s;
- dma = sg_next(dma);
- count += 1;
- }
- size += s->length;
- }
- if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
- is_coherent) < 0)
- goto bad_mapping;
-
- dma->dma_address += offset;
- dma->dma_length = size - offset;
-
- return count+1;
-
-bad_mapping:
- for_each_sg(sg, s, count, i)
- __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
- return 0;
-}
-
-/**
- * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of i/o coherent buffers described by scatterlist in streaming
- * mode for DMA. The scatter gather list elements are merged together (if
- * possible) and tagged with the appropriate dma address and length. They are
- * obtained via sg_dma_{address,length}.
- */
-int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
-{
- return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
-}
-
/**
* arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
* @dev: valid struct device pointer
@@ -1722,9 +1633,8 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
dev_err(dev, "Couldn't allocate iova for sg %p\n", sg);
return 0;
}
-
- if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
- prot |= IOMMU_NOEXEC;
+ prot = __get_iommu_pgprot(attrs, prot,
+ is_device_dma_coherent(dev));
ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
if (ret != total_length) {
@@ -1741,40 +1651,6 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
return nents;
}
-static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
- bool is_coherent)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i) {
- if (sg_dma_len(s))
- __iommu_remove_mapping(dev, sg_dma_address(s),
- sg_dma_len(s));
- if (!is_coherent &&
- !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
- __dma_page_dev_to_cpu(sg_page(s), s->offset,
- s->length, dir);
- }
-}
-
-/**
- * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations. Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
-{
- __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
-}
-
/**
* arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer
@@ -1812,6 +1688,9 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
struct scatterlist *s;
int i;
+ if (is_device_dma_coherent(dev))
+ return;
+
for_each_sg(sg, s, nents, i)
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
@@ -1830,6 +1709,9 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
struct scatterlist *s;
int i;
+ if (is_device_dma_coherent(dev))
+ return;
+
for_each_sg(sg, s, nents, i)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
@@ -1858,8 +1740,8 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
return dma_addr;
prot = __dma_direction_to_prot(dir);
- if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs))
- prot |= IOMMU_NOEXEC;
+ prot = __get_iommu_pgprot(attrs, prot,
+ is_device_dma_coherent(dev));
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
prot);
@@ -1886,38 +1768,14 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if (!is_device_dma_coherent(dev) &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(page, offset, size, dir);
return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}
/**
- * arm_coherent_iommu_unmap_page
- * @dev: valid struct device pointer
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Coherent IOMMU aware version of arm_dma_unmap_page()
- */
-static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
- dma_addr_t iova = handle & PAGE_MASK;
- int offset = handle & ~PAGE_MASK;
- int len = PAGE_ALIGN(size + offset);
-
- if (!iova)
- return;
-
- iommu_unmap(mapping->domain, iova, len);
- __free_iova(mapping, iova, len);
-}
-
-/**
* arm_iommu_unmap_page
* @dev: valid struct device pointer
* @handle: DMA address of buffer
@@ -1940,7 +1798,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
if (!iova)
return;
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if (!(is_device_dma_coherent(dev) ||
+ dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)))
__dma_page_dev_to_cpu(page, offset, size, dir);
iommu_unmap(mapping->domain, iova, len);
@@ -1959,7 +1818,8 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
if (!iova)
return;
- __dma_page_dev_to_cpu(page, offset, size, dir);
+ if (!is_device_dma_coherent(dev))
+ __dma_page_dev_to_cpu(page, offset, size, dir);
}
static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1974,7 +1834,8 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
if (!iova)
return;
- __dma_page_cpu_to_dev(page, offset, size, dir);
+ if (!is_device_dma_coherent(dev))
+ __dma_page_cpu_to_dev(page, offset, size, dir);
}
static int arm_iommu_dma_supported(struct device *dev, u64 mask)
@@ -2016,22 +1877,6 @@ const struct dma_map_ops iommu_ops = {
.mapping_error = arm_iommu_mapping_error,
};
-const struct dma_map_ops iommu_coherent_ops = {
- .alloc = arm_iommu_alloc_attrs,
- .free = arm_iommu_free_attrs,
- .mmap = arm_iommu_mmap_attrs,
- .get_sgtable = arm_iommu_get_sgtable,
-
- .map_page = arm_coherent_iommu_map_page,
- .unmap_page = arm_coherent_iommu_unmap_page,
-
- .map_sg = arm_coherent_iommu_map_sg,
- .unmap_sg = arm_coherent_iommu_unmap_sg,
-
- .set_dma_mask = arm_dma_set_mask,
- .dma_supported = arm_iommu_dma_supported,
-};
-
/**
* arm_iommu_create_mapping
* @bus: pointer to the bus holding the client device (for IOMMU calls)
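The pattern repeated throughout this file, pulled out once for clarity: cache maintenance collapses to a no-op for coherent devices. A condensed sketch, helper name illustrative:

static void sync_page_for_device(struct device *dev, struct page *page,
				 unsigned long off, size_t size,
				 enum dma_data_direction dir)
{
	/* Coherent masters snoop CPU caches; no maintenance needed. */
	if (is_device_dma_coherent(dev))
		return;
	__dma_page_cpu_to_dev(page, off, size, dir);
}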
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 8764c241e5bb..8cf06af3036e 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -31,8 +31,8 @@ static int get_first_sibling(unsigned int cpu)
return cpu;
}
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
- const struct cpumask *online_mask)
+static int blk_mq_update_queue_map(unsigned int *map,
+ unsigned int nr_queues, const struct cpumask *online_mask)
{
unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
cpumask_var_t cpus;
@@ -52,18 +52,14 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
queue = 0;
for_each_possible_cpu(i) {
- if (!cpumask_test_cpu(i, online_mask)) {
- map[i] = 0;
- continue;
- }
-
/*
* Easy case - we have equal or more hardware queues. Or
* there are no thread siblings to take into account. Do
* 1:1 if enough, or sequential mapping if less.
*/
- if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
- map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
+ if (nr_queues >= nr_cpu_ids) {
+ map[i] = cpu_to_queue_index(nr_cpu_ids, nr_queues,
+ queue);
queue++;
continue;
}
@@ -75,7 +71,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
*/
first_sibling = get_first_sibling(i);
if (first_sibling == i) {
- map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
+ map[i] = cpu_to_queue_index(nr_cpu_ids, nr_queues,
queue);
queue++;
} else
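Mapping over all possible CPUs means a later hot-add finds its queue already assigned. A toy illustration of the sequential spread, not the real cpu_to_queue_index():

/* With 8 possible CPUs and 4 queues: 0,1,2,3,0,1,2,3. */
static unsigned int toy_queue_for_cpu(unsigned int cpu, unsigned int nr_queues)
{
	return cpu % nr_queues;
}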
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6d6f8feb48c0..8398e18d4139 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1783,10 +1783,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
- /* If the cpu isn't online, the cpu is mapped to first hctx */
- if (!cpu_online(i))
- continue;
-
hctx = q->mq_ops->map_queue(q, i);
/*
@@ -1820,12 +1816,9 @@ static void blk_mq_map_swqueue(struct request_queue *q,
* Map software to hardware queues
*/
queue_for_each_ctx(q, ctx, i) {
- /* If the cpu isn't online, the cpu is mapped to first hctx */
- if (!cpumask_test_cpu(i, online_mask))
- continue;
-
hctx = q->mq_ops->map_queue(q, i);
- cpumask_set_cpu(i, hctx->cpumask);
+ if (cpumask_test_cpu(i, online_mask))
+ cpumask_set_cpu(i, hctx->cpumask);
ctx->index_hw = hctx->nr_ctx;
hctx->ctxs[hctx->nr_ctx++] = ctx;
}
@@ -1863,17 +1856,22 @@ static void blk_mq_map_swqueue(struct request_queue *q,
/*
* Initialize batch roundrobin counts
+ * Set next_cpu only for hctxs that have at least one online CPU
+ * in their cpumask. For an hctx whose mask spans both online and
+ * offline CPUs, this always yields an online one. For hctxs whose
+ * CPUs are all offline, the cpumask is fixed up later in the
+ * hotplug reinit notifier.
*/
- hctx->next_cpu = cpumask_first(hctx->cpumask);
- hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+ if (cpumask_first(hctx->cpumask) < nr_cpu_ids) {
+ hctx->next_cpu = cpumask_first(hctx->cpumask);
+ hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+ }
}
queue_for_each_ctx(q, ctx, i) {
- if (!cpumask_test_cpu(i, online_mask))
- continue;
-
hctx = q->mq_ops->map_queue(q, i);
- cpumask_set_cpu(i, hctx->tags->cpumask);
+ if (cpumask_test_cpu(i, online_mask))
+ cpumask_set_cpu(i, hctx->tags->cpumask);
}
}
@@ -2101,38 +2099,13 @@ void blk_mq_free_queue(struct request_queue *q)
blk_mq_free_hw_queues(q, set);
}
-/* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q,
- const struct cpumask *online_mask)
-{
- WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
-
- blk_mq_sysfs_unregister(q);
-
- blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
-
- /*
- * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
- * we should change hctx numa_node according to new topology (this
- * involves free and re-allocate memory, worthy doing?)
- */
-
- blk_mq_map_swqueue(q, online_mask);
-
- blk_mq_sysfs_register(q);
-}
-
static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
unsigned long action, void *hcpu)
{
struct request_queue *q;
+ struct blk_mq_hw_ctx *hctx;
+ int i;
int cpu = (unsigned long)hcpu;
- /*
- * New online cpumask which is going to be set in this hotplug event.
- * Declare this cpumasks as global as cpu-hotplug operation is invoked
- * one-by-one and dynamically allocating this could result in a failure.
- */
- static struct cpumask online_new;
/*
* Before hotadded cpu starts handling requests, new mappings must
@@ -2154,44 +2127,31 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DEAD:
case CPU_UP_CANCELED:
- cpumask_copy(&online_new, cpu_online_mask);
+ mutex_lock(&all_q_mutex);
+ list_for_each_entry(q, &all_q_list, all_q_node) {
+ queue_for_each_hw_ctx(q, hctx, i) {
+ cpumask_clear_cpu(cpu, hctx->cpumask);
+ cpumask_clear_cpu(cpu, hctx->tags->cpumask);
+ }
+ }
+ mutex_unlock(&all_q_mutex);
break;
case CPU_UP_PREPARE:
- cpumask_copy(&online_new, cpu_online_mask);
- cpumask_set_cpu(cpu, &online_new);
+ /* Update hctx->cpumask for newly onlined CPUs */
+ mutex_lock(&all_q_mutex);
+ list_for_each_entry(q, &all_q_list, all_q_node) {
+ queue_for_each_hw_ctx(q, hctx, i) {
+ cpumask_set_cpu(cpu, hctx->cpumask);
+ hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+ cpumask_set_cpu(cpu, hctx->tags->cpumask);
+ }
+ }
+ mutex_unlock(&all_q_mutex);
break;
default:
return NOTIFY_OK;
}
- mutex_lock(&all_q_mutex);
-
- /*
- * We need to freeze and reinit all existing queues. Freezing
- * involves synchronous wait for an RCU grace period and doing it
- * one by one may take a long time. Start freezing all queues in
- * one swoop and then wait for the completions so that freezing can
- * take place in parallel.
- */
- list_for_each_entry(q, &all_q_list, all_q_node)
- blk_mq_freeze_queue_start(q);
- list_for_each_entry(q, &all_q_list, all_q_node) {
- blk_mq_freeze_queue_wait(q);
-
- /*
- * timeout handler can't touch hw queue during the
- * reinitialization
- */
- del_timer_sync(&q->timeout);
- }
-
- list_for_each_entry(q, &all_q_list, all_q_node)
- blk_mq_queue_reinit(q, &online_new);
-
- list_for_each_entry(q, &all_q_list, all_q_node)
- blk_mq_unfreeze_queue(q);
-
- mutex_unlock(&all_q_mutex);
return NOTIFY_OK;
}
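With the static map, the notifier now only edits per-hctx cpumasks; the removed path froze every queue, and each freeze waits out an RCU grace period. The hot-add half in condensed form, locking elided:

static void toy_cpu_up_prepare(struct request_queue *q, unsigned int cpu)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* Make the incoming CPU eligible on every hardware context. */
	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_set_cpu(cpu, hctx->cpumask);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
		cpumask_set_cpu(cpu, hctx->tags->cpumask);
	}
}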
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 713820b47b31..7fd11bcaa409 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -48,8 +48,6 @@ void blk_mq_disable_hotplug(void);
* CPU -> queue mappings
*/
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
- const struct cpumask *online_mask);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
/*
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b7d56c5ea3c6..3ac683dff7de 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -72,6 +72,11 @@ int lock_device_hotplug_sysfs(void)
return restart_syscall();
}
+void lock_device_hotplug_assert(void)
+{
+ lockdep_assert_held(&device_hotplug_lock);
+}
+
#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
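A hypothetical caller, showing the intended use of the new assert in hotplug-sensitive paths:

static void some_hotplug_path(void)
{
	/* With lockdep enabled this warns if the caller skipped
	 * lock_device_hotplug(); it costs nothing otherwise.
	 */
	lock_device_hotplug_assert();
	/* ... work that requires device_hotplug_lock ... */
}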
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index dee022638fe6..c8bfb6077224 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -180,6 +180,58 @@ static struct attribute_group crash_note_cpu_attr_group = {
};
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+
+static ssize_t show_cpu_isolated(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ ssize_t rc;
+ int cpuid = cpu->dev.id;
+ unsigned int isolated = cpu_isolated(cpuid);
+
+ rc = snprintf(buf, PAGE_SIZE-2, "%d\n", isolated);
+
+ return rc;
+}
+
+static ssize_t __ref store_cpu_isolated(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int err;
+ int cpuid = cpu->dev.id;
+ unsigned int isolated;
+
+ err = kstrtouint(strstrip((char *)buf), 0, &isolated);
+ if (err)
+ return err;
+
+ if (isolated > 1)
+ return -EINVAL;
+
+ if (isolated)
+ sched_isolate_cpu(cpuid);
+ else
+ sched_unisolate_cpu(cpuid);
+
+ return count;
+}
+
+static DEVICE_ATTR(isolate, 0644, show_cpu_isolated, store_cpu_isolated);
+
+static struct attribute *cpu_isolated_attrs[] = {
+ &dev_attr_isolate.attr,
+ NULL
+};
+
+static struct attribute_group cpu_isolated_attr_group = {
+ .attrs = cpu_isolated_attrs,
+};
+
+#endif
+
#ifdef CONFIG_SCHED_HMP
static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev,
@@ -280,6 +332,9 @@ static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_SCHED_HMP
&sched_hmp_cpu_attr_group,
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+ &cpu_isolated_attr_group,
+#endif
NULL
};
@@ -290,6 +345,9 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_SCHED_HMP
&sched_hmp_cpu_attr_group,
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+ &cpu_isolated_attr_group,
+#endif
NULL
};
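User-space view of the new knob, as a sketch; the path follows the standard cpu sysfs layout, and error handling is minimal:

#include <stdio.h>

static int set_cpu_isolated(int cpu, int isolated)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/isolate", cpu);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", isolated);	/* 1 = isolate, 0 = unisolate */
	return fclose(f);
}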
diff --git a/drivers/base/regmap/regmap-swr.c b/drivers/base/regmap/regmap-swr.c
index 027cbfc505ab..1641c374b189 100644
--- a/drivers/base/regmap/regmap-swr.c
+++ b/drivers/base/regmap/regmap-swr.c
@@ -28,11 +28,16 @@ static int regmap_swr_gather_write(void *context,
struct device *dev = context;
struct swr_device *swr = to_swr_device(dev);
struct regmap *map = dev_get_regmap(dev, NULL);
- size_t addr_bytes = map->format.reg_bytes;
+ size_t addr_bytes;
size_t val_bytes;
int i, ret = 0;
u16 reg_addr = 0;
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+ addr_bytes = map->format.reg_bytes;
if (swr == NULL) {
dev_err(dev, "%s: swr device is NULL\n", __func__);
return -EINVAL;
@@ -154,10 +159,15 @@ static int regmap_swr_read(void *context,
struct device *dev = context;
struct swr_device *swr = to_swr_device(dev);
struct regmap *map = dev_get_regmap(dev, NULL);
- size_t addr_bytes = map->format.reg_bytes;
+ size_t addr_bytes;
int ret = 0;
u16 reg_addr = 0;
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+ addr_bytes = map->format.reg_bytes;
if (swr == NULL) {
dev_err(dev, "%s: swr is NULL\n", __func__);
return -EINVAL;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 13116f010e89..67c1207d35be 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -163,6 +163,7 @@ struct fastrpc_smmu {
int enabled;
int faults;
int secure;
+ int coherent;
};
struct fastrpc_session_ctx {
@@ -1129,6 +1130,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
for (oix = 0; oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
struct fastrpc_mmap *map = ctx->maps[i];
+ if (ctx->fl->sctx->smmu.coherent)
+ continue;
if (map && map->uncached)
continue;
if (rpra[i].buf.len && ctx->overps[oix]->mstart)
@@ -1141,7 +1144,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
rpra[inh + i].buf.len = ctx->lpra[inh + i].buf.len;
rpra[inh + i].h = ctx->lpra[inh + i].h;
}
- dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
+ if (!ctx->fl->sctx->smmu.coherent)
+ dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
bail:
return err;
}
@@ -1372,13 +1376,15 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
goto bail;
}
- inv_args_pre(ctx);
- if (FASTRPC_MODE_SERIAL == mode)
- inv_args(ctx);
+ if (!fl->sctx->smmu.coherent) {
+ inv_args_pre(ctx);
+ if (mode == FASTRPC_MODE_SERIAL)
+ inv_args(ctx);
+ }
VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
if (err)
goto bail;
- if (FASTRPC_MODE_PARALLEL == mode)
+ if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
inv_args(ctx);
wait:
if (kernel)
@@ -2275,7 +2281,6 @@ static int fastrpc_cb_probe(struct device *dev)
const char *name;
unsigned int start = 0x80000000;
int err = 0, i;
- int disable_htw = 1;
int secure_vmid = VMID_CP_PIXEL;
VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
@@ -2302,6 +2307,8 @@ static int fastrpc_cb_probe(struct device *dev)
sess = &chan->session[chan->sesscount];
sess->smmu.cb = iommuspec.args[0];
sess->used = 0;
+ sess->smmu.coherent = of_property_read_bool(dev->of_node,
+ "dma-coherent");
sess->smmu.secure = of_property_read_bool(dev->of_node,
"qcom,secure-context-bank");
if (sess->smmu.secure)
@@ -2311,9 +2318,6 @@ static int fastrpc_cb_probe(struct device *dev)
start, 0x7fffffff)));
if (err)
goto bail;
- iommu_domain_set_attr(sess->smmu.mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
iommu_set_fault_handler(sess->smmu.mapping->domain,
fastrpc_smmu_fault_handler, sess);
if (sess->smmu.secure)
@@ -2341,7 +2345,6 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
unsigned int *range = 0, range_size = 0;
unsigned int *sids = 0, sids_size = 0;
int err = 0, ret = 0, i;
- int disable_htw = 1;
VERIFY(err, 0 != (domains_child_node = of_get_child_by_name(
dev->of_node,
@@ -2395,9 +2398,6 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
range[0], range[1])));
if (err)
goto bail;
- iommu_domain_set_attr(first_sess->smmu.mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
VERIFY(err, !arm_iommu_attach_device(first_sess->dev,
first_sess->smmu.mapping));
if (err)
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index b830334dc701..f0cd6cf3967d 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -3066,8 +3066,8 @@ int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len)
!(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
DIAG_LOG(DIAG_DEBUG_DCI,
"buf: 0x%pK, p: %d, len: %d, f_mask: %d\n",
- buf, peripheral, len,
- driver->feature[peripheral].rcvd_feature_mask);
+ buf, peripheral, len,
+ driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask);
return -EINVAL;
}
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 594d3b1bf3b5..9f43cb5427f0 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -651,8 +651,8 @@ static void process_build_mask_report(uint8_t *buf, uint32_t len,
void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
int len)
{
- int read_len = 0;
- int header_len = sizeof(struct diag_ctrl_pkt_header_t);
+ uint32_t read_len = 0;
+ uint32_t header_len = sizeof(struct diag_ctrl_pkt_header_t);
uint8_t *ptr = buf;
struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index fea1b74aacae..a2ffabe43c86 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -413,19 +413,16 @@ static int diag_glink_write(void *ctxt, unsigned char *buf, int len)
return -ENODEV;
}
- err = wait_event_interruptible(glink_info->wait_q,
- atomic_read(&glink_info->tx_intent_ready));
- if (err) {
- diagfwd_write_buffer_done(glink_info->fwd_ctxt, buf);
- return -ERESTARTSYS;
- }
-
- atomic_dec(&glink_info->tx_intent_ready);
- err = glink_tx(glink_info->hdl, glink_info, buf, len, tx_flags);
- if (!err) {
- DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to glink, len: %d\n",
- glink_info->name, len);
- }
+ if (atomic_read(&glink_info->tx_intent_ready)) {
+ atomic_dec(&glink_info->tx_intent_ready);
+ err = glink_tx(glink_info->hdl, glink_info, buf, len, tx_flags);
+ if (!err) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s wrote to glink, len: %d\n",
+ glink_info->name, len);
+ }
+	} else {
+		err = -ENOMEM;
+	}
return err;
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 066890aebf39..22b9e05086bd 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -751,7 +751,9 @@ int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
if (!err)
fwd_info->write_bytes += len;
-
+	else if (fwd_info->transport == TRANSPORT_GLINK)
+		diagfwd_write_buffer_done(fwd_info, buf_ptr);
return err;
}
diff --git a/drivers/clk/msm/clock-gcc-cobalt.c b/drivers/clk/msm/clock-gcc-cobalt.c
index 05272118af16..46e791b3cb99 100644
--- a/drivers/clk/msm/clock-gcc-cobalt.c
+++ b/drivers/clk/msm/clock-gcc-cobalt.c
@@ -2374,7 +2374,7 @@ static struct mux_clk gcc_debug_mux = {
{ &debug_cpu_clk.c, 0x00c0 },
{ &snoc_clk.c, 0x0000 },
{ &cnoc_clk.c, 0x000e },
- { &bimc_clk.c, 0x00a9 },
+ { &bimc_clk.c, 0x014e },
{ &gcc_mmss_sys_noc_axi_clk.c, 0x001f },
{ &gcc_mmss_noc_cfg_ahb_clk.c, 0x0020 },
{ &gcc_usb30_master_clk.c, 0x003e },
diff --git a/drivers/clk/msm/clock-gpu-cobalt.c b/drivers/clk/msm/clock-gpu-cobalt.c
index 7cec9be1f42c..9d93351a083e 100644
--- a/drivers/clk/msm/clock-gpu-cobalt.c
+++ b/drivers/clk/msm/clock-gpu-cobalt.c
@@ -173,7 +173,6 @@ static struct clk_freq_tbl ftbl_gfx3d_clk_src_v2[] = {
F_SLEW( 515000000, 1030000000, gpu_pll0_pll_out_even, 1, 0, 0),
F_SLEW( 596000000, 1192000000, gpu_pll0_pll_out_even, 1, 0, 0),
F_SLEW( 670000000, 1340000000, gpu_pll0_pll_out_even, 1, 0, 0),
- F_SLEW( 710000000, 1420000000, gpu_pll0_pll_out_even, 1, 0, 0),
F_END
};
@@ -612,7 +611,7 @@ static void msm_gfxcc_hamster_fixup(void)
static void msm_gfxcc_cobalt_v2_fixup(void)
{
- gpu_pll0_pll.c.fmax[VDD_DIG_MIN] = 1420000500;
+ gpu_pll0_pll.c.fmax[VDD_DIG_MIN] = 1340000500;
gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_v2;
}
diff --git a/drivers/clk/msm/clock-mmss-cobalt.c b/drivers/clk/msm/clock-mmss-cobalt.c
index 1a8083e74f5f..873dd40d3a44 100644
--- a/drivers/clk/msm/clock-mmss-cobalt.c
+++ b/drivers/clk/msm/clock-mmss-cobalt.c
@@ -240,6 +240,7 @@ static struct rcg_clk ahb_clk_src = {
.set_rate = set_rate_hid,
.freq_tbl = ftbl_ahb_clk_src,
.current_freq = &rcg_dummy_freq,
+ .non_local_control_timeout = 1000,
.base = &virt_base,
.c = {
.dbg_name = "ahb_clk_src",
@@ -481,10 +482,9 @@ static struct clk_freq_tbl ftbl_video_core_clk_src[] = {
};
static struct clk_freq_tbl ftbl_video_core_clk_src_vq[] = {
- F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
F_MM( 269330000, mmpll0_pll_out, 3, 0, 0),
- F_MM( 404000000, mmpll0_pll_out, 2, 0, 0),
+ F_MM( 355200000, mmpll6_pll_out, 2.5, 0, 0),
F_MM( 444000000, mmpll6_pll_out, 2, 0, 0),
F_MM( 533000000, mmpll3_pll_out, 2, 0, 0),
F_END
@@ -735,10 +735,9 @@ static struct clk_freq_tbl ftbl_video_subcore_clk_src[] = {
};
static struct clk_freq_tbl ftbl_video_subcore_clk_src_vq[] = {
- F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
F_MM( 269330000, mmpll0_pll_out, 3, 0, 0),
- F_MM( 404000000, mmpll0_pll_out, 2, 0, 0),
+ F_MM( 355200000, mmpll6_pll_out, 2.5, 0, 0),
F_MM( 444000000, mmpll6_pll_out, 2, 0, 0),
F_MM( 533000000, mmpll3_pll_out, 2, 0, 0),
F_END
@@ -2733,17 +2732,10 @@ static void msm_mmsscc_hamster_fixup(void)
static void msm_mmsscc_v2_fixup(void)
{
- cpp_clk_src.c.fmax[VDD_DIG_LOW] = 200000000;
- cpp_clk_src.c.fmax[VDD_DIG_LOW_L1] = 480000000;
- csi0_clk_src.c.fmax[VDD_DIG_LOW] = 256000000;
- csi0_clk_src.c.fmax[VDD_DIG_LOW_L1] = 300000000;
- csi1_clk_src.c.fmax[VDD_DIG_LOW] = 256000000;
- csi1_clk_src.c.fmax[VDD_DIG_LOW_L1] = 300000000;
- csi2_clk_src.c.fmax[VDD_DIG_LOW] = 256000000;
- csi2_clk_src.c.fmax[VDD_DIG_LOW_L1] = 300000000;
- csi3_clk_src.c.fmax[VDD_DIG_LOW] = 256000000;
- csi3_clk_src.c.fmax[VDD_DIG_LOW_L1] = 300000000;
- csiphy_clk_src.c.fmax[VDD_DIG_LOW] = 256000000;
+ csi0_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
+ csi1_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
+ csi2_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
+ csi3_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
dp_pixel_clk_src.c.fmax[VDD_DIG_LOWER] = 148380000;
}
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 8ae6a4e994f0..d29fd60719c9 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -57,6 +57,7 @@ enum clk_osm_lut_data {
FREQ_DATA,
PLL_OVERRIDES,
SPARE_DATA,
+ VIRTUAL_CORNER,
NUM_FIELDS,
};
@@ -78,6 +79,7 @@ enum clk_osm_trace_packet_id {
#define MEM_ACC_INSTR_COMP(n) (0x67 + ((n) * 0x40))
#define MEM_ACC_SEQ_REG_VAL_START(n) (SEQ_REG(60 + (n)))
#define SEQ_REG1_MSMCOBALT_V2 0x1048
+#define VERSION_REG 0x0
#define OSM_TABLE_SIZE 40
#define MAX_CLUSTER_CNT 2
@@ -181,7 +183,9 @@ enum clk_osm_trace_packet_id {
#define DROOP_UNSTALL_TIMER_CTRL_REG 0x10AC
#define DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG 0x10B0
#define DROOP_WAIT_TO_RELEASE_TIMER_CTRL1_REG 0x10B4
+#define OSM_PLL_SW_OVERRIDE_EN 0x10C0
+#define PLL_SW_OVERRIDE_DROOP_EN BIT(0)
#define DCVS_DROOP_TIMER_CTRL 0x10B8
#define SEQ_MEM_ADDR 0x500
#define SEQ_CFG_BR_ADDR 0x170
@@ -198,6 +202,8 @@ enum clk_osm_trace_packet_id {
#define TRACE_CTRL_EN_MASK BIT(0)
#define TRACE_CTRL_ENABLE 1
#define TRACE_CTRL_DISABLE 0
+#define TRACE_CTRL_ENABLE_WDOG_STATUS BIT(30)
+#define TRACE_CTRL_ENABLE_WDOG_STATUS_MASK BIT(30)
#define TRACE_CTRL_PACKET_TYPE_MASK BVAL(2, 1, 3)
#define TRACE_CTRL_PACKET_TYPE_SHIFT 1
#define TRACE_CTRL_PERIODIC_TRACE_EN_MASK BIT(3)
@@ -217,6 +223,11 @@ enum clk_osm_trace_packet_id {
#define PERFCL_EFUSE_SHIFT 29
#define PERFCL_EFUSE_MASK 0x7
+#define MSMCOBALTV1_PWRCL_BOOT_RATE 1478400000
+#define MSMCOBALTV1_PERFCL_BOOT_RATE 1536000000
+#define MSMCOBALTV2_PWRCL_BOOT_RATE 1555200000
+#define MSMCOBALTV2_PERFCL_BOOT_RATE 1728000000
+
static void __iomem *virt_base;
static void __iomem *debug_base;
@@ -376,6 +387,11 @@ static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset)
return readl_relaxed_no_log((char *)c->vbases[OSM_BASE] + offset);
}
+static inline int clk_osm_mb(struct clk_osm *c, int base)
+{
+ return readl_relaxed_no_log((char *)c->vbases[base] + VERSION_REG);
+}
+
static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
{
u64 temp;
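The readl-based helper replaces bare mb() because a CPU barrier only orders the core's own accesses; reading a register back from the same block forces the posted write out to the device. Sketch, wrapper name illustrative:

static void osm_write_flush(struct clk_osm *c, u32 val, u32 offset)
{
	clk_osm_write_reg(c, val, offset);
	/* The read cannot complete until the write has landed. */
	(void)clk_osm_mb(c, OSM_BASE);
}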
@@ -406,12 +422,24 @@ static long clk_osm_list_rate(struct clk *c, unsigned n)
static long clk_osm_round_rate(struct clk *c, unsigned long rate)
{
int i;
+ unsigned long rrate = 0;
- for (i = 0; i < c->num_fmax; i++)
- if (rate <= c->fmax[i])
- return c->fmax[i];
+ /*
+ * If the rate passed in is 0, return the first frequency in
+ * the FMAX table.
+ */
+ if (!rate)
+ return c->fmax[0];
+
+ for (i = 0; i < c->num_fmax; i++) {
+ if (is_better_rate(rate, rrate, c->fmax[i])) {
+ rrate = c->fmax[i];
+ if (rrate == rate)
+ break;
+ }
+ }
- return c->fmax[i-1];
+ return rrate;
}
static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
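The rewritten round_rate searches for a best fit instead of taking the first fmax at or above the request; in effect is_better_rate() behaves like the paraphrase below (an assumption about the msm-clk helper, not its exact source):

static bool better(unsigned long req, unsigned long best, unsigned long new)
{
	/* Prefer the smallest rate >= req; failing that, the largest. */
	return (req <= new && new < best) || (best < req && best < new);
}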
@@ -447,7 +475,7 @@ static int clk_osm_set_rate(struct clk *c, unsigned long rate)
}
pr_debug("rate: %lu --> index %d\n", rate, index);
- if (cpuclk->llm_sw_overr) {
+ if (cpuclk->llm_sw_overr[0]) {
clk_osm_write_reg(cpuclk, cpuclk->llm_sw_overr[0],
LLM_SW_OVERRIDE_REG);
clk_osm_write_reg(cpuclk, cpuclk->llm_sw_overr[1],
@@ -458,14 +486,14 @@ static int clk_osm_set_rate(struct clk *c, unsigned long rate)
/* Choose index and send request to OSM hardware */
clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG);
- if (cpuclk->llm_sw_overr) {
+ if (cpuclk->llm_sw_overr[0]) {
udelay(1);
clk_osm_write_reg(cpuclk, cpuclk->llm_sw_overr[2],
LLM_SW_OVERRIDE_REG);
}
/* Make sure the write goes through before proceeding */
- mb();
+ clk_osm_mb(cpuclk, OSM_BASE);
return 0;
}
@@ -477,7 +505,7 @@ static int clk_osm_enable(struct clk *c)
clk_osm_write_reg(cpuclk, 1, ENABLE_REG);
/* Make sure the write goes through before proceeding */
- mb();
+ clk_osm_mb(cpuclk, OSM_BASE);
/* Wait for 5us for OSM hardware to enable */
udelay(5);
@@ -581,19 +609,21 @@ static void clk_osm_print_osm_table(struct clk_osm *c)
{
int i;
struct osm_entry *table = c->osm_table;
- u32 pll_src, pll_div, lval;
+ u32 pll_src, pll_div, lval, core_count;
- pr_debug("Index, Frequency, VC, OLV (mv), PLL Src, PLL Div, L-Val, ACC Level\n");
+ pr_debug("Index, Frequency, VC, OLV (mv), Core Count, PLL Src, PLL Div, L-Val, ACC Level\n");
for (i = 0; i < c->num_entries; i++) {
pll_src = (table[i].freq_data & GENMASK(27, 26)) >> 26;
pll_div = (table[i].freq_data & GENMASK(25, 24)) >> 24;
lval = table[i].freq_data & GENMASK(7, 0);
+ core_count = (table[i].freq_data & GENMASK(18, 16)) >> 16;
- pr_debug("%3d, %11lu, %2u, %5u, %6u, %8u, %7u, %5u\n",
+ pr_debug("%3d, %11lu, %2u, %5u, %2u, %6u, %8u, %7u, %5u\n",
i,
table[i].frequency,
table[i].virtual_corner,
table[i].open_loop_volt,
+ core_count,
pll_src,
pll_div,
lval,
@@ -655,14 +685,17 @@ static int clk_osm_get_lut(struct platform_device *pdev,
c->osm_table[j].freq_data = array[i + FREQ_DATA];
c->osm_table[j].override_data = array[i + PLL_OVERRIDES];
c->osm_table[j].spare_data = array[i + SPARE_DATA];
- pr_debug("index=%d freq=%ld freq_data=0x%x override_data=0x%x spare_data=0x%x\n",
+ /* Voltage corners are 0 based in the OSM LUT */
+ c->osm_table[j].virtual_corner = array[i + VIRTUAL_CORNER] - 1;
+ pr_debug("index=%d freq=%ld virtual_corner=%d freq_data=0x%x override_data=0x%x spare_data=0x%x\n",
j, c->osm_table[j].frequency,
+ c->osm_table[j].virtual_corner,
c->osm_table[j].freq_data,
c->osm_table[j].override_data,
c->osm_table[j].spare_data);
data = (array[i + FREQ_DATA] & GENMASK(18, 16)) >> 16;
- if (!last_entry && data == MAX_CONFIG) {
+ if (!last_entry) {
clk->fmax[k] = array[i];
k++;
}
@@ -1083,14 +1116,14 @@ static void clk_osm_setup_cluster_pll(struct clk_osm *c)
PLL_MODE);
/* Ensure writes complete before delaying */
- mb();
+ clk_osm_mb(c, PLL_BASE);
udelay(PLL_WAIT_LOCK_TIME_US);
writel_relaxed(0x6, c->vbases[PLL_BASE] + PLL_MODE);
/* Ensure write completes before delaying */
- mb();
+ clk_osm_mb(c, PLL_BASE);
usleep_range(50, 75);
@@ -1135,7 +1168,7 @@ static int clk_osm_setup_hw_table(struct clk_osm *c)
}
/* Make sure all writes go through */
- mb();
+ clk_osm_mb(c, OSM_BASE);
return 0;
}
@@ -1143,77 +1176,25 @@ static int clk_osm_setup_hw_table(struct clk_osm *c)
static int clk_osm_resolve_open_loop_voltages(struct clk_osm *c)
{
struct regulator *regulator = c->vdd_reg;
- struct dev_pm_opp *opp;
- unsigned long freq;
- u32 vc, mv, data;
- int i, rc = 0;
+ u32 vc, mv;
+ int i;
- /*
- * Determine frequency -> virtual corner -> open-loop voltage
- * mapping from the OPP table.
- */
for (i = 0; i < OSM_TABLE_SIZE; i++) {
- freq = c->osm_table[i].frequency;
- /*
- * Only frequencies that are supported across all configurations
- * are present in the OPP table associated with the regulator
- * device.
- */
- data = (c->osm_table[i].freq_data & GENMASK(18, 16)) >> 16;
- if (data != MAX_CONFIG) {
- if (i < 1) {
- pr_err("Invalid LUT entry at index 0\n");
- return -EINVAL;
- }
- c->osm_table[i].open_loop_volt =
- c->osm_table[i-1].open_loop_volt;
- c->osm_table[i].virtual_corner =
- c->osm_table[i-1].virtual_corner;
- continue;
- }
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_exact(&c->vdd_dev->dev, freq, true);
- if (IS_ERR(opp)) {
- rc = PTR_ERR(opp);
- if (rc == -ERANGE)
- pr_err("Frequency %lu not found\n", freq);
- goto exit;
- }
-
- vc = dev_pm_opp_get_voltage(opp);
- if (!vc) {
- pr_err("No virtual corner found for frequency %lu\n",
- freq);
- rc = -ERANGE;
- goto exit;
- }
-
- rcu_read_unlock();
-
+ vc = c->osm_table[i].virtual_corner + 1;
/* Voltage is in uv. Convert to mv */
mv = regulator_list_corner_voltage(regulator, vc) / 1000;
-
- /* CPR virtual corners are zero-based numbered */
- vc--;
c->osm_table[i].open_loop_volt = mv;
- c->osm_table[i].virtual_corner = vc;
}
return 0;
-exit:
- rcu_read_unlock();
- return rc;
}
static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
struct platform_device *pdev)
{
struct regulator *regulator = c->vdd_reg;
- struct dev_pm_opp *opp;
- unsigned long freq = 0;
- int vc, i, threshold, rc = 0;
- u32 corner_volt, data;
+ int count, vc, i, threshold, rc = 0;
+ u32 corner_volt;
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,apm-threshold-voltage",
@@ -1224,70 +1205,26 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
}
/* Determine crossover virtual corner */
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_exact(&c->vdd_dev->dev, freq, true);
- if (IS_ERR(opp)) {
- rc = PTR_ERR(opp);
- if (rc == -ERANGE)
- pr_debug("APM placeholder frequency entry not found\n");
- goto exit;
+ count = regulator_count_voltages(regulator);
+ if (count < 0) {
+ pr_err("Failed to get the number of virtual corners supported\n");
+ return count;
}
- vc = dev_pm_opp_get_voltage(opp);
- if (!vc) {
- pr_debug("APM crossover corner not found\n");
- rc = -ERANGE;
- goto exit;
- }
- rcu_read_unlock();
- vc--;
- c->apm_crossover_vc = vc;
+
+ c->apm_crossover_vc = count - 1;
/* Determine threshold virtual corner */
for (i = 0; i < OSM_TABLE_SIZE; i++) {
- freq = c->osm_table[i].frequency;
- /*
- * Only frequencies that are supported across all configurations
- * are present in the OPP table associated with the regulator
- * device.
- */
- data = (c->osm_table[i].freq_data & GENMASK(18, 16)) >> 16;
- if (data != MAX_CONFIG)
- continue;
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_exact(&c->vdd_dev->dev, freq, true);
- if (IS_ERR(opp)) {
- rc = PTR_ERR(opp);
- if (rc == -ERANGE)
- pr_err("Frequency %lu not found\n", freq);
- goto exit;
- }
-
- vc = dev_pm_opp_get_voltage(opp);
- if (!vc) {
- pr_err("No virtual corner found for frequency %lu\n",
- freq);
- rc = -ERANGE;
- goto exit;
- }
-
- rcu_read_unlock();
-
+ vc = c->osm_table[i].virtual_corner + 1;
corner_volt = regulator_list_corner_voltage(regulator, vc);
- /* CPR virtual corners are zero-based numbered */
- vc--;
-
if (corner_volt >= threshold) {
- c->apm_threshold_vc = vc;
+ c->apm_threshold_vc = c->osm_table[i].virtual_corner;
break;
}
}
return 0;
-exit:
- rcu_read_unlock();
- return rc;
}
static int clk_osm_set_cc_policy(struct platform_device *pdev)
@@ -1350,7 +1287,7 @@ static int clk_osm_set_cc_policy(struct platform_device *pdev)
}
/* Wait for the writes to complete */
- mb();
+ clk_osm_mb(&perfcl_clk, OSM_BASE);
rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-ret-inactive");
if (rc) {
@@ -1375,7 +1312,7 @@ static int clk_osm_set_cc_policy(struct platform_device *pdev)
clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE);
/* Wait for the writes to complete */
- mb();
+ clk_osm_mb(&perfcl_clk, OSM_BASE);
devm_kfree(&pdev->dev, array);
return 0;
@@ -1470,7 +1407,7 @@ static int clk_osm_set_llm_freq_policy(struct platform_device *pdev)
clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
/* Wait for the write to complete */
- mb();
+ clk_osm_mb(&perfcl_clk, OSM_BASE);
devm_kfree(&pdev->dev, array);
return 0;
@@ -1545,7 +1482,7 @@ static int clk_osm_set_llm_volt_policy(struct platform_device *pdev)
clk_osm_write_reg(&perfcl_clk, val, LLM_INTF_DCVS_DISABLE);
/* Wait for the writes to complete */
- mb();
+ clk_osm_mb(&perfcl_clk, OSM_BASE);
devm_kfree(&pdev->dev, array);
return 0;
@@ -1746,7 +1683,7 @@ static void clk_osm_setup_osm_was(struct clk_osm *c)
}
/* Ensure writes complete before returning */
- mb();
+ clk_osm_mb(c, OSM_BASE);
}
static void clk_osm_setup_fsms(struct clk_osm *c)
@@ -1856,7 +1793,7 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
val = clk_osm_read_reg(c,
DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
- val |= BVAL(15, 0, clk_osm_count_ns(c, 500));
+ val |= BVAL(15, 0, clk_osm_count_ns(c, 15000));
clk_osm_write_reg(c, val,
DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
}
@@ -1870,7 +1807,7 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
if (c->wfx_fsm_en || c->ps_fsm_en || c->droop_fsm_en) {
clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
- clk_osm_write_reg(c, clk_osm_count_ns(c, 250),
+ clk_osm_write_reg(c, clk_osm_count_ns(c, 500),
DROOP_RELEASE_TIMER_CTRL);
clk_osm_write_reg(c, clk_osm_count_ns(c, 500),
DCVS_DROOP_TIMER_CTRL);
@@ -1879,6 +1816,11 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
BVAL(6, 0, 0x8);
clk_osm_write_reg(c, val, DROOP_CTRL_REG);
}
+
+ /* Enable the PLL Droop Override */
+ val = clk_osm_read_reg(c, OSM_PLL_SW_OVERRIDE_EN);
+ val |= PLL_SW_OVERRIDE_DROOP_EN;
+ clk_osm_write_reg(c, val, OSM_PLL_SW_OVERRIDE_EN);
}
static void clk_osm_do_additional_setup(struct clk_osm *c,
@@ -1947,7 +1889,7 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c)
SEQ_REG(76));
/* Ensure writes complete before returning */
- mb();
+ clk_osm_mb(c, OSM_BASE);
} else {
if (msmcobalt_v1) {
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(1),
@@ -1960,8 +1902,8 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c)
}
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(72),
c->apm_crossover_vc);
- clk_osm_write_reg(c, c->apm_threshold_vc,
- SEQ_REG(15));
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(15),
+ c->apm_threshold_vc);
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(31),
c->apm_threshold_vc != 0 ?
c->apm_threshold_vc - 1 : 0xff);
@@ -2752,6 +2694,18 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
return rc;
}
+ if (msmcobalt_v2) {
+ /* Enable OSM WDOG registers */
+ clk_osm_masked_write_reg(&pwrcl_clk,
+ TRACE_CTRL_ENABLE_WDOG_STATUS,
+ TRACE_CTRL,
+ TRACE_CTRL_ENABLE_WDOG_STATUS_MASK);
+ clk_osm_masked_write_reg(&perfcl_clk,
+ TRACE_CTRL_ENABLE_WDOG_STATUS,
+ TRACE_CTRL,
+ TRACE_CTRL_ENABLE_WDOG_STATUS_MASK);
+ }
+
/*
* The hmss_gpll0 clock runs at 300 MHz. Ensure it is at the correct
* frequency before enabling OSM. LUT index 0 is always sourced from
@@ -2765,18 +2719,22 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
}
clk_prepare_enable(&sys_apcsaux_clk_gcc.c);
- /* Set 300MHz index */
- rc = clk_set_rate(&pwrcl_clk.c, init_rate);
+ /* Set boot rate */
+ rc = clk_set_rate(&pwrcl_clk.c, msmcobalt_v1 ?
+ MSMCOBALTV1_PWRCL_BOOT_RATE :
+ MSMCOBALTV2_PWRCL_BOOT_RATE);
if (rc) {
- dev_err(&pdev->dev, "Unable to set init rate on pwr cluster, rc=%d\n",
+ dev_err(&pdev->dev, "Unable to set boot rate on pwr cluster, rc=%d\n",
rc);
clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
return rc;
}
- rc = clk_set_rate(&perfcl_clk.c, init_rate);
+ rc = clk_set_rate(&perfcl_clk.c, msmcobalt_v1 ?
+ MSMCOBALTV1_PERFCL_BOOT_RATE :
+ MSMCOBALTV2_PERFCL_BOOT_RATE);
if (rc) {
- dev_err(&pdev->dev, "Unable to set init rate on perf cluster, rc=%d\n",
+ dev_err(&pdev->dev, "Unable to set boot rate on perf cluster, rc=%d\n",
rc);
clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
return rc;
diff --git a/drivers/clk/msm/clock.h b/drivers/clk/msm/clock.h
index bee769921ff7..b7aa946f1931 100644
--- a/drivers/clk/msm/clock.h
+++ b/drivers/clk/msm/clock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -38,7 +38,7 @@ int msm_clock_init(struct clock_init_data *data);
int find_vdd_level(struct clk *clk, unsigned long rate);
extern struct list_head orphan_clk_list;
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMMON_CLK_MSM)
int clock_debug_register(struct clk *clk);
void clock_debug_print_enabled(void);
#else
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
index 9a080e4ee39b..a574a9cd2b5a 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
@@ -18,6 +18,7 @@
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/clk/msm-clock-generic.h>
+#include <linux/usb/usbpd.h>
#include "mdss-pll.h"
#include "mdss-dp-pll.h"
@@ -172,9 +173,27 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
{
u32 res = 0;
struct mdss_pll_resources *dp_res = vco->priv;
+ u8 orientation, ln_cnt;
+ u32 spare_value;
+
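+	/*
+	 * DP_PHY_SPARE0 carries the DP lane count in its low nibble and
+	 * the Type-C plug orientation in its high nibble.
+	 */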
+ spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+ ln_cnt = spare_value & 0x0F;
+ orientation = (spare_value & 0xF0) >> 4;
+ pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+ __func__, spare_value, ln_cnt, orientation);
+
+ if (ln_cnt != 4) {
+ if (orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x2d);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x35);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x3d);
+ }
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_PD_CTL, 0x3d);
/* Make sure the PHY register writes are done */
wmb();
MDSS_PLL_REG_W(dp_res->pll_base,
@@ -314,8 +333,13 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
/* Make sure the PLL register writes are done */
wmb();
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_MODE, 0x58);
+ if (orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_MODE, 0x48);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_MODE, 0x58);
+
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_TX0_TX1_LANE_CTL, 0x05);
MDSS_PLL_REG_W(dp_res->phy_base,
@@ -427,6 +451,12 @@ static int dp_pll_enable(struct clk *c)
u32 status;
struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
struct mdss_pll_resources *dp_res = vco->priv;
+ u8 orientation, ln_cnt;
+ u32 spare_value, bias_en, drvr_en;
+
+ spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+ ln_cnt = spare_value & 0x0F;
+ orientation = (spare_value & 0xF0) >> 4;
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_CFG, 0x01);
@@ -474,18 +504,45 @@ static int dp_pll_enable(struct clk *c)
pr_debug("%s: PLL is locked\n", __func__);
- MDSS_PLL_REG_W(dp_res->phy_base,
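+	/* Pick bias and driver enable values based on the active lane count */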
+ if (ln_cnt == 1) {
+ bias_en = 0x3e;
+ drvr_en = 0x13;
+ } else {
+ bias_en = 0x3f;
+ drvr_en = 0x10;
+ }
+
+ if (ln_cnt != 4) {
+ if (orientation == ORIENTATION_CC1) {
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ drvr_en);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
+ drvr_en);
+ }
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
+ drvr_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
+ drvr_en);
+ }
+
MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
0x0a);
@@ -615,7 +672,7 @@ int dp_vco_prepare(struct clk *c)
rc = dp_pll_enable(c);
if (rc) {
mdss_pll_resource_enable(dp_pll_res, false);
- pr_err("ndx=%d failed to enable dsi pll\n",
+ pr_err("ndx=%d failed to enable dp pll\n",
dp_pll_res->index);
goto error;
}
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
index d89545b38e64..28f21ed1fe0d 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
@@ -41,6 +41,7 @@
#define DP_PHY_TX0_TX1_LANE_CTL 0x0068
#define DP_PHY_TX2_TX3_LANE_CTL 0x0084
+#define DP_PHY_SPARE0 0x00A8
#define DP_PHY_STATUS 0x00BC
/* Tx registers */
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
index f6c85cf8d9a4..5f779ec9bcc3 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
@@ -685,6 +685,10 @@ static void pll_db_commit_8996(struct mdss_pll_resources *pll,
MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0);
wmb(); /* make sure register committed */
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_VCO_TUNE, 0);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_CODE, 0);
+ wmb(); /* make sure register committed */
+
data = pdb->in.dsiclk_sel; /* set dsiclk_sel = 1 */
MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG1, data);
diff --git a/drivers/clk/msm/mdss/mdss-hdmi-pll-cobalt.c b/drivers/clk/msm/mdss/mdss-hdmi-pll-cobalt.c
index d5d55a58bf7f..c4f77e01b682 100644
--- a/drivers/clk/msm/mdss/mdss-hdmi-pll-cobalt.c
+++ b/drivers/clk/msm/mdss/mdss-hdmi-pll-cobalt.c
@@ -145,7 +145,7 @@ static void hdmi_cobalt_get_div(struct cobalt_reg_cfg *cfg, unsigned long pclk)
u32 const min_freq = 8000, max_freq = 12000;
u32 const cmp_cnt = 1024;
u32 const th_min = 500, th_max = 1000;
- u64 bit_clk = pclk * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+ u64 bit_clk = ((u64)pclk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
u32 half_rate_mode = 0;
u32 freq_optimal, list_elements;
int optimal_index;
@@ -161,7 +161,7 @@ find_optimal_index:
for (i = 0; i < sz_ratio; i++) {
for (j = 0; j < sz_band; j++) {
- u64 freq = (bit_clk / (1 << half_rate_mode));
+ u64 freq = div_u64(bit_clk, (1 << half_rate_mode));
freq *= (ratio_list[i] * (1 << band_list[j]));
do_div(freq, (u64) HDMI_MHZ_TO_HZ);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index ea6e4a1423af..e39686ca4feb 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -27,7 +27,7 @@ config QCOM_CLK_RPM
config QCOM_CLK_SMD_RPM
tristate "RPM over SMD based Clock Controller"
- depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ depends on COMMON_CLK_QCOM
select QCOM_RPMCC
help
The RPM (Resource Power Manager) is a dedicated hardware engine for
@@ -153,6 +153,26 @@ config MSM_MMCC_8996
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config MSM_GCC_FALCON
+ tristate "MSMFALCON Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+	help
+	  Support for the global clock controller on Qualcomm Technologies, Inc.
+ MSMfalcon devices.
+ Say Y if you want to use peripheral devices such as UART, SPI, I2C,
+ USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_GPUCC_FALCON
+ tristate "MSMFALCON Graphics Clock Controller"
+ select MSM_GCC_FALCON
+ depends on COMMON_CLK_QCOM
+ help
+	  Support for the graphics clock controller on Qualcomm Technologies, Inc.
+	  MSMfalcon devices.
+	  Say Y if you want to support graphics controller devices, which
+	  require this clock controller to be enabled.
+
config QCOM_HFPLL
tristate "High-Frequency PLL (HFPLL) Clock Controller"
depends on COMMON_CLK_QCOM
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index dc1b66f84af2..7ee0294e9dc7 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -13,7 +13,7 @@ clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
clk-qcom-y += clk-dummy.o
-clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
+clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o gdsc-regulator.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
@@ -25,9 +25,11 @@ obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
+obj-$(CONFIG_MSM_GCC_FALCON) += gcc-msmfalcon.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_MSM_GPUCC_FALCON) += gpucc-msmfalcon.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
obj-$(CONFIG_KRAITCC) += krait-cc.o
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index b904c335cda4..da02ab499bff 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -23,6 +23,7 @@ struct freq_tbl {
u8 pre_div;
u16 m;
u16 n;
+ unsigned long src_freq;
};
/**
@@ -158,6 +159,8 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @freq_tbl: frequency table
* @current_freq: last cached frequency when using branches with shared RCGs
* @clkr: regmap clock handle
+ * @flags: set if the RCG needs to be force enabled/disabled during
+ *         the power sequence.
*
*/
struct clk_rcg2 {
@@ -168,6 +171,9 @@ struct clk_rcg2 {
const struct freq_tbl *freq_tbl;
unsigned long current_freq;
struct clk_regmap clkr;
+
+#define FORCE_ENABLE_RCGR BIT(0)
+ u8 flags;
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
@@ -179,5 +185,6 @@ extern const struct clk_ops clk_byte_ops;
extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
+extern const struct clk_ops clk_gfx3d_src_ops;
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a071bba8018c..933a208392bd 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regmap.h>
@@ -60,6 +61,57 @@ static int clk_rcg2_is_enabled(struct clk_hw *hw)
return (cmd & CMD_ROOT_OFF) == 0;
}
+static int clk_rcg_set_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = clk_hw_get_name(hw);
+ int ret = 0, count;
+
+ /* force enable RCG */
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, CMD_ROOT_EN);
+ if (ret)
+ return ret;
+
+ /* wait for RCG to turn ON */
+ for (count = 500; count > 0; count--) {
+ ret = clk_rcg2_is_enabled(hw);
+ if (ret) {
+ ret = 0;
+ break;
+ }
+ udelay(1);
+ }
+	if (!count) {
+		pr_err("%s: RCG did not turn on after force enable\n", name);
+		return -ETIMEDOUT;
+	}
+
+	return ret;
+}
+
+static int clk_rcg2_enable(struct clk_hw *hw)
+{
+ int ret = 0;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (rcg->flags & FORCE_ENABLE_RCGR)
+ ret = clk_rcg_set_force_enable(hw);
+
+ return ret;
+}
+
+static void clk_rcg2_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (rcg->flags & FORCE_ENABLE_RCGR) {
+ /* force disable RCG - clear CMD_ROOT_EN bit */
+ regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + CMD_REG, CMD_ROOT_EN, 0);
+		/* Allow time for the RCG to turn off */
+ udelay(100);
+ }
+}
+
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -290,6 +342,8 @@ static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
}
const struct clk_ops clk_rcg2_ops = {
+ .enable = clk_rcg2_enable,
+ .disable = clk_rcg2_disable,
.is_enabled = clk_rcg2_is_enabled,
.get_parent = clk_rcg2_get_parent,
.set_parent = clk_rcg2_set_parent,
@@ -801,6 +855,8 @@ static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops clk_gfx3d_ops = {
+ .enable = clk_rcg2_enable,
+ .disable = clk_rcg2_disable,
.is_enabled = clk_rcg2_is_enabled,
.get_parent = clk_rcg2_get_parent,
.set_parent = clk_rcg2_set_parent,
@@ -810,3 +866,81 @@ const struct clk_ops clk_gfx3d_ops = {
.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
+
+static int clk_gfx3d_src_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct clk_rate_request parent_req = { };
+ struct clk_hw *p1, *p3, *xo, *curr_p;
+ const struct freq_tbl *f;
+ int ret;
+
+ xo = clk_hw_get_parent_by_index(hw, 0);
+ if (req->rate == clk_hw_get_rate(xo)) {
+ req->best_parent_hw = xo;
+ req->best_parent_rate = req->rate;
+ return 0;
+ }
+
+ f = qcom_find_freq(rcg->freq_tbl, req->rate);
+ if (!f || (req->rate != f->freq))
+ return -EINVAL;
+
+	/* The two PLL sources sit at indexes 1 and 2 of the parent map */
+ p1 = clk_hw_get_parent_by_index(hw, 1);
+ p3 = clk_hw_get_parent_by_index(hw, 2);
+
+ curr_p = clk_hw_get_parent(hw);
+ parent_req.rate = f->src_freq;
+
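+	/*
+	 * Ping-pong between the two PLL parents: return whichever PLL is
+	 * not currently driving the RCG so the new rate is applied via a
+	 * parent switch.
+	 */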
+ if (curr_p == xo || curr_p == p3)
+ req->best_parent_hw = p1;
+ else if (curr_p == p1)
+ req->best_parent_hw = p3;
+
+ parent_req.best_parent_hw = req->best_parent_hw;
+
+ ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
+ if (ret)
+ return ret;
+
+ req->best_parent_rate = parent_req.rate;
+
+ return 0;
+}
+
+static int clk_gfx3d_src_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f;
+ u32 cfg;
+ int ret;
+
+ cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ /* Update the RCG-DIV */
+ cfg |= f->pre_div << CFG_SRC_DIV_SHIFT;
+
+ ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+const struct clk_ops clk_gfx3d_src_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_gfx3d_set_rate,
+ .set_rate_and_parent = clk_gfx3d_src_set_rate_and_parent,
+ .determine_rate = clk_gfx3d_src_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_gfx3d_src_ops);
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 06eb06009268..ac007ec667bb 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Linaro Limited
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -23,6 +23,8 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/smd-rpm.h>
+#include <soc/qcom/rpm-smd.h>
+#include <linux/clk.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/mfd/qcom-rpm.h>
@@ -37,6 +39,8 @@
#define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id, \
key) \
static struct clk_smd_rpm _platform##_##_active; \
+ static unsigned long _name##_##last_active_set_vote; \
+ static unsigned long _name##_##last_sleep_set_vote; \
static struct clk_smd_rpm _platform##_##_name = { \
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
@@ -44,6 +48,8 @@
.rpm_key = (key), \
.peer = &_platform##_##_active, \
.rate = INT_MAX, \
+ .last_active_set_vote = &_name##_##last_active_set_vote, \
+ .last_sleep_set_vote = &_name##_##last_sleep_set_vote, \
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_name, \
@@ -59,6 +65,8 @@
.rpm_key = (key), \
.peer = &_platform##_##_name, \
.rate = INT_MAX, \
+ .last_active_set_vote = &_name##_##last_active_set_vote, \
+ .last_sleep_set_vote = &_name##_##last_sleep_set_vote, \
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_active, \
@@ -70,6 +78,8 @@
#define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, \
stat_id, r, key) \
static struct clk_smd_rpm _platform##_##_active; \
+ static unsigned long _name##_##last_active_set_vote; \
+ static unsigned long _name##_##last_sleep_set_vote; \
static struct clk_smd_rpm _platform##_##_name = { \
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
@@ -78,6 +88,8 @@
.branch = true, \
.peer = &_platform##_##_active, \
.rate = (r), \
+ .last_active_set_vote = &_name##_##last_active_set_vote, \
+ .last_sleep_set_vote = &_name##_##last_sleep_set_vote, \
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_name, \
@@ -94,6 +106,8 @@
.branch = true, \
.peer = &_platform##_##_name, \
.rate = (r), \
+ .last_active_set_vote = &_name##_##last_active_set_vote, \
+ .last_sleep_set_vote = &_name##_##last_sleep_set_vote, \
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_active, \
@@ -137,7 +151,8 @@ struct clk_smd_rpm {
struct clk_smd_rpm *peer;
struct clk_hw hw;
unsigned long rate;
- struct qcom_smd_rpm *rpm;
+ unsigned long *last_active_set_vote;
+ unsigned long *last_sleep_set_vote;
};
struct clk_smd_rpm_req {
@@ -153,61 +168,81 @@ struct rpm_cc {
};
struct rpm_smd_clk_desc {
- struct clk_smd_rpm **clks;
+ struct clk_hw **clks;
+ size_t num_rpm_clks;
size_t num_clks;
};
static DEFINE_MUTEX(rpm_smd_clk_lock);
-static int clk_smd_rpm_handoff(struct clk_smd_rpm *r)
+static int clk_smd_rpm_handoff(struct clk_hw *hw)
{
- int ret;
- struct clk_smd_rpm_req req = {
+ int ret = 0;
+ uint32_t value = cpu_to_le32(INT_MAX);
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct msm_rpm_kvp req = {
.key = cpu_to_le32(r->rpm_key),
- .nbytes = cpu_to_le32(sizeof(u32)),
- .value = cpu_to_le32(INT_MAX),
+ .data = (void *)&value,
+ .length = sizeof(value),
};
- ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
- r->rpm_res_type, r->rpm_clk_id, &req,
- sizeof(req));
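+	/*
+	 * Vote for the maximum rate on both the active and sleep sets so
+	 * that a clock already in use keeps running until its consumers
+	 * place their own votes.
+	 */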
+ ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE, r->rpm_res_type,
+ r->rpm_clk_id, &req, 1);
if (ret)
return ret;
- ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
- r->rpm_res_type, r->rpm_clk_id, &req,
- sizeof(req));
+
+ ret = msm_rpm_send_message(QCOM_SMD_RPM_SLEEP_STATE, r->rpm_res_type,
+ r->rpm_clk_id, &req, 1);
if (ret)
return ret;
- return 0;
+ return ret;
}
static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
- unsigned long rate)
+ uint32_t rate)
{
- struct clk_smd_rpm_req req = {
+ int ret = 0;
+ struct msm_rpm_kvp req = {
.key = cpu_to_le32(r->rpm_key),
- .nbytes = cpu_to_le32(sizeof(u32)),
- .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+ .data = (void *)&rate,
+ .length = sizeof(rate),
};
- return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
- r->rpm_res_type, r->rpm_clk_id, &req,
- sizeof(req));
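+	/* Skip the RPM request if it matches the last active-set vote */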
+ if (*r->last_active_set_vote == rate)
+ return ret;
+
+ ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE, r->rpm_res_type,
+ r->rpm_clk_id, &req, 1);
+ if (ret)
+ return ret;
+
+ *r->last_active_set_vote = rate;
+
+ return ret;
}
static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
- unsigned long rate)
+ uint32_t rate)
{
- struct clk_smd_rpm_req req = {
+ int ret = 0;
+ struct msm_rpm_kvp req = {
.key = cpu_to_le32(r->rpm_key),
- .nbytes = cpu_to_le32(sizeof(u32)),
- .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+ .data = (void *)&rate,
+ .length = sizeof(rate),
};
- return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
- r->rpm_res_type, r->rpm_clk_id, &req,
- sizeof(req));
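+	/* Skip the RPM request if it matches the last sleep-set vote */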
+ if (*r->last_sleep_set_vote == rate)
+ return ret;
+
+ ret = msm_rpm_send_message(QCOM_SMD_RPM_SLEEP_STATE, r->rpm_res_type,
+ r->rpm_clk_id, &req, 1);
+ if (ret)
+ return ret;
+
+ *r->last_sleep_set_vote = rate;
+
+ return ret;
}
static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate,
@@ -231,7 +266,7 @@ static int clk_smd_rpm_prepare(struct clk_hw *hw)
struct clk_smd_rpm *peer = r->peer;
unsigned long this_rate = 0, this_sleep_rate = 0;
unsigned long peer_rate = 0, peer_sleep_rate = 0;
- unsigned long active_rate, sleep_rate;
+ uint32_t active_rate, sleep_rate;
int ret = 0;
mutex_lock(&rpm_smd_clk_lock);
@@ -279,7 +314,7 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
struct clk_smd_rpm *peer = r->peer;
unsigned long peer_rate = 0, peer_sleep_rate = 0;
- unsigned long active_rate, sleep_rate;
+ uint32_t active_rate, sleep_rate;
int ret;
mutex_lock(&rpm_smd_clk_lock);
@@ -313,7 +348,7 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
struct clk_smd_rpm *peer = r->peer;
- unsigned long active_rate, sleep_rate;
+ uint32_t active_rate, sleep_rate;
unsigned long this_rate = 0, this_sleep_rate = 0;
unsigned long peer_rate = 0, peer_sleep_rate = 0;
int ret = 0;
@@ -372,33 +407,55 @@ static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
return r->rate;
}
-static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
+static int clk_smd_rpm_enable_scaling(void)
{
- int ret;
- struct clk_smd_rpm_req req = {
+ int ret = 0;
+ uint32_t value = cpu_to_le32(1);
+ struct msm_rpm_kvp req = {
.key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE),
- .nbytes = cpu_to_le32(sizeof(u32)),
- .value = cpu_to_le32(1),
+ .data = (void *)&value,
+ .length = sizeof(value),
};
- ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE,
- QCOM_SMD_RPM_MISC_CLK,
- QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ ret = msm_rpm_send_message_noirq(QCOM_SMD_RPM_SLEEP_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, 1);
if (ret) {
pr_err("RPM clock scaling (sleep set) not enabled!\n");
return ret;
}
- ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
- QCOM_SMD_RPM_MISC_CLK,
- QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ ret = msm_rpm_send_message_noirq(QCOM_SMD_RPM_ACTIVE_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, 1);
if (ret) {
pr_err("RPM clock scaling (active set) not enabled!\n");
return ret;
}
pr_debug("%s: RPM clock scaling is enabled\n", __func__);
- return 0;
+ return ret;
+}
+
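+/* Send an active-set rate vote for the BIMC clock directly to the RPM */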
+static int clk_vote_bimc(struct clk_hw *hw, uint32_t rate)
+{
+ int ret = 0;
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct msm_rpm_kvp req = {
+ .key = r->rpm_key,
+ .data = (void *)&rate,
+ .length = sizeof(rate),
+ };
+
+ ret = msm_rpm_send_message_noirq(QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req, 1);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ WARN(1, "BIMC vote not sent!\n");
+ return ret;
+ }
+
+ return ret;
}
static const struct clk_ops clk_smd_rpm_ops = {
@@ -430,40 +487,41 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5);
-static struct clk_smd_rpm *msm8916_clks[] = {
- [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
- [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
- [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
- [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
- [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
- [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
- [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
- [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
- [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
- [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
- [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
- [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
- [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
- [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
- [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
- [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
- [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+static struct clk_hw *msm8916_clks[] = {
+ [RPM_PCNOC_CLK] = &msm8916_pcnoc_clk.hw,
+ [RPM_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk.hw,
+ [RPM_SNOC_CLK] = &msm8916_snoc_clk.hw,
+ [RPM_SNOC_A_CLK] = &msm8916_snoc_a_clk.hw,
+ [RPM_BIMC_CLK] = &msm8916_bimc_clk.hw,
+ [RPM_BIMC_A_CLK] = &msm8916_bimc_a_clk.hw,
+ [RPM_QDSS_CLK] = &msm8916_qdss_clk.hw,
+ [RPM_QDSS_A_CLK] = &msm8916_qdss_a_clk.hw,
+ [RPM_BB_CLK1] = &msm8916_bb_clk1.hw,
+ [RPM_BB_CLK1_A] = &msm8916_bb_clk1_a.hw,
+ [RPM_BB_CLK2] = &msm8916_bb_clk2.hw,
+ [RPM_BB_CLK2_A] = &msm8916_bb_clk2_a.hw,
+ [RPM_RF_CLK1] = &msm8916_rf_clk1.hw,
+ [RPM_RF_CLK1_A] = &msm8916_rf_clk1_a.hw,
+ [RPM_RF_CLK2] = &msm8916_rf_clk2.hw,
+ [RPM_RF_CLK2_A] = &msm8916_rf_clk2_a.hw,
+ [RPM_BB_CLK1_PIN] = &msm8916_bb_clk1_pin.hw,
+ [RPM_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin.hw,
+ [RPM_BB_CLK2_PIN] = &msm8916_bb_clk2_pin.hw,
+ [RPM_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin.hw,
+ [RPM_RF_CLK1_PIN] = &msm8916_rf_clk1_pin.hw,
+ [RPM_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin.hw,
+ [RPM_RF_CLK2_PIN] = &msm8916_rf_clk2_pin.hw,
+ [RPM_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin.hw,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8916 = {
.clks = msm8916_clks,
+ .num_rpm_clks = RPM_RF_CLK2_A_PIN,
.num_clks = ARRAY_SIZE(msm8916_clks),
};
/* msm8996 */
-DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8996, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
DEFINE_CLK_SMD_RPM(msm8996, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
@@ -490,55 +548,131 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk2_pin, bb_clk2_a_pin, 2);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk1_pin, rf_clk1_a_pin, 4);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk2_pin, rf_clk2_a_pin, 5);
-static struct clk_smd_rpm *msm8996_clks[] = {
- [RPM_SMD_XO_CLK_SRC] = &msm8996_cxo,
- [RPM_SMD_XO_A_CLK_SRC] = &msm8996_cxo_a,
- [RPM_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk,
- [RPM_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk,
- [RPM_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk,
- [RPM_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk,
- [RPM_SMD_PCNOC_CLK] = &msm8996_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk,
- [RPM_SMD_SNOC_CLK] = &msm8996_snoc_clk,
- [RPM_SMD_SNOC_A_CLK] = &msm8996_snoc_a_clk,
- [RPM_CNOC_CLK] = &msm8996_cnoc_clk,
- [RPM_CNOC_A_CLK] = &msm8996_cnoc_a_clk,
- [RPM_SMD_BIMC_CLK] = &msm8996_bimc_clk,
- [RPM_SMD_BIMC_A_CLK] = &msm8996_bimc_a_clk,
- [RPM_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
- [RPM_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk,
- [RPM_IPA_CLK] = &msm8996_ipa_clk,
- [RPM_IPA_A_CLK] = &msm8996_ipa_a_clk,
- [RPM_CE1_CLK] = &msm8996_ce1_clk,
- [RPM_CE1_A_CLK] = &msm8996_ce1_a_clk,
- [RPM_SMD_QDSS_CLK] = &msm8996_qdss_clk,
- [RPM_SMD_QDSS_A_CLK] = &msm8996_qdss_a_clk,
- [RPM_LN_BB_CLK] = &msm8996_ln_bb_clk,
- [RPM_LN_BB_A_CLK] = &msm8996_ln_bb_a_clk,
- [RPM_DIV_CLK1] = &msm8996_div_clk1,
- [RPM_DIV_CLK1_AO] = &msm8996_div_clk1_ao,
- [RPM_DIV_CLK2] = &msm8996_div_clk2,
- [RPM_DIV_CLK2_AO] = &msm8996_div_clk2_ao,
- [RPM_DIV_CLK3] = &msm8996_div_clk3,
- [RPM_DIV_CLK3_AO] = &msm8996_div_clk3_ao,
- [RPM_BB_CLK1_PIN] = &msm8996_bb_clk1_pin,
- [RPM_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin,
- [RPM_BB_CLK2_PIN] = &msm8996_bb_clk2_pin,
- [RPM_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin,
- [RPM_RF_CLK1_PIN] = &msm8996_rf_clk1_pin,
- [RPM_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin,
- [RPM_RF_CLK2_PIN] = &msm8996_rf_clk2_pin,
- [RPM_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin,
+static struct clk_hw *msm8996_clks[] = {
+ [RPM_XO_CLK_SRC] = &msm8996_cxo.hw,
+ [RPM_XO_A_CLK_SRC] = &msm8996_cxo_a.hw,
+ [RPM_PCNOC_CLK] = &msm8996_pnoc_clk.hw,
+ [RPM_PCNOC_A_CLK] = &msm8996_pnoc_a_clk.hw,
+ [RPM_SNOC_CLK] = &msm8996_snoc_clk.hw,
+ [RPM_SNOC_A_CLK] = &msm8996_snoc_a_clk.hw,
+ [RPM_BIMC_CLK] = &msm8996_bimc_clk.hw,
+ [RPM_BIMC_A_CLK] = &msm8996_bimc_a_clk.hw,
+ [RPM_QDSS_CLK] = &msm8996_qdss_clk.hw,
+ [RPM_QDSS_A_CLK] = &msm8996_qdss_a_clk.hw,
+ [RPM_BB_CLK1_PIN] = &msm8996_bb_clk1_pin.hw,
+ [RPM_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin.hw,
+ [RPM_BB_CLK2_PIN] = &msm8996_bb_clk2_pin.hw,
+ [RPM_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin.hw,
+ [RPM_RF_CLK1_PIN] = &msm8996_rf_clk1_pin.hw,
+ [RPM_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin.hw,
+ [RPM_RF_CLK2_PIN] = &msm8996_rf_clk2_pin.hw,
+ [RPM_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin.hw,
+ [RPM_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk.hw,
+ [RPM_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk.hw,
+ [RPM_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk.hw,
+ [RPM_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk.hw,
+ [RPM_CNOC_CLK] = &msm8996_cnoc_clk.hw,
+ [RPM_CNOC_A_CLK] = &msm8996_cnoc_a_clk.hw,
+ [RPM_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk.hw,
+ [RPM_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk.hw,
+ [RPM_IPA_CLK] = &msm8996_ipa_clk.hw,
+ [RPM_IPA_A_CLK] = &msm8996_ipa_a_clk.hw,
+ [RPM_CE1_CLK] = &msm8996_ce1_clk.hw,
+ [RPM_CE1_A_CLK] = &msm8996_ce1_a_clk.hw,
+ [RPM_DIV_CLK1] = &msm8996_div_clk1.hw,
+ [RPM_DIV_CLK1_AO] = &msm8996_div_clk1_ao.hw,
+ [RPM_DIV_CLK2] = &msm8996_div_clk2.hw,
+ [RPM_DIV_CLK2_AO] = &msm8996_div_clk2_ao.hw,
+ [RPM_DIV_CLK3] = &msm8996_div_clk3.hw,
+ [RPM_DIV_CLK3_AO] = &msm8996_div_clk3_ao.hw,
+ [RPM_LN_BB_CLK] = &msm8996_ln_bb_clk.hw,
+ [RPM_LN_BB_A_CLK] = &msm8996_ln_bb_a_clk.hw,
};
static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
.clks = msm8996_clks,
+ .num_rpm_clks = RPM_LN_BB_A_CLK,
.num_clks = ARRAY_SIZE(msm8996_clks),
};
+/* msmfalcon */
+DEFINE_CLK_SMD_RPM_BRANCH(msmfalcon, cxo, cxo_a, QCOM_SMD_RPM_MISC_CLK, 0,
+ 19200000);
+DEFINE_CLK_SMD_RPM(msmfalcon, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msmfalcon, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM(msmfalcon, cnoc_periph_clk, cnoc_periph_a_clk,
+ QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msmfalcon, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(msmfalcon, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
+ QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(msmfalcon, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM(msmfalcon, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM(msmfalcon, aggre2_noc_clk, aggre2_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 2);
+DEFINE_CLK_SMD_RPM_QDSS(msmfalcon, qdss_clk, qdss_a_clk,
+ QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msmfalcon, rf_clk2, rf_clk2_ao, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msmfalcon, div_clk1, div_clk1_ao, 0xb);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msmfalcon, ln_bb_clk1, ln_bb_clk1_ao, 0x1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msmfalcon, ln_bb_clk2, ln_bb_clk2_ao, 0x2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msmfalcon, ln_bb_clk3, ln_bb_clk3_ao, 0x3);
+
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msmfalcon, rf_clk2_pin, rf_clk2_a_pin, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msmfalcon, ln_bb_clk1_pin,
+ ln_bb_clk1_pin_ao, 0x1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msmfalcon, ln_bb_clk2_pin,
+ ln_bb_clk2_pin_ao, 0x2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msmfalcon, ln_bb_clk3_pin,
+ ln_bb_clk3_pin_ao, 0x3);
+static struct clk_hw *msmfalcon_clks[] = {
+ [RPM_XO_CLK_SRC] = &msmfalcon_cxo.hw,
+ [RPM_XO_A_CLK_SRC] = &msmfalcon_cxo_a.hw,
+ [RPM_SNOC_CLK] = &msmfalcon_snoc_clk.hw,
+ [RPM_SNOC_A_CLK] = &msmfalcon_snoc_a_clk.hw,
+ [RPM_BIMC_CLK] = &msmfalcon_bimc_clk.hw,
+ [RPM_BIMC_A_CLK] = &msmfalcon_bimc_a_clk.hw,
+ [RPM_QDSS_CLK] = &msmfalcon_qdss_clk.hw,
+ [RPM_QDSS_A_CLK] = &msmfalcon_qdss_a_clk.hw,
+ [RPM_RF_CLK2_PIN] = &msmfalcon_rf_clk2_pin.hw,
+ [RPM_RF_CLK2_A_PIN] = &msmfalcon_rf_clk2_a_pin.hw,
+ [RPM_AGGR2_NOC_CLK] = &msmfalcon_aggre2_noc_clk.hw,
+ [RPM_AGGR2_NOC_A_CLK] = &msmfalcon_aggre2_noc_a_clk.hw,
+ [RPM_CNOC_CLK] = &msmfalcon_cnoc_clk.hw,
+ [RPM_CNOC_A_CLK] = &msmfalcon_cnoc_a_clk.hw,
+ [RPM_MMAXI_CLK] = &msmfalcon_mmssnoc_axi_rpm_clk.hw,
+ [RPM_MMAXI_A_CLK] = &msmfalcon_mmssnoc_axi_rpm_a_clk.hw,
+ [RPM_IPA_CLK] = &msmfalcon_ipa_clk.hw,
+ [RPM_IPA_A_CLK] = &msmfalcon_ipa_a_clk.hw,
+ [RPM_CE1_CLK] = &msmfalcon_ce1_clk.hw,
+ [RPM_CE1_A_CLK] = &msmfalcon_ce1_a_clk.hw,
+ [RPM_DIV_CLK1] = &msmfalcon_div_clk1.hw,
+ [RPM_DIV_CLK1_AO] = &msmfalcon_div_clk1_ao.hw,
+ [RPM_LN_BB_CLK1] = &msmfalcon_ln_bb_clk1.hw,
+	[RPM_LN_BB_CLK1_AO] = &msmfalcon_ln_bb_clk1_ao.hw,
+ [RPM_LN_BB_CLK1_PIN] = &msmfalcon_ln_bb_clk1_pin.hw,
+ [RPM_LN_BB_CLK1_PIN_AO] = &msmfalcon_ln_bb_clk1_pin_ao.hw,
+ [RPM_LN_BB_CLK2] = &msmfalcon_ln_bb_clk2.hw,
+ [RPM_LN_BB_CLK2_AO] = &msmfalcon_ln_bb_clk2_ao.hw,
+ [RPM_LN_BB_CLK2_PIN] = &msmfalcon_ln_bb_clk2_pin.hw,
+ [RPM_LN_BB_CLK2_PIN_AO] = &msmfalcon_ln_bb_clk2_pin_ao.hw,
+ [RPM_LN_BB_CLK3] = &msmfalcon_ln_bb_clk3.hw,
+ [RPM_LN_BB_CLK3_AO] = &msmfalcon_ln_bb_clk3_ao.hw,
+ [RPM_LN_BB_CLK3_PIN] = &msmfalcon_ln_bb_clk3_pin.hw,
+ [RPM_LN_BB_CLK3_PIN_AO] = &msmfalcon_ln_bb_clk3_pin_ao.hw,
+ [RPM_CNOC_PERIPH_CLK] = &msmfalcon_cnoc_periph_clk.hw,
+ [RPM_CNOC_PERIPH_A_CLK] = &msmfalcon_cnoc_periph_a_clk.hw,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msmfalcon = {
+ .clks = msmfalcon_clks,
+ .num_rpm_clks = RPM_CNOC_PERIPH_A_CLK,
+ .num_clks = ARRAY_SIZE(msmfalcon_clks),
+};
+
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916},
{ .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996},
+ { .compatible = "qcom,rpmcc-msmfalcon", .data = &rpm_clk_msmfalcon},
{ }
};
MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
@@ -549,23 +683,30 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
struct clk *clk;
struct rpm_cc *rcc;
struct clk_onecell_data *data;
- int ret;
+ int ret, is_8996 = 0, is_falcon = 0;
size_t num_clks, i;
- struct qcom_smd_rpm *rpm;
- struct clk_smd_rpm **rpm_smd_clks;
+ struct clk_hw **hw_clks;
const struct rpm_smd_clk_desc *desc;
- rpm = dev_get_drvdata(pdev->dev.parent);
- if (!rpm) {
- dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
- return -ENODEV;
+ is_8996 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,rpmcc-msm8996");
+ is_falcon = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,rpmcc-msmfalcon");
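+	/* Place an initial maximum-rate vote on the BIMC clock before registration */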
+ if (is_8996) {
+ ret = clk_vote_bimc(&msm8996_bimc_clk.hw, INT_MAX);
+ if (ret < 0)
+ return ret;
+ } else if (is_falcon) {
+ ret = clk_vote_bimc(&msmfalcon_bimc_clk.hw, INT_MAX);
+ if (ret < 0)
+ return ret;
}
desc = of_device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
- rpm_smd_clks = desc->clks;
+ hw_clks = desc->clks;
num_clks = desc->num_clks;
rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks,
@@ -578,30 +719,28 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
data->clks = clks;
data->clk_num = num_clks;
- for (i = 0; i < num_clks; i++) {
- if (!rpm_smd_clks[i]) {
+ for (i = 0; i <= desc->num_rpm_clks; i++) {
+ if (!hw_clks[i]) {
clks[i] = ERR_PTR(-ENOENT);
continue;
}
- rpm_smd_clks[i]->rpm = rpm;
-
- ret = clk_smd_rpm_handoff(rpm_smd_clks[i]);
+ ret = clk_smd_rpm_handoff(hw_clks[i]);
if (ret)
goto err;
}
- ret = clk_smd_rpm_enable_scaling(rpm);
+ ret = clk_smd_rpm_enable_scaling();
if (ret)
goto err;
for (i = 0; i < num_clks; i++) {
- if (!rpm_smd_clks[i]) {
+ if (!hw_clks[i]) {
clks[i] = ERR_PTR(-ENOENT);
continue;
}
- clk = devm_clk_register(&pdev->dev, &rpm_smd_clks[i]->hw);
+ clk = devm_clk_register(&pdev->dev, hw_clks[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto err;
@@ -615,6 +754,14 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
if (ret)
goto err;
+ /* Keep an active vote on CXO in case no other driver votes for it */
+ if (is_8996)
+ clk_prepare_enable(msm8996_cxo_a.hw.clk);
+ else if (is_falcon)
+ clk_prepare_enable(msmfalcon_cxo_a.hw.clk);
+
+ dev_info(&pdev->dev, "Registered RPM clocks\n");
+
return 0;
err:
dev_err(&pdev->dev, "Error registering SMD clock driver (%d)\n", ret);
diff --git a/drivers/clk/qcom/gcc-msmfalcon.c b/drivers/clk/qcom/gcc-msmfalcon.c
new file mode 100644
index 000000000000..78858a3c6f1c
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msmfalcon.c
@@ -0,0 +1,2830 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/qcom,gcc-msmfalcon.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "common.h"
+#include "clk-pll.h"
+#include "clk-regmap.h"
+#include "clk-rcg.h"
+#include "reset.h"
+#include "vdd-level-falcon.h"
+
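+/*
+ * The pre-divider field encodes the divider as (2 * div - 1) so that
+ * half-integer dividers such as 12.5 can be expressed.
+ */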
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGULATORS(vdd_dig_ao, VDD_DIG_NUM, 1, vdd_corner, NULL);
+
+enum {
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL1_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_PLL0_EARLY_DIV_CLK_SRC,
+ P_PLL1_EARLY_DIV_CLK_SRC,
+ P_SLEEP_CLK,
+ P_XO,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "xo",
+ "gpll0_out_main",
+ "gpll0_out_early_div",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "xo",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "xo",
+ "gpll0_out_main",
+ "core_pi_sleep_clk",
+ "gpll0_out_early_div",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "xo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "xo",
+ "core_pi_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+ "xo",
+ "gpll4_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 3 },
+ { P_GPLL1_OUT_MAIN, 4 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_PLL1_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+ "xo",
+ "gpll0_out_main",
+ "gpll0_out_early_div",
+ "gpll1_out_main",
+ "gpll4_out_main",
+ "gpll1_out_early_div",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_7[] = {
+ "xo",
+ "gpll0_out_main",
+ "gpll4_out_main",
+ "gpll0_out_early_div",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_8[] = {
+ "xo",
+ "gpll0_out_main",
+ "gpll0_out_early_div",
+ "gpll4_out_main",
+ "core_bi_pll_test_se",
+};
+
+static struct clk_fixed_factor xo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "xo",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll0_out_main = {
+ .offset = 0x0,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_main",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll0_out_early_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_early_div",
+ .parent_names = (const char *[]){ "gpll0_out_main" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1_out_main = {
+ .offset = 0x1000,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_main",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll1_out_early_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_early_div",
+ .parent_names = (const char *[]){ "gpll1_out_main" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4_out_main = {
+ .offset = 0x77000,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_main",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x19020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1900c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1b020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1b00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1d020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1d00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1f020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1f00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_MAIN, 1, 96, 15625),
+ F(7372800, P_GPLL0_OUT_MAIN, 1, 192, 15625),
+ F(14745600, P_GPLL0_OUT_MAIN, 1, 384, 15625),
+ F(16000000, P_GPLL0_OUT_MAIN, 5, 2, 15),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75),
+ F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375),
+ F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1a00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 31578947,
+ NOMINAL, 63157895),
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1c00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 31578947,
+ NOMINAL, 63157895),
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x26020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x28020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2800c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2a020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2a00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2c020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2c00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x2700c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 31578947,
+ NOMINAL, 63157895),
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x2900c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 31578947,
+ NOMINAL, 63157895),
+ },
+};
+
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_hmss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_ahb_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3_AO(
+ LOWER, 19200000,
+ LOW, 50000000,
+ NOMINAL, 100000000),
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
+ F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_gpll0_clk_src = {
+ .cmd_rcgr = 0x4805c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_hmss_gpll0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_gpll0_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP1_AO(
+ LOWER, 600000000),
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_gpll4_clk_src[] = {
+ F(384000000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(768000000, P_GPLL4_OUT_MAIN, 2, 0, 0),
+ F(1536000000, P_GPLL4_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_gpll4_clk_src = {
+ .cmd_rcgr = 0x48074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_hmss_gpll4_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_gpll4_clk_src",
+ .parent_names = gcc_parent_names_5,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3_AO(
+ LOWER, 400000000,
+ LOW, 800000000,
+ NOMINAL, 1600000000),
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_rbcpr_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x48044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ NOMINAL, 50000000),
+ },
+};
+
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ LOW, 60000000),
+ },
+};
+
+static const struct freq_tbl ftbl_qspi_ser_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(80200000, P_PLL1_EARLY_DIV_CLK_SRC, 5, 0, 0),
+ F(160400000, P_GPLL1_OUT_MAIN, 5, 0, 0),
+ F(320800000, P_GPLL1_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 qspi_ser_clk_src = {
+ .cmd_rcgr = 0x4d00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_qspi_ser_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qspi_ser_clk_src",
+ .parent_names = gcc_parent_names_6,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 80200000,
+ LOW, 160400000,
+ NOMINAL, 320800000),
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_PLL0_EARLY_DIV_CLK_SRC, 5, 1, 3),
+ F(25000000, P_PLL0_EARLY_DIV_CLK_SRC, 6, 1, 2),
+ F(50000000, P_PLL0_EARLY_DIV_CLK_SRC, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(192000000, P_GPLL4_OUT_MAIN, 8, 0, 0),
+ F(384000000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
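+/*
+ * The low card-detection rates above rely on the M/N counters: e.g.
+ * F(144000, P_XO, 16, 3, 25) gives 19.2 MHz / 16 * 3/25 = 144 kHz.
+ */
+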
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x1602c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_parent_names_7,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 400000000),
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_PLL0_EARLY_DIV_CLK_SRC, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x16010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 75000000,
+ LOW, 150000000,
+ NOMINAL, 300000000),
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_PLL0_EARLY_DIV_CLK_SRC, 5, 1, 3),
+ F(25000000, P_PLL0_EARLY_DIV_CLK_SRC, 6, 1, 2),
+ F(50000000, P_PLL0_EARLY_DIV_CLK_SRC, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(192000000, P_GPLL4_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x14010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static const struct freq_tbl ftbl_ufs_axi_clk_src[] = {
+ F(50000000, P_PLL0_EARLY_DIV_CLK_SRC, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_axi_clk_src = {
+ .cmd_rcgr = 0x75018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_ufs_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP5(
+ LOWER, 50000000,
+ LOW, 100000000,
+ LOW_L1, 150000000,
+ NOMINAL, 200000000,
+ HIGH, 240000000),
+ },
+};
+
+static const struct freq_tbl ftbl_ufs_ice_core_clk_src[] = {
+ F(75000000, P_PLL0_EARLY_DIV_CLK_SRC, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_ice_core_clk_src = {
+ .cmd_rcgr = 0x76010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_ufs_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 75000000,
+ LOW, 150000000,
+ NOMINAL, 300000000),
+ },
+};
+
+static struct clk_rcg2 ufs_phy_aux_clk_src = {
+ .cmd_rcgr = 0x76044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP1(
+ LOWER, 19200000),
+ },
+};
+
+static const struct freq_tbl ftbl_ufs_unipro_core_clk_src[] = {
+ F(37500000, P_PLL0_EARLY_DIV_CLK_SRC, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_unipro_core_clk_src = {
+ .cmd_rcgr = 0x76028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_ufs_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 37500000,
+ LOW, 75000000,
+ NOMINAL, 150000000),
+ },
+};
+
+static const struct freq_tbl ftbl_usb20_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb20_master_clk_src = {
+ .cmd_rcgr = 0x2f010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_usb20_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb20_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP3(
+ LOWER, 19200000,
+ LOW, 60000000,
+ NOMINAL, 120000000),
+ },
+};
+
+static struct clk_rcg2 usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x2f024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb20_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 19200000,
+ LOW, 60000000),
+ },
+};
+
+static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(66666667, P_PLL0_EARLY_DIV_CLK_SRC, 4.5, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
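+/*
+ * The half-integer dividers above divide the 600 MHz GPLL0 directly
+ * (see ftbl_hmss_gpll0_clk_src): 600 MHz / 4.5 = 133.33 MHz and
+ * 600 MHz / 2.5 = 240 MHz; the 66.67 MHz row uses the 300 MHz GPLL0
+ * early-divider output.
+ */
+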
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0xf014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP4(
+ LOWER, 66666667,
+ LOW, 133333333,
+ NOMINAL, 200000000,
+ HIGH, 240000000),
+ },
+};
+
+static const struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+ F(40000000, P_PLL0_EARLY_DIV_CLK_SRC, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ LOWER, 40000000,
+ LOW, 60000000),
+ },
+};
+
+static const struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+ F(1200000, P_XO, 16, 0, 0),
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x5000c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb3_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP1(
+ LOWER, 19200000),
+ },
+};
+
+static struct clk_branch gcc_aggre2_ufs_axi_clk = {
+ .halt_reg = 0x75034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre2_ufs_axi_clk",
+ .parent_names = (const char *[]){
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre2_usb3_axi_clk = {
+ .halt_reg = 0xf03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre2_usb3_axi_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x7106c,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x7106c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
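+/*
+ * Voted branches (BRANCH_HALT_VOTED) are enabled through the shared
+ * vote register at 0x52004 rather than their own CBCR; halt status is
+ * still read back from the per-branch halt_reg.
+ */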
+static struct clk_branch gcc_bimc_hmss_axi_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_hmss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_mss_q6_axi_clk = {
+ .halt_reg = 0x4401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_mss_q6_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x19008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x19004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x1b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x1b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x1d008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x1f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x1a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x25004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x2a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x2a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x2c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x2c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x27004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x29004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_axi_clk = {
+ .halt_reg = 0x5058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb2_axi_clk",
+ .parent_names = (const char *[]){
+ "usb20_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
+ .halt_reg = 0x5018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_axi_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcc_ahb_clk = {
+ .halt_reg = 0x84004,
+ .clkr = {
+ .enable_reg = 0x84004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_bimc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_bimc_gfx_src_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_bimc_gfx_src_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x71004,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x71004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
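+/*
+ * clk_gate2 clocks carry no halt bit to poll; the udelay value gives
+ * the settling delay to apply after the enable bit is toggled.
+ */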
+static struct clk_gate2 gpll0_out_msscc = {
+ .udelay = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_msscc",
+ .ops = &clk_gate2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk = {
+ .halt_reg = 0x5200c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk",
+ .parent_names = (const char *[]){
+ "gpll0_out_main",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk = {
+ .halt_reg = 0x5200c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk",
+ .parent_names = (const char *[]){
+ "gpll0_out_early_div",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_ahb_clk",
+ .parent_names = (const char *[]){
+ "hmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_dvm_bus_clk = {
+ .halt_reg = 0x4808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_dvm_bus_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "hmss_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_gpll0_clk = {
+ .halt_reg = 0x5200c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_gpll0_clk",
+ .parent_names = (const char *[]){
+ "gpll0_out_main",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_gpll0_div_clk = {
+ .halt_reg = 0x5200c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_gpll0_div_clk",
+ .parent_names = (const char *[]){
+ "gpll0_out_early_div",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_noc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_sys_noc_axi_clk = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_sys_noc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ .halt_reg = 0x8a004,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mnoc_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x8a040,
+ .clkr = {
+ .enable_reg = 0x8a040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a03c,
+ .clkr = {
+ .enable_reg = 0x8a03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_ahb_clk = {
+ .halt_reg = 0x4d004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_ser_clk = {
+ .halt_reg = 0x4d008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_ser_clk",
+ .parent_names = (const char *[]){
+ "qspi_ser_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx0_usb2_clkref_clk = {
+ .halt_reg = 0x88018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x88018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx0_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx1_usb2_clkref_clk = {
+ .halt_reg = 0x88014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x88014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx1_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx2_qlink_clkref_clk = {
+ .halt_reg = 0x88034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x88034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx2_qlink_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x1600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ahb_clk = {
+ .halt_reg = 0x7500c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_axi_clk = {
+ .halt_reg = 0x75008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_axi_clk",
+ .parent_names = (const char *[]){
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
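+/*
+ * The *_hw_ctl_clk branches below share a CBCR with their parent SW
+ * branch clock but drive BIT(1), the hardware-control bit, handing
+ * gating over to hardware.  Parenting them to the SW branch keeps rate
+ * requests propagating up the tree via CLK_SET_RATE_PARENT.
+ */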
+static struct clk_branch gcc_ufs_axi_hw_ctl_clk = {
+ .halt_reg = 0x75008,
+ .clkr = {
+ .enable_reg = 0x75008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_axi_hw_ctl_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_axi_clk",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_hw_ctl_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_clkref_clk = {
+ .halt_reg = 0x88008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x88008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ice_core_clk = {
+ .halt_reg = 0x7600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ice_core_clk",
+ .parent_names = (const char *[]){
+ "ufs_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x7600c,
+ .clkr = {
+ .enable_reg = 0x7600c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ice_core_hw_ctl_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_ice_core_clk",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_hw_ctl_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_aux_clk = {
+ .halt_reg = 0x76040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "ufs_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x76040,
+ .clkr = {
+ .enable_reg = 0x76040,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_aux_hw_ctl_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_aux_clk",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_hw_ctl_ops,
+ },
+ },
+};
+
+static struct clk_gate2 gcc_ufs_rx_symbol_0_clk = {
+ .udelay = 500,
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_0_clk",
+ .ops = &clk_gate2_ops,
+ },
+ },
+};
+
+static struct clk_gate2 gcc_ufs_rx_symbol_1_clk = {
+ .udelay = 500,
+ .clkr = {
+ .enable_reg = 0x7605c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_1_clk",
+ .ops = &clk_gate2_ops,
+ },
+ },
+};
+
+static struct clk_gate2 gcc_ufs_tx_symbol_0_clk = {
+ .udelay = 500,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_symbol_0_clk",
+ .ops = &clk_gate2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_unipro_core_clk = {
+ .halt_reg = 0x76008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "ufs_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x76008,
+ .clkr = {
+ .enable_reg = 0x76008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_unipro_core_hw_ctl_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_unipro_core_clk",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_hw_ctl_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0x2f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb20_master_clk",
+ .parent_names = (const char *[]){
+ "usb20_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0x2f00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb20_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0x2f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb20_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0xf00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_clkref_clk = {
+ .halt_reg = 0x8800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x50000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "usb3_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_gate2 gcc_usb3_phy_pipe_clk = {
+ .udelay = 50,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_gate2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
+ .halt_reg = 0x7d014,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos1_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7d048,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x7d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos1_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos2_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7e048,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x7e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos2_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gcc_ce1_ahb_m_clk = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_m_clk",
+ .ops = &clk_dummy_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_ce1_axi_m_clk = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_m_clk",
+ .ops = &clk_dummy_ops,
+ },
+};
+
+struct clk_hw *gcc_msmfalcon_hws[] = {
+ [GCC_XO] = &xo.hw,
+ [GCC_GPLL0_EARLY_DIV] = &gpll0_out_early_div.hw,
+ [GCC_GPLL1_EARLY_DIV] = &gpll1_out_early_div.hw,
+ [GCC_CE1_AHB_M_CLK] = &gcc_ce1_ahb_m_clk.hw,
+ [GCC_CE1_AXI_M_CLK] = &gcc_ce1_axi_m_clk.hw,
+};
+
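+/*
+ * The fixed-factor/dummy hws above are not regmap-backed, so the probe
+ * routine registers them individually with devm_clk_register() before
+ * handing the regmap clocks to qcom_cc_really_probe().
+ */
+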
+static struct clk_regmap *gcc_falcon_clocks[] = {
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr,
+ [GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_HMSS_AXI_CLK] = &gcc_bimc_hmss_axi_clk.clkr,
+ [GCC_BIMC_MSS_Q6_AXI_CLK] = &gcc_bimc_mss_q6_axi_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB2_AXI_CLK] = &gcc_cfg_noc_usb2_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_AXI_CLK] = &gcc_cfg_noc_usb3_axi_clk.clkr,
+ [GCC_DCC_AHB_CLK] = &gcc_dcc_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GPU_BIMC_GFX_CLK] = &gcc_gpu_bimc_gfx_clk.clkr,
+ [GCC_GPU_BIMC_GFX_SRC_CLK] = &gcc_gpu_bimc_gfx_src_clk.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_HMSS_AHB_CLK] = &gcc_hmss_ahb_clk.clkr,
+ [GCC_HMSS_DVM_BUS_CLK] = &gcc_hmss_dvm_bus_clk.clkr,
+ [GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
+ [GCC_MMSS_GPLL0_CLK] = &gcc_mmss_gpll0_clk.clkr,
+ [GCC_MMSS_GPLL0_DIV_CLK] = &gcc_mmss_gpll0_div_clk.clkr,
+ [GCC_MMSS_NOC_CFG_AHB_CLK] = &gcc_mmss_noc_cfg_ahb_clk.clkr,
+ [GCC_MMSS_SYS_NOC_AXI_CLK] = &gcc_mmss_sys_noc_axi_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr,
+ [GCC_QSPI_SER_CLK] = &gcc_qspi_ser_clk.clkr,
+ [GCC_RX0_USB2_CLKREF_CLK] = &gcc_rx0_usb2_clkref_clk.clkr,
+ [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
+ [GCC_RX2_QLINK_CLKREF_CLK] = &gcc_rx2_qlink_clkref_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr,
+ [GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr,
+ [GCC_UFS_CLKREF_CLK] = &gcc_ufs_clkref_clk.clkr,
+ [GCC_UFS_ICE_CORE_CLK] = &gcc_ufs_ice_core_clk.clkr,
+ [GCC_UFS_PHY_AUX_CLK] = &gcc_ufs_phy_aux_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr,
+ [GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr,
+ [GCC_UFS_UNIPRO_CORE_CLK] = &gcc_ufs_unipro_core_clk.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB3_CLKREF_CLK] = &gcc_usb3_clkref_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [GPLL0] = &gpll0_out_main.clkr,
+ [GPLL1] = &gpll1_out_main.clkr,
+ [GPLL4] = &gpll4_out_main.clkr,
+ [HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr,
+ [HMSS_AHB_CLK_SRC] = &hmss_ahb_clk_src.clkr,
+ [HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr,
+ [HMSS_GPLL4_CLK_SRC] = &hmss_gpll4_clk_src.clkr,
+ [HMSS_RBCPR_CLK_SRC] = &hmss_rbcpr_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [QSPI_SER_CLK_SRC] = &qspi_ser_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [UFS_ICE_CORE_CLK_SRC] = &ufs_ice_core_clk_src.clkr,
+ [UFS_PHY_AUX_CLK_SRC] = &ufs_phy_aux_clk_src.clkr,
+ [UFS_UNIPRO_CORE_CLK_SRC] = &ufs_unipro_core_clk_src.clkr,
+ [USB20_MASTER_CLK_SRC] = &usb20_master_clk_src.clkr,
+ [USB20_MOCK_UTMI_CLK_SRC] = &usb20_mock_utmi_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+ [GPLL0_OUT_MSSCC] = &gpll0_out_msscc.clkr,
+ [GCC_UFS_AXI_HW_CTL_CLK] = &gcc_ufs_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_unipro_core_hw_ctl_clk.clkr,
+ [HLOS1_VOTE_TURING_ADSP_SMMU_CLK] =
+ &hlos1_vote_turing_adsp_smmu_clk.clkr,
+ [HLOS2_VOTE_TURING_ADSP_SMMU_CLK] =
+ &hlos2_vote_turing_adsp_smmu_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_falcon_resets[] = {
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_UFS_BCR] = { 0x75000 },
+ [GCC_USB3_DP_PHY_BCR] = { 0x50028 },
+ [GCC_USB3_PHY_BCR] = { 0x50020 },
+ [GCC_USB3PHY_PHY_BCR] = { 0x50024 },
+ [GCC_USB_20_BCR] = { 0x2f000 },
+ [GCC_USB_30_BCR] = { 0xf000 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+};
+
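+/*
+ * Each entry above is the offset of a block reset (BCR) register; the
+ * common qcom reset ops assert and deassert the reset by toggling the
+ * BLK_ARES bit (bit 0) of that register.
+ */
+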
+static const struct regmap_config gcc_falcon_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x94000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_falcon_desc = {
+ .config = &gcc_falcon_regmap_config,
+ .clks = gcc_falcon_clocks,
+ .num_clks = ARRAY_SIZE(gcc_falcon_clocks),
+ .resets = gcc_falcon_resets,
+ .num_resets = ARRAY_SIZE(gcc_falcon_resets),
+};
+
+static const struct of_device_id gcc_falcon_match_table[] = {
+ { .compatible = "qcom,gcc-msmfalcon" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_falcon_match_table);
+
+static int gcc_falcon_probe(struct platform_device *pdev)
+{
+ int ret = 0, i;
+ struct regmap *regmap;
+ struct clk *clk;
+
+ regmap = qcom_cc_map(pdev, &gcc_falcon_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
+ * turned off by hardware during certain apps low power modes.
+ */
+ regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
+
+ /* register hardware clocks */
+ for (i = 0; i < ARRAY_SIZE(gcc_msmfalcon_hws); i++) {
+ clk = devm_clk_register(&pdev->dev, gcc_msmfalcon_hws[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+ if (IS_ERR(vdd_dig.regulator[0])) {
+ if (PTR_ERR(vdd_dig.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_dig regulator\n");
+ return PTR_ERR(vdd_dig.regulator[0]);
+ }
+
+ vdd_dig_ao.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig_ao");
+ if (IS_ERR(vdd_dig_ao.regulator[0])) {
+ if (PTR_ERR(vdd_dig_ao.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_dig_ao regulator\n");
+ return PTR_ERR(vdd_dig_ao.regulator[0]);
+ }
+
+ ret = qcom_cc_really_probe(pdev, &gcc_falcon_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GCC clocks\n");
+ return ret;
+ }
+
+ /* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x0902c, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ /* This clock is used for all MMSSCC register access */
+ clk_prepare_enable(gcc_mmss_noc_cfg_ahb_clk.clkr.hw.clk);
+
+ /* This clock is used for all GPUCC register access */
+ clk_prepare_enable(gcc_gpu_cfg_ahb_clk.clkr.hw.clk);
+
+ dev_info(&pdev->dev, "Registered GCC clocks\n");
+
+ return ret;
+}
+
+static struct platform_driver gcc_falcon_driver = {
+ .probe = gcc_falcon_probe,
+ .driver = {
+ .name = "gcc-msmfalcon",
+ .of_match_table = gcc_falcon_match_table,
+ },
+};
+
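+/*
+ * Register at core_initcall_sync rather than via a device initcall so
+ * that the GCC clocks are available before the drivers that consume
+ * them begin probing.
+ */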
+static int __init gcc_falcon_init(void)
+{
+ return platform_driver_register(&gcc_falcon_driver);
+}
+core_initcall_sync(gcc_falcon_init);
+
+static void __exit gcc_falcon_exit(void)
+{
+ platform_driver_unregister(&gcc_falcon_driver);
+}
+module_exit(gcc_falcon_exit);
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
new file mode 100644
index 000000000000..e645354445cb
--- /dev/null
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -0,0 +1,745 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-branch.h"
+
+/* GDSCR */
+#define PWR_ON_MASK BIT(31)
+#define CLK_DIS_WAIT_MASK (0xF << 12)
+#define CLK_DIS_WAIT_SHIFT (12)
+#define SW_OVERRIDE_MASK BIT(2)
+#define HW_CONTROL_MASK BIT(1)
+#define SW_COLLAPSE_MASK BIT(0)
+
+/* Domain Address */
+#define GMEM_CLAMP_IO_MASK BIT(0)
+#define GMEM_RESET_MASK BIT(4)
+
+/* SW Reset */
+#define BCR_BLK_ARES_BIT BIT(0)
+
+/* Register Offset */
+#define REG_OFFSET 0x0
+
+/* Timeout Delay */
+#define TIMEOUT_US 100
+
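+/*
+ * Per-domain state for a GDSC exposed as a regulator.  regmap covers
+ * the main control/status (GDSCR) register; the optional hw_ctrl,
+ * domain_addr and sw_reset regmaps cover a separate status register,
+ * the GMEM clamp/reset controls and a BCR used for SW reset,
+ * respectively.
+ */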
+struct gdsc {
+ struct regulator_dev *rdev;
+ struct regulator_desc rdesc;
+ void __iomem *gdscr;
+ struct regmap *regmap;
+ struct regmap *domain_addr;
+ struct regmap *hw_ctrl;
+ struct regmap *sw_reset;
+ struct clk **clocks;
+ struct reset_control **reset_clocks;
+ bool toggle_mem;
+ bool toggle_periph;
+ bool toggle_logic;
+ bool resets_asserted;
+ bool root_en;
+ bool force_root_en;
+ bool no_status_check_on_disable;
+ bool is_gdsc_enabled;
+ bool allow_clear;
+ bool reset_aon;
+ int clock_count;
+ int reset_count;
+ int root_clk_idx;
+ u32 gds_timeout;
+};
+
+enum gdscr_status {
+ ENABLED,
+ DISABLED,
+};
+
+static DEFINE_MUTEX(gdsc_seq_lock);
+
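+/*
+ * Allow this domain's memory retention flags to be cleared; the flag is
+ * consulted by the retention handling elsewhere in this driver.
+ */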
+void gdsc_allow_clear_retention(struct regulator *regulator)
+{
+ struct gdsc *sc = regulator_get_drvdata(regulator);
+
+ if (sc)
+ sc->allow_clear = true;
+}
+
+static int poll_gdsc_status(struct gdsc *sc, enum gdscr_status status)
+{
+ struct regmap *regmap;
+ int count = sc->gds_timeout;
+ u32 val;
+
+ if (sc->hw_ctrl)
+ regmap = sc->hw_ctrl;
+ else
+ regmap = sc->regmap;
+
+ for (; count > 0; count--) {
+ regmap_read(regmap, REG_OFFSET, &val);
+ val &= PWR_ON_MASK;
+
+ switch (status) {
+ case ENABLED:
+ if (val)
+ return 0;
+ break;
+ case DISABLED:
+ if (!val)
+ return 0;
+ break;
+ }
+ /*
+ * There is no guarantee about the delay needed for the enable
+ * bit in the GDSCR to be set or reset after the GDSC state
+ * changes. Hence, keep checking for a reasonable number of
+ * iterations until the bit reaches the expected state, with the
+ * least possible delay between successive tries.
+ */
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int gdsc_is_enabled(struct regulator_dev *rdev)
+{
+ struct gdsc *sc = rdev_get_drvdata(rdev);
+ uint32_t regval;
+
+ if (!sc->toggle_logic)
+ return !sc->resets_asserted;
+
+ regmap_read(sc->regmap, REG_OFFSET, &regval);
+
+ if (regval & PWR_ON_MASK) {
+ /*
+ * The GDSC might be turned on due to TZ/HYP vote on the
+ * votable GDS registers. Check the SW_COLLAPSE_MASK to
+ * determine if HLOS has voted for it.
+ */
+ if (!(regval & SW_COLLAPSE_MASK))
+ return true;
+ }
+
+ return false;
+}
+
+static int gdsc_enable(struct regulator_dev *rdev)
+{
+ struct gdsc *sc = rdev_get_drvdata(rdev);
+ uint32_t regval, hw_ctrl_regval = 0x0;
+ int i, ret = 0;
+
+ mutex_lock(&gdsc_seq_lock);
+
+ if (sc->root_en || sc->force_root_en)
+ clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+ regmap_read(sc->regmap, REG_OFFSET, &regval);
+ if (regval & HW_CONTROL_MASK) {
+ dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
+ sc->rdesc.name);
+ mutex_unlock(&gdsc_seq_lock);
+ return -EBUSY;
+ }
+
+ if (sc->toggle_logic) {
+ if (sc->sw_reset) {
+ regmap_read(sc->sw_reset, REG_OFFSET, &regval);
+ regval |= BCR_BLK_ARES_BIT;
+ regmap_write(sc->sw_reset, REG_OFFSET, regval);
+ /*
+ * BLK_ARES should be kept asserted for 1us before
+ * being de-asserted.
+ */
+ wmb();
+ udelay(1);
+
+ regval &= ~BCR_BLK_ARES_BIT;
+ regmap_write(sc->sw_reset, REG_OFFSET, regval);
+ /* Make sure de-assert goes through before continuing */
+ wmb();
+ }
+
+ if (sc->domain_addr) {
+ if (sc->reset_aon) {
+ regmap_read(sc->domain_addr, REG_OFFSET,
+ &regval);
+ regval |= GMEM_RESET_MASK;
+ regmap_write(sc->domain_addr, REG_OFFSET,
+ regval);
+ /*
+ * Keep reset asserted for at least 1us before
+ * continuing.
+ */
+ wmb();
+ udelay(1);
+
+ regval &= ~GMEM_RESET_MASK;
+ regmap_write(sc->domain_addr, REG_OFFSET,
+ regval);
+ /*
+ * Make sure GMEM_RESET is de-asserted before
+ * continuing.
+ */
+ wmb();
+ }
+
+ regmap_read(sc->domain_addr, REG_OFFSET, &regval);
+ regval &= ~GMEM_CLAMP_IO_MASK;
+ regmap_write(sc->domain_addr, REG_OFFSET, regval);
+
+ /*
+ * Make sure CLAMP_IO is de-asserted before continuing.
+ */
+ wmb();
+ }
+
+ regmap_read(sc->regmap, REG_OFFSET, &regval);
+ regval &= ~SW_COLLAPSE_MASK;
+ regmap_write(sc->regmap, REG_OFFSET, regval);
+
+ /* Wait for 8 XO cycles before polling the status bit. */
+ mb();
+ udelay(1);
+
+ ret = poll_gdsc_status(sc, ENABLED);
+ if (ret) {
+ regmap_read(sc->regmap, REG_OFFSET, &regval);
+
+ if (sc->hw_ctrl) {
+ regmap_read(sc->hw_ctrl, REG_OFFSET,
+ &hw_ctrl_regval);
+ dev_warn(&rdev->dev, "%s state (after %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x. Re-polling.\n",
+ sc->rdesc.name, sc->gds_timeout,
+ regval, hw_ctrl_regval);
+
+ ret = poll_gdsc_status(sc, ENABLED);
+ if (ret) {
+ regmap_read(sc->regmap, REG_OFFSET,
+ &regval);
+ regmap_read(sc->hw_ctrl, REG_OFFSET,
+ &hw_ctrl_regval);
+ dev_err(&rdev->dev, "%s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
+ sc->rdesc.name, sc->gds_timeout,
+ regval, hw_ctrl_regval);
+
+ mutex_unlock(&gdsc_seq_lock);
+ return ret;
+ }
+ } else {
+ dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
+ sc->rdesc.name,
+ regval);
+ udelay(sc->gds_timeout);
+
+ regmap_read(sc->regmap, REG_OFFSET, &regval);
+ dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
+ sc->rdesc.name, regval,
+ sc->gds_timeout);
+
+ mutex_unlock(&gdsc_seq_lock);
+
+ return ret;
+ }
+ }
+ } else {
+ for (i = 0; i < sc->reset_count; i++)
+ reset_control_deassert(sc->reset_clocks[i]);
+ sc->resets_asserted = false;
+ }
+
+ for (i = 0; i < sc->clock_count; i++) {
+ if (unlikely(i == sc->root_clk_idx))
+ continue;
+ if (sc->toggle_mem)
+ clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+ if (sc->toggle_periph)
+ clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+ }
+
+ /*
+ * If clocks to this power domain were already on, they will take an
+ * additional 4 clock cycles to re-enable after the rail is enabled.
+ * Delay to account for this. A delay is also needed to ensure clocks
+ * are not enabled within 400ns of enabling power to the memories.
+ */
+ udelay(1);
+
+ /* Delay to account for staggered memory powerup. */
+ udelay(1);
+
+ if (sc->force_root_en)
+ clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+
+ sc->is_gdsc_enabled = true;
+
+ mutex_unlock(&gdsc_seq_lock);
+
+ return ret;
+}
+
+static int gdsc_disable(struct regulator_dev *rdev)
+{
+ struct gdsc *sc = rdev_get_drvdata(rdev);
+ uint32_t regval;
+ int i, ret = 0;
+
+ mutex_lock(&gdsc_seq_lock);
+
+ if (sc->force_root_en)
+ clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+ for (i = sc->clock_count - 1; i >= 0; i--) {
+ if (unlikely(i == sc->root_clk_idx))
+ continue;
+ if (sc->toggle_mem && sc->allow_clear)
+ clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+ if (sc->toggle_periph && sc->allow_clear)
+ clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+ }
+
+ /* Delay to account for staggered memory powerdown. */
+ udelay(1);
+
+ if (sc->toggle_logic) {
+ regmap_read(sc->regmap, REG_OFFSET, &regval);
+ regval |= SW_COLLAPSE_MASK;
+ regmap_write(sc->regmap, REG_OFFSET, regval);
+
+ /* Wait for 8 XO cycles before polling the status bit. */
+ mb();
+ udelay(1);
+
+ if (sc->no_status_check_on_disable) {
+ /*
+ * Add a short delay here so that an immediate
+ * gdsc_enable after this disable does not put the
+ * GDSC in an inconsistent state.
+ */
+ udelay(TIMEOUT_US);
+ } else {
+ ret = poll_gdsc_status(sc, DISABLED);
+ if (ret)
+ dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
+ sc->rdesc.name, regval);
+ }
+
+ if (sc->domain_addr) {
+ regmap_read(sc->domain_addr, REG_OFFSET, &regval);
+ regval |= GMEM_CLAMP_IO_MASK;
+ regmap_write(sc->domain_addr, REG_OFFSET, regval);
+ }
+
+ } else {
+ for (i = sc->reset_count - 1; i >= 0; i--)
+ reset_control_assert(sc->reset_clocks[i]);
+ sc->resets_asserted = true;
+ }
+
+ /*
+ * Check if gdsc_enable was called for this GDSC. If not, the root
+ * clock will not have been enabled prior to this.
+ */
+ if ((sc->is_gdsc_enabled && sc->root_en) || sc->force_root_en)
+ clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+
+ sc->is_gdsc_enabled = false;
+
+ mutex_unlock(&gdsc_seq_lock);
+
+ return ret;
+}
+
+static unsigned int gdsc_get_mode(struct regulator_dev *rdev)
+{
+ struct gdsc *sc = rdev_get_drvdata(rdev);
+ uint32_t regval;
+
+ mutex_lock(&gdsc_seq_lock);
+ regmap_read(sc->regmap, REG_OFFSET, &regval);
+ mutex_unlock(&gdsc_seq_lock);
+
+ if (regval & HW_CONTROL_MASK)
+ return REGULATOR_MODE_FAST;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct gdsc *sc = rdev_get_drvdata(rdev);
+ uint32_t regval;
+ int ret = 0;
+
+ mutex_lock(&gdsc_seq_lock);
+
+ regmap_read(sc->regmap, REG_OFFSET, &regval);
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ /* Turn on HW trigger mode */
+ regval |= HW_CONTROL_MASK;
+ regmap_write(sc->regmap, REG_OFFSET, regval);
+ /*
+ * There may be a race with the internal HW trigger
+ * signal that causes the GDSC to go through a power
+ * down and up cycle. If the HW trigger signal is
+ * controlled by firmware that polls the same status
+ * bits we do, FW might read an 'on' status before the
+ * GDSC can finish the power cycle. Wait 1us before
+ * returning so FW can't immediately poll the status bit.
+ */
+ mb();
+ udelay(1);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ /* Turn off HW trigger mode */
+ regval &= ~HW_CONTROL_MASK;
+ regmap_write(sc->regmap, REG_OFFSET, regval);
+ /*
+ * There may be a race with the internal HW trigger
+ * signal that causes the GDSC to go through a power
+ * down and up cycle. If we poll too early, the status
+ * bit will indicate 'on' before the GDSC can finish
+ * the power cycle. Account for this by waiting 1us
+ * before polling.
+ */
+ mb();
+ udelay(1);
+
+ ret = poll_gdsc_status(sc, ENABLED);
+ if (ret)
+ dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
+ sc->rdesc.name, regval);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&gdsc_seq_lock);
+
+ return ret;
+}
+
+static struct regulator_ops gdsc_ops = {
+ .is_enabled = gdsc_is_enabled,
+ .enable = gdsc_enable,
+ .disable = gdsc_disable,
+ .set_mode = gdsc_set_mode,
+ .get_mode = gdsc_get_mode,
+};
+
+static const struct regmap_config gdsc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static int gdsc_probe(struct platform_device *pdev)
+{
+ static atomic_t gdsc_count = ATOMIC_INIT(-1);
+ struct regulator_config reg_config = {};
+ struct regulator_init_data *init_data;
+ struct resource *res;
+ struct gdsc *sc;
+ uint32_t regval, clk_dis_wait_val = 0;
+ bool retain_mem, retain_periph, support_hw_trigger;
+ int i, ret;
+ u32 timeout;
+
+ sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
+ if (sc == NULL)
+ return -ENOMEM;
+
+ init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+ &sc->rdesc);
+ if (init_data == NULL)
+ return -ENOMEM;
+
+ if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
+ init_data->supply_regulator = "parent";
+
+ ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
+ &sc->rdesc.name);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Failed to get resources\n");
+ return -EINVAL;
+ }
+
+ sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (sc->gdscr == NULL)
+ return -ENOMEM;
+
+ sc->regmap = devm_regmap_init_mmio(&pdev->dev, sc->gdscr,
+ &gdsc_regmap_config);
+ /* devm_regmap_init_mmio() returns an ERR_PTR on failure, never NULL */
+ if (IS_ERR(sc->regmap)) {
+ dev_err(&pdev->dev, "Couldn't get regmap\n");
+ return PTR_ERR(sc->regmap);
+ }
+
+ if (of_find_property(pdev->dev.of_node, "domain-addr", NULL)) {
+ sc->domain_addr = syscon_regmap_lookup_by_phandle
+ (pdev->dev.of_node, "domain-addr");
+ if (IS_ERR(sc->domain_addr))
+ return -ENODEV;
+ }
+
+ if (of_find_property(pdev->dev.of_node, "sw-reset", NULL)) {
+ sc->sw_reset = syscon_regmap_lookup_by_phandle
+ (pdev->dev.of_node, "sw-reset");
+ if (IS_ERR(sc->sw_reset))
+ return -ENODEV;
+ }
+
+ if (of_find_property(pdev->dev.of_node, "hw-ctrl-addr", NULL)) {
+ sc->hw_ctrl = syscon_regmap_lookup_by_phandle(
+ pdev->dev.of_node, "hw-ctrl-addr");
+ if (IS_ERR(sc->hw_ctrl))
+ return -ENODEV;
+ }
+
+ sc->gds_timeout = TIMEOUT_US;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,gds-timeout",
+ &timeout);
+ if (!ret)
+ sc->gds_timeout = timeout;
+
+ sc->clock_count = of_property_count_strings(pdev->dev.of_node,
+ "clock-names");
+ if (sc->clock_count == -EINVAL) {
+ sc->clock_count = 0;
+ } else if (IS_ERR_VALUE(sc->clock_count)) {
+ dev_err(&pdev->dev, "Failed to get clock names\n");
+ return -EINVAL;
+ }
+
+ sc->clocks = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
+ if (!sc->clocks)
+ return -ENOMEM;
+
+ sc->root_clk_idx = -1;
+
+ sc->root_en = of_property_read_bool(pdev->dev.of_node,
+ "qcom,enable-root-clk");
+
+ sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
+ "qcom,force-enable-root-clk");
+
+ for (i = 0; i < sc->clock_count; i++) {
+ const char *clock_name;
+
+ of_property_read_string_index(pdev->dev.of_node, "clock-names",
+ i, &clock_name);
+
+ sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
+ if (IS_ERR(sc->clocks[i])) {
+ int rc = PTR_ERR(sc->clocks[i]);
+
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get %s\n",
+ clock_name);
+ return rc;
+ }
+
+ if (!strcmp(clock_name, "core_root_clk"))
+ sc->root_clk_idx = i;
+ }
+
+ if ((sc->root_en || sc->force_root_en) && (sc->root_clk_idx == -1)) {
+ dev_err(&pdev->dev, "Failed to get root clock name\n");
+ return -EINVAL;
+ }
+
+ sc->reset_aon = of_property_read_bool(pdev->dev.of_node,
+ "qcom,reset-aon-logic");
+
+ sc->rdesc.id = atomic_inc_return(&gdsc_count);
+ sc->rdesc.ops = &gdsc_ops;
+ sc->rdesc.type = REGULATOR_VOLTAGE;
+ sc->rdesc.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, sc);
+
+ /*
+ * Disable HW trigger: collapse/restore occur based on register writes.
+ * Disable SW override: Use hardware state-machine for sequencing.
+ */
+ regmap_read(sc->regmap, REG_OFFSET, &regval);
+ regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
+
+ if (!of_property_read_u32(pdev->dev.of_node, "qcom,clk-dis-wait-val",
+ &clk_dis_wait_val)) {
+ clk_dis_wait_val = clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;
+
+ /* Configure wait time between states. */
+ regval &= ~(CLK_DIS_WAIT_MASK);
+ regval |= clk_dis_wait_val;
+ }
+
+ regmap_write(sc->regmap, REG_OFFSET, regval);
+
+ sc->no_status_check_on_disable =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,no-status-check-on-disable");
+ retain_mem = of_property_read_bool(pdev->dev.of_node,
+ "qcom,retain-mem");
+ sc->toggle_mem = !retain_mem;
+ retain_periph = of_property_read_bool(pdev->dev.of_node,
+ "qcom,retain-periph");
+ sc->toggle_periph = !retain_periph;
+ sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
+ "qcom,skip-logic-collapse");
+ support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
+ "qcom,support-hw-trigger");
+ if (support_hw_trigger) {
+ init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE;
+ init_data->constraints.valid_modes_mask |=
+ REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
+ }
+
+ if (!sc->toggle_logic) {
+ sc->reset_count = of_property_count_strings(pdev->dev.of_node,
+ "reset-names");
+ if (sc->reset_count == -EINVAL) {
+ sc->reset_count = 0;
+ } else if (IS_ERR_VALUE(sc->reset_count)) {
+ dev_err(&pdev->dev, "Failed to get reset clock names\n");
+ return -EINVAL;
+ }
+
+ sc->reset_clocks = devm_kzalloc(&pdev->dev,
+ sizeof(struct reset_control *) * sc->reset_count,
+ GFP_KERNEL);
+ if (!sc->reset_clocks)
+ return -ENOMEM;
+
+ for (i = 0; i < sc->reset_count; i++) {
+ const char *reset_name;
+
+ of_property_read_string_index(pdev->dev.of_node,
+ "reset-names", i, &reset_name);
+ sc->reset_clocks[i] = devm_reset_control_get(&pdev->dev,
+ reset_name);
+ if (IS_ERR(sc->reset_clocks[i])) {
+ int rc = PTR_ERR(sc->reset_clocks[i]);
+
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get %s\n",
+ reset_name);
+ return rc;
+ }
+ }
+
+ regval &= ~SW_COLLAPSE_MASK;
+ regmap_write(sc->regmap, REG_OFFSET, regval);
+
+ ret = poll_gdsc_status(sc, ENABLED);
+ if (ret) {
+ dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
+ sc->rdesc.name, regval);
+ return ret;
+ }
+ }
+
+ sc->allow_clear = of_property_read_bool(pdev->dev.of_node,
+ "qcom,disallow-clear");
+ sc->allow_clear = !sc->allow_clear;
+
+ for (i = 0; i < sc->clock_count; i++) {
+ if (retain_mem || (regval & PWR_ON_MASK) || !sc->allow_clear)
+ clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+ else
+ clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+
+ if (retain_periph || (regval & PWR_ON_MASK) || !sc->allow_clear)
+ clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+ else
+ clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+ }
+
+ reg_config.dev = &pdev->dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = sc;
+ reg_config.of_node = pdev->dev.of_node;
+ reg_config.regmap = sc->regmap;
+
+ sc->rdev = regulator_register(&sc->rdesc, &reg_config);
+ if (IS_ERR(sc->rdev)) {
+ dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
+ sc->rdesc.name);
+ return PTR_ERR(sc->rdev);
+ }
+
+ return 0;
+}
+
+static int gdsc_remove(struct platform_device *pdev)
+{
+ struct gdsc *sc = platform_get_drvdata(pdev);
+
+ regulator_unregister(sc->rdev);
+
+ return 0;
+}
+
+static const struct of_device_id gdsc_match_table[] = {
+ { .compatible = "qcom,gdsc" },
+ {}
+};
+
+static struct platform_driver gdsc_driver = {
+ .probe = gdsc_probe,
+ .remove = gdsc_remove,
+ .driver = {
+ .name = "gdsc",
+ .of_match_table = gdsc_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gdsc_init(void)
+{
+ return platform_driver_register(&gdsc_driver);
+}
+subsys_initcall(gdsc_init);
+
+static void __exit gdsc_exit(void)
+{
+ platform_driver_unregister(&gdsc_driver);
+}
+module_exit(gdsc_exit);
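
For orientation, a consumer sees this GDSC driver as an ordinary regulator. The following minimal sketch shows how a hypothetical client driver might power the domain and hand it over to hardware control; the "gdsc" supply name and the function itself are illustrative assumptions, not part of this patch.

    /* Hypothetical GDSC consumer; the "gdsc" supply name is assumed. */
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    static int example_power_up(struct device *dev)
    {
    	struct regulator *gdsc;
    	int ret;

    	gdsc = devm_regulator_get(dev, "gdsc");
    	if (IS_ERR(gdsc))
    		return PTR_ERR(gdsc);

    	ret = regulator_enable(gdsc);	/* ends up in gdsc_enable() */
    	if (ret)
    		return ret;

    	/* Optionally let hardware sequence the domain (gdsc_set_mode). */
    	return regulator_set_mode(gdsc, REGULATOR_MODE_FAST);
    }
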
diff --git a/drivers/clk/qcom/gpucc-msmfalcon.c b/drivers/clk/qcom/gpucc-msmfalcon.c
new file mode 100644
index 000000000000..a2127e2629c7
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-msmfalcon.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/qcom,gpu-msmfalcon.h>
+
+#include "clk-alpha-pll.h"
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "vdd-level-falcon.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+#define F_GFX(f, s, h, m, n, sf) { (f), (s), (2 * (h) - 1), (m), (n), (sf) }
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGS_INIT(vdd_gfx, 1);
+
+enum {
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_PLL0_PLL_OUT_MAIN,
+ P_GPU_PLL1_PLL_OUT_MAIN,
+ P_XO,
+};
+
+static const struct parent_map gpucc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpucc_parent_names_0[] = {
+ "cxo_a",
+ "gcc_gpu_gpll0_clk",
+ "gcc_gpu_gpll0_div_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gpucc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPU_PLL0_PLL_OUT_MAIN, 1 },
+ { P_GPU_PLL1_PLL_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpucc_parent_names_1[] = {
+ "xo",
+ "gpu_pll0_pll_out_main",
+ "gpu_pll1_pll_out_main",
+ "gcc_gpu_gpll0_clk",
+ "core_bi_pll_test_se",
+};
+
+static struct pll_vco gpu_vco[] = {
+ { 1000000000, 2000000000, 0 },
+ { 500000000, 1000000000, 2 },
+ { 250000000, 500000000, 3 },
+};
+
+/* 640MHz configuration */
+static const struct pll_config gpu_pll0_config = {
+ .l = 0x21,
+ .config_ctl_val = 0x4001055b,
+ .alpha = 0x55555600,
+ .alpha_u = 0x55,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
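
As a cross-check of the "640MHz configuration" label, assuming the usual Qualcomm alpha-PLL relation rate = parent * (L + alpha / 2^40) with a 40-bit alpha and a 19.2 MHz XO parent: L = 0x21 = 33 and alpha = 0x5555555600 / 2^40 ≈ 0.3333, so 19.2 MHz * 33.3333 ≈ 640 MHz.
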
+
+static struct pll_vco_data pll_data[] = {
+ /* Frequency post-div */
+ { 640000000, 0x1 },
+};
+
+static struct clk_alpha_pll gpu_pll0_pll_out_main = {
+ .offset = 0x0,
+ .vco_table = gpu_vco,
+ .num_vco = ARRAY_SIZE(gpu_vco),
+ .vco_data = pll_data,
+ .num_vco_data = ARRAY_SIZE(pll_data),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_pll0_pll_out_main",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ VDD_GPU_PLL_FMAX_MAP6(
+ MIN, 266000000,
+ LOWER, 432000000,
+ LOW, 640000000,
+ LOW_L1, 800000000,
+ NOMINAL, 1020000000,
+ HIGH, 1500000000),
+ },
+ },
+};
+
+static struct clk_alpha_pll gpu_pll1_pll_out_main = {
+ .offset = 0x40,
+ .vco_table = gpu_vco,
+ .num_vco = ARRAY_SIZE(gpu_vco),
+ .vco_data = pll_data,
+ .num_vco_data = ARRAY_SIZE(pll_data),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_pll1_pll_out_main",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ VDD_GPU_PLL_FMAX_MAP6(
+ MIN, 266000000,
+ LOWER, 432000000,
+ LOW, 640000000,
+ LOW_L1, 800000000,
+ NOMINAL, 1020000000,
+ HIGH, 1500000000),
+ },
+ },
+};
+
+/* GFX clock init data */
+static struct clk_init_data gpu_clks_init[] = {
+ [0] = {
+ .name = "gfx3d_clk_src",
+ .parent_names = gpucc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_gfx3d_src_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ [1] = {
+ .name = "gpucc_gfx3d_clk",
+ .parent_names = (const char *[]){
+ "gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .vdd_class = &vdd_gfx,
+ },
+};
+
+/*
+ * Frequencies and PLL configuration
+ * The PLL source ping-pongs between GPU-PLL0
+ * and GPU-PLL1.
+ * ====================================================
+ * | F | PLL SRC Freq | PLL postdiv | RCG Div |
+ * ====================================================
+ * | 160000000 | 640000000 | 2 | 2 |
+ * | 266000000 | 532000000 | 1 | 2 |
+ * | 370000000 | 740000000 | 1 | 2 |
+ * | 465000000 | 930000000 | 1 | 2 |
+ * | 588000000 | 1176000000 | 1 | 2 |
+ * | 647000000 | 1294000000 | 1 | 2 |
+ * | 750000000 | 1500000000 | 1 | 2 |
+ * ====================================================
+ */
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ F_GFX( 19200000, 0, 1, 0, 0, 0),
+ F_GFX(160000000, 0, 2, 0, 0, 640000000),
+ F_GFX(266000000, 0, 2, 0, 0, 532000000),
+ F_GFX(370000000, 0, 2, 0, 0, 740000000),
+ F_GFX(465000000, 0, 2, 0, 0, 930000000),
+ F_GFX(588000000, 0, 2, 0, 0, 1176000000),
+ F_GFX(647000000, 0, 2, 0, 0, 1294000000),
+ F_GFX(750000000, 0, 2, 0, 0, 1500000000),
+ { }
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x1070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .parent_map = gpucc_parent_map_1,
+ .flags = FORCE_ENABLE_RCGR,
+ .clkr.hw.init = &gpu_clks_init[0],
+};
+
+static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbbmtimer_clk_src = {
+ .cmd_rcgr = 0x10b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_0,
+ .freq_tbl = ftbl_rbbmtimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbbmtimer_clk_src",
+ .parent_names = gpucc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP1(MIN, 19200000),
+ },
+};
+
+static const struct freq_tbl ftbl_rbcpr_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN_DIV, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_clk_src = {
+ .cmd_rcgr = 0x1030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_0,
+ .freq_tbl = ftbl_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbcpr_clk_src",
+ .parent_names = gpucc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ MIN, 19200000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_branch gpucc_cxo_clk = {
+ .halt_reg = 0x1020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cxo_clk",
+ .parent_names = (const char *[]) {
+ "cxo_a",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_gfx3d_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &gpu_clks_init[1],
+ },
+};
+
+static struct clk_branch gpucc_rbbmtimer_clk = {
+ .halt_reg = 0x10d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_rbbmtimer_clk",
+ .parent_names = (const char *[]){
+ "rbbmtimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_rbcpr_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gpucc_falcon_clocks[] = {
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GPU_PLL0_PLL] = &gpu_pll0_pll_out_main.clkr,
+ [GPU_PLL1_PLL] = &gpu_pll1_pll_out_main.clkr,
+ [GPUCC_CXO_CLK] = &gpucc_cxo_clk.clkr,
+ [GPUCC_GFX3D_CLK] = &gpucc_gfx3d_clk.clkr,
+ [GPUCC_RBBMTIMER_CLK] = &gpucc_rbbmtimer_clk.clkr,
+ [GPUCC_RBCPR_CLK] = &gpucc_rbcpr_clk.clkr,
+ [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
+ [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+};
+
+static const struct regmap_config gpucc_falcon_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9034,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpucc_falcon_desc = {
+ .config = &gpucc_falcon_regmap_config,
+ .clks = gpucc_falcon_clocks,
+ .num_clks = ARRAY_SIZE(gpucc_falcon_clocks),
+};
+
+static const struct of_device_id gpucc_falcon_match_table[] = {
+ { .compatible = "qcom,gpucc-msmfalcon" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpucc_falcon_match_table);
+
+static int of_get_fmax_vdd_class(struct platform_device *pdev,
+ struct clk_hw *hw, char *prop_name, u32 index)
+{
+ struct device_node *of = pdev->dev.of_node;
+ int prop_len, i, j;
+ struct clk_vdd_class *vdd = hw->init->vdd_class;
+ int num = vdd->num_regulators + 1;
+ u32 *array;
+
+ if (!of_find_property(of, prop_name, &prop_len)) {
+ dev_err(&pdev->dev, "missing %s\n", prop_name);
+ return -EINVAL;
+ }
+
+ prop_len /= sizeof(u32);
+ if (prop_len % num) {
+ dev_err(&pdev->dev, "bad length %d\n", prop_len);
+ return -EINVAL;
+ }
+
+ prop_len /= num;
+ vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+ GFP_KERNEL);
+ if (!vdd->level_votes)
+ return -ENOMEM;
+
+ vdd->vdd_uv = devm_kzalloc(&pdev->dev,
+ prop_len * sizeof(int) * (num - 1), GFP_KERNEL);
+ if (!vdd->vdd_uv)
+ return -ENOMEM;
+
+ gpu_clks_init[index].fmax = devm_kzalloc(&pdev->dev, prop_len *
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!gpu_clks_init[index].fmax)
+ return -ENOMEM;
+
+ array = devm_kzalloc(&pdev->dev, prop_len * sizeof(u32) * num,
+ GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ of_property_read_u32_array(of, prop_name, array, prop_len * num);
+ for (i = 0; i < prop_len; i++) {
+ gpu_clks_init[index].fmax[i] = array[num * i];
+ for (j = 1; j < num; j++) {
+ vdd->vdd_uv[(num - 1) * i + (j - 1)] =
+ array[num * i + j];
+ }
+ }
+
+ devm_kfree(&pdev->dev, array);
+ vdd->num_levels = prop_len;
+ vdd->cur_level = prop_len;
+ gpu_clks_init[index].num_fmax = prop_len;
+
+ return 0;
+}
+
+static int gpucc_falcon_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpucc_falcon_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* CX Regulator for RBBMTimer and RBCPR clocks */
+ vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig_gfx");
+ if (IS_ERR(vdd_dig.regulator[0])) {
+ if (!(PTR_ERR(vdd_dig.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get vdd_dig regulator\n");
+ return PTR_ERR(vdd_dig.regulator[0]);
+ }
+
+ /* Mx Regulator for GPU-PLLs */
+ vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx_gfx");
+ if (IS_ERR(vdd_mx.regulator[0])) {
+ if (!(PTR_ERR(vdd_mx.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get vdd_mx regulator\n");
+ return PTR_ERR(vdd_mx.regulator[0]);
+ }
+
+ /* GFX Rail Regulator for GFX3D clock */
+ vdd_gfx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_gfx");
+ if (IS_ERR(vdd_gfx.regulator[0])) {
+ if (!(PTR_ERR(vdd_gfx.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get vdd_gfx regulator\n");
+ return PTR_ERR(vdd_gfx.regulator[0]);
+ }
+
+ /* GFX rail fmax data linked to branch clock */
+ of_get_fmax_vdd_class(pdev, &gpucc_gfx3d_clk.clkr.hw,
+ "qcom,gfxfreq-corner", 1);
+
+ clk_alpha_pll_configure(&gpu_pll0_pll_out_main, regmap,
+ &gpu_pll0_config);
+ clk_alpha_pll_configure(&gpu_pll1_pll_out_main, regmap,
+ &gpu_pll0_config);
+
+ ret = qcom_cc_really_probe(pdev, &gpucc_falcon_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GPUCC clocks\n");
+ return ret;
+ }
+
+ clk_prepare_enable(gpucc_cxo_clk.clkr.hw.clk);
+
+ dev_info(&pdev->dev, "Registered GPUCC clocks\n");
+
+ return ret;
+}
+
+static struct platform_driver gpucc_falcon_driver = {
+ .probe = gpucc_falcon_probe,
+ .driver = {
+ .name = "gpucc-msmfalcon",
+ .of_match_table = gpucc_falcon_match_table,
+ },
+};
+
+static int __init gpucc_falcon_init(void)
+{
+ return platform_driver_register(&gpucc_falcon_driver);
+}
+core_initcall_sync(gpucc_falcon_init);
+
+static void __exit gpucc_falcon_exit(void)
+{
+ platform_driver_unregister(&gpucc_falcon_driver);
+}
+module_exit(gpucc_falcon_exit);
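
For reference, GPU code would consume these clocks through the common clk API. A minimal sketch under the assumption of a "core_clk" consumer id, which is not defined in this patch:

    /* Hypothetical gfx3d consumer; the "core_clk" con_id is assumed. */
    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_gfx_on(struct device *dev)
    {
    	struct clk *gfx3d;
    	int ret;

    	gfx3d = devm_clk_get(dev, "core_clk");
    	if (IS_ERR(gfx3d))
    		return PTR_ERR(gfx3d);

    	/* CLK_SET_RATE_PARENT propagates to gfx3d_clk_src and the PLLs. */
    	ret = clk_set_rate(gfx3d, 465000000);
    	if (ret)
    		return ret;

    	return clk_prepare_enable(gfx3d);
    }
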
diff --git a/drivers/clk/qcom/vdd-level-falcon.h b/drivers/clk/qcom/vdd-level-falcon.h
new file mode 100644
index 000000000000..e8699358cf91
--- /dev/null
+++ b/drivers/clk/qcom/vdd-level-falcon.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_FALCON_H
+#define __DRIVERS_CLK_QCOM_VDD_LEVEL_FALCON_H
+
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/regulator/consumer.h>
+
+#define VDD_DIG_FMAX_MAP1(l1, f1) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+#define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+#define VDD_DIG_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ [VDD_DIG_##l4] = (f4), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP5(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ [VDD_DIG_##l4] = (f4), \
+ [VDD_DIG_##l5] = (f5), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP6(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ [VDD_DIG_##l4] = (f4), \
+ [VDD_DIG_##l5] = (f5), \
+ [VDD_DIG_##l6] = (f6), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP7(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6, \
+ l7, f7) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ [VDD_DIG_##l4] = (f4), \
+ [VDD_DIG_##l5] = (f5), \
+ [VDD_DIG_##l6] = (f6), \
+ [VDD_DIG_##l7] = (f7), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP1_AO(l1, f1) \
+ .vdd_class = &vdd_dig_ao, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP3_AO(l1, f1, l2, f2, l3, f3) \
+ .vdd_class = &vdd_dig_ao, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_GPU_PLL_FMAX_MAP6(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6) \
+ .vdd_class = &vdd_mx, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ [VDD_DIG_##l4] = (f4), \
+ [VDD_DIG_##l5] = (f5), \
+ [VDD_DIG_##l6] = (f6), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+enum vdd_dig_levels {
+ VDD_DIG_NONE,
+ VDD_DIG_MIN, /* MIN SVS */
+ VDD_DIG_LOWER, /* SVS2 */
+ VDD_DIG_LOW, /* SVS */
+ VDD_DIG_LOW_L1, /* SVSL1 */
+ VDD_DIG_NOMINAL, /* NOM */
+ VDD_DIG_NOMINAL_L1, /* NOM L1 */
+ VDD_DIG_HIGH, /* TURBO */
+ VDD_DIG_NUM
+};
+
+static int vdd_corner[] = {
+ RPM_REGULATOR_LEVEL_NONE, /* VDD_DIG_NONE */
+ RPM_REGULATOR_LEVEL_MIN_SVS, /* VDD_DIG_MIN */
+ RPM_REGULATOR_LEVEL_LOW_SVS, /* VDD_DIG_LOWER */
+ RPM_REGULATOR_LEVEL_SVS, /* VDD_DIG_LOW */
+ RPM_REGULATOR_LEVEL_SVS_PLUS, /* VDD_DIG_LOW_L1 */
+ RPM_REGULATOR_LEVEL_NOM, /* VDD_DIG_NOMINAL */
+ RPM_REGULATOR_LEVEL_NOM_PLUS, /* VDD_DIG_NOMINAL_L1 */
+ RPM_REGULATOR_LEVEL_TURBO, /* VDD_DIG_HIGH */
+};
+
+#endif
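
To make the macro shape concrete, the VDD_DIG_FMAX_MAP2(MIN, 19200000, NOMINAL, 50000000) use in rbcpr_clk_src above expands, by inspection of this header, to the following designated initializers (unset levels stay zero):

    .vdd_class = &vdd_dig,
    .fmax = (unsigned long[VDD_DIG_NUM]) {
    	[VDD_DIG_MIN] = (19200000),
    	[VDD_DIG_NOMINAL] = (50000000),
    },
    .num_fmax = VDD_DIG_NUM
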
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index b622b9541279..a045b9a940e8 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -625,12 +625,15 @@ static void smp_callback(void *v)
static int cpuidle_latency_notify(struct notifier_block *b,
unsigned long l, void *v)
{
- const struct cpumask *cpus;
+ struct cpumask cpus;
- cpus = v ?: cpu_online_mask;
+ if (v)
+ cpumask_andnot(&cpus, v, cpu_isolated_mask);
+ else
+ cpumask_andnot(&cpus, cpu_online_mask, cpu_isolated_mask);
preempt_disable();
- smp_call_function_many(cpus, smp_callback, NULL, 1);
+ smp_call_function_many(&cpus, smp_callback, NULL, 1);
preempt_enable();
return NOTIFY_OK;
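
The net effect of this hunk: cpumask_andnot(&cpus, a, b) computes cpus = a & ~b, so with, say, online CPUs {0,1,2,3} and isolated CPU {2}, the smp_callback IPI now reaches only CPUs {0,1,3} (the example masks are illustrative).
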
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index f4ae70ac9315..b40231dd8dd1 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -38,34 +38,138 @@ static const struct lpm_type_str lpm_types[] = {
{SUSPEND, "suspend_enabled"},
};
+static DEFINE_PER_CPU(uint32_t *, max_residency);
+static DEFINE_PER_CPU(uint32_t *, min_residency);
static struct lpm_level_avail *cpu_level_available[NR_CPUS];
static struct platform_device *lpm_pdev;
-static void *get_avail_val(struct kobject *kobj, struct kobj_attribute *attr)
+static void *get_enabled_ptr(struct kobj_attribute *attr,
+ struct lpm_level_avail *avail)
{
void *arg = NULL;
+
+ if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+ arg = (void *) &avail->idle_enabled;
+ else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+ arg = (void *) &avail->suspend_enabled;
+
+ return arg;
+}
+
+static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
+ struct kobj_attribute *attr)
+{
struct lpm_level_avail *avail = NULL;
- if (!strcmp(attr->attr.name, lpm_types[IDLE].str)) {
+ if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
avail = container_of(attr, struct lpm_level_avail,
idle_enabled_attr);
- arg = (void *) &avail->idle_enabled;
- } else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str)) {
+ else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
avail = container_of(attr, struct lpm_level_avail,
suspend_enabled_attr);
- arg = (void *) &avail->suspend_enabled;
+
+ return avail;
+}
+
+static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
+ bool probe_time)
+{
+ int i, j;
+ bool mode_avail;
+ uint32_t *maximum_residency = per_cpu(max_residency, cpu_id);
+ uint32_t *minimum_residency = per_cpu(min_residency, cpu_id);
+
+ for (i = 0; i < cpu->nlevels; i++) {
+ struct power_params *pwr = &cpu->levels[i].pwr;
+
+ mode_avail = probe_time ||
+ lpm_cpu_mode_allow(cpu_id, i, true);
+
+ if (!mode_avail) {
+ maximum_residency[i] = 0;
+ minimum_residency[i] = 0;
+ continue;
+ }
+
+ maximum_residency[i] = ~0;
+ for (j = i + 1; j < cpu->nlevels; j++) {
+ mode_avail = probe_time ||
+ lpm_cpu_mode_allow(cpu_id, j, true);
+
+ if (mode_avail &&
+ (maximum_residency[i] > pwr->residencies[j]) &&
+ (pwr->residencies[j] != 0))
+ maximum_residency[i] = pwr->residencies[j];
+ }
+
+ minimum_residency[i] = pwr->time_overhead_us;
+ for (j = i-1; j >= 0; j--) {
+ if (probe_time || lpm_cpu_mode_allow(cpu_id, j, true)) {
+ minimum_residency[i] = maximum_residency[j] + 1;
+ break;
+ }
+ }
}
+}
- return arg;
+static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
+ bool probe_time)
+{
+ int i, j;
+ bool mode_avail;
+
+ for (i = 0; i < cluster->nlevels; i++) {
+ struct power_params *pwr = &cluster->levels[i].pwr;
+
+ mode_avail = probe_time ||
+ lpm_cluster_mode_allow(cluster, i,
+ true);
+
+ if (!mode_avail) {
+ pwr->max_residency = 0;
+ pwr->min_residency = 0;
+ continue;
+ }
+
+ pwr->max_residency = ~0;
+ for (j = i+1; j < cluster->nlevels; j++) {
+ mode_avail = probe_time ||
+ lpm_cluster_mode_allow(cluster, j,
+ true);
+ if (mode_avail &&
+ (pwr->max_residency > pwr->residencies[j]) &&
+ (pwr->residencies[j] != 0))
+ pwr->max_residency = pwr->residencies[j];
+ }
+
+ pwr->min_residency = pwr->time_overhead_us;
+ for (j = i-1; j >= 0; j--) {
+ if (probe_time ||
+ lpm_cluster_mode_allow(cluster, j, true)) {
+ pwr->min_residency =
+ cluster->levels[j].pwr.max_residency + 1;
+ break;
+ }
+ }
+ }
}
+uint32_t *get_per_cpu_max_residency(int cpu)
+{
+ return per_cpu(max_residency, cpu);
+}
+
+uint32_t *get_per_cpu_min_residency(int cpu)
+{
+ return per_cpu(min_residency, cpu);
+}
ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int ret = 0;
struct kernel_param kp;
- kp.arg = get_avail_val(kobj, attr);
+ kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
ret = param_get_bool(buf, &kp);
if (ret > 0) {
strlcat(buf, "\n", PAGE_SIZE);
@@ -80,15 +184,25 @@ ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
{
int ret = 0;
struct kernel_param kp;
+ struct lpm_level_avail *avail;
- kp.arg = get_avail_val(kobj, attr);
+ avail = get_avail_ptr(kobj, attr);
+ if (WARN_ON(!avail))
+ return -EINVAL;
+ kp.arg = get_enabled_ptr(attr, avail);
ret = param_set_bool(buf, &kp);
+ if (avail->cpu_node)
+ set_optimum_cpu_residency(avail->data, avail->idx, false);
+ else
+ set_optimum_cluster_residency(avail->data, false);
+
return ret ? ret : len;
}
static int create_lvl_avail_nodes(const char *name,
- struct kobject *parent, struct lpm_level_avail *avail)
+ struct kobject *parent, struct lpm_level_avail *avail,
+ void *data, int index, bool cpu_node)
{
struct attribute_group *attr_group = NULL;
struct attribute **attr = NULL;
@@ -139,6 +253,9 @@ static int create_lvl_avail_nodes(const char *name,
avail->idle_enabled = true;
avail->suspend_enabled = true;
avail->kobj = kobj;
+ avail->data = data;
+ avail->idx = index;
+ avail->cpu_node = cpu_node;
return ret;
@@ -181,7 +298,8 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
for (i = 0; i < p->cpu->nlevels; i++) {
ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
- cpu_kobj[cpu_idx], &level_list[i]);
+ cpu_kobj[cpu_idx], &level_list[i],
+ (void *)p->cpu, cpu, true);
if (ret)
goto release_kobj;
}
@@ -215,7 +333,8 @@ int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
for (i = 0; i < p->nlevels; i++) {
ret = create_lvl_avail_nodes(p->levels[i].level_name,
- cluster_kobj, &p->levels[i].available);
+ cluster_kobj, &p->levels[i].available,
+ (void *)p, 0, false);
if (ret)
return ret;
}
@@ -421,6 +540,9 @@ static int parse_power_params(struct device_node *node,
key = "qcom,time-overhead";
ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
+ if (ret)
+ goto fail;
+
fail:
if (ret)
pr_err("%s(): %s Error reading %s\n", __func__, node->name,
@@ -615,11 +737,31 @@ static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
return 0;
}
+static int calculate_residency(struct power_params *base_pwr,
+ struct power_params *next_pwr)
+{
+ int32_t residency = (int32_t)(next_pwr->energy_overhead -
+ base_pwr->energy_overhead) -
+ ((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
+ - (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));
+
+ residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);
+
+ if (residency < 0) {
+ __WARN_printf("%s: Incorrect power attributes for LPM\n",
+ __func__);
+ return next_pwr->time_overhead_us;
+ }
+
+ return residency < next_pwr->time_overhead_us ?
+ next_pwr->time_overhead_us : residency;
+}
+
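+/*
+ * The arithmetic above solves for the break-even sleep time at which the
+ * deeper level's extra overhead pays for itself:
+ *
+ *	residency = ((E_next - E_base) - (P_next*T_next - P_base*T_base))
+ *			/ (P_base - P_next)
+ *
+ * where E is energy_overhead, P is ss_power and T is time_overhead_us of
+ * the shallower (base) and deeper (next) levels. Sleeps longer than this
+ * favour the deeper level; the result is clamped to at least next's
+ * time_overhead_us, and a negative value flags inconsistent power data.
+ */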
static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
{
struct device_node *n;
int ret = -ENOMEM;
- int i;
+ int i, j;
char *key;
c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
@@ -676,6 +818,22 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
else if (ret)
goto failed;
}
+ for (i = 0; i < c->cpu->nlevels; i++) {
+ for (j = 0; j < c->cpu->nlevels; j++) {
+ if (i >= j) {
+ c->cpu->levels[i].pwr.residencies[j] = 0;
+ continue;
+ }
+
+ c->cpu->levels[i].pwr.residencies[j] =
+ calculate_residency(&c->cpu->levels[i].pwr,
+ &c->cpu->levels[j].pwr);
+
+ pr_err("%s: idx %d %u\n", __func__, j,
+ c->cpu->levels[i].pwr.residencies[j]);
+ }
+ }
+
return 0;
failed:
for (i = 0; i < c->cpu->nlevels; i++) {
@@ -732,6 +890,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
struct device_node *n;
char *key;
int ret = 0;
+ int i, j;
c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
if (!c)
@@ -789,6 +948,22 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
goto failed_parse_cluster;
c->aff_level = 1;
+
+ for_each_cpu(i, &c->child_cpus) {
+ per_cpu(max_residency, i) = devm_kzalloc(
+ &lpm_pdev->dev,
+ sizeof(uint32_t) * c->cpu->nlevels,
+ GFP_KERNEL);
+ if (!per_cpu(max_residency, i))
+ return ERR_PTR(-ENOMEM);
+ per_cpu(min_residency, i) = devm_kzalloc(
+ &lpm_pdev->dev,
+ sizeof(uint32_t) * c->cpu->nlevels,
+ GFP_KERNEL);
+ if (!per_cpu(min_residency, i))
+ return ERR_PTR(-ENOMEM);
+ set_optimum_cpu_residency(c->cpu, i, true);
+ }
}
}
@@ -797,6 +972,17 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
else
c->last_level = c->nlevels-1;
+ for (i = 0; i < c->nlevels; i++) {
+ for (j = 0; j < c->nlevels; j++) {
+ if (i >= j) {
+ c->levels[i].pwr.residencies[j] = 0;
+ continue;
+ }
+ c->levels[i].pwr.residencies[j] = calculate_residency(
+ &c->levels[i].pwr, &c->levels[j].pwr);
+ }
+ }
+ set_optimum_cluster_residency(c, true);
return c;
failed_parse_cluster:
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 4f880fdd1478..37e504381313 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1,4 +1,6 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
+ * Copyright (C) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -83,9 +85,36 @@ struct lpm_debug {
struct lpm_cluster *lpm_root_node;
+static bool lpm_prediction;
+module_param_named(lpm_prediction,
+ lpm_prediction, bool, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static uint32_t ref_stddev = 100;
+module_param_named(
+ ref_stddev, ref_stddev, uint, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+static uint32_t tmr_add = 100;
+module_param_named(
+ tmr_add, tmr_add, uint, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+struct lpm_history {
+ uint32_t resi[MAXSAMPLES];
+ int mode[MAXSAMPLES];
+ int nsamp;
+ uint32_t hptr;
+ uint32_t hinvalid;
+ uint32_t htmr_wkup;
+ int64_t stime;
+};
+
+static DEFINE_PER_CPU(struct lpm_history, hist);
+
static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
+static struct hrtimer histtimer;
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;
@@ -327,10 +356,79 @@ static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
return HRTIMER_NORESTART;
}
+static void histtimer_cancel(void)
+{
+ hrtimer_try_to_cancel(&histtimer);
+}
+
+static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
+{
+ int cpu = raw_smp_processor_id();
+ struct lpm_history *history = &per_cpu(hist, cpu);
+
+ history->hinvalid = 1;
+ return HRTIMER_NORESTART;
+}
+
+static void histtimer_start(uint32_t time_us)
+{
+ uint64_t time_ns = time_us * NSEC_PER_USEC;
+ ktime_t hist_ktime = ns_to_ktime(time_ns);
+
+ histtimer.function = histtimer_fn;
+ hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
+static void cluster_timer_init(struct lpm_cluster *cluster)
+{
+ struct list_head *list;
+
+ if (!cluster)
+ return;
+
+ hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ list_for_each(list, &cluster->child) {
+ struct lpm_cluster *n;
+
+ n = list_entry(list, typeof(*n), list);
+ cluster_timer_init(n);
+ }
+}
+
+static void clusttimer_cancel(void)
+{
+ int cpu = raw_smp_processor_id();
+ struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+ hrtimer_try_to_cancel(&cluster->histtimer);
+ hrtimer_try_to_cancel(&cluster->parent->histtimer);
+}
+
+static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
+{
+ struct lpm_cluster *cluster = container_of(h,
+ struct lpm_cluster, histtimer);
+
+ cluster->history.hinvalid = 1;
+ return HRTIMER_NORESTART;
+}
+
+static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
+{
+ uint64_t time_ns = time_us * NSEC_PER_USEC;
+ ktime_t clust_ktime = ns_to_ktime(time_ns);
+
+ cluster->histtimer.function = clusttimer_fn;
+ hrtimer_start(&cluster->histtimer, clust_ktime,
+ HRTIMER_MODE_REL_PINNED);
+}
+
static void msm_pm_set_timer(uint32_t modified_time_us)
{
u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
+
lpm_hrtimer.function = lpm_hrtimer_cb;
hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
}
@@ -415,22 +513,168 @@ static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
return -EINVAL;
}
+static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
+ struct lpm_cpu *cpu, int *idx_restrict,
+ uint32_t *idx_restrict_time)
+{
+ int i, j, divisor;
+ uint64_t max, avg, stddev;
+ int64_t thresh = LLONG_MAX;
+ struct lpm_history *history = &per_cpu(hist, dev->cpu);
+ uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
+
+ if (!lpm_prediction)
+ return 0;
+
+ /*
+ * Samples are marked invalid when the wakeup is due to the
+ * timer, so do not predict.
+ */
+ if (history->hinvalid) {
+ history->hinvalid = 0;
+ history->htmr_wkup = 1;
+ history->stime = 0;
+ return 0;
+ }
+
+ /*
+ * Predict only when all the samples are collected.
+ */
+ if (history->nsamp < MAXSAMPLES) {
+ history->stime = 0;
+ return 0;
+ }
+
+ /*
+ * Check whether the samples deviate little from one another;
+ * if so, use their average as the predicted sleep time.
+ * Otherwise, if any specific mode has too many premature
+ * exits, return the index of that mode.
+ */
+
+again:
+ max = avg = divisor = stddev = 0;
+ for (i = 0; i < MAXSAMPLES; i++) {
+ int64_t value = history->resi[i];
+
+ if (value <= thresh) {
+ avg += value;
+ divisor++;
+ if (value > max)
+ max = value;
+ }
+ }
+ do_div(avg, divisor);
+
+ for (i = 0; i < MAXSAMPLES; i++) {
+ int64_t value = history->resi[i];
+
+ if (value <= thresh) {
+ int64_t diff = value - avg;
+
+ stddev += diff * diff;
+ }
+ }
+ do_div(stddev, divisor);
+ stddev = int_sqrt(stddev);
+
+ /*
+ * If the deviation is small, return the average; otherwise
+ * drop the maximum sample and retry.
+ */
+ if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
+ || stddev <= ref_stddev) {
+ history->stime = ktime_to_us(ktime_get()) + avg;
+ return avg;
+ } else if (divisor > (MAXSAMPLES - 1)) {
+ thresh = max - 1;
+ goto again;
+ }
+
+ /*
+ * Count the premature exits for each mode, excluding the
+ * clock-gating mode; if they exceed fifty percent of the
+ * samples, restrict that mode and all deeper ones.
+ */
+ if (history->htmr_wkup != 1) {
+ for (j = 1; j < cpu->nlevels; j++) {
+ uint32_t failed = 0;
+ uint64_t total = 0;
+
+ for (i = 0; i < MAXSAMPLES; i++) {
+ if ((history->mode[i] == j) &&
+ (history->resi[i] < min_residency[j])) {
+ failed++;
+ total += history->resi[i];
+ }
+ }
+ if (failed > (MAXSAMPLES/2)) {
+ *idx_restrict = j;
+ do_div(total, failed);
+ *idx_restrict_time = total;
+ history->stime = ktime_to_us(ktime_get())
+ + *idx_restrict_time;
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
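
The outlier-trim-and-average step of lpm_cpuidle_predict() can be exercised outside kernel context. A self-contained sketch with invented sample residencies, using the ref_stddev default of 100 and a locally assumed MAXSAMPLES of 5 (the kernel's value lives in a header not shown here):

    /* Standalone sketch of the trim-max-and-average heuristic above.
     * Sample residencies and MAXSAMPLES are invented for illustration.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    #define MAXSAMPLES 5
    #define REF_STDDEV 100

    int main(void)
    {
    	uint32_t resi[MAXSAMPLES] = { 480, 510, 495, 5000, 505 };
    	int64_t thresh = INT64_MAX;

    	for (;;) {
    		uint64_t max = 0, avg = 0, stddev = 0;
    		int divisor = 0, i;

    		for (i = 0; i < MAXSAMPLES; i++) {
    			if (resi[i] <= thresh) {
    				avg += resi[i];
    				divisor++;
    				if (resi[i] > max)
    					max = resi[i];
    			}
    		}
    		avg /= divisor;

    		for (i = 0; i < MAXSAMPLES; i++) {
    			if (resi[i] <= thresh) {
    				int64_t d = (int64_t)resi[i] - (int64_t)avg;
    				stddev += (uint64_t)(d * d);
    			}
    		}
    		stddev = (uint64_t)sqrt((double)stddev / divisor);

    		if ((avg > stddev * 6 && divisor >= MAXSAMPLES - 1) ||
    		    stddev <= REF_STDDEV) {
    			/* Prints 497: the 5000 us outlier was dropped. */
    			printf("predicted: %llu us\n",
    			       (unsigned long long)avg);
    			return 0;
    		}
    		if (divisor > MAXSAMPLES - 1) {
    			thresh = max - 1;	/* drop outlier, retry */
    			continue;
    		}
    		printf("no prediction\n");
    		return 0;
    	}
    }
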
+static inline void invalidate_predict_history(struct cpuidle_device *dev)
+{
+ struct lpm_history *history = &per_cpu(hist, dev->cpu);
+
+ if (!lpm_prediction)
+ return;
+
+ if (history->hinvalid) {
+ history->hinvalid = 0;
+ history->htmr_wkup = 1;
+ history->stime = 0;
+ }
+}
+
+static void clear_predict_history(void)
+{
+ struct lpm_history *history;
+ int i;
+ unsigned int cpu;
+
+ if (!lpm_prediction)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ history = &per_cpu(hist, cpu);
+ for (i = 0; i < MAXSAMPLES; i++) {
+ history->resi[i] = 0;
+ history->mode[i] = -1;
+ history->hptr = 0;
+ history->nsamp = 0;
+ history->stime = 0;
+ }
+ }
+}
+
+static void update_history(struct cpuidle_device *dev, int idx);
+
static int cpu_power_select(struct cpuidle_device *dev,
struct lpm_cpu *cpu)
{
int best_level = -1;
- uint32_t best_level_pwr = ~0U;
uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
dev->cpu);
uint32_t sleep_us =
(uint32_t)(ktime_to_us(tick_nohz_get_sleep_length()));
uint32_t modified_time_us = 0;
uint32_t next_event_us = 0;
- uint32_t pwr;
- int i;
+ int i, idx_restrict;
uint32_t lvl_latency_us = 0;
- uint32_t lvl_overhead_us = 0;
- uint32_t lvl_overhead_energy = 0;
+ uint64_t predicted = 0;
+ uint32_t htime = 0, idx_restrict_time = 0;
+ uint32_t next_wakeup_us = sleep_us;
+ uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
+ uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);
if (!cpu)
return -EINVAL;
@@ -438,12 +682,13 @@ static int cpu_power_select(struct cpuidle_device *dev,
if (sleep_disabled)
return 0;
+ idx_restrict = cpu->nlevels + 1;
+
next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
for (i = 0; i < cpu->nlevels; i++) {
struct lpm_cpu_level *level = &cpu->levels[i];
struct power_params *pwr_params = &level->pwr;
- uint32_t next_wakeup_us = sleep_us;
enum msm_pm_sleep_mode mode = level->mode;
bool allow;
@@ -454,66 +699,88 @@ static int cpu_power_select(struct cpuidle_device *dev,
lvl_latency_us = pwr_params->latency_us;
- lvl_overhead_us = pwr_params->time_overhead_us;
-
- lvl_overhead_energy = pwr_params->energy_overhead;
-
if (latency_us < lvl_latency_us)
- continue;
+ break;
if (next_event_us) {
if (next_event_us < lvl_latency_us)
- continue;
+ break;
if (((next_event_us - lvl_latency_us) < sleep_us) ||
(next_event_us < sleep_us))
next_wakeup_us = next_event_us - lvl_latency_us;
}
- if (next_wakeup_us <= pwr_params->time_overhead_us)
- continue;
-
- /*
- * If wakeup time greater than overhead by a factor of 1000
- * assume that core steady state power dominates the power
- * equation
- */
- if ((next_wakeup_us >> 10) > lvl_overhead_us) {
- pwr = pwr_params->ss_power;
- } else {
- pwr = pwr_params->ss_power;
- pwr -= (lvl_overhead_us * pwr_params->ss_power) /
- next_wakeup_us;
- pwr += pwr_params->energy_overhead / next_wakeup_us;
+ if (!i) {
+ /*
+ * If next_wakeup_us itself is not long enough for
+ * low power modes deeper than clock gating, do
+ * not call prediction.
+ */
+ if (next_wakeup_us > max_residency[i]) {
+ predicted = lpm_cpuidle_predict(dev, cpu,
+ &idx_restrict, &idx_restrict_time);
+ if (predicted < min_residency[i])
+ predicted = 0;
+ } else
+ invalidate_predict_history(dev);
}
- if (best_level_pwr >= pwr) {
- best_level = i;
- best_level_pwr = pwr;
- if (next_event_us && next_event_us < sleep_us &&
- (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
- modified_time_us
- = next_event_us - lvl_latency_us;
- else
- modified_time_us = 0;
- }
+ if (i >= idx_restrict)
+ break;
+
+ best_level = i;
+
+ if (next_event_us && next_event_us < sleep_us &&
+ (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
+ modified_time_us
+ = next_event_us - lvl_latency_us;
+ else
+ modified_time_us = 0;
+
+ if (predicted ? (predicted <= max_residency[i])
+ : (next_wakeup_us <= max_residency[i]))
+ break;
}
if (modified_time_us)
msm_pm_set_timer(modified_time_us);
+ /*
+ * Start a timer to avoid staying in a shallower mode forever
+ * in case of misprediction.
+ */
+ if ((predicted || (idx_restrict != (cpu->nlevels + 1)))
+ && ((best_level >= 0)
+ && (best_level < (cpu->nlevels-1)))) {
+ htime = predicted + tmr_add;
+ if (htime == tmr_add)
+ htime = idx_restrict_time;
+ else if (htime > max_residency[best_level])
+ htime = max_residency[best_level];
+
+ if ((next_wakeup_us > htime) &&
+ ((next_wakeup_us - htime) > max_residency[best_level]))
+ histtimer_start(htime);
+ }
+
trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
+ trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
+ predicted, htime);
+
return best_level;
}
static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
- struct cpumask *mask, bool from_idle)
+ struct cpumask *mask, bool from_idle, uint32_t *pred_time)
{
int cpu;
int next_cpu = raw_smp_processor_id();
ktime_t next_event;
struct cpumask online_cpus_in_cluster;
+ struct lpm_history *history;
+ int64_t prediction = LONG_MAX;
next_event.tv64 = KTIME_MAX;
if (!suspend_wake_time)
@@ -538,11 +805,21 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
next_event.tv64 = next_event_c->tv64;
next_cpu = cpu;
}
+
+ if (from_idle && lpm_prediction) {
+ history = &per_cpu(hist, cpu);
+ if (history->stime && (history->stime < prediction))
+ prediction = history->stime;
+ }
}
if (mask)
cpumask_copy(mask, cpumask_of(next_cpu));
+ if (from_idle && lpm_prediction) {
+ if (prediction > ktime_to_us(ktime_get()))
+ *pred_time = prediction - ktime_to_us(ktime_get());
+ }
if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
return ktime_to_us(ktime_sub(next_event, ktime_get()));
@@ -550,20 +827,193 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
return 0;
}
-static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
+static int cluster_predict(struct lpm_cluster *cluster,
+ uint32_t *pred_us)
+{
+ int i, j;
+ int ret = 0;
+ struct cluster_history *history = &cluster->history;
+ int64_t cur_time = ktime_to_us(ktime_get());
+
+ if (!lpm_prediction)
+ return 0;
+
+ if (history->hinvalid) {
+ history->hinvalid = 0;
+ history->htmr_wkup = 1;
+ history->flag = 0;
+ return ret;
+ }
+
+ if (history->nsamp == MAXSAMPLES) {
+ for (i = 0; i < MAXSAMPLES; i++) {
+ if ((cur_time - history->stime[i])
+ > CLUST_SMPL_INVLD_TIME)
+ history->nsamp--;
+ }
+ }
+
+ if (history->nsamp < MAXSAMPLES) {
+ history->flag = 0;
+ return ret;
+ }
+
+ if (history->flag == 2)
+ history->flag = 0;
+
+ if (history->htmr_wkup != 1) {
+ uint64_t total = 0;
+
+ if (history->flag == 1) {
+ for (i = 0; i < MAXSAMPLES; i++)
+ total += history->resi[i];
+ do_div(total, MAXSAMPLES);
+ *pred_us = total;
+ return 2;
+ }
+
+ for (j = 1; j < cluster->nlevels; j++) {
+ uint32_t failed = 0;
+
+ total = 0;
+ for (i = 0; i < MAXSAMPLES; i++) {
+ if ((history->mode[i] == j) && (history->resi[i]
+ < cluster->levels[j].pwr.min_residency)) {
+ failed++;
+ total += history->resi[i];
+ }
+ }
+
+ if (failed > (MAXSAMPLES-2)) {
+ do_div(total, failed);
+ *pred_us = total;
+ history->flag = 1;
+ return 1;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void update_cluster_history_time(struct cluster_history *history,
+ int idx, uint64_t start)
+{
+ history->entry_idx = idx;
+ history->entry_time = start;
+}
+
+static void update_cluster_history(struct cluster_history *history, int idx)
+{
+ uint32_t tmr = 0;
+ uint32_t residency = 0;
+ struct lpm_cluster *cluster =
+ container_of(history, struct lpm_cluster, history);
+
+ if (!lpm_prediction)
+ return;
+
+ if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
+ residency = ktime_to_us(ktime_get()) - history->entry_time;
+ history->stime[history->hptr] = history->entry_time;
+ } else
+ return;
+
+ if (history->htmr_wkup) {
+ if (!history->hptr)
+ history->hptr = MAXSAMPLES-1;
+ else
+ history->hptr--;
+
+ history->resi[history->hptr] += residency;
+
+ history->htmr_wkup = 0;
+ tmr = 1;
+ } else {
+ history->resi[history->hptr] = residency;
+ }
+
+ history->mode[history->hptr] = idx;
+
+ history->entry_idx = INT_MIN;
+ history->entry_time = 0;
+
+ if (history->nsamp < MAXSAMPLES)
+ history->nsamp++;
+
+ trace_cluster_pred_hist(cluster->cluster_name,
+ history->mode[history->hptr], history->resi[history->hptr],
+ history->hptr, tmr);
+
+ (history->hptr)++;
+
+ if (history->hptr >= MAXSAMPLES)
+ history->hptr = 0;
+}
+
+static void clear_cl_history_each(struct cluster_history *history)
+{
+ int i;
+
+ for (i = 0; i < MAXSAMPLES; i++) {
+ history->resi[i] = 0;
+ history->mode[i] = -1;
+ history->stime[i] = 0;
+ }
+ history->hptr = 0;
+ history->nsamp = 0;
+ history->flag = 0;
+ history->hinvalid = 0;
+ history->htmr_wkup = 0;
+}
+
+static void clear_cl_predict_history(void)
+{
+ struct lpm_cluster *cluster = lpm_root_node;
+ struct list_head *list;
+
+ if (!lpm_prediction)
+ return;
+
+ clear_cl_history_each(&cluster->history);
+
+ list_for_each(list, &cluster->child) {
+ struct lpm_cluster *n;
+
+ n = list_entry(list, typeof(*n), list);
+ clear_cl_history_each(&n->history);
+ }
+}
+
+static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
+ int *ispred)
{
int best_level = -1;
int i;
- uint32_t best_level_pwr = ~0U;
- uint32_t pwr;
struct cpumask mask;
uint32_t latency_us = ~0U;
uint32_t sleep_us;
+ uint32_t cpupred_us = 0, pred_us = 0;
+ int pred_mode = 0, predicted = 0;
if (!cluster)
return -EINVAL;
- sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle);
+ sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL,
+ from_idle, &cpupred_us);
+
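+ /*
+ * Take the tighter of the CPU-level and cluster-level
+ * predictions, and only treat it as a prediction when it
+ * undercuts the next scheduled wakeup.
+ */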
+ if (from_idle) {
+ pred_mode = cluster_predict(cluster, &pred_us);
+
+ if (cpupred_us && pred_mode && (cpupred_us < pred_us))
+ pred_us = cpupred_us;
+
+ if (pred_us && pred_mode && (pred_us < sleep_us))
+ predicted = 1;
+
+ if (predicted && (pred_us == cpupred_us))
+ predicted = 2;
+ }
if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
@@ -596,10 +1046,10 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
continue;
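+ /*
+ * Levels are ordered by depth, so once the PM QoS latency or
+ * the entry/exit overhead rules a level out, every deeper
+ * level is ruled out as well.
+ */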
if (from_idle && latency_us < pwr_params->latency_us)
- continue;
+ break;
if (sleep_us < pwr_params->time_overhead_us)
- continue;
+ break;
if (suspend_in_progress && from_idle && level->notify_rpm)
continue;
@@ -607,21 +1057,21 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
if (level->notify_rpm && msm_rpm_waiting_for_ack())
continue;
- if ((sleep_us >> 10) > pwr_params->time_overhead_us) {
- pwr = pwr_params->ss_power;
- } else {
- pwr = pwr_params->ss_power;
- pwr -= (pwr_params->time_overhead_us *
- pwr_params->ss_power) / sleep_us;
- pwr += pwr_params->energy_overhead / sleep_us;
- }
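+ /*
+ * Track the deepest qualifying level and stop at the first one
+ * whose max_residency already covers the expected (or
+ * predicted) sleep time.
+ */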
+ best_level = i;
- if (best_level_pwr >= pwr) {
- best_level = i;
- best_level_pwr = pwr;
- }
+ if (predicted ? (pred_us <= pwr_params->max_residency)
+ : (sleep_us <= pwr_params->max_residency))
+ break;
}
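+ /*
+ * Deepest level chosen while a CPU-level prediction was in
+ * force: flag the history so the next pass resets the
+ * prediction state.
+ */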
+ if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
+ cluster->history.flag = 2;
+
+ *ispred = predicted;
+
+ trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
+ latency_us, predicted, pred_us);
+
return best_level;
}
@@ -635,7 +1085,7 @@ static void cluster_notify(struct lpm_cluster *cluster,
}
static int cluster_configure(struct lpm_cluster *cluster, int idx,
- bool from_idle)
+ bool from_idle, int predicted)
{
struct lpm_cluster_level *level = &cluster->levels[idx];
int ret, i;
@@ -653,6 +1103,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
lpm_stats_cluster_enter(cluster->stats, idx);
+
+ if (from_idle && lpm_prediction)
+ update_cluster_history_time(&cluster->history, idx,
+ ktime_to_us(ktime_get()));
}
for (i = 0; i < cluster->ndevices; i++) {
@@ -664,8 +1118,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
if (level->notify_rpm) {
struct cpumask nextcpu, *cpumask;
uint64_t us;
+ uint32_t pred_us;
- us = get_cluster_sleep_time(cluster, &nextcpu, from_idle);
+ us = get_cluster_sleep_time(cluster, &nextcpu,
+ from_idle, &pred_us);
cpumask = level->disable_dynamic_routing ? NULL : &nextcpu;
ret = msm_rpm_enter_sleep(0, cpumask);
@@ -675,6 +1131,9 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
}
us = us + 1;
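+ /*
+ * An RPM-notified sleep invalidates the short-term idle
+ * pattern; drop both the per-CPU and cluster prediction
+ * history before programming the wakeup.
+ */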
+ clear_predict_history();
+ clear_cl_predict_history();
+
do_div(us, USEC_PER_SEC/SCLK_HZ);
msm_mpm_enter_sleep(us, from_idle, cpumask);
}
@@ -685,6 +1144,15 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
sched_set_cluster_dstate(&cluster->child_cpus, idx, 0, 0);
cluster->last_level = idx;
+
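+ /*
+ * A shallower level was chosen on a prediction: arm a backstop
+ * timer at max_residency plus slack so a mispredicted long
+ * sleep still gets corrected.
+ */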
+ if (predicted && (idx < (cluster->nlevels - 1))) {
+ struct power_params *pwr_params = &cluster->levels[idx].pwr;
+
+ tick_broadcast_exit();
+ clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
+ tick_broadcast_enter();
+ }
+
return 0;
failed_set_mode:
@@ -703,6 +1171,7 @@ static void cluster_prepare(struct lpm_cluster *cluster,
int64_t start_time)
{
int i;
+ int predicted = 0;
if (!cluster)
return;
@@ -733,12 +1202,28 @@ static void cluster_prepare(struct lpm_cluster *cluster,
&cluster->child_cpus))
goto failed;
- i = cluster_select(cluster, from_idle);
+ i = cluster_select(cluster, from_idle, &predicted);
+
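+ /*
+ * If prediction left the cluster at the default level or with
+ * no level at all, still timestamp the entry so the residency
+ * is sampled, and arm the backstop timer when nothing was
+ * selected.
+ */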
+ if (((i < 0) || (i == cluster->default_level))
+ && predicted && from_idle) {
+ update_cluster_history_time(&cluster->history,
+ -1, ktime_to_us(ktime_get()));
+
+ if (i < 0) {
+ struct power_params *pwr_params =
+ &cluster->levels[0].pwr;
+
+ tick_broadcast_exit();
+ clusttimer_start(cluster,
+ pwr_params->max_residency + tmr_add);
+ tick_broadcast_enter();
+ }
+ }
if (i < 0)
goto failed;
- if (cluster_configure(cluster, i, from_idle))
+ if (cluster_configure(cluster, i, from_idle, predicted))
goto failed;
cluster->stats->sleep_time = start_time;
@@ -782,6 +1267,10 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
&lvl->num_cpu_votes, cpu);
}
+ if (from_idle && first_cpu &&
+ (cluster->last_level == cluster->default_level))
+ update_cluster_history(&cluster->history, cluster->last_level);
+
if (!first_cpu || cluster->last_level == cluster->default_level)
goto unlock_return;
@@ -823,6 +1312,10 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
sched_set_cluster_dstate(&cluster->child_cpus, 0, 0, 0);
cluster_notify(cluster, &cluster->levels[last_level], false);
+
+ if (from_idle)
+ update_cluster_history(&cluster->history, last_level);
+
cluster_unprepare(cluster->parent, &cluster->child_cpus,
last_level, from_idle, end_time);
unlock_return:
@@ -1009,6 +1502,39 @@ static int lpm_cpuidle_select(struct cpuidle_driver *drv,
return idx;
}
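+/*
+ * update_history() - record the last idle residency for this CPU. A wakeup
+ * forced by the prediction history timer folds the residency into the
+ * previous sample instead of consuming a new ring-buffer slot.
+ */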
+static void update_history(struct cpuidle_device *dev, int idx)
+{
+ struct lpm_history *history = &per_cpu(hist, dev->cpu);
+ uint32_t tmr = 0;
+
+ if (!lpm_prediction)
+ return;
+
+ if (history->htmr_wkup) {
+ if (!history->hptr)
+ history->hptr = MAXSAMPLES-1;
+ else
+ history->hptr--;
+
+ history->resi[history->hptr] += dev->last_residency;
+ history->htmr_wkup = 0;
+ tmr = 1;
+ } else
+ history->resi[history->hptr] = dev->last_residency;
+
+ history->mode[history->hptr] = idx;
+
+ trace_cpu_pred_hist(history->mode[history->hptr],
+ history->resi[history->hptr], history->hptr, tmr);
+
+ if (history->nsamp < MAXSAMPLES)
+ history->nsamp++;
+
+ (history->hptr)++;
+ if (history->hptr >= MAXSAMPLES)
+ history->hptr = 0;
+}
+
static int lpm_cpuidle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
@@ -1043,12 +1569,16 @@ exit:
cluster_unprepare(cluster, cpumask, idx, true, end_time);
cpu_unprepare(cluster, idx, true);
sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
-
- trace_cpu_idle_exit(idx, success);
end_time = ktime_to_ns(ktime_get()) - start_time;
- dev->last_residency = do_div(end_time, 1000);
+ do_div(end_time, 1000);
+ dev->last_residency = end_time;
+ update_history(dev, idx);
+ trace_cpu_idle_exit(idx, success);
local_irq_enable();
-
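+ /*
+ * The prediction backstop timers are no longer needed once the
+ * CPU is back up; cancel them.
+ */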
+ if (lpm_prediction) {
+ histtimer_cancel();
+ clusttimer_cancel();
+ }
return idx;
}
@@ -1320,6 +1850,8 @@ static int lpm_probe(struct platform_device *pdev)
*/
suspend_set_ops(&lpm_suspend_ops);
hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cluster_timer_init(lpm_root_node);
ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
if (ret) {
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 8e05336be21a..3c9665ea8981 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -14,6 +14,8 @@
#include <soc/qcom/spm.h>
#define NR_LPM_LEVELS 8
+#define MAXSAMPLES 5 /* idle history samples kept per CPU/cluster */
+#define CLUST_SMPL_INVLD_TIME 40000 /* sample validity window, in us */
extern bool use_psci;
@@ -27,6 +29,9 @@ struct power_params {
uint32_t ss_power; /* Steady state power */
uint32_t energy_overhead; /* Enter + exit over head */
uint32_t time_overhead_us; /* Enter + exit overhead */
+ uint32_t residencies[NR_LPM_LEVELS];
+ uint32_t min_residency;
+ uint32_t max_residency;
};
struct lpm_cpu_level {
@@ -55,6 +60,9 @@ struct lpm_level_avail {
struct kobject *kobj;
struct kobj_attribute idle_enabled_attr;
struct kobj_attribute suspend_enabled_attr;
+ void *data;
+ int idx;
+ bool cpu_node;
};
struct lpm_cluster_level {
@@ -79,6 +87,19 @@ struct low_power_ops {
enum msm_pm_l2_scm_flag tz_flag;
};
+struct cluster_history {
+ uint32_t resi[MAXSAMPLES]; /* residency of each sample, in us */
+ int mode[MAXSAMPLES]; /* LPM level index of each sample */
+ int64_t stime[MAXSAMPLES]; /* entry timestamp of each sample */
+ uint32_t hptr; /* next slot in the sample ring buffer */
+ uint32_t hinvalid; /* history invalidated by a timer wakeup */
+ uint32_t htmr_wkup; /* last wakeup was the history timer */
+ uint64_t entry_time; /* when the current level was entered */
+ int entry_idx; /* level being entered, -1 or INT_MIN if none */
+ int nsamp; /* number of valid samples */
+ int flag; /* prediction state */
+};
+
struct lpm_cluster {
struct list_head list;
struct list_head child;
@@ -103,6 +124,8 @@ struct lpm_cluster {
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
bool no_saw_devices;
+ struct cluster_history history;
+ struct hrtimer histtimer;
};
int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
@@ -119,7 +142,8 @@ bool lpm_cpu_mode_allow(unsigned int cpu,
unsigned int mode, bool from_idle);
bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
unsigned int mode, bool from_idle);
-
+uint32_t *get_per_cpu_max_residency(int cpu);
+uint32_t *get_per_cpu_min_residency(int cpu);
extern struct lpm_cluster *lpm_root_node;
#ifdef CONFIG_SMP
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index 707a244e62e9..315d3a67e43e 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -377,7 +377,7 @@ static unsigned long mon_get_zone_stats(struct bwmon *m)
zone = get_zone(m);
- count = readl_relaxed(MON2_ZONE_MAX(m, zone));
+ count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
count *= SZ_1M;
dev_dbg(m->dev, "Zone%d Max byte count: %08lx\n", zone, count);
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 90aee3cad5ad..625a2640b4c4 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -3,7 +3,7 @@ ccflags-y := -Idrivers/staging/android
msm_kgsl_core-y = \
kgsl.o \
kgsl_trace.o \
- kgsl_cmdbatch.o \
+ kgsl_drawobj.o \
kgsl_ioctl.o \
kgsl_sharedmem.o \
kgsl_pwrctrl.o \
diff --git a/drivers/gpu/msm/a4xx_reg.h b/drivers/gpu/msm/a4xx_reg.h
index 4b69583a6ce1..8e658c1d54d2 100644
--- a/drivers/gpu/msm/a4xx_reg.h
+++ b/drivers/gpu/msm/a4xx_reg.h
@@ -197,6 +197,7 @@ enum a4xx_rb_perfctr_rb_sel {
#define A4XX_RBBM_CFG_DEBBUS_CLRC 0x94
#define A4XX_RBBM_CFG_DEBBUS_LOADIVT 0x95
+#define A4XX_RBBM_CLOCK_CTL_IP 0x97
#define A4XX_RBBM_POWER_CNTL_IP 0x98
#define A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x99
#define A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x9a
diff --git a/drivers/gpu/msm/a5xx_reg.h b/drivers/gpu/msm/a5xx_reg.h
index 3b29452ce8bd..f3b4e6622043 100644
--- a/drivers/gpu/msm/a5xx_reg.h
+++ b/drivers/gpu/msm/a5xx_reg.h
@@ -640,6 +640,7 @@
/* UCHE registers */
#define A5XX_UCHE_ADDR_MODE_CNTL 0xE80
+#define A5XX_UCHE_MODE_CNTL 0xE81
#define A5XX_UCHE_WRITE_THRU_BASE_LO 0xE87
#define A5XX_UCHE_WRITE_THRU_BASE_HI 0xE88
#define A5XX_UCHE_TRAP_BASE_LO 0xE89
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index a3b25b3d8dd1..3615be45b6d9 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -244,6 +244,28 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.core = 5,
.major = 4,
.minor = 0,
+ .patchid = 0,
+ .features = ADRENO_PREEMPTION | ADRENO_64BIT |
+ ADRENO_CONTENT_PROTECTION |
+ ADRENO_GPMU | ADRENO_SPTP_PC,
+ .pm4fw_name = "a530_pm4.fw",
+ .pfpfw_name = "a530_pfp.fw",
+ .zap_name = "a540_zap",
+ .gpudev = &adreno_a5xx_gpudev,
+ .gmem_size = SZ_1M,
+ .num_protected_regs = 0x20,
+ .busy_mask = 0xFFFFFFFE,
+ .gpmufw_name = "a540_gpmu.fw2",
+ .gpmu_major = 3,
+ .gpmu_minor = 0,
+ .gpmu_tsens = 0x000C000D,
+ .max_power = 5448,
+ },
+ {
+ .gpurev = ADRENO_REV_A540,
+ .core = 5,
+ .major = 4,
+ .minor = 0,
.patchid = ANY_ID,
.features = ADRENO_PREEMPTION | ADRENO_64BIT |
ADRENO_CONTENT_PROTECTION |
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 362493118670..94d828027f20 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -40,6 +40,7 @@
/* Include the master list of GPU cores that are supported */
#include "adreno-gpulist.h"
+#include "adreno_dispatch.h"
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "adreno."
@@ -1015,8 +1016,8 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv))
- kgsl_free_global(device, &adreno_dev->cmdbatch_profile_buffer);
+ if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv))
+ kgsl_free_global(device, &adreno_dev->profile_buffer);
/* Free local copies of firmware and other command streams */
kfree(adreno_dev->pfp_fw);
@@ -1187,22 +1188,22 @@ static int adreno_init(struct kgsl_device *device)
}
/*
- * Allocate a small chunk of memory for precise cmdbatch profiling for
+ * Allocate a small chunk of memory for precise drawobj profiling for
* those targets that have the always on timer
*/
if (!adreno_is_a3xx(adreno_dev)) {
int r = kgsl_allocate_global(device,
- &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE,
+ &adreno_dev->profile_buffer, PAGE_SIZE,
0, 0, "alwayson");
- adreno_dev->cmdbatch_profile_index = 0;
+ adreno_dev->profile_index = 0;
if (r == 0) {
- set_bit(ADRENO_DEVICE_CMDBATCH_PROFILE,
+ set_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE,
&adreno_dev->priv);
kgsl_sharedmem_set(device,
- &adreno_dev->cmdbatch_profile_buffer, 0, 0,
+ &adreno_dev->profile_buffer, 0, 0,
PAGE_SIZE);
}
@@ -1242,86 +1243,6 @@ static bool regulators_left_on(struct kgsl_device *device)
return false;
}
-static void _setup_throttling_counters(struct adreno_device *adreno_dev)
-{
- int i, ret;
-
- if (!adreno_is_a540(adreno_dev))
- return;
-
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
- return;
-
- for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
- /* reset throttled cycles ivalue */
- adreno_dev->busy_data.throttle_cycles[i] = 0;
-
- if (adreno_dev->gpmu_throttle_counters[i] != 0)
- continue;
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
- ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i,
- &adreno_dev->gpmu_throttle_counters[i],
- NULL,
- PERFCOUNTER_FLAG_KERNEL);
- WARN_ONCE(ret, "Unable to get clock throttling counter %x\n",
- ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i);
- }
-}
-
-/* FW driven idle 10% throttle */
-#define IDLE_10PCT 0
-/* number of cycles when clock is throttled by 50% (CRC) */
-#define CRC_50PCT 1
-/* number of cycles when clock is throttled by more than 50% (CRC) */
-#define CRC_MORE50PCT 2
-/* number of cycles when clock is throttle by less than 50% (CRC) */
-#define CRC_LESS50PCT 3
-
-static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev)
-{
- int i, adj;
- uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
- struct adreno_busy_data *busy = &adreno_dev->busy_data;
-
- if (!adreno_is_a540(adreno_dev))
- return 0;
-
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
- return 0;
-
- if (!test_bit(ADRENO_THROTTLING_CTRL, &adreno_dev->pwrctrl_flag))
- return 0;
-
- for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
- if (!adreno_dev->gpmu_throttle_counters[i])
- return 0;
-
- th[i] = counter_delta(KGSL_DEVICE(adreno_dev),
- adreno_dev->gpmu_throttle_counters[i],
- &busy->throttle_cycles[i]);
- }
- adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
- adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
-
- trace_kgsl_clock_throttling(
- th[IDLE_10PCT], th[CRC_50PCT],
- th[CRC_MORE50PCT], th[CRC_LESS50PCT],
- adj);
- return adj;
-}
-
-static void _update_threshold_count(struct adreno_device *adreno_dev,
- uint64_t adj)
-{
- if (adreno_is_a530(adreno_dev))
- kgsl_regread(KGSL_DEVICE(adreno_dev),
- adreno_dev->lm_threshold_count,
- &adreno_dev->lm_threshold_cross);
- else if (adreno_is_a540(adreno_dev))
- adreno_dev->lm_threshold_cross = adj;
-}
-
static void _set_secvid(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1418,8 +1339,8 @@ static int _adreno_start(struct adreno_device *adreno_dev)
}
}
- if (device->pwrctrl.bus_control) {
+ if (device->pwrctrl.bus_control) {
/* VBIF waiting for RAM */
if (adreno_dev->starved_ram_lo == 0) {
ret = adreno_perfcounter_get(adreno_dev,
@@ -1455,20 +1376,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
adreno_dev->busy_data.vbif_ram_cycles = 0;
adreno_dev->busy_data.vbif_starved_ram = 0;
- if (adreno_is_a530(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM)
- && adreno_dev->lm_threshold_count == 0) {
-
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 27,
- &adreno_dev->lm_threshold_count, NULL,
- PERFCOUNTER_FLAG_KERNEL);
- /* Ignore noncritical ret - used for debugfs */
- if (ret)
- adreno_dev->lm_threshold_count = 0;
- }
-
- _setup_throttling_counters(adreno_dev);
-
/* Restore performance counter registers with saved values */
adreno_perfcounter_restore(adreno_dev);
@@ -1653,14 +1560,9 @@ static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
int adreno_reset(struct kgsl_device *device, int fault)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int ret = -EINVAL;
int i = 0;
- /* broadcast to HW - reset is coming */
- if (gpudev->pre_reset)
- gpudev->pre_reset(adreno_dev);
-
/* Try soft reset first */
if (adreno_try_soft_reset(device, fault)) {
/* Make sure VBIF is cleared before resetting */
@@ -2340,12 +2242,12 @@ int adreno_idle(struct kgsl_device *device)
* adreno_drain() - Drain the dispatch queue
* @device: Pointer to the KGSL device structure for the GPU
*
- * Drain the dispatcher of existing command batches. This halts
+ * Drain the dispatcher of existing drawobjs. This halts
* additional commands from being issued until the gate is completed.
*/
static int adreno_drain(struct kgsl_device *device)
{
- reinit_completion(&device->cmdbatch_gate);
+ reinit_completion(&device->halt_gate);
return 0;
}
@@ -2580,27 +2482,6 @@ static inline s64 adreno_ticks_to_us(u32 ticks, u32 freq)
return ticks / freq;
}
-static unsigned int counter_delta(struct kgsl_device *device,
- unsigned int reg, unsigned int *counter)
-{
- unsigned int val;
- unsigned int ret = 0;
-
- /* Read the value */
- kgsl_regread(device, reg, &val);
-
- /* Return 0 for the first read */
- if (*counter != 0) {
- if (val < *counter)
- ret = (0xFFFFFFFF - *counter) + val;
- else
- ret = val - *counter;
- }
-
- *counter = val;
- return ret;
-}
-
/**
* adreno_power_stats() - Reads the counters needed for freq decisions
* @device: Pointer to device whose counters are read
@@ -2612,6 +2493,7 @@ static void adreno_power_stats(struct kgsl_device *device,
struct kgsl_power_stats *stats)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct adreno_busy_data *busy = &adreno_dev->busy_data;
uint64_t adj = 0;
@@ -2625,8 +2507,11 @@ static void adreno_power_stats(struct kgsl_device *device,
gpu_busy = counter_delta(device, adreno_dev->perfctr_pwr_lo,
&busy->gpu_busy);
- adj = _read_throttling_counters(adreno_dev);
- gpu_busy += adj;
+ if (gpudev->read_throttling_counters) {
+ adj = gpudev->read_throttling_counters(adreno_dev);
+ gpu_busy += adj;
+ }
+
stats->busy_time = adreno_ticks_to_us(gpu_busy,
kgsl_pwrctrl_active_freq(pwr));
}
@@ -2647,8 +2532,9 @@ static void adreno_power_stats(struct kgsl_device *device,
stats->ram_time = ram_cycles;
stats->ram_wait = starved_ram;
}
- if (adreno_dev->lm_threshold_count)
- _update_threshold_count(adreno_dev, adj);
+ if (adreno_dev->lm_threshold_count &&
+ gpudev->count_throttles)
+ gpudev->count_throttles(adreno_dev, adj);
}
static unsigned int adreno_gpuid(struct kgsl_device *device,
@@ -2728,6 +2614,14 @@ static void adreno_pwrlevel_change_settings(struct kgsl_device *device,
postlevel, post);
}
+static void adreno_clk_set_options(struct kgsl_device *device, const char *name,
+ struct clk *clk)
+{
+ if (ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options)
+ ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options(
+ ADRENO_DEVICE(device), name, clk);
+}
+
static void adreno_iommu_sync(struct kgsl_device *device, bool sync)
{
struct scm_desc desc = {0};
@@ -2791,6 +2685,18 @@ static void adreno_regulator_disable_poll(struct kgsl_device *device)
adreno_iommu_sync(device, false);
}
+static void adreno_gpu_model(struct kgsl_device *device, char *str,
+ size_t bufsz)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ snprintf(str, bufsz, "Adreno%d%d%dv%d",
+ ADRENO_CHIPID_CORE(adreno_dev->chipid),
+ ADRENO_CHIPID_MAJOR(adreno_dev->chipid),
+ ADRENO_CHIPID_MINOR(adreno_dev->chipid),
+ ADRENO_CHIPID_PATCH(adreno_dev->chipid) + 1);
+}
+
static const struct kgsl_functable adreno_functable = {
/* Mandatory functions */
.regread = adreno_regread,
@@ -2805,7 +2711,7 @@ static const struct kgsl_functable adreno_functable = {
.getproperty_compat = adreno_getproperty_compat,
.waittimestamp = adreno_waittimestamp,
.readtimestamp = adreno_readtimestamp,
- .issueibcmds = adreno_ringbuffer_issueibcmds,
+ .queue_cmds = adreno_dispatcher_queue_cmds,
.ioctl = adreno_ioctl,
.compat_ioctl = adreno_compat_ioctl,
.power_stats = adreno_power_stats,
@@ -2827,6 +2733,8 @@ static const struct kgsl_functable adreno_functable = {
.regulator_disable = adreno_regulator_disable,
.pwrlevel_change_settings = adreno_pwrlevel_change_settings,
.regulator_disable_poll = adreno_regulator_disable_poll,
+ .clk_set_options = adreno_clk_set_options,
+ .gpu_model = adreno_gpu_model,
};
static struct platform_driver adreno_platform_driver = {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index cbbfc57e27f4..0f3403cb0095 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -76,13 +76,13 @@
KGSL_CONTEXT_PREEMPT_STYLE_SHIFT)
/*
- * return the dispatcher cmdqueue in which the given cmdbatch should
+ * return the dispatcher drawqueue in which the given drawobj should
* be submitted
*/
-#define ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(c) \
+#define ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(c) \
(&((ADRENO_CONTEXT(c->context))->rb->dispatch_q))
-#define ADRENO_CMDBATCH_RB(c) \
+#define ADRENO_DRAWOBJ_RB(c) \
((ADRENO_CONTEXT(c->context))->rb)
/* Adreno core features */
@@ -346,8 +346,8 @@ struct adreno_gpu_core {
* @halt: Atomic variable to check whether the GPU is currently halted
* @ctx_d_debugfs: Context debugfs node
* @pwrctrl_flag: Flag to hold adreno specific power attributes
- * @cmdbatch_profile_buffer: Memdesc holding the cmdbatch profiling buffer
- * @cmdbatch_profile_index: Index to store the start/stop ticks in the profiling
+ * @profile_buffer: Memdesc holding the drawobj profiling buffer
+ * @profile_index: Index to store the start/stop ticks in the profiling
* buffer
* @sp_local_gpuaddr: Base GPU virtual address for SP local memory
* @sp_pvt_gpuaddr: Base GPU virtual address for SP private memory
@@ -404,8 +404,8 @@ struct adreno_device {
struct dentry *ctx_d_debugfs;
unsigned long pwrctrl_flag;
- struct kgsl_memdesc cmdbatch_profile_buffer;
- unsigned int cmdbatch_profile_index;
+ struct kgsl_memdesc profile_buffer;
+ unsigned int profile_index;
uint64_t sp_local_gpuaddr;
uint64_t sp_pvt_gpuaddr;
const struct firmware *lm_fw;
@@ -441,7 +441,7 @@ struct adreno_device {
* @ADRENO_DEVICE_STARTED - Set if the device start sequence is in progress
* @ADRENO_DEVICE_FAULT - Set if the device is currently in fault (and shouldn't
* send any more commands to the ringbuffer)
- * @ADRENO_DEVICE_CMDBATCH_PROFILE - Set if the device supports command batch
+ * @ADRENO_DEVICE_DRAWOBJ_PROFILE - Set if the device supports drawobj
* profiling via the ALWAYSON counter
* @ADRENO_DEVICE_PREEMPTION - Turn on/off preemption
* @ADRENO_DEVICE_SOFT_FAULT_DETECT - Set if soft fault detect is enabled
@@ -459,7 +459,7 @@ enum adreno_device_flags {
ADRENO_DEVICE_HANG_INTR = 4,
ADRENO_DEVICE_STARTED = 5,
ADRENO_DEVICE_FAULT = 6,
- ADRENO_DEVICE_CMDBATCH_PROFILE = 7,
+ ADRENO_DEVICE_DRAWOBJ_PROFILE = 7,
ADRENO_DEVICE_GPU_REGULATOR_ENABLED = 8,
ADRENO_DEVICE_PREEMPTION = 9,
ADRENO_DEVICE_SOFT_FAULT_DETECT = 10,
@@ -469,22 +469,22 @@ enum adreno_device_flags {
};
/**
- * struct adreno_cmdbatch_profile_entry - a single command batch entry in the
+ * struct adreno_drawobj_profile_entry - a single drawobj entry in the
* kernel profiling buffer
- * @started: Number of GPU ticks at start of the command batch
- * @retired: Number of GPU ticks at the end of the command batch
+ * @started: Number of GPU ticks at start of the drawobj
+ * @retired: Number of GPU ticks at the end of the drawobj
*/
-struct adreno_cmdbatch_profile_entry {
+struct adreno_drawobj_profile_entry {
uint64_t started;
uint64_t retired;
};
-#define ADRENO_CMDBATCH_PROFILE_COUNT \
- (PAGE_SIZE / sizeof(struct adreno_cmdbatch_profile_entry))
+#define ADRENO_DRAWOBJ_PROFILE_COUNT \
+ (PAGE_SIZE / sizeof(struct adreno_drawobj_profile_entry))
-#define ADRENO_CMDBATCH_PROFILE_OFFSET(_index, _member) \
- ((_index) * sizeof(struct adreno_cmdbatch_profile_entry) \
- + offsetof(struct adreno_cmdbatch_profile_entry, _member))
+#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \
+ ((_index) * sizeof(struct adreno_drawobj_profile_entry) \
+ + offsetof(struct adreno_drawobj_profile_entry, _member))
/**
@@ -756,6 +756,10 @@ struct adreno_gpudev {
void (*pwrlevel_change_settings)(struct adreno_device *,
unsigned int prelevel, unsigned int postlevel,
bool post);
+ uint64_t (*read_throttling_counters)(struct adreno_device *);
+ void (*count_throttles)(struct adreno_device *, uint64_t adj);
+ int (*enable_pwr_counters)(struct adreno_device *,
+ unsigned int counter);
unsigned int (*preemption_pre_ibsubmit)(struct adreno_device *,
struct adreno_ringbuffer *rb,
unsigned int *, struct kgsl_context *);
@@ -765,7 +769,8 @@ struct adreno_gpudev {
int (*preemption_init)(struct adreno_device *);
void (*preemption_schedule)(struct adreno_device *);
void (*enable_64bit)(struct adreno_device *);
- void (*pre_reset)(struct adreno_device *);
+ void (*clk_set_options)(struct adreno_device *,
+ const char *, struct clk *);
};
/**
@@ -774,7 +779,7 @@ struct adreno_gpudev {
* @KGSL_FT_REPLAY: Replay the faulting command
* @KGSL_FT_SKIPIB: Skip the faulting indirect buffer
* @KGSL_FT_SKIPFRAME: Skip the frame containing the faulting IB
- * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command batch
+ * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command obj
* @KGSL_FT_TEMP_DISABLE: Disables FT for all commands
* @KGSL_FT_THROTTLE: Disable the context if it faults too often
* @KGSL_FT_SKIPCMD: Skip the command containing the faulting IB
@@ -791,7 +796,7 @@ enum kgsl_ft_policy_bits {
/* KGSL_FT_MAX_BITS is used to calculate the mask */
KGSL_FT_MAX_BITS,
/* Internal bits - set during GFT */
- /* Skip the PM dump on replayed command batches */
+ /* Skip the PM dump on replayed command obj's */
KGSL_FT_SKIP_PMDUMP = 31,
};
@@ -880,7 +885,7 @@ int adreno_reset(struct kgsl_device *device, int fault);
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_drawobj *drawobj);
int adreno_coresight_init(struct adreno_device *adreno_dev);
@@ -1013,6 +1018,12 @@ static inline int adreno_is_a540v1(struct adreno_device *adreno_dev)
(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0);
}
+static inline int adreno_is_a540v2(struct adreno_device *adreno_dev)
+{
+ return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A540) &&
+ (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1);
+}
+
/*
* adreno_checkreg_off() - Checks the validity of a register enum
* @adreno_dev: Pointer to adreno device
@@ -1459,4 +1470,24 @@ static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb,
spin_unlock_irqrestore(&rb->preempt_lock, flags);
}
+static inline unsigned int counter_delta(struct kgsl_device *device,
+ unsigned int reg, unsigned int *counter)
+{
+ unsigned int val;
+ unsigned int ret = 0;
+
+ /* Read the value */
+ kgsl_regread(device, reg, &val);
+
+ /* Return 0 for the first read */
+ if (*counter != 0) {
+ if (val < *counter)
+ ret = (0xFFFFFFFF - *counter) + val;
+ else
+ ret = val - *counter;
+ }
+
+ *counter = val;
+ return ret;
+}
#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index 7a691667e59f..bfbdb0e7ac1f 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -26,6 +26,8 @@
#include "adreno_perfcounter.h"
#define SP_TP_PWR_ON BIT(20)
+/* A4XX_RBBM_CLOCK_CTL_IP */
+#define CNTL_IP_SW_COLLAPSE BIT(0)
/*
* Define registers for a4xx that contain addresses used by the
@@ -201,6 +203,131 @@ static bool a4xx_is_sptp_idle(struct adreno_device *adreno_dev)
}
/*
+ * a4xx_enable_hwcg() - Program the clock control registers
+ * @device: The adreno device pointer
+ */
+static void a4xx_enable_hwcg(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP0, 0x02222202);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP1, 0x02222202);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP2, 0x02222202);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP3, 0x02222202);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP0, 0x00002222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP1, 0x00002222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP2, 0x00002222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP3, 0x00002222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP0, 0x0E739CE7);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP1, 0x0E739CE7);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP2, 0x0E739CE7);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP3, 0x0E739CE7);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP0, 0x00111111);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP1, 0x00111111);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP2, 0x00111111);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP3, 0x00111111);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP0, 0x22222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP1, 0x22222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP2, 0x22222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP3, 0x22222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP0, 0x00222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP1, 0x00222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP2, 0x00222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP3, 0x00222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP0, 0x00000104);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP1, 0x00000104);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP2, 0x00000104);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP3, 0x00000104);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP0, 0x00000081);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP1, 0x00000081);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP2, 0x00000081);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP3, 0x00000081);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB0, 0x22222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB1, 0x22222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB2, 0x22222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB3, 0x22222222);
+ /* Disable L1 clocking in A420 due to CCU issues with it */
+ if (adreno_is_a420(adreno_dev)) {
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB0, 0x00002020);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB1, 0x00002020);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB2, 0x00002020);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB3, 0x00002020);
+ } else {
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB0, 0x00022020);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB1, 0x00022020);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB2, 0x00022020);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB3, 0x00022020);
+ }
+ /* No CCU for A405 */
+ if (!adreno_is_a405(adreno_dev)) {
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_CTL_MARB_CCU0, 0x00000922);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_CTL_MARB_CCU1, 0x00000922);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_CTL_MARB_CCU2, 0x00000922);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_CTL_MARB_CCU3, 0x00000922);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU0, 0x00000000);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU1, 0x00000000);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU2, 0x00000000);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU3, 0x00000000);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_0,
+ 0x00000001);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_1,
+ 0x00000001);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_2,
+ 0x00000001);
+ kgsl_regwrite(device,
+ A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_3,
+ 0x00000001);
+ }
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
+ /*
+ * Due to a HW timing issue, top level HW clock gating causes
+ * register read/writes to be dropped on adreno a430. The issue
+ * appeared with SP/TP power collapse; targets without SP/TP PC
+ * are unaffected. It can be avoided by either
+ * a) disabling SP/TP power collapse, or
+ * b) disabling HW clock gating.
+ * Disabling HW clock gating while keeping NAP enabled has the
+ * smaller power impact, so it is chosen over disabling SP/TP
+ * power collapse.
+ * Revisions of A430 with chipid patch 2 and above do not have
+ * the issue.
+ */
+ if (adreno_is_a430(adreno_dev) &&
+ (ADRENO_CHIPID_PATCH(adreno_dev->chipid) < 2))
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL, 0);
+ else
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2, 0);
+}
+/*
* a4xx_regulator_enable() - Enable any necessary HW regulators
* @adreno_dev: The adreno device pointer
*
@@ -212,8 +339,12 @@ static int a4xx_regulator_enable(struct adreno_device *adreno_dev)
unsigned int reg;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (!(adreno_is_a430(adreno_dev) || adreno_is_a418(adreno_dev)))
+ if (!(adreno_is_a430(adreno_dev) || adreno_is_a418(adreno_dev))) {
+ /* Halt the sp_input_clk at HM level */
+ kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL, 0x00000055);
+ a4xx_enable_hwcg(device);
return 0;
+ }
/* Set the default register values; set SW_COLLAPSE to 0 */
kgsl_regwrite(device, A4XX_RBBM_POWER_CNTL_IP, 0x778000);
@@ -221,6 +352,13 @@ static int a4xx_regulator_enable(struct adreno_device *adreno_dev)
udelay(5);
kgsl_regread(device, A4XX_RBBM_POWER_STATUS, &reg);
} while (!(reg & SP_TP_PWR_ON));
+
+ /* Disable SP clock */
+ kgsl_regrmw(device, A4XX_RBBM_CLOCK_CTL_IP, CNTL_IP_SW_COLLAPSE, 0);
+ /* Enable hardware clockgating */
+ a4xx_enable_hwcg(device);
+ /* Enable SP clock */
+ kgsl_regrmw(device, A4XX_RBBM_CLOCK_CTL_IP, CNTL_IP_SW_COLLAPSE, 1);
return 0;
}
@@ -328,131 +466,6 @@ static void a4xx_pwrlevel_change_settings(struct adreno_device *adreno_dev,
pre = 0;
}
-/*
- * a4xx_enable_hwcg() - Program the clock control registers
- * @device: The adreno device pointer
- */
-static void a4xx_enable_hwcg(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP0, 0x02222202);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP1, 0x02222202);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP2, 0x02222202);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TP3, 0x02222202);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP0, 0x00002222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP1, 0x00002222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP2, 0x00002222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_TP3, 0x00002222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP0, 0x0E739CE7);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP1, 0x0E739CE7);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP2, 0x0E739CE7);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TP3, 0x0E739CE7);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP0, 0x00111111);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP1, 0x00111111);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP2, 0x00111111);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TP3, 0x00111111);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP0, 0x22222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP1, 0x22222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP2, 0x22222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_SP3, 0x22222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP0, 0x00222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP1, 0x00222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP2, 0x00222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_SP3, 0x00222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP0, 0x00000104);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP1, 0x00000104);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP2, 0x00000104);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_SP3, 0x00000104);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP0, 0x00000081);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP1, 0x00000081);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP2, 0x00000081);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_SP3, 0x00000081);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB0, 0x22222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB1, 0x22222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB2, 0x22222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_RB3, 0x22222222);
- /* Disable L1 clocking in A420 due to CCU issues with it */
- if (adreno_is_a420(adreno_dev)) {
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB0, 0x00002020);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB1, 0x00002020);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB2, 0x00002020);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB3, 0x00002020);
- } else {
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB0, 0x00022020);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB1, 0x00022020);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB2, 0x00022020);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2_RB3, 0x00022020);
- }
- /* No CCU for A405 */
- if (!adreno_is_a405(adreno_dev)) {
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_CTL_MARB_CCU0, 0x00000922);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_CTL_MARB_CCU1, 0x00000922);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_CTL_MARB_CCU2, 0x00000922);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_CTL_MARB_CCU3, 0x00000922);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU0, 0x00000000);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU1, 0x00000000);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU2, 0x00000000);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU3, 0x00000000);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_0,
- 0x00000001);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_1,
- 0x00000001);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_2,
- 0x00000001);
- kgsl_regwrite(device,
- A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_3,
- 0x00000001);
- }
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL_HLSQ , 0x00000000);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
- /*
- * Due to a HW timing issue, top level HW clock gating is causing
- * register read/writes to be dropped in adreno a430.
- * This timing issue started happening because of SP/TP power collapse.
- * On targets that do not have SP/TP PC there is no timing issue.
- * The HW timing issue could be fixed by
- * a) disabling SP/TP power collapse
- * b) or disabling HW clock gating.
- * Disabling HW clock gating + NAP enabled combination has
- * minimal power impact. So this option is chosen over disabling
- * SP/TP power collapse.
- * Revisions of A430 which chipid 2 and above do not have the issue.
- */
- if (adreno_is_a430(adreno_dev) &&
- (ADRENO_CHIPID_PATCH(adreno_dev->chipid) < 2))
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL, 0);
- else
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
- kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2, 0);
-}
-
/**
* a4xx_protect_init() - Initializes register protection on a4xx
* @adreno_dev: Pointer to the device structure
@@ -597,7 +610,6 @@ static void a4xx_start(struct adreno_device *adreno_dev)
0x00000441);
}
- a4xx_enable_hwcg(device);
/*
* For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
* due to timing issue with HLSQ_TP_CLK_EN
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 583de85678fc..2891940b8f5b 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -14,6 +14,7 @@
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/scm.h>
#include <linux/pm_opp.h>
+#include <linux/clk/msm-clk.h>
#include "adreno.h"
#include "a5xx_reg.h"
@@ -26,6 +27,7 @@
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
+#include "kgsl_trace.h"
#include "adreno_a5xx_packets.h"
static int zap_ucode_loaded;
@@ -439,7 +441,11 @@ static int a5xx_regulator_enable(struct adreno_device *adreno_dev)
unsigned int ret;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
if (!(adreno_is_a530(adreno_dev) || adreno_is_a540(adreno_dev))) {
+ /* Halt the sp_input_clk at HM level */
+ kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL, 0x00000055);
a5xx_hwcg_set(adreno_dev, true);
+ /* Turn on sp_input_clk at HM level */
+ kgsl_regrmw(device, A5XX_RBBM_CLOCK_CNTL, 3, 0);
return 0;
}
@@ -1401,105 +1407,10 @@ static void a530_lm_enable(struct adreno_device *adreno_dev)
adreno_is_a530v2(adreno_dev) ? 0x00060011 : 0x00000011);
}
-static bool llm_is_enabled(struct adreno_device *adreno_dev)
-{
- unsigned int r;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);
- return r & (GPMU_BCL_ENABLED | GPMU_LLM_ENABLED);
-}
-
-
-static void sleep_llm(struct adreno_device *adreno_dev)
-{
- unsigned int r, retry;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (!llm_is_enabled(adreno_dev))
- return;
-
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL, &r);
-
- if ((r & STATE_OF_CHILD) == 0) {
- /* If both children are on, sleep CHILD_O1 first */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD, STATE_OF_CHILD_01 | IDLE_FULL_LM_SLEEP);
- /* Wait for IDLE_FULL_ACK before continuing */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device,
- A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if (r & IDLE_FULL_ACK)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to idle: 0x%X\n", r);
- }
-
- /* Now turn off both children */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- 0, STATE_OF_CHILD | IDLE_FULL_LM_SLEEP);
-
- /* wait for WAKEUP_ACK to be zero */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if ((r & WAKEUP_ACK) == 0)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to sleep: 0x%X\n", r);
-}
-
-static void wake_llm(struct adreno_device *adreno_dev)
-{
- unsigned int r, retry;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (!llm_is_enabled(adreno_dev))
- return;
-
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD, STATE_OF_CHILD_01);
-
- if (((device->pwrctrl.num_pwrlevels - 2) -
- device->pwrctrl.active_pwrlevel) <= LM_DCVS_LIMIT)
- return;
-
- udelay(1);
-
- /* Turn on all children */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD | IDLE_FULL_LM_SLEEP, 0);
-
- /* Wait for IDLE_FULL_ACK to be zero and WAKEUP_ACK to be set */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if ((r & (WAKEUP_ACK | IDLE_FULL_ACK)) == WAKEUP_ACK)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to wake: 0x%X\n", r);
-}
-
-static bool llm_is_awake(struct adreno_device *adreno_dev)
-{
- unsigned int r;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- return r & WAKEUP_ACK;
-}
-
static void a540_lm_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- uint32_t agc_lm_config =
+ uint32_t agc_lm_config = AGC_BCL_DISABLED |
((ADRENO_CHIPID_PATCH(adreno_dev->chipid) & 0x3)
<< AGC_GPU_VERSION_SHIFT);
unsigned int r;
@@ -1513,11 +1424,6 @@ static void a540_lm_init(struct adreno_device *adreno_dev)
AGC_LM_CONFIG_ISENSE_ENABLE;
kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);
- if (!(r & GPMU_BCL_ENABLED))
- agc_lm_config |= AGC_BCL_DISABLED;
-
- if (r & GPMU_LLM_ENABLED)
- agc_lm_config |= AGC_LLM_ENABLED;
if ((r & GPMU_ISENSE_STATUS) == GPMU_ISENSE_END_POINT_CAL_ERR) {
KGSL_CORE_ERR(
@@ -1546,9 +1452,6 @@ static void a540_lm_init(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK,
VOLTAGE_INTR_EN);
-
- if (lm_on(adreno_dev))
- wake_llm(adreno_dev);
}
@@ -1627,6 +1530,91 @@ static void a5xx_pwrlevel_change_settings(struct adreno_device *adreno_dev,
}
}
+static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
+ const char *name, struct clk *clk)
+{
+ if (adreno_is_a540(adreno_dev)) {
+ if (!strcmp(name, "mem_iface_clk"))
+ clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+ clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+ if (!strcmp(name, "core_clk")) {
+ clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+ clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+ }
+ }
+}
+
+static void a5xx_count_throttles(struct adreno_device *adreno_dev,
+ uint64_t adj)
+{
+ if (adreno_is_a530(adreno_dev))
+ kgsl_regread(KGSL_DEVICE(adreno_dev),
+ adreno_dev->lm_threshold_count,
+ &adreno_dev->lm_threshold_cross);
+ else if (adreno_is_a540(adreno_dev))
+ adreno_dev->lm_threshold_cross = adj;
+}
+
+static int a5xx_enable_pwr_counters(struct adreno_device *adreno_dev,
+ unsigned int counter)
+{
+ /*
+ * On 5XX we have to emulate the PWR counters which are physically
+ * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
+ * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
+ * to take away too many of the generic RBBM counters.
+ */
+
+ if (counter == 0)
+ return -EINVAL;
+
+ kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
+ return 0;
+}
+
+/* FW driven idle 10% throttle */
+#define IDLE_10PCT 0
+/* number of cycles when clock is throttled by 50% (CRC) */
+#define CRC_50PCT 1
+/* number of cycles when clock is throttled by more than 50% (CRC) */
+#define CRC_MORE50PCT 2
+/* number of cycles when clock is throttled by less than 50% (CRC) */
+#define CRC_LESS50PCT 3
+
+static uint64_t a5xx_read_throttling_counters(struct adreno_device *adreno_dev)
+{
+ int i, adj;
+ uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
+ struct adreno_busy_data *busy = &adreno_dev->busy_data;
+
+ if (!adreno_is_a540(adreno_dev))
+ return 0;
+
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+ return 0;
+
+ if (!test_bit(ADRENO_THROTTLING_CTRL, &adreno_dev->pwrctrl_flag))
+ return 0;
+
+ for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
+ if (!adreno_dev->gpmu_throttle_counters[i])
+ return 0;
+
+ th[i] = counter_delta(KGSL_DEVICE(adreno_dev),
+ adreno_dev->gpmu_throttle_counters[i],
+ &busy->throttle_cycles[i]);
+ }
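+ /*
+ * Weight each bucket by the approximate fraction of cycles
+ * lost: 50% CRC throttling counts as-is, sub-50% at one third,
+ * and above-50% (net of the fw-driven idle bucket) at three
+ * times.
+ */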
+ adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
+ adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
+
+ trace_kgsl_clock_throttling(
+ th[IDLE_10PCT], th[CRC_50PCT],
+ th[CRC_MORE50PCT], th[CRC_LESS50PCT],
+ adj);
+ return adj;
+}
+
static void a5xx_enable_64bit(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1645,14 +1633,6 @@ static void a5xx_enable_64bit(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}
-static void a5xx_pre_reset(struct adreno_device *adreno_dev)
-{
- if (adreno_is_a540(adreno_dev) && lm_on(adreno_dev)) {
- if (llm_is_awake(adreno_dev))
- sleep_llm(adreno_dev);
- }
-}
-
/*
* a5xx_gpmu_reset() - Re-enable GPMU based power features and restart GPMU
* @work: Pointer to the work struct for gpmu reset
@@ -1687,17 +1667,47 @@ static void a5xx_gpmu_reset(struct work_struct *work)
if (a5xx_regulator_enable(adreno_dev))
goto out;
- a5xx_pre_reset(adreno_dev);
-
/* Soft reset of the GPMU block */
kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD, BIT(16));
+ /* GPU comes up in secured mode, make it unsecured by default */
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
+ kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
a5xx_gpmu_init(adreno_dev);
out:
mutex_unlock(&device->mutex);
}
+static void _setup_throttling_counters(struct adreno_device *adreno_dev)
+{
+ int i, ret;
+
+ if (!adreno_is_a540(adreno_dev))
+ return;
+
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+ return;
+
+ for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
+ /* reset throttled cycle values */
+ adreno_dev->busy_data.throttle_cycles[i] = 0;
+
+ if (adreno_dev->gpmu_throttle_counters[i] != 0)
+ continue;
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
+ ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i,
+ &adreno_dev->gpmu_throttle_counters[i],
+ NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+ WARN_ONCE(ret, "Unable to get clock throttling counter %x\n",
+ ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i);
+ }
+}
+
/*
* a5xx_start() - Device start
* @adreno_dev: Pointer to adreno device
@@ -1709,6 +1719,21 @@ static void a5xx_start(struct adreno_device *adreno_dev)
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
unsigned int bit;
+ int ret;
+
+ if (adreno_is_a530(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM)
+ && adreno_dev->lm_threshold_count == 0) {
+
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 27,
+ &adreno_dev->lm_threshold_count, NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+ /* Ignore noncritical ret - used for debugfs */
+ if (ret)
+ adreno_dev->lm_threshold_count = 0;
+ }
+
+ _setup_throttling_counters(adreno_dev);
adreno_vbif_start(adreno_dev, a5xx_vbif_platforms,
ARRAY_SIZE(a5xx_vbif_platforms));
@@ -1855,6 +1880,11 @@ static void a5xx_start(struct adreno_device *adreno_dev)
*/
kgsl_regrmw(device, A5XX_RB_DBG_ECO_CNT, 0, (1 << 9));
}
+ /*
+ * Disable UCHE global filter as SP can invalidate/flush
+ * independently
+ */
+ kgsl_regwrite(device, A5XX_UCHE_MODE_CNTL, BIT(29));
/* Set the USE_RETENTION_FLOPS chicken bit */
kgsl_regwrite(device, A5XX_CP_CHICKEN_DBG, 0x02000000);
@@ -2009,11 +2039,6 @@ static int a5xx_post_start(struct adreno_device *adreno_dev)
static int a5xx_gpmu_init(struct adreno_device *adreno_dev)
{
int ret;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- /* GPU comes up in secured mode, make it unsecured by default */
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
- kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
/* Set up LM before initializing the GPMU */
a5xx_lm_init(adreno_dev);
@@ -2127,9 +2152,11 @@ static int _me_init_ucode_workarounds(struct adreno_device *adreno_dev)
case ADRENO_REV_A540:
/*
* WFI after every direct-render 3D mode draw and
- * WFI after every 2D Mode 3 draw.
+ * WFI after every 2D Mode 3 draw. This is needed
+ * only on a540v1.
*/
- return 0x0000000A;
+ if (adreno_is_a540v1(adreno_dev))
+ return 0x0000000A;
default:
return 0x00000000; /* No ucode workarounds enabled */
}
@@ -2332,20 +2359,10 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
if (ret)
return ret;
- /* Set up LM before initializing the GPMU */
- a5xx_lm_init(adreno_dev);
-
- /* Enable SPTP based power collapse before enabling GPMU */
- a5xx_enable_pc(adreno_dev);
-
- /* Program the GPMU */
- ret = a5xx_gpmu_start(adreno_dev);
+ ret = a5xx_gpmu_init(adreno_dev);
if (ret)
return ret;
- /* Enable limits management */
- a5xx_lm_enable(adreno_dev);
-
a5xx_post_start(adreno_dev);
return 0;
@@ -3507,6 +3524,9 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
.regulator_enable = a5xx_regulator_enable,
.regulator_disable = a5xx_regulator_disable,
.pwrlevel_change_settings = a5xx_pwrlevel_change_settings,
+ .read_throttling_counters = a5xx_read_throttling_counters,
+ .count_throttles = a5xx_count_throttles,
+ .enable_pwr_counters = a5xx_enable_pwr_counters,
.preemption_pre_ibsubmit = a5xx_preemption_pre_ibsubmit,
.preemption_yield_enable =
a5xx_preemption_yield_enable,
@@ -3515,5 +3535,5 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
.preemption_init = a5xx_preemption_init,
.preemption_schedule = a5xx_preemption_schedule,
.enable_64bit = a5xx_enable_64bit,
- .pre_reset = a5xx_pre_reset,
+ .clk_set_options = a5xx_clk_set_options,
};
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 4baee4a5c0b1..09c550c9f58c 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -37,7 +37,7 @@ static void _update_wptr(struct adreno_device *adreno_dev)
rb->wptr);
rb->dispatch_q.expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ msecs_to_jiffies(adreno_drawobj_timeout);
}
spin_unlock_irqrestore(&rb->preempt_lock, flags);
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 680827e5b848..fffe08038bcd 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -129,7 +129,7 @@ typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
static void sync_event_print(struct seq_file *s,
- struct kgsl_cmdbatch_sync_event *sync_event)
+ struct kgsl_drawobj_sync_event *sync_event)
{
switch (sync_event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
@@ -153,12 +153,12 @@ struct flag_entry {
const char *str;
};
-static const struct flag_entry cmdbatch_flags[] = {KGSL_CMDBATCH_FLAGS};
+static const struct flag_entry drawobj_flags[] = {KGSL_DRAWOBJ_FLAGS};
-static const struct flag_entry cmdbatch_priv[] = {
- { CMDBATCH_FLAG_SKIP, "skip"},
- { CMDBATCH_FLAG_FORCE_PREAMBLE, "force_preamble"},
- { CMDBATCH_FLAG_WFI, "wait_for_idle" },
+static const struct flag_entry cmdobj_priv[] = {
+ { CMDOBJ_SKIP, "skip"},
+ { CMDOBJ_FORCE_PREAMBLE, "force_preamble"},
+ { CMDOBJ_WFI, "wait_for_idle" },
};
static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS};
@@ -199,42 +199,54 @@ static void print_flags(struct seq_file *s, const struct flag_entry *table,
seq_puts(s, "None");
}
-static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch)
+static void syncobj_print(struct seq_file *s,
+ struct kgsl_drawobj_sync *syncobj)
{
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- /* print fences first, since they block this cmdbatch */
+ seq_puts(s, " syncobj ");
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
- /*
- * Timestamp is 0 for KGSL_CONTEXT_SYNC, but print it anyways
- * so that it is clear if the fence was a separate submit
- * or part of an IB submit.
- */
- seq_printf(s, "\t%d ", cmdbatch->timestamp);
sync_event_print(s, event);
seq_puts(s, "\n");
}
+}
- /* if this flag is set, there won't be an IB */
- if (cmdbatch->flags & KGSL_CONTEXT_SYNC)
- return;
+static void cmdobj_print(struct seq_file *s,
+ struct kgsl_drawobj_cmd *cmdobj)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- seq_printf(s, "\t%d: ", cmdbatch->timestamp);
+ if (drawobj->type == CMDOBJ_TYPE)
+ seq_puts(s, " cmdobj ");
+ else
+ seq_puts(s, " markerobj ");
- seq_puts(s, " flags: ");
- print_flags(s, cmdbatch_flags, ARRAY_SIZE(cmdbatch_flags),
- cmdbatch->flags);
+ seq_printf(s, "\t %d ", drawobj->timestamp);
seq_puts(s, " priv: ");
- print_flags(s, cmdbatch_priv, ARRAY_SIZE(cmdbatch_priv),
- cmdbatch->priv);
+ print_flags(s, cmdobj_priv, ARRAY_SIZE(cmdobj_priv),
+ cmdobj->priv);
+}
+
+static void drawobj_print(struct seq_file *s,
+ struct kgsl_drawobj *drawobj)
+{
+ if (drawobj->type == SYNCOBJ_TYPE)
+ syncobj_print(s, SYNCOBJ(drawobj));
+ else if ((drawobj->type == CMDOBJ_TYPE) ||
+ (drawobj->type == MARKEROBJ_TYPE))
+ cmdobj_print(s, CMDOBJ(drawobj));
+
+ seq_puts(s, " flags: ");
+ print_flags(s, drawobj_flags, ARRAY_SIZE(drawobj_flags),
+ drawobj->flags);
seq_puts(s, "\n");
}
@@ -285,13 +297,13 @@ static int ctx_print(struct seq_file *s, void *unused)
queued, consumed, retired,
drawctxt->internal_timestamp);
- seq_puts(s, "cmdqueue:\n");
+ seq_puts(s, "drawqueue:\n");
spin_lock(&drawctxt->lock);
- for (i = drawctxt->cmdqueue_head;
- i != drawctxt->cmdqueue_tail;
- i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE))
- cmdbatch_print(s, drawctxt->cmdqueue[i]);
+ for (i = drawctxt->drawqueue_head;
+ i != drawctxt->drawqueue_tail;
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE))
+ drawobj_print(s, drawctxt->drawqueue[i]);
spin_unlock(&drawctxt->lock);
seq_puts(s, "events:\n");
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 5d3b2b8a7266..cb4108b4e1f9 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -25,7 +25,7 @@
#include "adreno_trace.h"
#include "kgsl_sharedmem.h"
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/* Time in ms after which the dispatcher tries to schedule an unscheduled RB */
unsigned int adreno_dispatch_starvation_time = 2000;
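
DRAWQUEUE_NEXT is a plain modular increment over a fixed-size ring, and the same idiom drives the per-context drawqueue, the dispatcher cmd_q, and the requeue logic below. A runnable model of the ring traversal:

    /* Model of DRAWQUEUE_NEXT(i, s) = ((i) + 1) % (s) over a small ring. */
    #include <stdio.h>

    #define QUEUE_NEXT(i, s) (((i) + 1) % (s))
    #define RING_SIZE 4  /* one slot stays free so head == tail means empty */

    int main(void)
    {
        unsigned int head = 0, tail = 0;
        int ring[RING_SIZE];
        int v;

        for (v = 10; v < 13; v++) {              /* enqueue three items */
            ring[tail] = v;
            tail = QUEUE_NEXT(tail, RING_SIZE);
        }
        while (head != tail) {                   /* drain in FIFO order */
            printf("%d\n", ring[head]);
            head = QUEUE_NEXT(head, RING_SIZE);
        }
        return 0;
    }
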
@@ -43,13 +43,13 @@ unsigned int adreno_dispatch_time_slice = 25;
unsigned int adreno_disp_preempt_fair_sched;
/* Number of commands that can be queued in a context before it sleeps */
-static unsigned int _context_cmdqueue_size = 50;
+static unsigned int _context_drawqueue_size = 50;
/* Number of milliseconds to wait for the context queue to clear */
static unsigned int _context_queue_wait = 10000;
-/* Number of command batches sent at a time from a single context */
-static unsigned int _context_cmdbatch_burst = 5;
+/* Number of drawobjs sent at a time from a single context */
+static unsigned int _context_drawobj_burst = 5;
/*
* GFT throttle parameters. If GFT recovered more than
@@ -73,24 +73,25 @@ static unsigned int _dispatcher_q_inflight_hi = 15;
static unsigned int _dispatcher_q_inflight_lo = 4;
/* Command batch timeout (in milliseconds) */
-unsigned int adreno_cmdbatch_timeout = 2000;
+unsigned int adreno_drawobj_timeout = 2000;
/* Interval for reading and comparing fault detection registers */
static unsigned int _fault_timer_interval = 200;
-#define CMDQUEUE_RB(_cmdqueue) \
+#define DRAWQUEUE_RB(_drawqueue) \
((struct adreno_ringbuffer *) \
- container_of((_cmdqueue), struct adreno_ringbuffer, dispatch_q))
+ container_of((_drawqueue),\
+ struct adreno_ringbuffer, dispatch_q))
-#define CMDQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
+#define DRAWQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue);
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue);
-static inline bool cmdqueue_is_current(
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool drawqueue_is_current(
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- struct adreno_ringbuffer *rb = CMDQUEUE_RB(cmdqueue);
+ struct adreno_ringbuffer *rb = DRAWQUEUE_RB(drawqueue);
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
return (adreno_dev->cur_rb == rb);
@@ -114,7 +115,8 @@ static int __count_context(struct adreno_context *drawctxt, void *data)
return time_after(jiffies, expires) ? 0 : 1;
}
-static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
+static int __count_drawqueue_context(struct adreno_context *drawctxt,
+ void *data)
{
unsigned long expires = drawctxt->active_time + msecs_to_jiffies(100);
@@ -122,7 +124,7 @@ static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
return 0;
return (&drawctxt->rb->dispatch_q ==
- (struct adreno_dispatcher_cmdqueue *) data) ? 1 : 0;
+ (struct adreno_dispatcher_drawqueue *) data) ? 1 : 0;
}
static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
@@ -142,7 +144,7 @@ static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
}
static void _track_context(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue,
+ struct adreno_dispatcher_drawqueue *drawqueue,
struct adreno_context *drawctxt)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -154,9 +156,9 @@ static void _track_context(struct adreno_device *adreno_dev,
device->active_context_count =
_adreno_count_active_contexts(adreno_dev,
__count_context, NULL);
- cmdqueue->active_context_count =
+ drawqueue->active_context_count =
_adreno_count_active_contexts(adreno_dev,
- __count_cmdqueue_context, cmdqueue);
+ __count_drawqueue_context, drawqueue);
spin_unlock(&adreno_dev->active_list_lock);
}
@@ -169,9 +171,9 @@ static void _track_context(struct adreno_device *adreno_dev,
*/
static inline int
-_cmdqueue_inflight(struct adreno_dispatcher_cmdqueue *cmdqueue)
+_drawqueue_inflight(struct adreno_dispatcher_drawqueue *drawqueue)
{
- return (cmdqueue->active_context_count > 1)
+ return (drawqueue->active_context_count > 1)
? _dispatcher_q_inflight_lo : _dispatcher_q_inflight_hi;
}
@@ -271,20 +273,20 @@ static void start_fault_timer(struct adreno_device *adreno_dev)
}
/**
- * _retire_marker() - Retire a marker command batch without sending it to the
- * hardware
- * @cmdbatch: Pointer to the cmdbatch to retire
+ * _retire_timestamp() - Retire object without sending it
+ * to the hardware
+ * @drawobj: Pointer to the object to retire
*
- * In some cases marker commands can be retired by the software without going to
- * the GPU. In those cases, update the memstore from the CPU, kick off the
- * event engine to handle expired events and destroy the command batch.
+ * In some cases IBs can be retired by the software
+ * without going to the GPU. In those cases, update the
+ * memstore from the CPU, kick off the event engine to handle
+ * expired events and destroy the object.
*/
-static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
+static void _retire_timestamp(struct kgsl_drawobj *drawobj)
{
- struct kgsl_context *context = cmdbatch->context;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_context *context = drawobj->context;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
struct kgsl_device *device = context->device;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
/*
* Write the start and end timestamp to the memstore to keep the
@@ -292,11 +294,11 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
*/
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
- cmdbatch->timestamp);
+ drawobj->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
- cmdbatch->timestamp);
+ drawobj->timestamp);
/* Retire pending GPU events for the object */
@@ -307,13 +309,13 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
* rptr scratch out address. At this point GPU clocks turned off.
* So avoid reading GPU register directly for A3xx.
*/
- if (adreno_is_a3xx(adreno_dev))
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
- 0);
+ if (adreno_is_a3xx(ADRENO_DEVICE(device)))
+ trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
+ 0, 0);
else
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
- adreno_get_rptr(drawctxt->rb));
- kgsl_cmdbatch_destroy(cmdbatch);
+ trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
+ adreno_get_rptr(drawctxt->rb), 0);
+ kgsl_drawobj_destroy(drawobj);
}
static int _check_context_queue(struct adreno_context *drawctxt)
@@ -330,7 +332,7 @@ static int _check_context_queue(struct adreno_context *drawctxt)
if (kgsl_context_invalid(&drawctxt->base))
ret = 1;
else
- ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
+ ret = drawctxt->queued < _context_drawqueue_size ? 1 : 0;
spin_unlock(&drawctxt->lock);
@@ -341,176 +343,151 @@ static int _check_context_queue(struct adreno_context *drawctxt)
* return true if this is a marker command and the dependent timestamp has
* retired
*/
-static bool _marker_expired(struct kgsl_cmdbatch *cmdbatch)
-{
- return (cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- kgsl_check_timestamp(cmdbatch->device, cmdbatch->context,
- cmdbatch->marker_timestamp);
-}
-
-static inline void _pop_cmdbatch(struct adreno_context *drawctxt)
+static bool _marker_expired(struct kgsl_drawobj_cmd *markerobj)
{
- drawctxt->cmdqueue_head = CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
- ADRENO_CONTEXT_CMDQUEUE_SIZE);
- drawctxt->queued--;
-}
-/**
- * Removes all expired marker and sync cmdbatches from
- * the context queue when marker command and dependent
- * timestamp are retired. This function is recursive.
- * returns cmdbatch if context has command, NULL otherwise.
- */
-static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt)
-{
- struct kgsl_cmdbatch *cmdbatch;
-
- if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail)
- return NULL;
-
- cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
- if (cmdbatch == NULL)
- return NULL;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj);
- /* Check to see if this is a marker we can skip over */
- if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- _marker_expired(cmdbatch)) {
- _pop_cmdbatch(drawctxt);
- _retire_marker(cmdbatch);
- return _expire_markers(drawctxt);
- }
-
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- if (!kgsl_cmdbatch_events_pending(cmdbatch)) {
- _pop_cmdbatch(drawctxt);
- kgsl_cmdbatch_destroy(cmdbatch);
- return _expire_markers(drawctxt);
- }
- }
-
- return cmdbatch;
+ return (drawobj->flags & KGSL_DRAWOBJ_MARKER) &&
+ kgsl_check_timestamp(drawobj->device, drawobj->context,
+ markerobj->marker_timestamp);
}
-static void expire_markers(struct adreno_context *drawctxt)
+static inline void _pop_drawobj(struct adreno_context *drawctxt)
{
- spin_lock(&drawctxt->lock);
- _expire_markers(drawctxt);
- spin_unlock(&drawctxt->lock);
+ drawctxt->drawqueue_head = DRAWQUEUE_NEXT(drawctxt->drawqueue_head,
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE);
+ drawctxt->queued--;
}
-static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
+static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj,
+ struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
- bool pending = false;
-
- cmdbatch = _expire_markers(drawctxt);
-
- if (cmdbatch == NULL)
- return NULL;
+ if (_marker_expired(cmdobj)) {
+ _pop_drawobj(drawctxt);
+ _retire_timestamp(DRAWOBJ(cmdobj));
+ return 0;
+ }
/*
- * If the marker isn't expired but the SKIP bit is set
- * then there are real commands following this one in
- * the queue. This means that we need to dispatch the
- * command so that we can keep the timestamp accounting
- * correct. If skip isn't set then we block this queue
+ * If the marker isn't expired but the SKIP bit
+ * is set then there are real commands following
+ * this one in the queue. This means that we
+ * need to dispatch the command so that we can
+ * keep the timestamp accounting correct. If
+ * skip isn't set then we block this queue
* until the dependent timestamp expires
*/
- if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)))
- pending = true;
+ return test_bit(CMDOBJ_SKIP, &cmdobj->priv) ? 1 : -EAGAIN;
+}
- if (kgsl_cmdbatch_events_pending(cmdbatch))
- pending = true;
+static int _retire_syncobj(struct kgsl_drawobj_sync *syncobj,
+ struct adreno_context *drawctxt)
+{
+ if (!kgsl_drawobj_events_pending(syncobj)) {
+ _pop_drawobj(drawctxt);
+ kgsl_drawobj_destroy(DRAWOBJ(syncobj));
+ return 0;
+ }
/*
- * If changes are pending and the canary timer hasn't been
- * started yet, start it
+ * If we got here, there are pending events for the sync object.
+ * Start the canary timer if it hasn't been started already.
*/
- if (pending) {
- /*
- * If syncpoints are pending start the canary timer if
- * it hasn't already been started
- */
- if (!cmdbatch->timeout_jiffies) {
- cmdbatch->timeout_jiffies =
- jiffies + msecs_to_jiffies(5000);
- mod_timer(&cmdbatch->timer, cmdbatch->timeout_jiffies);
- }
-
- return ERR_PTR(-EAGAIN);
+ if (!syncobj->timeout_jiffies) {
+ syncobj->timeout_jiffies = jiffies + msecs_to_jiffies(5000);
+ mod_timer(&syncobj->timer, syncobj->timeout_jiffies);
}
- _pop_cmdbatch(drawctxt);
- return cmdbatch;
+ return -EAGAIN;
}
-/**
- * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
- * @drawctxt: Pointer to the adreno draw context
- *
- * Dequeue a new command batch from the context list
+/*
+ * Retires all expired marker and sync objs from the context
+ * queue and returns one of the following:
+ * a) next drawobj that needs to be sent to ringbuffer
+ * b) -EAGAIN for syncobj with syncpoints pending.
+ * c) -EAGAIN for markerobj whose marker timestamp has not expired yet.
+ * d) NULL for no commands remaining in drawqueue.
*/
-static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
- struct adreno_context *drawctxt)
+static struct kgsl_drawobj *_process_drawqueue_get_next_drawobj(
+ struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
+ unsigned int i = drawctxt->drawqueue_head;
+ int ret = 0;
- spin_lock(&drawctxt->lock);
- cmdbatch = _get_cmdbatch(drawctxt);
- spin_unlock(&drawctxt->lock);
+ if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail)
+ return NULL;
- /*
- * Delete the timer and wait for timer handler to finish executing
- * on another core before queueing the buffer. We must do this
- * without holding any spin lock that the timer handler might be using
- */
- if (!IS_ERR_OR_NULL(cmdbatch))
- del_timer_sync(&cmdbatch->timer);
+ for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail;
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) {
+
+ drawobj = drawctxt->drawqueue[i];
+
+ if (drawobj == NULL)
+ return NULL;
+
+ if (drawobj->type == CMDOBJ_TYPE)
+ return drawobj;
+ else if (drawobj->type == MARKEROBJ_TYPE) {
+ ret = _retire_markerobj(CMDOBJ(drawobj), drawctxt);
+ /* Special case where marker needs to be sent to GPU */
+ if (ret == 1)
+ return drawobj;
+ } else if (drawobj->type == SYNCOBJ_TYPE)
+ ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
+
+ if (ret == -EAGAIN)
+ return ERR_PTR(-EAGAIN);
+
+ continue;
+ }
- return cmdbatch;
+ return NULL;
}
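
The scan above replaces the old recursive _expire_markers(): it walks the ring from head to tail, dispatching command objects immediately, retiring expired markers and syncs in place, and stopping the whole queue when it hits a pending dependency. A simplified model of that control flow (the SKIP special case for markers is omitted):

    #include <stdio.h>

    enum { T_CMD, T_MARKER, T_SYNC };

    /* Returns the index of the next dispatchable entry,
     * or -1 when the queue is empty or blocked on a dependency. */
    static int next_dispatchable(const int *type, const int *pending,
                                 int head, int tail, int size)
    {
        int i;

        for (i = head; i != tail; i = (i + 1) % size) {
            if (type[i] == T_CMD)
                return i;      /* real commands always dispatch */
            if (pending[i])
                return -1;     /* blocked: caller sees -EAGAIN */
            /* expired marker/sync: would be retired here; keep scanning */
        }
        return -1;             /* queue empty */
    }

    int main(void)
    {
        int type[]    = { T_MARKER, T_SYNC, T_CMD };
        int pending[] = { 0,        0,      0     };

        printf("next: %d\n", next_dispatchable(type, pending, 0, 3, 4));
        return 0;
    }
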
/**
- * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context
+ * adreno_dispatcher_requeue_cmdobj() - Put a command back on the context
* queue
* @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
+ * @cmdobj: Pointer to the KGSL command object to requeue
*
* Failure to submit a command to the ringbuffer isn't the fault of the command
* being submitted so if a failure happens, push it back on the head of the
* context queue to be reconsidered unless the context got detached.
*/
-static inline int adreno_dispatcher_requeue_cmdbatch(
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
+static inline int adreno_dispatcher_requeue_cmdobj(
+ struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
{
unsigned int prev;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
spin_lock(&drawctxt->lock);
if (kgsl_context_detached(&drawctxt->base) ||
kgsl_context_invalid(&drawctxt->base)) {
spin_unlock(&drawctxt->lock);
- /* get rid of this cmdbatch since the context is bad */
- kgsl_cmdbatch_destroy(cmdbatch);
+ /* get rid of this drawobj since the context is bad */
+ kgsl_drawobj_destroy(drawobj);
return -ENOENT;
}
- prev = drawctxt->cmdqueue_head == 0 ?
- (ADRENO_CONTEXT_CMDQUEUE_SIZE - 1) :
- (drawctxt->cmdqueue_head - 1);
+ prev = drawctxt->drawqueue_head == 0 ?
+ (ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1) :
+ (drawctxt->drawqueue_head - 1);
/*
* The maximum queue size always needs to be one less than the size of
- * the ringbuffer queue so there is "room" to put the cmdbatch back in
+ * the ringbuffer queue so there is "room" to put the drawobj back in
*/
- BUG_ON(prev == drawctxt->cmdqueue_tail);
+ WARN_ON(prev == drawctxt->drawqueue_tail);
- drawctxt->cmdqueue[prev] = cmdbatch;
+ drawctxt->drawqueue[prev] = drawobj;
drawctxt->queued++;
/* Reset the command queue head to reflect the newly requeued change */
- drawctxt->cmdqueue_head = prev;
+ drawctxt->drawqueue_head = prev;
spin_unlock(&drawctxt->lock);
return 0;
}
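
Requeueing steps the ring head backwards one slot so the failed submission is reconsidered first; the downgrade from BUG_ON to WARN_ON means an overflowing requeue now logs instead of panicking. A sketch of the wrap-around index arithmetic:

    /* Model of stepping a ring head back one slot for a requeue. */
    #include <stdio.h>

    #define RING_SIZE 8

    static unsigned int prev_slot(unsigned int head)
    {
        return (head == 0) ? (RING_SIZE - 1) : (head - 1);
    }

    int main(void)
    {
        printf("%u %u\n", prev_slot(0), prev_slot(5)); /* prints: 7 4 */
        return 0;
    }
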
@@ -545,21 +522,22 @@ static void dispatcher_queue_context(struct adreno_device *adreno_dev,
}
/**
- * sendcmd() - Send a command batch to the GPU hardware
+ * sendcmd() - Send a drawobj to the GPU hardware
* @dispatcher: Pointer to the adreno dispatcher struct
- * @cmdbatch: Pointer to the KGSL cmdbatch being sent
+ * @drawobj: Pointer to the KGSL drawobj being sent
*
- * Send a KGSL command batch to the GPU hardware
+ * Send a KGSL drawobj to the GPU hardware
*/
static int sendcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
+ struct adreno_dispatcher_drawqueue *dispatch_q =
+ ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj);
struct adreno_submit_time time;
uint64_t secs = 0;
unsigned long nsecs = 0;
@@ -588,15 +566,15 @@ static int sendcmd(struct adreno_device *adreno_dev,
set_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
}
- if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv)) {
- set_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv);
- cmdbatch->profile_index = adreno_dev->cmdbatch_profile_index;
- adreno_dev->cmdbatch_profile_index =
- (adreno_dev->cmdbatch_profile_index + 1) %
- ADRENO_CMDBATCH_PROFILE_COUNT;
+ if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv)) {
+ set_bit(CMDOBJ_PROFILE, &cmdobj->priv);
+ cmdobj->profile_index = adreno_dev->profile_index;
+ adreno_dev->profile_index =
+ (adreno_dev->profile_index + 1) %
+ ADRENO_DRAWOBJ_PROFILE_COUNT;
}
- ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch, &time);
+ ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdobj, &time);
/*
* On the first command, if the submission was successful, then read the
@@ -649,17 +627,17 @@ static int sendcmd(struct adreno_device *adreno_dev,
secs = time.ktime;
nsecs = do_div(secs, 1000000000);
- trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight,
+ trace_adreno_cmdbatch_submitted(drawobj, (int) dispatcher->inflight,
time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
adreno_get_rptr(drawctxt->rb));
mutex_unlock(&device->mutex);
- cmdbatch->submit_ticks = time.ticks;
+ cmdobj->submit_ticks = time.ticks;
- dispatch_q->cmd_q[dispatch_q->tail] = cmdbatch;
+ dispatch_q->cmd_q[dispatch_q->tail] = cmdobj;
dispatch_q->tail = (dispatch_q->tail + 1) %
- ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE;
/*
* For the first submission in any given command queue update the
@@ -670,7 +648,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
if (dispatch_q->inflight == 1)
dispatch_q->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ msecs_to_jiffies(adreno_drawobj_timeout);
/*
* If we believe ourselves to be current and preemption isn't a thing,
@@ -678,7 +656,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
* thing and the timer will be set up in due time
*/
if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- if (cmdqueue_is_current(dispatch_q))
+ if (drawqueue_is_current(dispatch_q))
mod_timer(&dispatcher->timer, dispatch_q->expires);
}
@@ -704,75 +682,70 @@ static int sendcmd(struct adreno_device *adreno_dev,
static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt)
{
- struct adreno_dispatcher_cmdqueue *dispatch_q =
+ struct adreno_dispatcher_drawqueue *dispatch_q =
&(drawctxt->rb->dispatch_q);
int count = 0;
int ret = 0;
- int inflight = _cmdqueue_inflight(dispatch_q);
+ int inflight = _drawqueue_inflight(dispatch_q);
unsigned int timestamp;
if (dispatch_q->inflight >= inflight) {
- expire_markers(drawctxt);
+ spin_lock(&drawctxt->lock);
+ _process_drawqueue_get_next_drawobj(drawctxt);
+ spin_unlock(&drawctxt->lock);
return -EBUSY;
}
/*
- * Each context can send a specific number of command batches per cycle
+ * Each context can send a specific number of drawobjs per cycle
*/
- while ((count < _context_cmdbatch_burst) &&
+ while ((count < _context_drawobj_burst) &&
(dispatch_q->inflight < inflight)) {
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
+ struct kgsl_drawobj_cmd *cmdobj;
if (adreno_gpu_fault(adreno_dev) != 0)
break;
- cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt);
+ spin_lock(&drawctxt->lock);
+ drawobj = _process_drawqueue_get_next_drawobj(drawctxt);
/*
- * adreno_context_get_cmdbatch returns -EAGAIN if the current
- * cmdbatch has pending sync points so no more to do here.
+ * _process_drawqueue_get_next_drawobj returns -EAGAIN if the
+ * current drawobj has pending sync points so no more to do here.
* When the sync points are satisfied then the context will get
* requeued
*/
- if (IS_ERR_OR_NULL(cmdbatch)) {
- if (IS_ERR(cmdbatch))
- ret = PTR_ERR(cmdbatch);
+ if (IS_ERR_OR_NULL(drawobj)) {
+ if (IS_ERR(drawobj))
+ ret = PTR_ERR(drawobj);
+ spin_unlock(&drawctxt->lock);
break;
}
+ _pop_drawobj(drawctxt);
+ spin_unlock(&drawctxt->lock);
- /*
- * If this is a synchronization submission then there are no
- * commands to submit. Discard it and get the next item from
- * the queue. Decrement count so this packet doesn't count
- * against the burst for the context
- */
-
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- kgsl_cmdbatch_destroy(cmdbatch);
- continue;
- }
-
- timestamp = cmdbatch->timestamp;
-
- ret = sendcmd(adreno_dev, cmdbatch);
+ timestamp = drawobj->timestamp;
+ cmdobj = CMDOBJ(drawobj);
+ ret = sendcmd(adreno_dev, cmdobj);
/*
- * On error from sendcmd() try to requeue the command batch
+ * On error from sendcmd() try to requeue the cmdobj
* unless we got back -ENOENT which means that the context has
* been detached and there will be no more deliveries from here
*/
if (ret != 0) {
- /* Destroy the cmdbatch on -ENOENT */
+ /* Destroy the cmdobj on -ENOENT */
if (ret == -ENOENT)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
else {
/*
* If the requeue returns an error, return that
* instead of whatever sendcmd() sent us
*/
- int r = adreno_dispatcher_requeue_cmdbatch(
- drawctxt, cmdbatch);
+ int r = adreno_dispatcher_requeue_cmdobj(
+ drawctxt, cmdobj);
if (r)
ret = r;
}
@@ -934,99 +907,87 @@ static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
/**
* get_timestamp() - Return the next timestamp for the context
* @drawctxt - Pointer to an adreno draw context struct
- * @cmdbatch - Pointer to a command batch
+ * @drawobj - Pointer to a drawobj
* @timestamp - Pointer to a timestamp value possibly passed from the user
+ * @user_ts - user generated timestamp
*
* Assign a timestamp based on the settings of the draw context and the
* draw object.
*/
static int get_timestamp(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
+ struct kgsl_drawobj *drawobj, unsigned int *timestamp,
+ unsigned int user_ts)
{
- /* Synchronization commands don't get a timestamp */
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- *timestamp = 0;
- return 0;
- }
if (drawctxt->base.flags & KGSL_CONTEXT_USER_GENERATED_TS) {
/*
* User specified timestamps need to be greater than the last
* issued timestamp in the context
*/
- if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0)
+ if (timestamp_cmp(drawctxt->timestamp, user_ts) >= 0)
return -ERANGE;
- drawctxt->timestamp = *timestamp;
+ drawctxt->timestamp = user_ts;
} else
drawctxt->timestamp++;
*timestamp = drawctxt->timestamp;
+ drawobj->timestamp = *timestamp;
return 0;
}
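
get_timestamp() now receives the user-supplied value as a separate user_ts argument instead of reading it back through the in/out pointer, but the invariant is unchanged: user-generated timestamps must be strictly increasing within a context. A model of that check, assuming a plain comparison for timestamp_cmp() (the kernel version also handles wrap-around):

    #include <stdio.h>

    /* Simplified stand-in for timestamp_cmp(): no wrap handling. */
    static int ts_cmp(unsigned int a, unsigned int b)
    {
        return (a > b) - (a < b);
    }

    /* Returns 0 and advances *ctx_ts on success, -1 (think -ERANGE) otherwise. */
    static int assign_user_ts(unsigned int *ctx_ts, unsigned int user_ts)
    {
        if (ts_cmp(*ctx_ts, user_ts) >= 0)
            return -1;
        *ctx_ts = user_ts;
        return 0;
    }

    int main(void)
    {
        unsigned int ctx_ts = 10;

        printf("%d\n", assign_user_ts(&ctx_ts, 11)); /*  0: accepted    */
        printf("%d\n", assign_user_ts(&ctx_ts, 11)); /* -1: not greater */
        return 0;
    }
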
-/**
- * adreno_dispactcher_queue_cmd() - Queue a new command in the context
- * @adreno_dev: Pointer to the adreno device struct
- * @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the command batch being submitted
- * @timestamp: Pointer to the requested timestamp
- *
- * Queue a command in the context - if there isn't any room in the queue, then
- * block until there is
- */
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
+static void _set_ft_policy(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
{
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
- int ret;
-
- spin_lock(&drawctxt->lock);
-
- if (kgsl_context_detached(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -ENOENT;
- }
+ /*
+ * Set the fault tolerance policy for the command object - assuming the
+ * context hasn't disabled FT, use the current device policy
+ */
+ if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy);
+ else
+ cmdobj->fault_policy = adreno_dev->ft_policy;
+}
+static void _cmdobj_set_flags(struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
+{
/*
* Force the preamble for this submission only - this is usually
* requested by the dispatcher as part of fault recovery
*/
-
if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE,
&drawctxt->base.priv))
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
/*
- * Force the premable if set from userspace in the context or cmdbatch
- * flags
+ * Force the preamble if set from userspace in the context or
+ * command obj flags
*/
-
if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) ||
- (cmdbatch->flags & KGSL_CMDBATCH_CTX_SWITCH))
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ (cmdobj->base.flags & KGSL_DRAWOBJ_CTX_SWITCH))
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
- /* Skip this cmdbatch commands if IFH_NOP is enabled */
+ /* Skip this ib if IFH_NOP is enabled */
if (drawctxt->base.flags & KGSL_CONTEXT_IFH_NOP)
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
/*
* If we are waiting for the end of frame and it hasn't appeared yet,
- * then mark the command batch as skipped. It will still progress
+ * then mark the command obj as skipped. It will still progress
* through the pipeline but it won't actually send any commands
*/
if (test_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv)) {
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
/*
- * If this command batch represents the EOF then clear the way
+ * If this command obj represents the EOF then clear the way
* for the dispatcher to continue submitting
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_END_OF_FRAME) {
+ if (cmdobj->base.flags & KGSL_DRAWOBJ_END_OF_FRAME) {
clear_bit(ADRENO_CONTEXT_SKIP_EOF,
&drawctxt->base.priv);
@@ -1038,10 +999,84 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
&drawctxt->base.priv);
}
}
+}
- /* Wait for room in the context queue */
+static inline int _check_context_state(struct kgsl_context *context)
+{
+ if (kgsl_context_invalid(context))
+ return -EDEADLK;
+
+ if (kgsl_context_detached(context))
+ return -ENOENT;
+
+ return 0;
+}
+
+static inline bool _verify_ib(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_memobj_node *ib)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+
+ /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */
+ if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) {
+ pr_context(device, context, "ctxt %d invalid ib size %lld\n",
+ context->id, ib->size);
+ return false;
+ }
+
+ /* Make sure that the address is mapped */
+ if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
+ pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
+ context->id, ib->gpuaddr);
+ return false;
+ }
+
+ return true;
+}
+
+static inline int _verify_cmdobj(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_memobj_node *ib;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ /* Verify the IBs before they get queued */
+ if (drawobj[i]->type == CMDOBJ_TYPE) {
+ struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj[i]);
+
+ list_for_each_entry(ib, &cmdobj->cmdlist, node)
+ if (_verify_ib(dev_priv,
+ &ADRENO_CONTEXT(context)->base, ib)
+ == false)
+ return -EINVAL;
+ /*
+ * Clear the wake on touch bit to indicate an IB has
+ * been submitted since the last time we set it.
+ * But only clear it when we have rendering commands.
+ */
+ device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
+ }
+
+ /* A3XX does not have support for drawobj profiling */
+ if (adreno_is_a3xx(ADRENO_DEVICE(device)) &&
+ (drawobj[i]->flags & KGSL_DRAWOBJ_PROFILING))
+ return -EOPNOTSUPP;
+ }
- while (drawctxt->queued >= _context_cmdqueue_size) {
+ return 0;
+}
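
_verify_ib() enforces two preconditions before anything is queued: the IB size converted to dwords must fit the CP limit of 0xFFFFF, and the GPU address must be mapped in the submitting process's pagetable. A model of the size check with the address lookup stubbed out:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stub: the real check walks the process pagetable. */
    static bool addr_is_mapped(uint64_t gpuaddr) { return gpuaddr != 0; }

    static bool verify_ib(uint64_t gpuaddr, uint64_t size_bytes)
    {
        /* CP limit is 0xFFFFF dwords; sizes are carried in bytes. */
        if (size_bytes == 0 || (size_bytes >> 2) > 0xFFFFF)
            return false;
        return addr_is_mapped(gpuaddr);
    }

    int main(void)
    {
        printf("%d\n", verify_ib(0x1000, 64));        /* 1: ok        */
        printf("%d\n", verify_ib(0x1000, 0x500000));  /* 0: too large */
        return 0;
    }
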
+
+static inline int _wait_for_room_in_context_queue(
+ struct adreno_context *drawctxt)
+{
+ int ret = 0;
+
+ /* Wait for room in the context queue */
+ while (drawctxt->queued >= _context_drawqueue_size) {
trace_adreno_drawctxt_sleep(drawctxt);
spin_unlock(&drawctxt->lock);
@@ -1052,98 +1087,210 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
spin_lock(&drawctxt->lock);
trace_adreno_drawctxt_wake(drawctxt);
- if (ret <= 0) {
- spin_unlock(&drawctxt->lock);
+ if (ret <= 0)
return (ret == 0) ? -ETIMEDOUT : (int) ret;
- }
}
+
+ return 0;
+}
+
+static int _check_context_state_to_queue_cmds(
+ struct adreno_context *drawctxt)
+{
+ int ret = _check_context_state(&drawctxt->base);
+
+ if (ret)
+ return ret;
+
+ ret = _wait_for_room_in_context_queue(drawctxt);
+ if (ret)
+ return ret;
+
/*
* Account for the possibility that the context got invalidated
* while we were sleeping
*/
+ return _check_context_state(&drawctxt->base);
+}
- if (kgsl_context_invalid(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -EDEADLK;
- }
- if (kgsl_context_detached(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -ENOENT;
- }
+static void _queue_drawobj(struct adreno_context *drawctxt,
+ struct kgsl_drawobj *drawobj)
+{
+ /* Put the command into the queue */
+ drawctxt->drawqueue[drawctxt->drawqueue_tail] = drawobj;
+ drawctxt->drawqueue_tail = (drawctxt->drawqueue_tail + 1) %
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE;
+ drawctxt->queued++;
+ trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
+}
- ret = get_timestamp(drawctxt, cmdbatch, timestamp);
- if (ret) {
- spin_unlock(&drawctxt->lock);
+static int _queue_markerobj(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj,
+ uint32_t *timestamp, unsigned int user_ts)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj);
+ int ret;
+
+ ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+ if (ret)
return ret;
+
+ /*
+ * See if we can fastpath this thing - if nothing is queued
+ * and nothing is inflight, retire without bothering the GPU
+ */
+ if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device,
+ drawobj->context, drawctxt->queued_timestamp)) {
+ trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
+ _retire_timestamp(drawobj);
+ return 1;
}
- cmdbatch->timestamp = *timestamp;
+ /*
+ * Remember the last queued timestamp - the marker will block
+ * until that timestamp is expired (unless another command
+ * comes along and forces the marker to execute)
+ */
- if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {
+ markerobj->marker_timestamp = drawctxt->queued_timestamp;
+ drawctxt->queued_timestamp = *timestamp;
+ _set_ft_policy(adreno_dev, drawctxt, markerobj);
+ _cmdobj_set_flags(drawctxt, markerobj);
- /*
- * See if we can fastpath this thing - if nothing is queued
- * and nothing is inflight retire without bothering the GPU
- */
+ _queue_drawobj(drawctxt, drawobj);
- if (!drawctxt->queued && kgsl_check_timestamp(cmdbatch->device,
- cmdbatch->context, drawctxt->queued_timestamp)) {
- trace_adreno_cmdbatch_queued(cmdbatch,
- drawctxt->queued);
+ return 0;
+}
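
_queue_markerobj() retires a marker on the spot when the context queue is empty and the timestamp the marker depends on has already retired, returning 1 so the caller can skip the dispatcher entirely. A model of that fastpath decision, with a wrap-safe stand-in for kgsl_check_timestamp():

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for kgsl_check_timestamp(): has `ts` retired yet? */
    static bool ts_retired(unsigned int retired, unsigned int ts)
    {
        return (int)(retired - ts) >= 0;
    }

    /* True when a marker can retire without touching the GPU. */
    static bool marker_fastpath(unsigned int queued, unsigned int retired,
                                unsigned int queued_ts)
    {
        return queued == 0 && ts_retired(retired, queued_ts);
    }

    int main(void)
    {
        printf("%d\n", marker_fastpath(0, 42, 42)); /* 1: fastpath taken  */
        printf("%d\n", marker_fastpath(3, 42, 42)); /* 0: queue not empty */
        return 0;
    }
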
- _retire_marker(cmdbatch);
- spin_unlock(&drawctxt->lock);
- return 0;
- }
+static int _queue_cmdobj(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *cmdobj,
+ uint32_t *timestamp, unsigned int user_ts)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ unsigned int j;
+ int ret;
- /*
- * Remember the last queued timestamp - the marker will block
- * until that timestamp is expired (unless another command
- * comes along and forces the marker to execute)
- */
+ ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+ if (ret)
+ return ret;
+
+ /*
+ * If this is a real command then we need to force any markers
+ * queued before it to dispatch to keep time linear - set the
+ * skip bit so the commands get NOPed.
+ */
+ j = drawctxt->drawqueue_head;
+
+ while (j != drawctxt->drawqueue_tail) {
+ if (drawctxt->drawqueue[j]->type == MARKEROBJ_TYPE) {
+ struct kgsl_drawobj_cmd *markerobj =
+ CMDOBJ(drawctxt->drawqueue[j]);
+ set_bit(CMDOBJ_SKIP, &markerobj->priv);
+ }
- cmdbatch->marker_timestamp = drawctxt->queued_timestamp;
+ j = DRAWQUEUE_NEXT(j, ADRENO_CONTEXT_DRAWQUEUE_SIZE);
}
- /* SYNC commands have timestamp 0 and will get optimized out anyway */
- if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC))
- drawctxt->queued_timestamp = *timestamp;
+ drawctxt->queued_timestamp = *timestamp;
+ _set_ft_policy(adreno_dev, drawctxt, cmdobj);
+ _cmdobj_set_flags(drawctxt, cmdobj);
- /*
- * Set the fault tolerance policy for the command batch - assuming the
- * context hasn't disabled FT use the current device policy
- */
+ _queue_drawobj(drawctxt, drawobj);
- if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
- set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
- else
- cmdbatch->fault_policy = adreno_dev->ft_policy;
+ return 0;
+}
- /* Put the command into the queue */
- drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
- drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
+static void _queue_syncobj(struct adreno_context *drawctxt,
+ struct kgsl_drawobj_sync *syncobj, uint32_t *timestamp)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
- /*
- * If this is a real command then we need to force any markers queued
- * before it to dispatch to keep time linear - set the skip bit so
- * the commands get NOPed.
- */
+ *timestamp = 0;
+ drawobj->timestamp = 0;
- if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)) {
- unsigned int i = drawctxt->cmdqueue_head;
+ _queue_drawobj(drawctxt, drawobj);
+}
- while (i != drawctxt->cmdqueue_tail) {
- if (drawctxt->cmdqueue[i]->flags & KGSL_CMDBATCH_MARKER)
- set_bit(CMDBATCH_FLAG_SKIP,
- &drawctxt->cmdqueue[i]->priv);
+/**
+ * adreno_dispatcher_queue_cmds() - Queue new draw objects on the context
+ * @dev_priv: Pointer to the device private struct
+ * @context: Pointer to the kgsl draw context
+ * @drawobj: Pointer to the array of drawobjs being submitted
+ * @count: Number of drawobjs being submitted
+ * @timestamp: Pointer to the requested timestamp
+ *
+ * Queue a command in the context - if there isn't any room in the queue, then
+ * block until there is
+ */
+int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp)
- i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE);
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ struct adreno_dispatcher_drawqueue *dispatch_q;
+ int ret;
+ unsigned int i, user_ts;
+
+ ret = _check_context_state(&drawctxt->base);
+ if (ret)
+ return ret;
+
+ ret = _verify_cmdobj(dev_priv, context, drawobj, count);
+ if (ret)
+ return ret;
+
+ /* wait for the suspend gate */
+ wait_for_completion(&device->halt_gate);
+
+ spin_lock(&drawctxt->lock);
+
+ ret = _check_context_state_to_queue_cmds(drawctxt);
+ if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+
+ user_ts = *timestamp;
+
+ for (i = 0; i < count; i++) {
+
+ switch (drawobj[i]->type) {
+ case MARKEROBJ_TYPE:
+ ret = _queue_markerobj(adreno_dev, drawctxt,
+ CMDOBJ(drawobj[i]),
+ timestamp, user_ts);
+ if (ret == 1) {
+ spin_unlock(&drawctxt->lock);
+ goto done;
+ } else if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+ break;
+ case CMDOBJ_TYPE:
+ ret = _queue_cmdobj(adreno_dev, drawctxt,
+ CMDOBJ(drawobj[i]),
+ timestamp, user_ts);
+ if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+ break;
+ case SYNCOBJ_TYPE:
+ _queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]),
+ timestamp);
+ break;
+ default:
+ spin_unlock(&drawctxt->lock);
+ return -EINVAL;
}
+
}
- drawctxt->queued++;
- trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
+ dispatch_q = ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj[0]);
_track_context(adreno_dev, dispatch_q, drawctxt);
@@ -1163,8 +1310,11 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
* queue will try to schedule new commands anyway.
*/
- if (dispatch_q->inflight < _context_cmdbatch_burst)
+ if (dispatch_q->inflight < _context_drawobj_burst)
adreno_dispatcher_issuecmds(adreno_dev);
+done:
+ if (test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
+ return -EPROTO;
return 0;
}
@@ -1208,15 +1358,15 @@ static void mark_guilty_context(struct kgsl_device *device, unsigned int id)
}
/*
- * If an IB inside of the command batch has a gpuaddr that matches the base
+ * If an IB inside of the drawobj has a gpuaddr that matches the base
* passed in then zero the size which effectively skips it when it is submitted
* in the ringbuffer.
*/
-static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
+static void _skip_ib(struct kgsl_drawobj_cmd *cmdobj, uint64_t base)
{
struct kgsl_memobj_node *ib;
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
if (ib->gpuaddr == base) {
ib->priv |= MEMOBJ_SKIP;
if (base)
@@ -1225,10 +1375,11 @@ static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
}
}
-static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
+static void _skip_cmd(struct kgsl_drawobj_cmd *cmdobj,
+ struct kgsl_drawobj_cmd **replay, int count)
{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
int i;
/*
@@ -1243,9 +1394,9 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
* b) force preamble for next commandbatch
*/
for (i = 1; i < count; i++) {
- if (replay[i]->context->id == cmdbatch->context->id) {
+ if (DRAWOBJ(replay[i])->context->id == drawobj->context->id) {
replay[i]->fault_policy = replay[0]->fault_policy;
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
set_bit(KGSL_FT_SKIPCMD, &replay[i]->fault_recovery);
break;
}
@@ -1262,41 +1413,44 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
drawctxt->fault_policy = replay[0]->fault_policy;
}
- /* set the flags to skip this cmdbatch */
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
- cmdbatch->fault_recovery = 0;
+ /* set the flags to skip this cmdobj */
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
+ cmdobj->fault_recovery = 0;
}
-static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
+static void _skip_frame(struct kgsl_drawobj_cmd *cmdobj,
+ struct kgsl_drawobj_cmd **replay, int count)
{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
int skip = 1;
int i;
for (i = 0; i < count; i++) {
+ struct kgsl_drawobj *replay_obj = DRAWOBJ(replay[i]);
+
/*
- * Only operate on command batches that belong to the
+ * Only operate on drawobjs that belong to the
* faulting context
*/
- if (replay[i]->context->id != cmdbatch->context->id)
+ if (replay_obj->context->id != drawobj->context->id)
continue;
/*
- * Skip all the command batches in this context until
+ * Skip all the drawobjs in this context until
* the EOF flag is seen. If the EOF flag is seen then
* force the preamble for the next command.
*/
if (skip) {
- set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv);
+ set_bit(CMDOBJ_SKIP, &replay[i]->priv);
- if (replay[i]->flags & KGSL_CMDBATCH_END_OF_FRAME)
+ if (replay_obj->flags & KGSL_DRAWOBJ_END_OF_FRAME)
skip = 0;
} else {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
return;
}
}
@@ -1318,26 +1472,28 @@ static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
set_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv);
}
-static void remove_invalidated_cmdbatches(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count)
+static void remove_invalidated_cmdobjs(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd **replay, int count)
{
int i;
for (i = 0; i < count; i++) {
- struct kgsl_cmdbatch *cmd = replay[i];
- if (cmd == NULL)
+ struct kgsl_drawobj_cmd *cmdobj = replay[i];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+
+ if (cmdobj == NULL)
continue;
- if (kgsl_context_detached(cmd->context) ||
- kgsl_context_invalid(cmd->context)) {
+ if (kgsl_context_detached(drawobj->context) ||
+ kgsl_context_invalid(drawobj->context)) {
replay[i] = NULL;
mutex_lock(&device->mutex);
kgsl_cancel_events_timestamp(device,
- &cmd->context->events, cmd->timestamp);
+ &drawobj->context->events, drawobj->timestamp);
mutex_unlock(&device->mutex);
- kgsl_cmdbatch_destroy(cmd);
+ kgsl_drawobj_destroy(drawobj);
}
}
}
@@ -1361,9 +1517,10 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context)
static void adreno_fault_header(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
unsigned int status, rptr, wptr, ib1sz, ib2sz;
uint64_t ib1base, ib2base;
@@ -1377,22 +1534,22 @@ static void adreno_fault_header(struct kgsl_device *device,
ADRENO_REG_CP_IB2_BASE_HI, &ib2base);
adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2sz);
- if (cmdbatch != NULL) {
+ if (drawobj != NULL) {
struct adreno_context *drawctxt =
- ADRENO_CONTEXT(cmdbatch->context);
+ ADRENO_CONTEXT(drawobj->context);
- trace_adreno_gpu_fault(cmdbatch->context->id,
- cmdbatch->timestamp,
+ trace_adreno_gpu_fault(drawobj->context->id,
+ drawobj->timestamp,
status, rptr, wptr, ib1base, ib1sz,
ib2base, ib2sz, drawctxt->rb->id);
- pr_fault(device, cmdbatch,
+ pr_fault(device, drawobj,
"gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- cmdbatch->context->id, cmdbatch->timestamp, status,
+ drawobj->context->id, drawobj->timestamp, status,
rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
if (rb != NULL)
- pr_fault(device, cmdbatch,
+ pr_fault(device, drawobj,
"gpu fault rb %d rb sw r/w %4.4x/%4.4x\n",
rb->id, rptr, rb->wptr);
} else {
@@ -1411,33 +1568,34 @@ static void adreno_fault_header(struct kgsl_device *device,
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
kgsl_context_detached(&drawctxt->base)) {
- pr_context(KGSL_DEVICE(adreno_dev), cmdbatch->context,
- "gpu detached context %d\n", cmdbatch->context->id);
+ pr_context(KGSL_DEVICE(adreno_dev), drawobj->context,
+ "gpu detached context %d\n", drawobj->context->id);
clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
}
}
/**
- * process_cmdbatch_fault() - Process a cmdbatch for fault policies
- * @device: Device on which the cmdbatch caused a fault
- * @replay: List of cmdbatches that are to be replayed on the device. The
- * faulting cmdbatch is the first command in the replay list and the remaining
- * cmdbatches in the list are commands that were submitted to the same queue
+ * process_cmdobj_fault() - Process a cmdobj for fault policies
+ * @device: Device on which the cmdobj caused a fault
+ * @replay: List of cmdobjs that are to be replayed on the device. The
+ * first command in the replay list is the faulting command and the remaining
+ * cmdobjs in the list are commands that were submitted to the same queue
* as the faulting one.
- * @count: Number of cmdbatches in replay
+ * @count: Number of cmdobjs in replay
* @base: The IB1 base at the time of fault
* @fault: The fault type
*/
-static void process_cmdbatch_fault(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count,
+static void process_cmdobj_fault(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd **replay, int count,
unsigned int base,
int fault)
{
- struct kgsl_cmdbatch *cmdbatch = replay[0];
+ struct kgsl_drawobj_cmd *cmdobj = replay[0];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
int i;
char *state = "failed";
@@ -1451,18 +1609,18 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
* where 1st and 4th gpu hang are more than 3 seconds apart we
* won't disable GFT and invalidate the context.
*/
- if (test_bit(KGSL_FT_THROTTLE, &cmdbatch->fault_policy)) {
- if (time_after(jiffies, (cmdbatch->context->fault_time
+ if (test_bit(KGSL_FT_THROTTLE, &cmdobj->fault_policy)) {
+ if (time_after(jiffies, (drawobj->context->fault_time
+ msecs_to_jiffies(_fault_throttle_time)))) {
- cmdbatch->context->fault_time = jiffies;
- cmdbatch->context->fault_count = 1;
+ drawobj->context->fault_time = jiffies;
+ drawobj->context->fault_count = 1;
} else {
- cmdbatch->context->fault_count++;
- if (cmdbatch->context->fault_count >
+ drawobj->context->fault_count++;
+ if (drawobj->context->fault_count >
_fault_throttle_burst) {
set_bit(KGSL_FT_DISABLE,
- &cmdbatch->fault_policy);
- pr_context(device, cmdbatch->context,
+ &cmdobj->fault_policy);
+ pr_context(device, drawobj->context,
"gpu fault threshold exceeded %d faults in %d msecs\n",
_fault_throttle_burst,
_fault_throttle_time);
@@ -1471,45 +1629,45 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
}
/*
- * If FT is disabled for this cmdbatch invalidate immediately
+ * If FT is disabled for this cmdobj invalidate immediately
*/
- if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) ||
- test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) {
+ if (test_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy) ||
+ test_bit(KGSL_FT_TEMP_DISABLE, &cmdobj->fault_policy)) {
state = "skipped";
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
}
/* If the context is detached do not run FT on context */
- if (kgsl_context_detached(cmdbatch->context)) {
+ if (kgsl_context_detached(drawobj->context)) {
state = "detached";
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
}
/*
- * Set a flag so we don't print another PM dump if the cmdbatch fails
+ * Set a flag so we don't print another PM dump if the cmdobj fails
* again on replay
*/
- set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+ set_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy);
/*
* A hardware fault generally means something was deterministically
- * wrong with the command batch - no point in trying to replay it
+ * wrong with the cmdobj - no point in trying to replay it
* Clear the replay bit and move on to the next policy level
*/
if (fault & ADRENO_HARD_FAULT)
- clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy));
+ clear_bit(KGSL_FT_REPLAY, &(cmdobj->fault_policy));
/*
* A timeout fault means the IB timed out - clear the policy and
* invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay
- * because we won't see this cmdbatch again
+ * because we won't see this cmdobj again
*/
if (fault & ADRENO_TIMEOUT_FAULT)
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
/*
* If the context had a GPU page fault then it is likely it would fault
@@ -1517,83 +1675,84 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
*/
if (test_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv)) {
+ &drawobj->context->priv)) {
/* we'll need to resume the mmu later... */
- clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
+ clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy);
clear_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv);
+ &drawobj->context->priv);
}
/*
- * Execute the fault tolerance policy. Each command batch stores the
+ * Execute the fault tolerance policy. Each cmdobj stores the
* current fault policy that was set when it was queued.
* As the options are tried in descending priority
* (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared
- * from the cmdbatch policy so the next thing can be tried if the
+ * from the cmdobj policy so the next thing can be tried if the
* change comes around again
*/
- /* Replay the hanging command batch again */
- if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY));
- set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery);
+ /* Replay the hanging cmdobj again */
+ if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_REPLAY));
+ set_bit(KGSL_FT_REPLAY, &cmdobj->fault_recovery);
return;
}
/*
* Skip the last IB1 that was played but replay everything else.
- * Note that the last IB1 might not be in the "hung" command batch
+ * Note that the last IB1 might not be in the "hung" cmdobj
* because the CP may have caused a page-fault while it was prefetching
* the next IB1/IB2. Walk all outstanding commands and zap the
* supposedly bad IB1 wherever it lurks.
*/
- if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB));
- set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery);
+ if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPIB));
+ set_bit(KGSL_FT_SKIPIB, &cmdobj->fault_recovery);
for (i = 0; i < count; i++) {
if (replay[i] != NULL &&
- replay[i]->context->id == cmdbatch->context->id)
- cmdbatch_skip_ib(replay[i], base);
+ DRAWOBJ(replay[i])->context->id ==
+ drawobj->context->id)
+ _skip_ib(replay[i], base);
}
return;
}
- /* Skip the faulted command batch submission */
- if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPCMD));
+ /* Skip the faulted cmdobj submission */
+ if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPCMD));
- /* Skip faulting command batch */
- cmdbatch_skip_cmd(cmdbatch, replay, count);
+ /* Skip faulting cmdobj */
+ _skip_cmd(cmdobj, replay, count);
return;
}
- if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch,
+ if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj,
BIT(KGSL_FT_SKIPFRAME));
- set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery);
+ set_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_recovery);
/*
- * Skip all the pending command batches for this context until
+ * Skip all the pending cmdobjs for this context until
* the EOF frame is seen
*/
- cmdbatch_skip_frame(cmdbatch, replay, count);
+ _skip_frame(cmdobj, replay, count);
return;
}
/* If we get here then all the policies failed */
- pr_context(device, cmdbatch->context, "gpu %s ctx %d ts %d\n",
- state, cmdbatch->context->id, cmdbatch->timestamp);
+ pr_context(device, drawobj->context, "gpu %s ctx %d ts %d\n",
+ state, drawobj->context->id, drawobj->timestamp);
/* Mark the context as failed */
- mark_guilty_context(device, cmdbatch->context->id);
+ mark_guilty_context(device, drawobj->context->id);
/* Invalidate the context */
- adreno_drawctxt_invalidate(device, cmdbatch->context);
+ adreno_drawctxt_invalidate(device, drawobj->context);
}
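
The policy walk above is a descending cascade of test_and_clear_bit() calls: each fault consumes the strongest remaining option (REPLAY, then SKIPIB, SKIPCMD, SKIPFRAME), so a repeat offender falls through to a harsher response until the context is finally invalidated. A userspace model of the cascade:

    #include <stdio.h>

    enum { FT_REPLAY, FT_SKIPIB, FT_SKIPCMD, FT_SKIPFRAME };

    static int test_and_clear(unsigned long *mask, int bit)
    {
        int was_set = (int)((*mask >> bit) & 1);

        *mask &= ~(1UL << bit);
        return was_set;
    }

    static const char *handle_fault(unsigned long *policy)
    {
        if (test_and_clear(policy, FT_REPLAY))    return "replay";
        if (test_and_clear(policy, FT_SKIPIB))    return "skip ib";
        if (test_and_clear(policy, FT_SKIPCMD))   return "skip cmd";
        if (test_and_clear(policy, FT_SKIPFRAME)) return "skip frame";
        return "invalidate context";
    }

    int main(void)
    {
        unsigned long policy = 0xF;   /* all recovery options enabled */
        int i;

        for (i = 0; i < 5; i++)       /* five consecutive faults */
            puts(handle_fault(&policy));
        return 0;
    }
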
/**
@@ -1605,12 +1764,12 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
* @base: The IB1 base during the fault
*/
static void recover_dispatch_q(struct kgsl_device *device,
- struct adreno_dispatcher_cmdqueue *dispatch_q,
+ struct adreno_dispatcher_drawqueue *dispatch_q,
int fault,
unsigned int base)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_cmdbatch **replay = NULL;
+ struct kgsl_drawobj_cmd **replay;
unsigned int ptr;
int first = 0;
int count = 0;
@@ -1624,14 +1783,16 @@ static void recover_dispatch_q(struct kgsl_device *device,
/* Recovery failed - mark everybody on this q guilty */
while (ptr != dispatch_q->tail) {
- struct kgsl_context *context =
- dispatch_q->cmd_q[ptr]->context;
+ struct kgsl_drawobj_cmd *cmdobj =
+ dispatch_q->cmd_q[ptr];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- mark_guilty_context(device, context->id);
- adreno_drawctxt_invalidate(device, context);
- kgsl_cmdbatch_destroy(dispatch_q->cmd_q[ptr]);
+ mark_guilty_context(device, drawobj->context->id);
+ adreno_drawctxt_invalidate(device, drawobj->context);
+ kgsl_drawobj_destroy(drawobj);
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ ptr = DRAWQUEUE_NEXT(ptr,
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE);
}
/*
@@ -1643,22 +1804,22 @@ static void recover_dispatch_q(struct kgsl_device *device,
goto replay;
}
- /* Copy the inflight command batches into the temporary storage */
+ /* Copy the inflight cmdobjs into the temporary storage */
ptr = dispatch_q->head;
while (ptr != dispatch_q->tail) {
replay[count++] = dispatch_q->cmd_q[ptr];
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ ptr = DRAWQUEUE_NEXT(ptr, ADRENO_DISPATCH_DRAWQUEUE_SIZE);
}
if (fault && count)
- process_cmdbatch_fault(device, replay,
+ process_cmdobj_fault(device, replay,
count, base, fault);
replay:
dispatch_q->inflight = 0;
dispatch_q->head = dispatch_q->tail = 0;
- /* Remove any pending command batches that have been invalidated */
- remove_invalidated_cmdbatches(device, replay, count);
+ /* Remove any pending cmdobjs that have been invalidated */
+ remove_invalidated_cmdobjs(device, replay, count);
/* Replay the pending command buffers */
for (i = 0; i < count; i++) {
@@ -1674,16 +1835,16 @@ replay:
*/
if (first == 0) {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
first = 1;
}
/*
- * Force each command batch to wait for idle - this avoids weird
+ * Force each cmdobj to wait for idle - this avoids weird
* CP parse issues
*/
- set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv);
+ set_bit(CMDOBJ_WFI, &replay[i]->priv);
ret = sendcmd(adreno_dev, replay[i]);
@@ -1693,15 +1854,18 @@ replay:
*/
if (ret) {
- pr_context(device, replay[i]->context,
+ pr_context(device, replay[i]->base.context,
"gpu reset failed ctx %d ts %d\n",
- replay[i]->context->id, replay[i]->timestamp);
+ replay[i]->base.context->id,
+ replay[i]->base.timestamp);
/* Mark this context as guilty (failed recovery) */
- mark_guilty_context(device, replay[i]->context->id);
+ mark_guilty_context(device,
+ replay[i]->base.context->id);
- adreno_drawctxt_invalidate(device, replay[i]->context);
- remove_invalidated_cmdbatches(device, &replay[i],
+ adreno_drawctxt_invalidate(device,
+ replay[i]->base.context);
+ remove_invalidated_cmdobjs(device, &replay[i],
count - i);
}
}
@@ -1713,36 +1877,38 @@ replay:
}
static void do_header_and_snapshot(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
{
- /* Always dump the snapshot on a non-cmdbatch failure */
- if (cmdbatch == NULL) {
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+
+ /* Always dump the snapshot on a non-drawobj failure */
+ if (cmdobj == NULL) {
adreno_fault_header(device, rb, NULL);
kgsl_device_snapshot(device, NULL);
return;
}
/* Skip everything if the PMDUMP flag is set */
- if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy))
+ if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy))
return;
/* Print the fault header */
- adreno_fault_header(device, rb, cmdbatch);
+ adreno_fault_header(device, rb, cmdobj);
- if (!(cmdbatch->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
- kgsl_device_snapshot(device, cmdbatch->context);
+ if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
+ kgsl_device_snapshot(device, drawobj->context);
}
static int dispatcher_do_fault(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_dispatcher_cmdqueue *dispatch_q = NULL, *dispatch_q_temp;
+ struct adreno_dispatcher_drawqueue *dispatch_q = NULL, *dispatch_q_temp;
struct adreno_ringbuffer *rb;
struct adreno_ringbuffer *hung_rb = NULL;
unsigned int reg;
uint64_t base;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj_cmd *cmdobj = NULL;
int ret, i;
int fault;
int halt;
@@ -1792,10 +1958,10 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
}
/*
- * retire cmdbatches from all the dispatch_q's before starting recovery
+ * retire cmdobj's from all the dispatch_q's before starting recovery
*/
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- adreno_dispatch_retire_cmdqueue(adreno_dev,
+ adreno_dispatch_retire_drawqueue(adreno_dev,
&(rb->dispatch_q));
/* Select the active dispatch_q */
if (base == rb->buffer_desc.gpuaddr) {
@@ -1814,15 +1980,15 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
}
}
- if (!adreno_cmdqueue_is_empty(dispatch_q)) {
- cmdbatch = dispatch_q->cmd_q[dispatch_q->head];
- trace_adreno_cmdbatch_fault(cmdbatch, fault);
+ if (dispatch_q && !adreno_drawqueue_is_empty(dispatch_q)) {
+ cmdobj = dispatch_q->cmd_q[dispatch_q->head];
+ trace_adreno_cmdbatch_fault(cmdobj, fault);
}
adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
ADRENO_REG_CP_IB1_BASE_HI, &base);
- do_header_and_snapshot(device, hung_rb, cmdbatch);
+ do_header_and_snapshot(device, hung_rb, cmdobj);
/* Terminate the stalled transaction and resume the IOMMU */
if (fault & ADRENO_IOMMU_PAGE_FAULT)
@@ -1876,23 +2042,24 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
return 1;
}
-static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
+static inline int drawobj_consumed(struct kgsl_drawobj *drawobj,
unsigned int consumed, unsigned int retired)
{
- return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
- (timestamp_cmp(retired, cmdbatch->timestamp) < 0));
+ return ((timestamp_cmp(drawobj->timestamp, consumed) >= 0) &&
+ (timestamp_cmp(retired, drawobj->timestamp) < 0));
}
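
[Editor's note: the window test above depends on a wrap-safe timestamp comparison. A runnable sketch, assuming timestamp_cmp() semantics of "positive if the first argument is newer, modulo 32-bit wrap" (the real kgsl helper is more elaborate):]

    #include <stdio.h>

    /* wrap-safe comparison in the spirit of kgsl's timestamp_cmp() */
    static int ts_cmp(unsigned int a, unsigned int b)
    {
        return (int)(a - b);
    }

    /* "consumed but not retired": ts lies inside the half-open window */
    static int consumed_not_retired(unsigned int ts, unsigned int consumed,
                                    unsigned int retired)
    {
        return ts_cmp(ts, consumed) >= 0 && ts_cmp(retired, ts) < 0;
    }

    int main(void)
    {
        /* works across the 32-bit wrap: prints 1 */
        printf("%d\n", consumed_not_retired(0xffffffffu,
                                            0xfffffffeu, 0xfffffffdu));
        return 0;
    }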
static void _print_recovery(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
static struct {
unsigned int mask;
const char *str;
} flags[] = { ADRENO_FT_TYPES };
- int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG);
+ int i, nr = find_first_bit(&cmdobj->fault_recovery, BITS_PER_LONG);
char *result = "unknown";
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
for (i = 0; i < ARRAY_SIZE(flags); i++) {
if (flags[i].mask == BIT(nr)) {
@@ -1901,40 +2068,41 @@ static void _print_recovery(struct kgsl_device *device,
}
}
- pr_context(device, cmdbatch->context,
+ pr_context(device, drawobj->context,
"gpu %s ctx %d ts %d policy %lX\n",
- result, cmdbatch->context->id, cmdbatch->timestamp,
- cmdbatch->fault_recovery);
+ result, drawobj->context->id, drawobj->timestamp,
+ cmdobj->fault_recovery);
}
-static void cmdbatch_profile_ticks(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch, uint64_t *start, uint64_t *retire)
+static void cmdobj_profile_ticks(struct adreno_device *adreno_dev,
+ struct kgsl_drawobj_cmd *cmdobj, uint64_t *start, uint64_t *retire)
{
- void *ptr = adreno_dev->cmdbatch_profile_buffer.hostptr;
- struct adreno_cmdbatch_profile_entry *entry;
+ void *ptr = adreno_dev->profile_buffer.hostptr;
+ struct adreno_drawobj_profile_entry *entry;
- entry = (struct adreno_cmdbatch_profile_entry *)
- (ptr + (cmdbatch->profile_index * sizeof(*entry)));
+ entry = (struct adreno_drawobj_profile_entry *)
+ (ptr + (cmdobj->profile_index * sizeof(*entry)));
rmb();
*start = entry->started;
*retire = entry->retired;
}
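
[Editor's note: the helper above indexes a shared buffer that the GPU writes and the CPU reads, with rmb() ordering the reads against the device writes. A hedged userspace analogue, with an acquire fence standing in for the kernel barrier and hypothetical field names:]

    #include <stdint.h>

    struct profile_entry {
        uint64_t started;   /* GPU deposits the alwayson counter here */
        uint64_t retired;
    };

    static void profile_ticks(const void *hostptr, unsigned int index,
                              uint64_t *start, uint64_t *retire)
    {
        const struct profile_entry *entry =
            (const struct profile_entry *)hostptr + index;

        __atomic_thread_fence(__ATOMIC_ACQUIRE); /* stand-in for rmb() */
        *start = entry->started;
        *retire = entry->retired;
    }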
-static void retire_cmdbatch(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
+static void retire_cmdobj(struct adreno_device *adreno_dev,
+ struct kgsl_drawobj_cmd *cmdobj)
{
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
uint64_t start = 0, end = 0;
- if (cmdbatch->fault_recovery != 0) {
- set_bit(ADRENO_CONTEXT_FAULT, &cmdbatch->context->priv);
- _print_recovery(KGSL_DEVICE(adreno_dev), cmdbatch);
+ if (cmdobj->fault_recovery != 0) {
+ set_bit(ADRENO_CONTEXT_FAULT, &drawobj->context->priv);
+ _print_recovery(KGSL_DEVICE(adreno_dev), cmdobj);
}
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
- cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end);
+ if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv))
+ cmdobj_profile_ticks(adreno_dev, cmdobj, &start, &end);
/*
* For A3xx we still get the rptr from the CP_RB_RPTR instead of
@@ -1942,48 +2110,49 @@ static void retire_cmdbatch(struct adreno_device *adreno_dev,
* So avoid reading GPU register directly for A3xx.
*/
if (adreno_is_a3xx(adreno_dev))
- trace_adreno_cmdbatch_retired(cmdbatch,
- (int) dispatcher->inflight, start, end,
- ADRENO_CMDBATCH_RB(cmdbatch), 0);
+ trace_adreno_cmdbatch_retired(drawobj,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_DRAWOBJ_RB(drawobj), 0, cmdobj->fault_recovery);
else
- trace_adreno_cmdbatch_retired(cmdbatch,
- (int) dispatcher->inflight, start, end,
- ADRENO_CMDBATCH_RB(cmdbatch),
- adreno_get_rptr(drawctxt->rb));
+ trace_adreno_cmdbatch_retired(drawobj,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_DRAWOBJ_RB(drawobj),
+ adreno_get_rptr(drawctxt->rb), cmdobj->fault_recovery);
drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
- end - cmdbatch->submit_ticks;
+ end - cmdobj->submit_ticks;
drawctxt->ticks_index = (drawctxt->ticks_index + 1) %
SUBMIT_RETIRE_TICKS_SIZE;
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
}
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
int count = 0;
- while (!adreno_cmdqueue_is_empty(cmdqueue)) {
- struct kgsl_cmdbatch *cmdbatch =
- cmdqueue->cmd_q[cmdqueue->head];
+ while (!adreno_drawqueue_is_empty(drawqueue)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ drawqueue->cmd_q[drawqueue->head];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- if (!kgsl_check_timestamp(device, cmdbatch->context,
- cmdbatch->timestamp))
+ if (!kgsl_check_timestamp(device, drawobj->context,
+ drawobj->timestamp))
break;
- retire_cmdbatch(adreno_dev, cmdbatch);
+ retire_cmdobj(adreno_dev, cmdobj);
dispatcher->inflight--;
- cmdqueue->inflight--;
+ drawqueue->inflight--;
- cmdqueue->cmd_q[cmdqueue->head] = NULL;
+ drawqueue->cmd_q[drawqueue->head] = NULL;
- cmdqueue->head = CMDQUEUE_NEXT(cmdqueue->head,
- ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ drawqueue->head = DRAWQUEUE_NEXT(drawqueue->head,
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE);
count++;
}
@@ -1992,13 +2161,14 @@ static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
}
static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_cmdbatch *cmdbatch = cmdqueue->cmd_q[cmdqueue->head];
+ struct kgsl_drawobj *drawobj =
+ DRAWOBJ(drawqueue->cmd_q[drawqueue->head]);
/* Don't timeout if the timer hasn't expired yet (duh) */
- if (time_is_after_jiffies(cmdqueue->expires))
+ if (time_is_after_jiffies(drawqueue->expires))
return;
/* Don't timeout if the IB timeout is disabled globally */
@@ -2006,30 +2176,30 @@ static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
return;
/* Don't time out if the context has disabled it */
- if (cmdbatch->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ if (drawobj->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
return;
- pr_context(device, cmdbatch->context, "gpu timeout ctx %d ts %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ pr_context(device, drawobj->context, "gpu timeout ctx %d ts %d\n",
+ drawobj->context->id, drawobj->timestamp);
adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
}
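
[Editor's note: the timeout check above keys off a precomputed expiry that is refreshed whenever the queue makes progress. A small sketch of the same deadline idiom, with a monotonic clock standing in for jiffies:]

    #include <stdbool.h>
    #include <time.h>

    /* a fault is raised only once the refreshed deadline has passed */
    static bool deadline_expired(const struct timespec *expires)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now); /* kernel: jiffies */
        if (now.tv_sec != expires->tv_sec)
            return now.tv_sec > expires->tv_sec;
        return now.tv_nsec >= expires->tv_nsec;
    }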
-static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- int count = adreno_dispatch_retire_cmdqueue(adreno_dev, cmdqueue);
+ int count = adreno_dispatch_retire_drawqueue(adreno_dev, drawqueue);
/* Nothing to do if there are no pending commands */
- if (adreno_cmdqueue_is_empty(cmdqueue))
+ if (adreno_drawqueue_is_empty(drawqueue))
return count;
- /* Don't update the cmdqueue timeout if we are about to preempt out */
+ /* Don't update the drawqueue timeout if we are about to preempt out */
if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
return count;
- /* Don't update the cmdqueue timeout if it isn't active */
- if (!cmdqueue_is_current(cmdqueue))
+ /* Don't update the drawqueue timeout if it isn't active */
+ if (!drawqueue_is_current(drawqueue))
return count;
/*
@@ -2038,17 +2208,17 @@ static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
*/
if (count) {
- cmdqueue->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ drawqueue->expires = jiffies +
+ msecs_to_jiffies(adreno_drawobj_timeout);
return count;
}
/*
* If we get here then 1) the ringbuffer is current and 2) we haven't
* retired anything. Check to see if the timeout is valid for the
- * current cmdbatch and fault if it has expired
+ * current drawobj and fault if it has expired
*/
- _adreno_dispatch_check_timeout(adreno_dev, cmdqueue);
+ _adreno_dispatch_check_timeout(adreno_dev, drawqueue);
return 0;
}
@@ -2067,11 +2237,11 @@ static void _dispatcher_update_timers(struct adreno_device *adreno_dev)
/* Check to see if we need to update the command timer */
if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(adreno_dev->cur_rb);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(adreno_dev->cur_rb);
- if (!adreno_cmdqueue_is_empty(cmdqueue))
- mod_timer(&dispatcher->timer, cmdqueue->expires);
+ if (!adreno_drawqueue_is_empty(drawqueue))
+ mod_timer(&dispatcher->timer, drawqueue->expires);
}
}
@@ -2111,14 +2281,14 @@ static void adreno_dispatcher_work(struct work_struct *work)
/*
* As long as there are inflight commands, process retired commands from
- * all cmdqueues
+ * all drawqueues
*/
for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(&adreno_dev->ringbuffers[i]);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(&adreno_dev->ringbuffers[i]);
- count += adreno_dispatch_process_cmdqueue(adreno_dev,
- cmdqueue);
+ count += adreno_dispatch_process_drawqueue(adreno_dev,
+ drawqueue);
if (dispatcher->inflight == 0)
break;
}
@@ -2178,7 +2348,7 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
}
/*
- * This is called on a regular basis while command batches are inflight. Fault
+ * This is called on a regular basis while cmdobj's are inflight. Fault
* detection registers are read and compared to the existing values - if they
* changed then the GPU is still running. If they are the same between
* subsequent calls then the GPU may have faulted
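
[Editor's note: the heuristic described in the comment above reduces to "sample the progress registers each tick; identical back-to-back samples suggest a hang". A runnable sketch of exactly that comparison:]

    #include <string.h>
    #include <stdbool.h>

    #define NUM_FAULT_REGS 8

    static unsigned int prev_regs[NUM_FAULT_REGS];

    /* returns true when nothing moved since the previous sample */
    static bool gpu_may_have_faulted(const unsigned int *cur)
    {
        bool stalled = memcmp(prev_regs, cur, sizeof(prev_regs)) == 0;

        memcpy(prev_regs, cur, sizeof(prev_regs));
        return stalled;
    }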
@@ -2230,7 +2400,7 @@ static void adreno_dispatcher_timer(unsigned long data)
*/
void adreno_dispatcher_start(struct kgsl_device *device)
{
- complete_all(&device->cmdbatch_gate);
+ complete_all(&device->halt_gate);
/* Schedule the work loop to get things going */
adreno_dispatcher_schedule(device);
@@ -2267,13 +2437,13 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev)
del_timer_sync(&dispatcher->fault_timer);
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- struct adreno_dispatcher_cmdqueue *dispatch_q =
+ struct adreno_dispatcher_drawqueue *dispatch_q =
&(rb->dispatch_q);
- while (!adreno_cmdqueue_is_empty(dispatch_q)) {
- kgsl_cmdbatch_destroy(
- dispatch_q->cmd_q[dispatch_q->head]);
+ while (!adreno_drawqueue_is_empty(dispatch_q)) {
+ kgsl_drawobj_destroy(
+ DRAWOBJ(dispatch_q->cmd_q[dispatch_q->head]));
dispatch_q->head = (dispatch_q->head + 1)
- % ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ % ADRENO_DISPATCH_DRAWQUEUE_SIZE;
}
}
@@ -2332,23 +2502,23 @@ static ssize_t _show_uint(struct adreno_dispatcher *dispatcher,
*((unsigned int *) attr->value));
}
-static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE,
+static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_DRAWQUEUE_SIZE,
_dispatcher_q_inflight_hi);
static DISPATCHER_UINT_ATTR(inflight_low_latency, 0644,
- ADRENO_DISPATCH_CMDQUEUE_SIZE, _dispatcher_q_inflight_lo);
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_lo);
/*
* Our code that "puts back" a command from the context is much cleaner
* if we are sure that there will always be enough room in the
* ringbuffer so restrict the maximum size of the context queue to
- * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1
+ * ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1
*/
-static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644,
- ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size);
+static DISPATCHER_UINT_ATTR(context_drawqueue_size, 0644,
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1, _context_drawqueue_size);
static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0,
- _context_cmdbatch_burst);
-static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0,
- adreno_cmdbatch_timeout);
+ _context_drawobj_burst);
+static DISPATCHER_UINT_ATTR(drawobj_timeout, 0644, 0,
+ adreno_drawobj_timeout);
static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
_fault_timer_interval);
@@ -2366,9 +2536,9 @@ static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0,
static struct attribute *dispatcher_attrs[] = {
&dispatcher_attr_inflight.attr,
&dispatcher_attr_inflight_low_latency.attr,
- &dispatcher_attr_context_cmdqueue_size.attr,
+ &dispatcher_attr_context_drawqueue_size.attr,
&dispatcher_attr_context_burst_count.attr,
- &dispatcher_attr_cmdbatch_timeout.attr,
+ &dispatcher_attr_drawobj_timeout.attr,
&dispatcher_attr_context_queue_wait.attr,
&dispatcher_attr_fault_detect_interval.attr,
&dispatcher_attr_fault_throttle_time.attr,
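
[Editor's note: the DISPATCHER_UINT_ATTR knobs above surface as plain sysfs files under the kgsl device node. A userspace read sketch; the exact path is an assumption and varies by device:]

    #include <stdio.h>

    int main(void)
    {
        char buf[32];
        /* hypothetical path to the renamed timeout knob */
        FILE *f = fopen("/sys/class/kgsl/kgsl-3d0/dispatch/drawobj_timeout", "r");

        if (f && fgets(buf, sizeof(buf), f))
            printf("drawobj_timeout=%s", buf);
        if (f)
            fclose(f);
        return 0;
    }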
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 699c3e4adb27..cb9106fedc82 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -15,7 +15,7 @@
#define ____ADRENO_DISPATCHER_H
extern unsigned int adreno_disp_preempt_fair_sched;
-extern unsigned int adreno_cmdbatch_timeout;
+extern unsigned int adreno_drawobj_timeout;
extern unsigned int adreno_dispatch_starvation_time;
extern unsigned int adreno_dispatch_time_slice;
@@ -44,21 +44,21 @@ enum adreno_dispatcher_starve_timer_states {
* sizes that can be chosen at runtime
*/
-#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
+#define ADRENO_DISPATCH_DRAWQUEUE_SIZE 128
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/**
- * struct adreno_dispatcher_cmdqueue - List of commands for a RB level
- * @cmd_q: List of command batches submitted to dispatcher
+ * struct adreno_dispatcher_drawqueue - List of commands for a RB level
+ * @cmd_q: List of command obj's submitted to dispatcher
* @inflight: Number of commands inflight in this q
* @head: Head pointer to the q
* @tail: Queues tail pointer
- * @active_context_count: Number of active contexts seen in this rb cmdqueue
- * @expires: The jiffies value at which this cmdqueue has run too long
+ * @active_context_count: Number of active contexts seen in this rb drawqueue
+ * @expires: The jiffies value at which this drawqueue has run too long
*/
-struct adreno_dispatcher_cmdqueue {
- struct kgsl_cmdbatch *cmd_q[ADRENO_DISPATCH_CMDQUEUE_SIZE];
+struct adreno_dispatcher_drawqueue {
+ struct kgsl_drawobj_cmd *cmd_q[ADRENO_DISPATCH_DRAWQUEUE_SIZE];
unsigned int inflight;
unsigned int head;
unsigned int tail;
@@ -70,10 +70,10 @@ struct adreno_dispatcher_cmdqueue {
* struct adreno_dispatcher - container for the adreno GPU dispatcher
* @mutex: Mutex to protect the structure
* @state: Current state of the dispatcher (active or paused)
- * @timer: Timer to monitor the progress of the command batches
- * @inflight: Number of command batch operations pending in the ringbuffer
+ * @timer: Timer to monitor the progress of the drawobjs
+ * @inflight: Number of drawobj operations pending in the ringbuffer
* @fault: Non-zero if a fault was detected.
- * @pending: Priority list of contexts waiting to submit command batches
+ * @pending: Priority list of contexts waiting to submit drawobjs
* @plist_lock: Spin lock to protect the pending queue
* @work: work_struct to put the dispatcher in a work queue
* @kobj: kobject for the dispatcher directory in the device sysfs node
@@ -109,9 +109,9 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev);
void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev);
void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp);
+int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp);
void adreno_dispatcher_schedule(struct kgsl_device *device);
void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
@@ -120,11 +120,11 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev,
int bit);
void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q);
+ struct adreno_dispatcher_drawqueue *dispatch_q);
-static inline bool adreno_cmdqueue_is_empty(
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool adreno_drawqueue_is_empty(
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- return (cmdqueue != NULL && cmdqueue->head == cmdqueue->tail);
+ return (drawqueue != NULL && drawqueue->head == drawqueue->tail);
}
#endif /* __ADRENO_DISPATCHER_H */
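
[Editor's note: the drawqueue declared above is a fixed 128-slot ring where DRAWQUEUE_NEXT() is a modular increment and head == tail encodes "empty", which is why one slot is sacrificed and the sysfs limit earlier is ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1. A self-contained sketch of that ring discipline (not the driver's code):]

    #include <stdbool.h>
    #include <stddef.h>

    #define QSIZE 128                       /* mirrors ADRENO_DISPATCH_DRAWQUEUE_SIZE */
    #define QNEXT(i) (((i) + 1) % QSIZE)    /* mirrors DRAWQUEUE_NEXT() */

    struct ring {
        void *slot[QSIZE];
        unsigned int head, tail;
    };

    static bool ring_empty(const struct ring *r)
    {
        return r->head == r->tail;
    }

    /* head == tail means empty, so at most QSIZE - 1 items fit */
    static bool ring_push(struct ring *r, void *item)
    {
        if (QNEXT(r->tail) == r->head)
            return false; /* full */
        r->slot[r->tail] = item;
        r->tail = QNEXT(r->tail);
        return true;
    }

    static void *ring_pop(struct ring *r)
    {
        void *item = ring_empty(r) ? NULL : r->slot[r->head];

        if (item)
            r->head = QNEXT(r->head);
        return item;
    }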
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index fb95f6108fb8..3a110ed221a8 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -59,14 +59,14 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire);
/*
- * We may have cmdbatch timer running, which also uses same
+ * We may have the kgsl sync obj timer running, which also uses the same
* lock, take a lock with software interrupt disabled (bh)
* to avoid spin lock recursion.
*
* Use Spin trylock because dispatcher can acquire drawctxt->lock
* if context is pending and the fence it is waiting on just got
* signalled. Dispatcher acquires drawctxt->lock and tries to
- * delete the cmdbatch timer using del_timer_sync().
+ * delete the sync obj timer using del_timer_sync().
* del_timer_sync() waits till timer and its pending handlers
* are deleted. But if the timer expires at the same time,
* timer handler could be waiting on drawctxt->lock leading to a
@@ -83,23 +83,27 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
context->id, queue, drawctxt->submitted_timestamp,
start, retire);
- if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
+ struct kgsl_drawobj *drawobj =
+ drawctxt->drawqueue[drawctxt->drawqueue_head];
- if (test_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv)) {
+ if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) {
dev_err(device->dev,
" possible deadlock. Context %d might be blocked for itself\n",
context->id);
goto stats;
}
- if (kgsl_cmdbatch_events_pending(cmdbatch)) {
- dev_err(device->dev,
- " context[%d] (ts=%d) Active sync points:\n",
- context->id, cmdbatch->timestamp);
+ if (drawobj->type == SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
+
+ if (kgsl_drawobj_events_pending(syncobj)) {
+ dev_err(device->dev,
+ " context[%d] (ts=%d) Active sync points:\n",
+ context->id, drawobj->timestamp);
- kgsl_dump_syncpoints(device, cmdbatch);
+ kgsl_dump_syncpoints(device, syncobj);
+ }
}
}
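
[Editor's note: the lock-recursion comment above boils down to "a diagnostic dump must never block on a lock a timer handler may hold". A userspace analogue of that trylock discipline, with pthread_mutex_trylock standing in for spin_trylock_bh():]

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

    static void dump_context(int id)
    {
        if (pthread_mutex_trylock(&ctx_lock) != 0) {
            printf("ctx %d busy, skipping dump\n", id);
            return; /* never spin here: avoids lock recursion */
        }
        printf("ctx %d state ...\n", id);
        pthread_mutex_unlock(&ctx_lock);
    }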
@@ -229,19 +233,19 @@ done:
return ret;
}
-static int drawctxt_detach_cmdbatches(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch **list)
+static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt,
+ struct kgsl_drawobj **list)
{
int count = 0;
- while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
+ struct kgsl_drawobj *drawobj =
+ drawctxt->drawqueue[drawctxt->drawqueue_head];
- drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
+ drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) %
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE;
- list[count++] = cmdbatch;
+ list[count++] = drawobj;
}
return count;
@@ -259,7 +263,7 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
struct kgsl_context *context)
{
struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
int i, count;
trace_adreno_drawctxt_invalidate(drawctxt);
@@ -280,13 +284,13 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
drawctxt->timestamp);
/* Get rid of commands still waiting in the queue */
- count = drawctxt_detach_cmdbatches(drawctxt, list);
+ count = drawctxt_detach_drawobjs(drawctxt, list);
spin_unlock(&drawctxt->lock);
for (i = 0; i < count; i++) {
kgsl_cancel_events_timestamp(device, &context->events,
list[i]->timestamp);
- kgsl_cmdbatch_destroy(list[i]);
+ kgsl_drawobj_destroy(list[i]);
}
/* Make sure all pending events are processed or cancelled */
@@ -453,7 +457,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
struct adreno_context *drawctxt;
struct adreno_ringbuffer *rb;
int ret, count, i;
- struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
if (context == NULL)
return;
@@ -468,7 +472,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
spin_unlock(&adreno_dev->active_list_lock);
spin_lock(&drawctxt->lock);
- count = drawctxt_detach_cmdbatches(drawctxt, list);
+ count = drawctxt_detach_drawobjs(drawctxt, list);
spin_unlock(&drawctxt->lock);
for (i = 0; i < count; i++) {
@@ -478,7 +482,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
* detached status here.
*/
adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]);
- kgsl_cmdbatch_destroy(list[i]);
+ kgsl_drawobj_destroy(list[i]);
}
/*
@@ -499,13 +503,20 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
/*
* If the wait for global fails due to timeout then nothing after this
- * point is likely to work very well - BUG_ON() so we can take advantage
- * of the debug tools to figure out what the h - e - double hockey
- * sticks happened. If EAGAIN error is returned then recovery will kick
- * in and there will be no more commands in the RB pipe from this
- * context which is waht we are waiting for, so ignore -EAGAIN error
+ * point is likely to work very well - Get GPU snapshot and BUG_ON()
+ * so we can take advantage of the debug tools to figure out what the
+ * h - e - double hockey sticks happened. If EAGAIN error is returned
+ * then recovery will kick in and there will be no more commands in the
+ * RB pipe from this context which is what we are waiting for, so ignore
+ * -EAGAIN error
*/
- BUG_ON(ret && ret != -EAGAIN);
+ if (ret && ret != -EAGAIN) {
+ KGSL_DRV_ERR(device, "Wait for global ts=%d type=%d error=%d\n",
+ drawctxt->internal_timestamp,
+ drawctxt->type, ret);
+ device->force_panic = 1;
+ kgsl_device_snapshot(device, context);
+ }
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 5ea911954991..0578f16ae9e1 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -18,7 +18,7 @@ struct adreno_context_type {
const char *str;
};
-#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128
+#define ADRENO_CONTEXT_DRAWQUEUE_SIZE 128
#define SUBMIT_RETIRE_TICKS_SIZE 7
struct kgsl_device;
@@ -32,20 +32,21 @@ struct kgsl_context;
* @internal_timestamp: Global timestamp of the last issued command
* NOTE: guarded by device->mutex, not drawctxt->mutex!
* @type: Context type (GL, CL, RS)
- * @mutex: Mutex to protect the cmdqueue
- * @cmdqueue: Queue of command batches waiting to be dispatched for this context
- * @cmdqueue_head: Head of the cmdqueue queue
- * @cmdqueue_tail: Tail of the cmdqueue queue
+ * @mutex: Mutex to protect the drawqueue
+ * @drawqueue: Queue of drawobjs waiting to be dispatched for this
+ * context
+ * @drawqueue_head: Head of the drawqueue queue
+ * @drawqueue_tail: Tail of the drawqueue queue
* @pending: Priority list node for the dispatcher list of pending contexts
* @wq: Workqueue structure for contexts to sleep pending room in the queue
* @waiting: Workqueue structure for contexts waiting for a timestamp or event
- * @queued: Number of commands queued in the cmdqueue
- * @fault_policy: GFT fault policy set in cmdbatch_skip_cmd();
+ * @queued: Number of commands queued in the drawqueue
+ * @fault_policy: GFT fault policy set in _skip_cmd();
* @debug_root: debugfs entry for this context.
* @queued_timestamp: The last timestamp that was queued on this context
* @rb: The ringbuffer in which this context submits commands.
* @submitted_timestamp: The last timestamp that was submitted for this context
- * @submit_retire_ticks: Array to hold cmdbatch execution times from submit
+ * @submit_retire_ticks: Array to hold command obj execution times from submit
* to retire
* @ticks_index: The index into submit_retire_ticks[] where the new delta will
* be written.
@@ -60,9 +61,9 @@ struct adreno_context {
spinlock_t lock;
/* Dispatcher */
- struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE];
- unsigned int cmdqueue_head;
- unsigned int cmdqueue_tail;
+ struct kgsl_drawobj *drawqueue[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
+ unsigned int drawqueue_head;
+ unsigned int drawqueue_tail;
struct plist_node pending;
wait_queue_head_t wq;
@@ -92,8 +93,9 @@ struct adreno_context {
* @ADRENO_CONTEXT_SKIP_EOF - Context skip IBs until the next end of frame
* marker.
* @ADRENO_CONTEXT_FORCE_PREAMBLE - Force the preamble for the next submission.
- * @ADRENO_CONTEXT_SKIP_CMD - Context's command batch is skipped during
+ * @ADRENO_CONTEXT_SKIP_CMD - Context's drawobjs are skipped during
fault tolerance.
+ * @ADRENO_CONTEXT_FENCE_LOG - Dump fences on this context.
*/
enum adreno_context_priv {
ADRENO_CONTEXT_FAULT = KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC,
@@ -102,6 +104,7 @@ enum adreno_context_priv {
ADRENO_CONTEXT_SKIP_EOF,
ADRENO_CONTEXT_FORCE_PREAMBLE,
ADRENO_CONTEXT_SKIP_CMD,
+ ADRENO_CONTEXT_FENCE_LOG,
};
/* Flags for adreno_drawctxt_switch() */
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 8e354d71a291..42f8119ad8b4 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -598,28 +598,6 @@ int adreno_perfcounter_put(struct adreno_device *adreno_dev,
return -EINVAL;
}
-static int _perfcounter_enable_pwr(struct adreno_device *adreno_dev,
- unsigned int counter)
-{
- /* PWR counters enabled by default on A3XX/A4XX so nothing to do */
- if (adreno_is_a3xx(adreno_dev) || adreno_is_a4xx(adreno_dev))
- return 0;
-
- /*
- * On 5XX we have to emulate the PWR counters which are physically
- * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
- * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
- * to take away too many of the generic RBBM counters.
- */
-
- if (counter == 0)
- return -EINVAL;
-
- kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
-
- return 0;
-}
-
static void _perfcounter_enable_vbif(struct adreno_device *adreno_dev,
struct adreno_perfcounters *counters, unsigned int counter,
unsigned int countable)
@@ -771,6 +749,7 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
unsigned int group, unsigned int counter, unsigned int countable)
{
struct adreno_perfcounters *counters = ADRENO_PERFCOUNTERS(adreno_dev);
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
if (counters == NULL)
return -EINVAL;
@@ -786,7 +765,9 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
/* alwayson counter is global, so init value is 0 */
break;
case KGSL_PERFCOUNTER_GROUP_PWR:
- return _perfcounter_enable_pwr(adreno_dev, counter);
+ if (gpudev->enable_pwr_counters)
+ return gpudev->enable_pwr_counters(adreno_dev, counter);
+ return 0;
case KGSL_PERFCOUNTER_GROUP_VBIF:
if (countable > VBIF2_PERF_CNT_SEL_MASK)
return -EINVAL;
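
[Editor's note: the perfcounter hunk above replaces a hardcoded A5xx PWR emulation with an optional per-generation hook, where an absent hook means "nothing to do". A sketch of that dispatch shape under simplified, hypothetical names:]

    #include <stdio.h>

    struct gpudev_ops {
        int (*enable_pwr_counters)(unsigned int counter);
    };

    static int a5xx_enable_pwr(unsigned int counter)
    {
        if (counter == 0)
            return -1; /* PWR:0 is not emulated */
        printf("program an RBBM countable as the PWR substitute\n");
        return 0;
    }

    static int enable_pwr(const struct gpudev_ops *ops, unsigned int counter)
    {
        if (ops->enable_pwr_counters)
            return ops->enable_pwr_counters(counter);
        return 0; /* e.g. A3xx/A4xx: counters are enabled by default */
    }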
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 07ef09034d7c..fc0602a60ac1 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -671,96 +671,17 @@ adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb,
sizedwords, 0, NULL);
}
-/**
- * _ringbuffer_verify_ib() - Check if an IB's size is within a permitted limit
- * @device: The kgsl device pointer
- * @ibdesc: Pointer to the IB descriptor
- */
-static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_memobj_node *ib)
-{
- struct kgsl_device *device = dev_priv->device;
- struct kgsl_process_private *private = dev_priv->process_priv;
-
- /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */
- if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) {
- pr_context(device, context, "ctxt %d invalid ib size %lld\n",
- context->id, ib->size);
- return false;
- }
-
- /* Make sure that the address is mapped */
- if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
- pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
- context->id, ib->gpuaddr);
- return false;
- }
-
- return true;
-}
-
-int
-adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
-{
- struct kgsl_device *device = dev_priv->device;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- struct kgsl_memobj_node *ib;
- int ret;
-
- if (kgsl_context_invalid(context))
- return -EDEADLK;
-
- /* Verify the IBs before they get queued */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node)
- if (_ringbuffer_verify_ib(dev_priv, context, ib) == false)
- return -EINVAL;
-
- /* wait for the suspend gate */
- wait_for_completion(&device->cmdbatch_gate);
-
- /*
- * Clear the wake on touch bit to indicate an IB has been
- * submitted since the last time we set it. But only clear
- * it when we have rendering commands.
- */
- if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)
- && !(cmdbatch->flags & KGSL_CMDBATCH_SYNC))
- device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
-
- /* A3XX does not have support for command batch profiling */
- if (adreno_is_a3xx(adreno_dev) &&
- (cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
- return -EOPNOTSUPP;
-
- /* Queue the command in the ringbuffer */
- ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
- timestamp);
-
- /*
- * Return -EPROTO if the device has faulted since the last time we
- * checked - userspace uses this to perform post-fault activities
- */
- if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
- ret = -EPROTO;
-
- return ret;
-}
-
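
[Editor's note: the removed verifier above enforced two invariants, a nonzero IB size of at most 0xFFFFF dwords and a gpuaddr mapped in the process pagetable; the commit moves those checks out of the submit path. A sketch of the size predicate it performed (size is in bytes, >> 2 converts to dwords):]

    #include <stdbool.h>
    #include <stdint.h>

    /* the CP's hard limit: an IB may span at most 0xFFFFF dwords */
    #define CP_MAX_IB_DWORDS 0xFFFFFu

    static bool ib_size_ok(uint64_t size_bytes)
    {
        return size_bytes != 0 && (size_bytes >> 2) <= CP_MAX_IB_DWORDS;
    }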
static void adreno_ringbuffer_set_constraint(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
- struct kgsl_context *context = cmdbatch->context;
+ struct kgsl_context *context = drawobj->context;
/*
* Check if the context has a constraint and constraint flags are
* set.
*/
if (context->pwr_constraint.type &&
((context->flags & KGSL_CONTEXT_PWR_CONSTRAINT) ||
- (cmdbatch->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
+ (drawobj->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
kgsl_pwrctrl_set_constraint(device, &context->pwr_constraint,
context->id);
}
@@ -792,10 +713,12 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
/* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time)
+ struct kgsl_drawobj_cmd *cmdobj,
+ struct adreno_submit_time *time)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
struct kgsl_memobj_node *ib;
unsigned int numibs = 0;
unsigned int *link;
@@ -803,25 +726,25 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_context *context;
struct adreno_context *drawctxt;
bool use_preamble = true;
- bool cmdbatch_user_profiling = false;
- bool cmdbatch_kernel_profiling = false;
+ bool user_profiling = false;
+ bool kernel_profiling = false;
int flags = KGSL_CMD_FLAGS_NONE;
int ret;
struct adreno_ringbuffer *rb;
- struct kgsl_cmdbatch_profiling_buffer *profile_buffer = NULL;
+ struct kgsl_drawobj_profiling_buffer *profile_buffer = NULL;
unsigned int dwords = 0;
struct adreno_submit_time local;
- struct kgsl_mem_entry *entry = cmdbatch->profiling_buf_entry;
+ struct kgsl_mem_entry *entry = cmdobj->profiling_buf_entry;
if (entry)
profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
- cmdbatch->profiling_buffer_gpuaddr);
+ cmdobj->profiling_buffer_gpuaddr);
- context = cmdbatch->context;
+ context = drawobj->context;
drawctxt = ADRENO_CONTEXT(context);
/* Get the total IBs in the list */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node)
+ list_for_each_entry(ib, &cmdobj->cmdlist, node)
numibs++;
rb = drawctxt->rb;
@@ -838,14 +761,14 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
* c) force preamble for commandbatch
*/
if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
- (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))) {
+ (!test_bit(CMDOBJ_SKIP, &cmdobj->priv))) {
- set_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_recovery);
- cmdbatch->fault_policy = drawctxt->fault_policy;
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ set_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_recovery);
+ cmdobj->fault_policy = drawctxt->fault_policy;
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
/* if context is detached print fault recovery */
- adreno_fault_skipcmd_detached(adreno_dev, drawctxt, cmdbatch);
+ adreno_fault_skipcmd_detached(adreno_dev, drawctxt, drawobj);
/* clear the drawctxt flags */
clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
@@ -857,7 +780,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
if a context switch hasn't occurred */
if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) &&
- !test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) &&
+ !test_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv) &&
(rb->drawctxt_active == drawctxt))
use_preamble = false;
@@ -867,7 +790,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
* the accounting sane. Set start_index and numibs to 0 to just
* generate the start and end markers and skip everything else
*/
- if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) {
+ if (test_bit(CMDOBJ_SKIP, &cmdobj->priv)) {
use_preamble = false;
numibs = 0;
}
@@ -884,9 +807,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
/* Each IB takes up 30 dwords in worst case */
dwords += (numibs * 30);
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING &&
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING &&
!adreno_is_a3xx(adreno_dev) && profile_buffer) {
- cmdbatch_user_profiling = true;
+ user_profiling = true;
dwords += 6;
/*
@@ -907,8 +830,8 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
time = &local;
}
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv)) {
- cmdbatch_kernel_profiling = true;
+ if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) {
+ kernel_profiling = true;
dwords += 6;
if (adreno_is_a5xx(adreno_dev))
dwords += 2;
@@ -929,26 +852,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
*cmds++ = cp_packet(adreno_dev, CP_NOP, 1);
*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
- if (cmdbatch_kernel_profiling) {
+ if (kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- adreno_dev->cmdbatch_profile_buffer.gpuaddr +
- ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index,
+ adreno_dev->profile_buffer.gpuaddr +
+ ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index,
started));
}
/*
- * Add cmds to read the GPU ticks at the start of the cmdbatch and
- * write it into the appropriate cmdbatch profiling buffer offset
+ * Add cmds to read the GPU ticks at the start of command obj and
+ * write it into the appropriate command obj profiling buffer offset
*/
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- cmdbatch->profiling_buffer_gpuaddr +
- offsetof(struct kgsl_cmdbatch_profiling_buffer,
+ cmdobj->profiling_buffer_gpuaddr +
+ offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_submitted));
}
if (numibs) {
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
/*
* Skip 0 sized IBs - these are presumed to have been
* removed from consideration by the FT policy
@@ -972,21 +895,21 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
adreno_is_preemption_enabled(adreno_dev))
cmds += gpudev->preemption_yield_enable(cmds);
- if (cmdbatch_kernel_profiling) {
+ if (kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- adreno_dev->cmdbatch_profile_buffer.gpuaddr +
- ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index,
+ adreno_dev->profile_buffer.gpuaddr +
+ ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index,
retired));
}
/*
- * Add cmds to read the GPU ticks at the end of the cmdbatch and
- * write it into the appropriate cmdbatch profiling buffer offset
+ * Add cmds to read the GPU ticks at the end of command obj and
+ * write it into the appropriate command obj profiling buffer offset
*/
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- cmdbatch->profiling_buffer_gpuaddr +
- offsetof(struct kgsl_cmdbatch_profiling_buffer,
+ cmdobj->profiling_buffer_gpuaddr +
+ offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_retired));
}
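
[Editor's note: the two profiling hunks above point the CP at fixed member offsets inside a userspace-visible buffer so the GPU can deposit alwayson ticks directly. A runnable sketch of computing such target addresses with offsetof(); the struct layout and field names are an assumption modeled on kgsl_drawobj_profiling_buffer:]

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct profiling_buffer {
        uint64_t wall_clock_s;
        uint64_t wall_clock_ns;
        uint64_t gpu_ticks_queued;
        uint64_t gpu_ticks_submitted;
        uint64_t gpu_ticks_retired;
    };

    int main(void)
    {
        uint64_t base = 0x10000000u; /* hypothetical GPU address */

        printf("submitted slot at 0x%llx\n", (unsigned long long)
               (base + offsetof(struct profiling_buffer, gpu_ticks_submitted)));
        printf("retired slot at 0x%llx\n", (unsigned long long)
               (base + offsetof(struct profiling_buffer, gpu_ticks_retired)));
        return 0;
    }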
@@ -1012,7 +935,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
goto done;
}
- if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
+ if (test_bit(CMDOBJ_WFI, &cmdobj->priv))
flags = KGSL_CMD_FLAGS_WFI;
/*
@@ -1025,26 +948,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
flags |= KGSL_CMD_FLAGS_PWRON_FIXUP;
/* Set the constraints before adding to ringbuffer */
- adreno_ringbuffer_set_constraint(device, cmdbatch);
+ adreno_ringbuffer_set_constraint(device, drawobj);
/* CFF stuff executed only if CFF is enabled */
- kgsl_cffdump_capture_ib_desc(device, context, cmdbatch);
+ kgsl_cffdump_capture_ib_desc(device, context, cmdobj);
ret = adreno_ringbuffer_addcmds(rb, flags,
&link[0], (cmds - link),
- cmdbatch->timestamp, time);
+ drawobj->timestamp, time);
if (!ret) {
- cmdbatch->global_ts = drawctxt->internal_timestamp;
+ cmdobj->global_ts = drawctxt->internal_timestamp;
/* Put the timevalues in the profiling buffer */
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
/*
* Return kernel clock time to the client
* if requested
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING_KTIME) {
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING_KTIME) {
uint64_t secs = time->ktime;
profile_buffer->wall_clock_ns =
@@ -1069,9 +992,8 @@ done:
kgsl_memdesc_unmap(&entry->memdesc);
- trace_kgsl_issueibcmds(device, context->id, cmdbatch,
- numibs, cmdbatch->timestamp,
- cmdbatch->flags, ret, drawctxt->type);
+ trace_kgsl_issueibcmds(device, context->id, numibs, drawobj->timestamp,
+ drawobj->flags, ret, drawctxt->type);
kfree(link);
return ret;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index b126f710b5e6..63374af1e3f7 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -119,7 +119,7 @@ struct adreno_ringbuffer {
struct adreno_context *drawctxt_active;
struct kgsl_memdesc preemption_desc;
struct kgsl_memdesc pagetable_desc;
- struct adreno_dispatcher_cmdqueue dispatch_q;
+ struct adreno_dispatcher_drawqueue dispatch_q;
wait_queue_head_t ts_expire_waitq;
unsigned int wptr_preempt_end;
unsigned int gpr11;
@@ -136,11 +136,11 @@ int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set);
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj,
uint32_t *timestamp);
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj_cmd *cmdobj,
struct adreno_submit_time *time);
int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt);
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 0eff3da0e494..08d7d3c1b935 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -108,7 +108,7 @@ static void push_object(int type,
}
/*
- * Return a 1 if the specified object is already on the list of buffers
+ * Returns the index if the specified object is already on the list of buffers
* to be dumped
*/
@@ -120,10 +120,9 @@ static int find_object(int type, uint64_t gpuaddr,
for (index = 0; index < objbufptr; index++) {
if (objbuf[index].gpuaddr == gpuaddr &&
objbuf[index].entry->priv == process)
- return 1;
+ return index;
}
-
- return 0;
+ return -ENOENT;
}
/*
@@ -196,8 +195,6 @@ static inline void parse_ib(struct kgsl_device *device,
struct kgsl_process_private *process,
uint64_t gpuaddr, uint64_t dwords)
{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- uint64_t ib1base;
struct adreno_ib_object_list *ib_obj_list;
/*
@@ -205,11 +202,7 @@ static inline void parse_ib(struct kgsl_device *device,
* then push it into the static blob otherwise put it in the dynamic
* list
*/
-
- adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
- ADRENO_REG_CP_IB1_BASE_HI, &ib1base);
-
- if (gpuaddr == ib1base) {
+ if (gpuaddr == snapshot->ib1base) {
push_object(SNAPSHOT_OBJ_TYPE_IB, process,
gpuaddr, dwords);
return;
@@ -295,17 +288,12 @@ static void snapshot_rb_ibs(struct kgsl_device *device,
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
unsigned int rptr, *rbptr;
- uint64_t ibbase;
int index, i;
int parse_ibs = 0, ib_parse_start;
/* Get the current read pointers for the RB */
adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
- /* Address of the last processed IB */
- adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
- ADRENO_REG_CP_IB1_BASE_HI, &ibbase);
-
/*
* Figure out the window of ringbuffer data to dump. First we need to
* find where the last processed IB was submitted. Start walking back
@@ -333,14 +321,14 @@ static void snapshot_rb_ibs(struct kgsl_device *device,
if (adreno_cmd_is_ib(adreno_dev, rbptr[index])) {
if (ADRENO_LEGACY_PM4(adreno_dev)) {
- if (rbptr[index + 1] == ibbase)
+ if (rbptr[index + 1] == snapshot->ib1base)
break;
} else {
uint64_t ibaddr;
ibaddr = rbptr[index + 2];
ibaddr = ibaddr << 32 | rbptr[index + 1];
- if (ibaddr == ibbase)
+ if (ibaddr == snapshot->ib1base)
break;
}
}
@@ -564,6 +552,67 @@ struct snapshot_ib_meta {
uint64_t ib2size;
};
+void kgsl_snapshot_add_active_ib_obj_list(struct kgsl_device *device,
+ struct kgsl_snapshot *snapshot)
+{
+ struct adreno_ib_object_list *ib_obj_list;
+ int index = -ENOENT;
+
+ if (!snapshot->ib1dumped)
+ index = find_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->ib1base,
+ snapshot->process);
+
+ /* only do this for IB1 because the IB2's are part of IB1 objects */
+ if ((index != -ENOENT) &&
+ (snapshot->ib1base == objbuf[index].gpuaddr)) {
+ if (-E2BIG == adreno_ib_create_object_list(device,
+ objbuf[index].entry->priv,
+ objbuf[index].gpuaddr,
+ objbuf[index].size >> 2,
+ &ib_obj_list))
+ ib_max_objs = 1;
+ if (ib_obj_list) {
+ /* freeze the IB objects in the IB */
+ snapshot_freeze_obj_list(snapshot,
+ objbuf[index].entry->priv,
+ ib_obj_list, snapshot->ib2base);
+ adreno_ib_destroy_obj_list(ib_obj_list);
+ }
+ } else {
+ /* Get the IB2 index from parsed object */
+ index = find_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->ib2base,
+ snapshot->process);
+
+ if (index != -ENOENT)
+ parse_ib(device, snapshot, snapshot->process,
+ snapshot->ib2base, objbuf[index].size >> 2);
+ }
+}
+
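+[Editor's note: the helper above leans on find_object()'s new contract of "non-negative index on a hit, -ENOENT on a miss", so callers can use the return value as an array index directly. A self-contained sketch of that idiom:]
+
+    #include <errno.h>
+    #include <stdint.h>
+
+    static int find_index(const uint64_t *addrs, int count, uint64_t gpuaddr)
+    {
+        int i;
+
+        for (i = 0; i < count; i++)
+            if (addrs[i] == gpuaddr)
+                return i;
+        return -ENOENT;
+    }
+
+    /* usage mirrors the code above:
+     * int idx = find_index(a, n, addr);
+     * if (idx != -ENOENT) use(a[idx]); */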
+/*
+ * active_ib_is_parsed() - Checks if active ib is already parsed
+ * @gpuaddr: Active IB base address at the time of fault
+ * @size: Active IB size
+ * @process: The process to which the IB belongs
+ *
+ * Returns true if the active IB is already parsed, false otherwise
+ */
+static bool active_ib_is_parsed(uint64_t gpuaddr, uint64_t size,
+ struct kgsl_process_private *process)
+{
+ int index;
+ /* go through the static list to check whether gpuaddr is in it */
+ for (index = 0; index < objbufptr; index++) {
+ if ((objbuf[index].gpuaddr <= gpuaddr) &&
+ ((objbuf[index].gpuaddr +
+ (objbuf[index].size)) >=
+ (gpuaddr + size)) &&
+ (objbuf[index].entry->priv == process))
+ return true;
+ }
+ return false;
+}
/* Snapshot the memory for an indirect buffer */
static size_t snapshot_ib(struct kgsl_device *device, u8 *buf,
size_t remain, void *priv)
@@ -596,13 +645,11 @@ static size_t snapshot_ib(struct kgsl_device *device, u8 *buf,
return 0;
}
- if (remain < (obj->size + sizeof(*header))) {
- KGSL_CORE_ERR("snapshot: Not enough memory for the ib\n");
- return 0;
- }
-
/* only do this for IB1 because the IB2's are part of IB1 objects */
if (meta->ib1base == obj->gpuaddr) {
+
+ snapshot->ib1dumped = active_ib_is_parsed(obj->gpuaddr,
+ obj->size, obj->entry->priv);
if (-E2BIG == adreno_ib_create_object_list(device,
obj->entry->priv,
obj->gpuaddr, obj->size >> 2,
@@ -617,6 +664,11 @@ static size_t snapshot_ib(struct kgsl_device *device, u8 *buf,
}
}
+
+ if (meta->ib2base == obj->gpuaddr)
+ snapshot->ib2dumped = active_ib_is_parsed(obj->gpuaddr,
+ obj->size, obj->entry->priv);
+
/* Write the sub-header for the section */
header->gpuaddr = obj->gpuaddr;
header->ptbase =
@@ -632,9 +684,7 @@ static size_t snapshot_ib(struct kgsl_device *device, u8 *buf,
/* Dump another item on the current pending list */
static void dump_object(struct kgsl_device *device, int obj,
- struct kgsl_snapshot *snapshot,
- uint64_t ib1base, uint64_t ib1size,
- uint64_t ib2base, uint64_t ib2size)
+ struct kgsl_snapshot *snapshot)
{
struct snapshot_ib_meta meta;
@@ -642,10 +692,10 @@ static void dump_object(struct kgsl_device *device, int obj,
case SNAPSHOT_OBJ_TYPE_IB:
meta.snapshot = snapshot;
meta.obj = &objbuf[obj];
- meta.ib1base = ib1base;
- meta.ib1size = ib1size;
- meta.ib2base = ib2base;
- meta.ib2size = ib2size;
+ meta.ib1base = snapshot->ib1base;
+ meta.ib1size = snapshot->ib1size;
+ meta.ib2base = snapshot->ib2base;
+ meta.ib2size = snapshot->ib2size;
kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_IB_V2,
snapshot, snapshot_ib, &meta);
@@ -792,8 +842,6 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
struct kgsl_context *context)
{
unsigned int i;
- uint64_t ib1base, ib2base;
- unsigned int ib1size, ib2size;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -806,6 +854,16 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
setup_fault_process(device, snapshot,
context ? context->proc_priv : NULL);
+ adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
+ ADRENO_REG_CP_IB1_BASE_HI, &snapshot->ib1base);
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ, &snapshot->ib1size);
+ adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB2_BASE,
+ ADRENO_REG_CP_IB2_BASE_HI, &snapshot->ib2base);
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &snapshot->ib2size);
+
+ snapshot->ib1dumped = false;
+ snapshot->ib2dumped = false;
+
adreno_snapshot_ringbuffer(device, snapshot, adreno_dev->cur_rb);
/* Dump the prev ringbuffer */
@@ -818,13 +876,6 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
adreno_snapshot_ringbuffer(device, snapshot,
adreno_dev->next_rb);
- adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
- ADRENO_REG_CP_IB1_BASE_HI, &ib1base);
- adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ, &ib1size);
- adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB2_BASE,
- ADRENO_REG_CP_IB2_BASE_HI, &ib2base);
- adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2size);
-
/* Add GPU specific sections - registers mainly, but other stuff too */
if (gpudev->snapshot)
gpudev->snapshot(adreno_dev, snapshot);
@@ -858,13 +909,13 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
* figure how often this really happens.
*/
- if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ib1base,
- snapshot->process) && ib1size) {
+ if (-ENOENT == find_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->ib1base,
+ snapshot->process) && snapshot->ib1size) {
push_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->process,
- ib1base, ib1size);
+ snapshot->ib1base, snapshot->ib1size);
KGSL_CORE_ERR(
"CP_IB1_BASE not found in the ringbuffer.Dumping %x dwords of the buffer.\n",
- ib1size);
+ snapshot->ib1size);
}
/*
@@ -875,10 +926,10 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
* correct size.
*/
- if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ib2base,
- snapshot->process) && ib2size) {
+ if (-ENOENT == find_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->ib2base,
+ snapshot->process)) {
push_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->process,
- ib2base, ib2size);
+ snapshot->ib2base, snapshot->ib2size);
}
/*
@@ -886,8 +937,15 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
* are parsed, more objects might be found, and objbufptr will increase
*/
for (i = 0; i < objbufptr; i++)
- dump_object(device, i, snapshot, ib1base, ib1size,
- ib2base, ib2size);
+ dump_object(device, i, snapshot);
+
+ /*
+ * In case the snapshot static blob runs out of memory, add the active
+ * IB1 and IB2 entries to obj_list so that the active IBs can be dumped
+ * to the snapshot dynamic blob.
+ */
+ if (!snapshot->ib1dumped || !snapshot->ib2dumped)
+ kgsl_snapshot_add_active_ib_obj_list(device, snapshot);
if (ib_max_objs)
KGSL_CORE_ERR("Max objects found in IB\n");
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index f52ddfa894d5..16ca0980cfbe 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -27,8 +27,8 @@
#include "adreno_a5xx.h"
TRACE_EVENT(adreno_cmdbatch_queued,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued),
- TP_ARGS(cmdbatch, queued),
+ TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int queued),
+ TP_ARGS(drawobj, queued),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -37,26 +37,26 @@ TRACE_EVENT(adreno_cmdbatch_queued,
__field(unsigned int, prio)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->queued = queued;
- __entry->flags = cmdbatch->flags;
- __entry->prio = cmdbatch->context->priority;
+ __entry->flags = drawobj->flags;
+ __entry->prio = drawobj->context->priority;
),
TP_printk(
"ctx=%u ctx_prio=%u ts=%u queued=%u flags=%s",
__entry->id, __entry->prio,
__entry->timestamp, __entry->queued,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none"
+ KGSL_DRAWOBJ_FLAGS) : "none"
)
);
TRACE_EVENT(adreno_cmdbatch_submitted,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, uint64_t ticks,
+ TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t ticks,
unsigned long secs, unsigned long usecs,
struct adreno_ringbuffer *rb, unsigned int rptr),
- TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb, rptr),
+ TP_ARGS(drawobj, inflight, ticks, secs, usecs, rb, rptr),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -72,14 +72,14 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__field(int, q_inflight)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->inflight = inflight;
- __entry->flags = cmdbatch->flags;
+ __entry->flags = drawobj->flags;
__entry->ticks = ticks;
__entry->secs = secs;
__entry->usecs = usecs;
- __entry->prio = cmdbatch->context->priority;
+ __entry->prio = drawobj->context->priority;
__entry->rb_id = rb->id;
__entry->rptr = rptr;
__entry->wptr = rb->wptr;
@@ -90,7 +90,7 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__entry->id, __entry->prio, __entry->timestamp,
__entry->inflight,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none",
+ KGSL_DRAWOBJ_FLAGS) : "none",
__entry->ticks, __entry->secs, __entry->usecs,
__entry->rb_id, __entry->rptr, __entry->wptr,
__entry->q_inflight
@@ -98,10 +98,11 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
);
TRACE_EVENT(adreno_cmdbatch_retired,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight,
+ TP_PROTO(struct kgsl_drawobj *drawobj, int inflight,
uint64_t start, uint64_t retire,
- struct adreno_ringbuffer *rb, unsigned int rptr),
- TP_ARGS(cmdbatch, inflight, start, retire, rb, rptr),
+ struct adreno_ringbuffer *rb, unsigned int rptr,
+ unsigned long fault_recovery),
+ TP_ARGS(drawobj, inflight, start, retire, rb, rptr, fault_recovery),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -115,16 +116,17 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__field(unsigned int, rptr)
__field(unsigned int, wptr)
__field(int, q_inflight)
+ __field(unsigned long, fault_recovery)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->inflight = inflight;
- __entry->recovery = cmdbatch->fault_recovery;
- __entry->flags = cmdbatch->flags;
+ __entry->recovery = fault_recovery;
+ __entry->flags = drawobj->flags;
__entry->start = start;
__entry->retire = retire;
- __entry->prio = cmdbatch->context->priority;
+ __entry->prio = drawobj->context->priority;
__entry->rb_id = rb->id;
__entry->rptr = rptr;
__entry->wptr = rb->wptr;
@@ -138,7 +140,7 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__print_flags(__entry->recovery, "|",
ADRENO_FT_TYPES) : "none",
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none",
+ KGSL_DRAWOBJ_FLAGS) : "none",
__entry->start,
__entry->retire,
__entry->rb_id, __entry->rptr, __entry->wptr,
@@ -147,16 +149,16 @@ TRACE_EVENT(adreno_cmdbatch_retired,
);
TRACE_EVENT(adreno_cmdbatch_fault,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault),
- TP_ARGS(cmdbatch, fault),
+ TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault),
+ TP_ARGS(cmdobj, fault),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
__field(unsigned int, fault)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = cmdobj->base.context->id;
+ __entry->timestamp = cmdobj->base.timestamp;
__entry->fault = fault;
),
TP_printk(
@@ -171,16 +173,16 @@ TRACE_EVENT(adreno_cmdbatch_fault,
);
TRACE_EVENT(adreno_cmdbatch_recovery,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action),
- TP_ARGS(cmdbatch, action),
+ TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int action),
+ TP_ARGS(cmdobj, action),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
__field(unsigned int, action)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = cmdobj->base.context->id;
+ __entry->timestamp = cmdobj->base.timestamp;
__entry->action = action;
),
TP_printk(
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 6e9abc99bcc4..add4590bbb90 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -36,7 +36,7 @@
#include "kgsl_cffdump.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sync.h"
@@ -485,6 +485,7 @@ err_put_proc_priv:
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
{
unsigned int type;
+ int ret;
if (entry == NULL)
return;
@@ -501,9 +502,14 @@ static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
entry->priv->stats[type].cur -= entry->memdesc.size;
spin_unlock(&entry->priv->mem_lock);
- kgsl_mmu_unmap(entry->memdesc.pagetable, &entry->memdesc);
-
- kgsl_mem_entry_untrack_gpuaddr(entry->priv, entry);
+ ret = kgsl_mmu_unmap(entry->memdesc.pagetable, &entry->memdesc);
+ /*
+ * Do not free the gpuaddr/size if unmap fails: if we try to map
+ * this range again later, the IOMMU driver will hit a BUG_ON()
+ * because it believes we are overwriting an existing mapping.
+ */
+ if (ret == 0)
+ kgsl_mem_entry_untrack_gpuaddr(entry->priv, entry);
kgsl_process_private_put(entry->priv);
@@ -1491,11 +1497,17 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_ringbuffer_issueibcmds *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj *drawobj;
+ struct kgsl_drawobj_cmd *cmdobj;
long result = -EINVAL;
/* The legacy functions don't support synchronization commands */
- if ((param->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER)))
+ if ((param->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER)))
+ return -EINVAL;
+
+ /* Sanity check the number of IBs */
+ if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST &&
+ (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS))
return -EINVAL;
/* Get the context */
@@ -1503,23 +1515,20 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
- /* Create a command batch */
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags,
+ CMDOBJ_TYPE);
+ if (IS_ERR(cmdobj)) {
+ kgsl_context_put(context);
+ return PTR_ERR(cmdobj);
}
- if (param->flags & KGSL_CMDBATCH_SUBMIT_IB_LIST) {
- /* Sanity check the number of IBs */
- if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS) {
- result = -EINVAL;
- goto done;
- }
- result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch,
+ drawobj = DRAWOBJ(cmdobj);
+
+ if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST)
+ result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
(void __user *) param->ibdesc_addr,
param->numibs);
- } else {
+ else {
struct kgsl_ibdesc ibdesc;
/* Ultra legacy path */
@@ -1527,83 +1536,119 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
ibdesc.sizedwords = param->numibs;
ibdesc.ctrl = 0;
- result = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ result = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
}
- if (result)
- goto done;
-
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ if (result == 0)
+ result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
+ &drawobj, 1, &param->timestamp);
-done:
/*
* -EPROTO is a "success" error - it just tells the user that the
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
kgsl_context_put(context);
return result;
}
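Because -EPROTO is passed through as a "success" error, userspace is
expected to tolerate it; a hedged caller-side sketch (errno handling
only, ioctl and struct taken from the existing kgsl UAPI):

	if (ioctl(fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &param) == -1 &&
	    errno != EPROTO)
		return -1;
	/* EPROTO is treated as success: the context had previously
	 * faulted, but the submission itself was accepted. */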
+/* Returns 0 on failure. Returns command type(s) on success */
+static unsigned int _process_command_input(struct kgsl_device *device,
+ unsigned int flags, unsigned int numcmds,
+ unsigned int numobjs, unsigned int numsyncs)
+{
+ if (numcmds > KGSL_MAX_NUMIBS ||
+ numobjs > KGSL_MAX_NUMIBS ||
+ numsyncs > KGSL_MAX_SYNCPOINTS)
+ return 0;
+
+ /*
+ * The SYNC bit identifies a dummy sync object, so any IBs specified
+ * along with it are ignored. A MARKER command can have IBs or not,
+ * but a command with 0 IBs is automatically treated as a marker.
+ */
+
+ /* If they specify the flag, go with what they say */
+ if (flags & KGSL_DRAWOBJ_MARKER)
+ return MARKEROBJ_TYPE;
+ else if (flags & KGSL_DRAWOBJ_SYNC)
+ return SYNCOBJ_TYPE;
+
+ /* If not, deduce what they meant */
+ if (numsyncs && numcmds)
+ return SYNCOBJ_TYPE | CMDOBJ_TYPE;
+ else if (numsyncs)
+ return SYNCOBJ_TYPE;
+ else if (numcmds)
+ return CMDOBJ_TYPE;
+ else if (numcmds == 0)
+ return MARKEROBJ_TYPE;
+
+ return 0;
+}
+
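For reference, the deduction above resolves as follows (all counts
assumed within KGSL_MAX_NUMIBS / KGSL_MAX_SYNCPOINTS):

	/*
	 * flags & KGSL_DRAWOBJ_MARKER               -> MARKEROBJ_TYPE
	 * flags & KGSL_DRAWOBJ_SYNC                 -> SYNCOBJ_TYPE
	 * no type flag, numsyncs > 0 && numcmds > 0 -> SYNCOBJ_TYPE | CMDOBJ_TYPE
	 * no type flag, numsyncs > 0, numcmds == 0  -> SYNCOBJ_TYPE
	 * no type flag, numsyncs == 0, numcmds > 0  -> CMDOBJ_TYPE
	 * no type flag, numsyncs == 0, numcmds == 0 -> MARKEROBJ_TYPE
	 * any count above its limit                 -> 0 (caller returns -EINVAL)
	 */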
long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_submit_commands *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
- long result = -EINVAL;
-
- /*
- * The SYNC bit is supposed to identify a dummy sync object so warn the
- * user if they specified any IBs with it. A MARKER command can either
- * have IBs or not but if the command has 0 IBs it is automatically
- * assumed to be a marker. If none of the above make sure that the user
- * specified a sane number of IBs
- */
-
- if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
- KGSL_DEV_ERR_ONCE(device,
- "Commands specified with the SYNC flag. They will be ignored\n");
- else if (param->numcmds > KGSL_MAX_NUMIBS)
- return -EINVAL;
- else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
- param->flags |= KGSL_CMDBATCH_MARKER;
+ struct kgsl_drawobj *drawobj[2];
+ unsigned int type;
+ long result;
+ unsigned int i = 0;
- /* Make sure that we don't have too many syncpoints */
- if (param->numsyncs > KGSL_MAX_SYNCPOINTS)
+ type = _process_command_input(device, param->flags, param->numcmds, 0,
+ param->numsyncs);
+ if (!type)
return -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
return -EINVAL;
- /* Create a command batch */
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ if (type & SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj =
+ kgsl_drawobj_sync_create(device, context);
+ if (IS_ERR(syncobj)) {
+ result = PTR_ERR(syncobj);
+ goto done;
+ }
+
+ drawobj[i++] = DRAWOBJ(syncobj);
+
+ result = kgsl_drawobj_sync_add_syncpoints(device, syncobj,
+ param->synclist, param->numsyncs);
+ if (result)
+ goto done;
}
- result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch,
- param->cmdlist, param->numcmds);
- if (result)
- goto done;
+ if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ kgsl_drawobj_cmd_create(device,
+ context, param->flags, type);
+ if (IS_ERR(cmdobj)) {
+ result = PTR_ERR(cmdobj);
+ goto done;
+ }
- result = kgsl_cmdbatch_add_syncpoints(device, cmdbatch,
- param->synclist, param->numsyncs);
- if (result)
- goto done;
+ drawobj[i++] = DRAWOBJ(cmdobj);
- /* If no profiling buffer was specified, clear the flag */
- if (cmdbatch->profiling_buf_entry == NULL)
- cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING;
+ result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
+ param->cmdlist, param->numcmds);
+ if (result)
+ goto done;
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ /* If no profiling buffer was specified, clear the flag */
+ if (cmdobj->profiling_buf_entry == NULL)
+ DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+ }
+
+ result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
+ i, &param->timestamp);
done:
/*
@@ -1611,7 +1656,9 @@ done:
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ while (i--)
+ kgsl_drawobj_destroy(drawobj[i]);
+
kgsl_context_put(context);
return result;
@@ -1623,63 +1670,69 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
struct kgsl_gpu_command *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
-
- long result = -EINVAL;
+ struct kgsl_drawobj *drawobj[2];
+ unsigned int type;
+ long result;
+ unsigned int i = 0;
- /*
- * The SYNC bit is supposed to identify a dummy sync object so warn the
- * user if they specified any IBs with it. A MARKER command can either
- * have IBs or not but if the command has 0 IBs it is automatically
- * assumed to be a marker. If none of the above make sure that the user
- * specified a sane number of IBs
- */
- if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
- KGSL_DEV_ERR_ONCE(device,
- "Commands specified with the SYNC flag. They will be ignored\n");
- else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
- param->flags |= KGSL_CMDBATCH_MARKER;
-
- /* Make sure that the memobj and syncpoint count isn't too big */
- if (param->numcmds > KGSL_MAX_NUMIBS ||
- param->numobjs > KGSL_MAX_NUMIBS ||
- param->numsyncs > KGSL_MAX_SYNCPOINTS)
+ type = _process_command_input(device, param->flags, param->numcmds,
+ param->numobjs, param->numsyncs);
+ if (!type)
return -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
return -EINVAL;
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ if (type & SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj =
+ kgsl_drawobj_sync_create(device, context);
+
+ if (IS_ERR(syncobj)) {
+ result = PTR_ERR(syncobj);
+ goto done;
+ }
+
+ drawobj[i++] = DRAWOBJ(syncobj);
+
+ result = kgsl_drawobj_sync_add_synclist(device, syncobj,
+ to_user_ptr(param->synclist),
+ param->syncsize, param->numsyncs);
+ if (result)
+ goto done;
}
- result = kgsl_cmdbatch_add_cmdlist(device, cmdbatch,
- to_user_ptr(param->cmdlist),
- param->cmdsize, param->numcmds);
- if (result)
- goto done;
+ if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ kgsl_drawobj_cmd_create(device,
+ context, param->flags, type);
- result = kgsl_cmdbatch_add_memlist(device, cmdbatch,
- to_user_ptr(param->objlist),
- param->objsize, param->numobjs);
- if (result)
- goto done;
+ if (IS_ERR(cmdobj)) {
+ result = PTR_ERR(cmdobj);
+ goto done;
+ }
- result = kgsl_cmdbatch_add_synclist(device, cmdbatch,
- to_user_ptr(param->synclist),
- param->syncsize, param->numsyncs);
- if (result)
- goto done;
+ drawobj[i++] = DRAWOBJ(cmdobj);
+
+ result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj,
+ to_user_ptr(param->cmdlist),
+ param->cmdsize, param->numcmds);
+ if (result)
+ goto done;
- /* If no profiling buffer was specified, clear the flag */
- if (cmdbatch->profiling_buf_entry == NULL)
- cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING;
+ result = kgsl_drawobj_cmd_add_memlist(device, cmdobj,
+ to_user_ptr(param->objlist),
+ param->objsize, param->numobjs);
+ if (result)
+ goto done;
+
+ /* If no profiling buffer was specified, clear the flag */
+ if (cmdobj->profiling_buf_entry == NULL)
+ DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+ }
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
+ i, &param->timestamp);
done:
/*
@@ -1687,7 +1740,8 @@ done:
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ while (i--)
+ kgsl_drawobj_destroy(drawobj[i]);
kgsl_context_put(context);
return result;
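Both ioctls share the same unwind idiom: drawobj[] is filled in
creation order (sync obj first, then cmd obj) and i counts the valid
entries, so the error path destroys newest-first. A condensed sketch:

	unsigned int i = 0;
	struct kgsl_drawobj *drawobj[2];

	drawobj[i++] = DRAWOBJ(syncobj);	/* created first */
	drawobj[i++] = DRAWOBJ(cmdobj);		/* created second */

	if (result && result != -EPROTO)
		while (i--)			/* unwind in reverse */
			kgsl_drawobj_destroy(drawobj[i]);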
@@ -4594,7 +4648,7 @@ static void kgsl_core_exit(void)
kgsl_driver.class = NULL;
}
- kgsl_cmdbatch_exit();
+ kgsl_drawobj_exit();
kgsl_memfree_exit();
unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
@@ -4670,7 +4724,7 @@ static int __init kgsl_core_init(void)
kgsl_events_init();
- result = kgsl_cmdbatch_init();
+ result = kgsl_drawobj_init();
if (result)
goto err;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 25f5de6ce645..826c4edb3582 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -28,6 +28,25 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+/*
+ * --- kgsl drawobj flags ---
+ * These flags are the same as the --- cmdbatch flags ---
+ * but renamed to reflect that cmdbatch is now called drawobj.
+ */
+#define KGSL_DRAWOBJ_MEMLIST KGSL_CMDBATCH_MEMLIST
+#define KGSL_DRAWOBJ_MARKER KGSL_CMDBATCH_MARKER
+#define KGSL_DRAWOBJ_SUBMIT_IB_LIST KGSL_CMDBATCH_SUBMIT_IB_LIST
+#define KGSL_DRAWOBJ_CTX_SWITCH KGSL_CMDBATCH_CTX_SWITCH
+#define KGSL_DRAWOBJ_PROFILING KGSL_CMDBATCH_PROFILING
+#define KGSL_DRAWOBJ_PROFILING_KTIME KGSL_CMDBATCH_PROFILING_KTIME
+#define KGSL_DRAWOBJ_END_OF_FRAME KGSL_CMDBATCH_END_OF_FRAME
+#define KGSL_DRAWOBJ_SYNC KGSL_CMDBATCH_SYNC
+#define KGSL_DRAWOBJ_PWR_CONSTRAINT KGSL_CMDBATCH_PWR_CONSTRAINT
+#define KGSL_DRAWOBJ_SPARSE KGSL_CMDBATCH_SPARSE
+
+#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer
+
+
/* The number of memstore arrays limits the number of contexts allowed.
* If more contexts are needed, update multiple for MEMSTORE_SIZE
*/
@@ -579,4 +598,19 @@ static inline void __user *to_user_ptr(uint64_t address)
return (void __user *)(uintptr_t)address;
}
+static inline void kgsl_gpu_sysfs_add_link(struct kobject *dst,
+ struct kobject *src, const char *src_name,
+ const char *dst_name)
+{
+ struct kernfs_node *old;
+
+ if (dst == NULL || src == NULL)
+ return;
+
+ old = sysfs_get_dirent(src->sd, src_name);
+ if (IS_ERR_OR_NULL(old))
+ return;
+
+ kernfs_create_link(dst->sd, dst_name, old);
+}
#endif /* __KGSL_H */
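A hedged usage sketch for the new helper; the attribute name is
illustrative:

	/* Expose an existing device attribute under the shared
	 * gpu sysfs kobject without duplicating the attribute. */
	kgsl_gpu_sysfs_add_link(device->gpu_sysfs_kobj,
			&device->dev->kobj, "gpu_model", "gpu_model");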
diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c
index 8e783f8ce017..3337570477f9 100644
--- a/drivers/gpu/msm/kgsl_cffdump.c
+++ b/drivers/gpu/msm/kgsl_cffdump.c
@@ -705,7 +705,7 @@ static int kgsl_cffdump_capture_adreno_ib_cff(struct kgsl_device *device,
*/
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
int ret = 0;
struct kgsl_memobj_node *ib;
@@ -713,7 +713,7 @@ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
if (!device->cff_dump_enable)
return 0;
/* Dump CFF for IB and all objects in it */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
ret = kgsl_cffdump_capture_adreno_ib_cff(
device, context->proc_priv, ib->gpuaddr,
ib->size >> 2);
diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h
index 315a097ba817..14bc397cb570 100644
--- a/drivers/gpu/msm/kgsl_cffdump.h
+++ b/drivers/gpu/msm/kgsl_cffdump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011,2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2011,2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -58,7 +58,7 @@ int kgsl_cff_dump_enable_set(void *data, u64 val);
int kgsl_cff_dump_enable_get(void *data, u64 *val);
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_drawobj_cmd *cmdobj);
void kgsl_cffdump_printline(int id, uint opcode, uint op1, uint op2,
uint op3, uint op4, uint op5);
@@ -164,7 +164,7 @@ static inline void kgsl_cffdump_user_event(struct kgsl_device *device,
static inline int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.h b/drivers/gpu/msm/kgsl_cmdbatch.h
deleted file mode 100644
index d5cbf375b5d3..000000000000
--- a/drivers/gpu/msm/kgsl_cmdbatch.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __KGSL_CMDBATCH_H
-#define __KGSL_CMDBATCH_H
-
-#define KGSL_CMDBATCH_FLAGS \
- { KGSL_CMDBATCH_MARKER, "MARKER" }, \
- { KGSL_CMDBATCH_CTX_SWITCH, "CTX_SWITCH" }, \
- { KGSL_CMDBATCH_SYNC, "SYNC" }, \
- { KGSL_CMDBATCH_END_OF_FRAME, "EOF" }, \
- { KGSL_CMDBATCH_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
- { KGSL_CMDBATCH_SUBMIT_IB_LIST, "IB_LIST" }
-
-/**
- * struct kgsl_cmdbatch - KGSl command descriptor
- * @device: KGSL GPU device that the command was created for
- * @context: KGSL context that created the command
- * @timestamp: Timestamp assigned to the command
- * @flags: flags
- * @priv: Internal flags
- * @fault_policy: Internal policy describing how to handle this command in case
- * of a fault
- * @fault_recovery: recovery actions actually tried for this batch
- * @refcount: kref structure to maintain the reference count
- * @cmdlist: List of IBs to issue
- * @memlist: List of all memory used in this command batch
- * @synclist: Array of context/timestamp tuples to wait for before issuing
- * @numsyncs: Number of sync entries in the array
- * @pending: Bitmask of sync events that are active
- * @timer: a timer used to track possible sync timeouts for this cmdbatch
- * @marker_timestamp: For markers, the timestamp of the last "real" command that
- * was queued
- * @profiling_buf_entry: Mem entry containing the profiling buffer
- * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here
- * for easy access
- * @profile_index: Index to store the start/stop ticks in the kernel profiling
- * buffer
- * @submit_ticks: Variable to hold ticks at the time of cmdbatch submit.
- * @global_ts: The ringbuffer timestamp corresponding to this cmdbatch
- * @timeout_jiffies: For a syncpoint cmdbatch the jiffies at which the
- * timer will expire
- * This structure defines an atomic batch of command buffers issued from
- * userspace.
- */
-struct kgsl_cmdbatch {
- struct kgsl_device *device;
- struct kgsl_context *context;
- uint32_t timestamp;
- uint32_t flags;
- unsigned long priv;
- unsigned long fault_policy;
- unsigned long fault_recovery;
- struct kref refcount;
- struct list_head cmdlist;
- struct list_head memlist;
- struct kgsl_cmdbatch_sync_event *synclist;
- unsigned int numsyncs;
- unsigned long pending;
- struct timer_list timer;
- unsigned int marker_timestamp;
- struct kgsl_mem_entry *profiling_buf_entry;
- uint64_t profiling_buffer_gpuaddr;
- unsigned int profile_index;
- uint64_t submit_ticks;
- unsigned int global_ts;
- unsigned long timeout_jiffies;
-};
-
-/**
- * struct kgsl_cmdbatch_sync_event
- * @id: identifer (positiion within the pending bitmap)
- * @type: Syncpoint type
- * @cmdbatch: Pointer to the cmdbatch that owns the sync event
- * @context: Pointer to the KGSL context that owns the cmdbatch
- * @timestamp: Pending timestamp for the event
- * @handle: Pointer to a sync fence handle
- * @device: Pointer to the KGSL device
- */
-struct kgsl_cmdbatch_sync_event {
- unsigned int id;
- int type;
- struct kgsl_cmdbatch *cmdbatch;
- struct kgsl_context *context;
- unsigned int timestamp;
- struct kgsl_sync_fence_waiter *handle;
- struct kgsl_device *device;
-};
-
-/**
- * enum kgsl_cmdbatch_priv - Internal cmdbatch flags
- * @CMDBATCH_FLAG_SKIP - skip the entire command batch
- * @CMDBATCH_FLAG_FORCE_PREAMBLE - Force the preamble on for the cmdbatch
- * @CMDBATCH_FLAG_WFI - Force wait-for-idle for the submission
- * @CMDBATCH_FLAG_PROFILE - store the start / retire ticks for the command batch
- * in the profiling buffer
- * @CMDBATCH_FLAG_FENCE_LOG - Set if the cmdbatch is dumping fence logs via the
- * cmdbatch timer - this is used to avoid recursion
- */
-
-enum kgsl_cmdbatch_priv {
- CMDBATCH_FLAG_SKIP = 0,
- CMDBATCH_FLAG_FORCE_PREAMBLE,
- CMDBATCH_FLAG_WFI,
- CMDBATCH_FLAG_PROFILE,
- CMDBATCH_FLAG_FENCE_LOG,
-};
-
-
-int kgsl_cmdbatch_add_memobj(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_ibdesc *ibdesc);
-
-int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmd_syncpoint *sync);
-
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int flags);
-int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc);
-int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count);
-int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count);
-int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-
-int kgsl_cmdbatch_init(void);
-void kgsl_cmdbatch_exit(void);
-
-void kgsl_dump_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy_object(struct kref *kref);
-
-static inline bool kgsl_cmdbatch_events_pending(struct kgsl_cmdbatch *cmdbatch)
-{
- return !bitmap_empty(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
-}
-
-static inline bool kgsl_cmdbatch_event_pending(struct kgsl_cmdbatch *cmdbatch,
- unsigned int bit)
-{
- if (bit >= KGSL_MAX_SYNCPOINTS)
- return false;
-
- return test_bit(bit, &cmdbatch->pending);
-}
-
-#endif /* __KGSL_CMDBATCH_H */
diff --git a/drivers/gpu/msm/kgsl_compat.h b/drivers/gpu/msm/kgsl_compat.h
index ca1685e5fcf5..7681d74fb108 100644
--- a/drivers/gpu/msm/kgsl_compat.h
+++ b/drivers/gpu/msm/kgsl_compat.h
@@ -236,8 +236,8 @@ static inline compat_size_t sizet_to_compat(size_t size)
return (compat_size_t)size;
}
-int kgsl_cmdbatch_create_compat(struct kgsl_device *device, unsigned int flags,
- struct kgsl_cmdbatch *cmdbatch, void __user *cmdlist,
+int kgsl_drawobj_create_compat(struct kgsl_device *device, unsigned int flags,
+ struct kgsl_drawobj *drawobj, void __user *cmdlist,
unsigned int numcmds, void __user *synclist,
unsigned int numsyncs);
@@ -245,8 +245,8 @@ long kgsl_compat_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg);
#else
-static inline int kgsl_cmdbatch_create_compat(struct kgsl_device *device,
- unsigned int flags, struct kgsl_cmdbatch *cmdbatch,
+static inline int kgsl_drawobj_create_compat(struct kgsl_device *device,
+ unsigned int flags, struct kgsl_drawobj *drawobj,
void __user *cmdlist, unsigned int numcmds,
void __user *synclist, unsigned int numsyncs)
{
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index f42d822b451b..04935e8d0019 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -25,7 +25,7 @@
#include "kgsl_pwrscale.h"
#include "kgsl_snapshot.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#define KGSL_IOCTL_FUNC(_cmd, _func) \
[_IOC_NR((_cmd))] = \
@@ -127,9 +127,9 @@ struct kgsl_functable {
unsigned int msecs);
int (*readtimestamp) (struct kgsl_device *device, void *priv,
enum kgsl_timestamp_type type, unsigned int *timestamp);
- int (*issueibcmds) (struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamps);
+ int (*queue_cmds)(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp);
void (*power_stats)(struct kgsl_device *device,
struct kgsl_power_stats *stats);
unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
@@ -165,6 +165,10 @@ struct kgsl_functable {
void (*pwrlevel_change_settings)(struct kgsl_device *device,
unsigned int prelevel, unsigned int postlevel, bool post);
void (*regulator_disable_poll)(struct kgsl_device *device);
+ void (*clk_set_options)(struct kgsl_device *device,
+ const char *name, struct clk *clk);
+ void (*gpu_model)(struct kgsl_device *device, char *str,
+ size_t bufsz);
};
struct kgsl_ioctl {
@@ -182,7 +186,7 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg,
/**
* struct kgsl_memobj_node - Memory object descriptor
- * @node: Local list node for the cmdbatch
+ * @node: Local list node for the object
* @id: GPU memory ID for the object
* offset: Offset within the object
* @gpuaddr: GPU address for the object
@@ -231,7 +235,7 @@ struct kgsl_device {
struct kgsl_mmu mmu;
struct completion hwaccess_gate;
- struct completion cmdbatch_gate;
+ struct completion halt_gate;
const struct kgsl_functable *ftbl;
struct work_struct idle_check_ws;
struct timer_list idle_timer;
@@ -259,6 +263,7 @@ struct kgsl_device {
struct kgsl_snapshot *snapshot;
u32 snapshot_faultcount; /* Total number of faults since boot */
+ bool force_panic; /* Force panic after snapshot dump */
struct kobject snapshot_kobj;
struct kobject ppd_kobj;
@@ -279,6 +284,7 @@ struct kgsl_device {
/* Number of active contexts seen globally for this device */
int active_context_count;
+ struct kobject *gpu_sysfs_kobj;
};
#define KGSL_MMU_DEVICE(_mmu) \
@@ -286,7 +292,7 @@ struct kgsl_device {
#define KGSL_DEVICE_COMMON_INIT(_dev) \
.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
- .cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\
+ .halt_gate = COMPLETION_INITIALIZER((_dev).halt_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
.context_idr = IDR_INIT((_dev).context_idr),\
@@ -422,6 +428,12 @@ struct kgsl_device_private {
/**
* struct kgsl_snapshot - details for a specific snapshot instance
+ * @ib1base: Active IB1 base address at the time of fault
+ * @ib2base: Active IB2 base address at the time of fault
+ * @ib1size: Number of DWORDS pending in IB1 at the time of fault
+ * @ib2size: Number of DWORDS pending in IB2 at the time of fault
+ * @ib1dumped: Whether the active IB1 was dumped to the snapshot binary
+ * @ib2dumped: Whether the active IB2 was dumped to the snapshot binary
* @start: Pointer to the start of the static snapshot region
* @size: Size of the current snapshot instance
* @ptr: Pointer to the next block of memory to write to during snapshotting
@@ -437,6 +449,12 @@ struct kgsl_device_private {
* @sysfs_read: An atomic for concurrent snapshot reads via sysfs.
*/
struct kgsl_snapshot {
+ uint64_t ib1base;
+ uint64_t ib2base;
+ unsigned int ib1size;
+ unsigned int ib2size;
+ bool ib1dumped;
+ bool ib2dumped;
u8 *start;
size_t size;
u8 *ptr;
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.c b/drivers/gpu/msm/kgsl_drawobj.c
index 6272410ce544..7840daa6a3e2 100644
--- a/drivers/gpu/msm/kgsl_cmdbatch.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,17 +11,17 @@
*/
/*
- * KGSL command batch management
- * A command batch is a single submission from userland. The cmdbatch
+ * KGSL drawobj management
+ * A drawobj is a single submission from userland. The drawobj
* encapsulates everything about the submission : command buffers, flags and
* sync points.
*
* Sync points are events that need to expire before the
- * cmdbatch can be queued to the hardware. All synpoints are contained in an
- * array of kgsl_cmdbatch_sync_event structs in the command batch. There can be
+ * drawobj can be queued to the hardware. All syncpoints are contained in an
+ * array of kgsl_drawobj_sync_event structs in the drawobj. There can be
* multiple types of events both internal ones (GPU events) and external
* triggers. As the events expire bits are cleared in a pending bitmap stored
- * in the command batch. The GPU will submit the command as soon as the bitmap
+ * in the drawobj. The GPU will submit the command as soon as the bitmap
* goes to zero indicating no more pending events.
*/
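A condensed sketch of that lifecycle, drawn from the helpers below:

	/* registration: arm the bit for this syncpoint */
	set_bit(event->id, &syncobj->pending);

	/* expiry (drawobj_sync_expire): retire exactly once */
	if (!test_and_clear_bit(event->id, &syncobj->pending))
		return;
	/* last bit gone -> the drawobj is ready for dispatch */
	if (bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS))
		device->ftbl->drawctxt_sched(device, drawobj->context);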
@@ -31,7 +31,7 @@
#include "kgsl.h"
#include "kgsl_device.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#include "kgsl_sync.h"
#include "kgsl_trace.h"
#include "kgsl_compat.h"
@@ -42,26 +42,43 @@
*/
static struct kmem_cache *memobjs_cache;
-/**
- * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
- * @cmdbatch: Pointer to the command batch object
- */
-static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
+static void drawobj_destroy_object(struct kref *kref)
{
- if (cmdbatch)
- kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
+ struct kgsl_drawobj *drawobj = container_of(kref,
+ struct kgsl_drawobj, refcount);
+ struct kgsl_drawobj_sync *syncobj;
+
+ kgsl_context_put(drawobj->context);
+
+ switch (drawobj->type) {
+ case SYNCOBJ_TYPE:
+ syncobj = SYNCOBJ(drawobj);
+ kfree(syncobj->synclist);
+ kfree(syncobj);
+ break;
+ case CMDOBJ_TYPE:
+ case MARKEROBJ_TYPE:
+ kfree(CMDOBJ(drawobj));
+ break;
+ }
+}
+
+static inline void drawobj_put(struct kgsl_drawobj *drawobj)
+{
+ if (drawobj)
+ kref_put(&drawobj->refcount, drawobj_destroy_object);
}
void kgsl_dump_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_sync *syncobj)
{
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
switch (event->type) {
@@ -90,32 +107,33 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
}
}
-static void _kgsl_cmdbatch_timer(unsigned long data)
+static void syncobj_timer(unsigned long data)
{
struct kgsl_device *device;
- struct kgsl_cmdbatch *cmdbatch = (struct kgsl_cmdbatch *) data;
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- if (cmdbatch == NULL || cmdbatch->context == NULL)
+ if (syncobj == NULL || drawobj->context == NULL)
return;
- device = cmdbatch->context->device;
+ device = drawobj->context->device;
dev_err(device->dev,
"kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ drawobj->context->id, drawobj->timestamp);
- set_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv);
- kgsl_context_dump(cmdbatch->context);
- clear_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv);
+ set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
+ kgsl_context_dump(drawobj->context);
+ clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
dev_err(device->dev, " pending events:\n");
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
switch (event->type) {
@@ -137,48 +155,31 @@ static void _kgsl_cmdbatch_timer(unsigned long data)
dev_err(device->dev, "--gpu syncpoint deadlock print end--\n");
}
-/**
- * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object
- * @kref: Pointer to the kref structure for this object
- *
- * Actually destroy a command batch object. Called from kgsl_cmdbatch_put
- */
-void kgsl_cmdbatch_destroy_object(struct kref *kref)
-{
- struct kgsl_cmdbatch *cmdbatch = container_of(kref,
- struct kgsl_cmdbatch, refcount);
-
- kgsl_context_put(cmdbatch->context);
-
- kfree(cmdbatch->synclist);
- kfree(cmdbatch);
-}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object);
-
/*
* a generic function to retire a pending sync event and (possibly)
* kick the dispatcher
*/
-static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
- struct kgsl_cmdbatch_sync_event *event)
+static void drawobj_sync_expire(struct kgsl_device *device,
+ struct kgsl_drawobj_sync_event *event)
{
+ struct kgsl_drawobj_sync *syncobj = event->syncobj;
/*
* Clear the event from the pending mask - if it is already clear, then
* leave without doing anything useful
*/
- if (!test_and_clear_bit(event->id, &event->cmdbatch->pending))
+ if (!test_and_clear_bit(event->id, &syncobj->pending))
return;
/*
* If no more pending events, delete the timer and schedule the command
* for dispatch
*/
- if (!kgsl_cmdbatch_events_pending(event->cmdbatch)) {
- del_timer_sync(&event->cmdbatch->timer);
+ if (!kgsl_drawobj_events_pending(event->syncobj)) {
+ del_timer_sync(&syncobj->timer);
if (device->ftbl->drawctxt_sched)
device->ftbl->drawctxt_sched(device,
- event->cmdbatch->context);
+ event->syncobj->base.context);
}
}
@@ -186,20 +187,20 @@ static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
* This function is called by the GPU event when the sync event timestamp
* expires
*/
-static void kgsl_cmdbatch_sync_func(struct kgsl_device *device,
+static void drawobj_sync_func(struct kgsl_device *device,
struct kgsl_event_group *group, void *priv, int result)
{
- struct kgsl_cmdbatch_sync_event *event = priv;
+ struct kgsl_drawobj_sync_event *event = priv;
- trace_syncpoint_timestamp_expire(event->cmdbatch,
+ trace_syncpoint_timestamp_expire(event->syncobj,
event->context, event->timestamp);
- kgsl_cmdbatch_sync_expire(device, event);
+ drawobj_sync_expire(device, event);
kgsl_context_put(event->context);
- kgsl_cmdbatch_put(event->cmdbatch);
+ drawobj_put(&event->syncobj->base);
}
-static inline void _free_memobj_list(struct list_head *list)
+static inline void memobj_list_free(struct list_head *list)
{
struct kgsl_memobj_node *mem, *tmpmem;
@@ -210,39 +211,28 @@ static inline void _free_memobj_list(struct list_head *list)
}
}
-/**
- * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure
- * @cmdbatch: Pointer to the command batch object to destroy
- *
- * Start the process of destroying a command batch. Cancel any pending events
- * and decrement the refcount. Asynchronous events can still signal after
- * kgsl_cmdbatch_destroy has returned.
- */
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
+static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
- unsigned int i;
+ struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
unsigned long pending;
-
- if (IS_ERR_OR_NULL(cmdbatch))
- return;
+ unsigned int i;
/* Zap the canary timer */
- del_timer_sync(&cmdbatch->timer);
+ del_timer_sync(&syncobj->timer);
/*
* Copy off the pending list and clear all pending events - this will
* render any subsequent asynchronous callback harmless
*/
- bitmap_copy(&pending, &cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
- bitmap_zero(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
+ bitmap_copy(&pending, &syncobj->pending, KGSL_MAX_SYNCPOINTS);
+ bitmap_zero(&syncobj->pending, KGSL_MAX_SYNCPOINTS);
/*
* Clear all pending events - this will render any subsequent async
* callbacks harmless
*/
-
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- struct kgsl_cmdbatch_sync_event *event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];
/* Don't do anything if the event has already expired */
if (!test_bit(i, &pending))
@@ -250,127 +240,152 @@ void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
switch (event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
- kgsl_cancel_event(cmdbatch->device,
+ kgsl_cancel_event(drawobj->device,
&event->context->events, event->timestamp,
- kgsl_cmdbatch_sync_func, event);
+ drawobj_sync_func, event);
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
if (kgsl_sync_fence_async_cancel(event->handle))
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
break;
}
}
/*
- * Release the the refcount on the mem entry associated with the
- * cmdbatch profiling buffer
+ * If we cancelled an event, there's a good chance that the context is
+ * on a dispatcher queue, so schedule to get it removed.
+ */
+ if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
+ drawobj->device->ftbl->drawctxt_sched)
+ drawobj->device->ftbl->drawctxt_sched(drawobj->device,
+ drawobj->context);
+
+}
+
+static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj)
+{
+ struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj);
+
+ /*
+ * Release the refcount on the mem entry associated with the
+ * ib profiling buffer
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING)
- kgsl_mem_entry_put(cmdbatch->profiling_buf_entry);
+ if (cmdobj->base.flags & KGSL_DRAWOBJ_PROFILING)
+ kgsl_mem_entry_put(cmdobj->profiling_buf_entry);
/* Destroy the cmdlist we created */
- _free_memobj_list(&cmdbatch->cmdlist);
+ memobj_list_free(&cmdobj->cmdlist);
/* Destroy the memlist we created */
- _free_memobj_list(&cmdbatch->memlist);
+ memobj_list_free(&cmdobj->memlist);
+}
- /*
- * If we cancelled an event, there's a good chance that the context is
- * on a dispatcher queue, so schedule to get it removed.
+/**
+ * kgsl_drawobj_destroy() - Destroy a kgsl draw object structure
+ * @drawobj: Pointer to the draw object to destroy
+ *
+ * Start the process of destroying a draw object. Cancel any pending events
+ * and decrement the refcount. Asynchronous events can still signal after
+ * kgsl_drawobj_destroy has returned.
*/
- if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
- cmdbatch->device->ftbl->drawctxt_sched)
- cmdbatch->device->ftbl->drawctxt_sched(cmdbatch->device,
- cmdbatch->context);
+void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
+{
+ if (!drawobj)
+ return;
+
+ if (drawobj->type & SYNCOBJ_TYPE)
+ drawobj_destroy_sync(drawobj);
+ else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
+ drawobj_destroy_cmd(drawobj);
+ else
+ return;
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy);
+EXPORT_SYMBOL(kgsl_drawobj_destroy);
-/*
- * A callback that gets registered with kgsl_sync_fence_async_wait and is fired
- * when a fence is expired
- */
-static void kgsl_cmdbatch_sync_fence_func(void *priv)
+static void drawobj_sync_fence_func(void *priv)
{
- struct kgsl_cmdbatch_sync_event *event = priv;
+ struct kgsl_drawobj_sync_event *event = priv;
- trace_syncpoint_fence_expire(event->cmdbatch,
+ trace_syncpoint_fence_expire(event->syncobj,
event->handle ? event->handle->name : "unknown");
- kgsl_cmdbatch_sync_expire(event->device, event);
+ drawobj_sync_expire(event->device, event);
- kgsl_cmdbatch_put(event->cmdbatch);
+ drawobj_put(&event->syncobj->base);
}
-/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint
+/* drawobj_add_sync_fence() - Add a new sync fence syncpoint
* @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
+ * @syncobj: KGSL sync obj to add the sync point to
+ * @priv: Private structure passed by the user
*
- * Add a new fence sync syncpoint to the cmdbatch.
+ * Add a new fence sync syncpoint to the sync obj.
*/
-static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
+static int drawobj_add_sync_fence(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void *priv)
{
struct kgsl_cmd_syncpoint_fence *sync = priv;
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_drawobj_sync_event *event;
unsigned int id;
- kref_get(&cmdbatch->refcount);
+ kref_get(&drawobj->refcount);
- id = cmdbatch->numsyncs++;
+ id = syncobj->numsyncs++;
- event = &cmdbatch->synclist[id];
+ event = &syncobj->synclist[id];
event->id = id;
event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
- event->cmdbatch = cmdbatch;
+ event->syncobj = syncobj;
event->device = device;
event->context = NULL;
- set_bit(event->id, &cmdbatch->pending);
+ set_bit(event->id, &syncobj->pending);
event->handle = kgsl_sync_fence_async_wait(sync->fd,
- kgsl_cmdbatch_sync_fence_func, event);
+ drawobj_sync_fence_func, event);
if (IS_ERR_OR_NULL(event->handle)) {
int ret = PTR_ERR(event->handle);
- clear_bit(event->id, &cmdbatch->pending);
+ clear_bit(event->id, &syncobj->pending);
event->handle = NULL;
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
/*
* If ret == 0 the fence was already signaled - print a trace
* message so we can track that
*/
if (ret == 0)
- trace_syncpoint_fence_expire(cmdbatch, "signaled");
+ trace_syncpoint_fence_expire(syncobj, "signaled");
return ret;
}
- trace_syncpoint_fence(cmdbatch, event->handle->name);
+ trace_syncpoint_fence(syncobj, event->handle->name);
return 0;
}
-/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch
+/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj
* @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
+ * @syncobj: KGSL sync obj to add the sync point to
+ * @priv: Private structure passed by the user
*
- * Add a new sync point timestamp event to the cmdbatch.
+ * Add a new sync point timestamp event to the sync obj.
*/
-static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
+static int drawobj_add_sync_timestamp(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void *priv)
{
struct kgsl_cmd_syncpoint_timestamp *sync = priv;
- struct kgsl_context *context = kgsl_context_get(cmdbatch->device,
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_context *context = kgsl_context_get(device,
sync->context_id);
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
int ret = -EINVAL;
unsigned int id;
@@ -384,8 +399,9 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
* create a sync point on a future timestamp.
*/
- if (context == cmdbatch->context) {
+ if (context == drawobj->context) {
unsigned int queued;
+
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
&queued);
@@ -397,29 +413,29 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
}
}
- kref_get(&cmdbatch->refcount);
+ kref_get(&drawobj->refcount);
- id = cmdbatch->numsyncs++;
+ id = syncobj->numsyncs++;
- event = &cmdbatch->synclist[id];
+ event = &syncobj->synclist[id];
event->id = id;
event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
- event->cmdbatch = cmdbatch;
+ event->syncobj = syncobj;
event->context = context;
event->timestamp = sync->timestamp;
event->device = device;
- set_bit(event->id, &cmdbatch->pending);
+ set_bit(event->id, &syncobj->pending);
ret = kgsl_add_event(device, &context->events, sync->timestamp,
- kgsl_cmdbatch_sync_func, event);
+ drawobj_sync_func, event);
if (ret) {
- clear_bit(event->id, &cmdbatch->pending);
- kgsl_cmdbatch_put(cmdbatch);
+ clear_bit(event->id, &syncobj->pending);
+ drawobj_put(drawobj);
} else {
- trace_syncpoint_timestamp(cmdbatch, context, sync->timestamp);
+ trace_syncpoint_timestamp(syncobj, context, sync->timestamp);
}
done:
@@ -430,43 +446,46 @@ done:
}
/**
- * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch
+ * kgsl_drawobj_sync_add_sync() - Add a sync point to a sync obj
* @device: Pointer to the KGSL device struct for the GPU
- * @cmdbatch: Pointer to the cmdbatch
+ * @syncobj: Pointer to the sync obj
* @sync: Pointer to the user-specified struct defining the syncpoint
*
- * Create a new sync point in the cmdbatch based on the user specified
- * parameters
+ * Create a new sync point in the sync obj based on the
+ * user specified parameters
*/
-int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
+int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
struct kgsl_cmd_syncpoint *sync)
{
void *priv;
int ret, psize;
- int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ int (*func)(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
void *priv);
switch (sync->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
- func = kgsl_cmdbatch_add_sync_timestamp;
+ func = drawobj_add_sync_timestamp;
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
psize = sizeof(struct kgsl_cmd_syncpoint_fence);
- func = kgsl_cmdbatch_add_sync_fence;
+ func = drawobj_add_sync_fence;
break;
default:
KGSL_DRV_ERR(device,
"bad syncpoint type ctxt %d type 0x%x size %zu\n",
- cmdbatch->context->id, sync->type, sync->size);
+ drawobj->context->id, sync->type, sync->size);
return -EINVAL;
}
if (sync->size != psize) {
KGSL_DRV_ERR(device,
"bad syncpoint size ctxt %d type 0x%x size %zu\n",
- cmdbatch->context->id, sync->type, sync->size);
+ drawobj->context->id, sync->type, sync->size);
return -EINVAL;
}
@@ -479,30 +498,32 @@ int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
return -EFAULT;
}
- ret = func(device, cmdbatch, priv);
+ ret = func(device, syncobj, priv);
kfree(priv);
return ret;
}
static void add_profiling_buffer(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, uint64_t gpuaddr, uint64_t size,
+ struct kgsl_drawobj_cmd *cmdobj,
+ uint64_t gpuaddr, uint64_t size,
unsigned int id, uint64_t offset)
{
struct kgsl_mem_entry *entry;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- if (!(cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
+ if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING))
return;
/* Only the first buffer entry counts - ignore the rest */
- if (cmdbatch->profiling_buf_entry != NULL)
+ if (cmdobj->profiling_buf_entry != NULL)
return;
if (id != 0)
- entry = kgsl_sharedmem_find_id(cmdbatch->context->proc_priv,
+ entry = kgsl_sharedmem_find_id(drawobj->context->proc_priv,
id);
else
- entry = kgsl_sharedmem_find(cmdbatch->context->proc_priv,
+ entry = kgsl_sharedmem_find(drawobj->context->proc_priv,
gpuaddr);
if (entry != NULL) {
@@ -515,47 +536,50 @@ static void add_profiling_buffer(struct kgsl_device *device,
if (entry == NULL) {
KGSL_DRV_ERR(device,
"ignore bad profile buffer ctxt %d id %d offset %lld gpuaddr %llx size %lld\n",
- cmdbatch->context->id, id, offset, gpuaddr, size);
+ drawobj->context->id, id, offset, gpuaddr, size);
return;
}
- cmdbatch->profiling_buf_entry = entry;
+ cmdobj->profiling_buf_entry = entry;
if (id != 0)
- cmdbatch->profiling_buffer_gpuaddr =
+ cmdobj->profiling_buffer_gpuaddr =
entry->memdesc.gpuaddr + offset;
else
- cmdbatch->profiling_buffer_gpuaddr = gpuaddr;
+ cmdobj->profiling_buffer_gpuaddr = gpuaddr;
}
/**
- * kgsl_cmdbatch_add_ibdesc() - Add a legacy ibdesc to a command batch
- * @cmdbatch: Pointer to the cmdbatch
+ * kgsl_drawobj_cmd_add_ibdesc() - Add a legacy ibdesc to a command obj
+ * @cmdobj: Pointer to the command obj
* @ibdesc: Pointer to the user-specified struct defining the memory or IB
*
- * Create a new memory entry in the cmdbatch based on the user specified
- * parameters
+ * Create a new memory entry in the command obj based on the
+ * user specified parameters
*/
-int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc)
+int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc)
{
uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr;
uint64_t size = (uint64_t) ibdesc->sizedwords << 2;
struct kgsl_memobj_node *mem;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
/* sanitize the ibdesc ctrl flags */
ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER;
- if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST &&
+ if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) {
- add_profiling_buffer(device, cmdbatch,
+ add_profiling_buffer(device, cmdobj,
gpuaddr, size, 0, 0);
return 0;
}
}
- if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))
+ /* Ignore if SYNC or MARKER is specified */
+ if (drawobj->type & (SYNCOBJ_TYPE | MARKEROBJ_TYPE))
return 0;
mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
@@ -569,74 +593,121 @@ int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
mem->offset = 0;
mem->flags = 0;
- if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST &&
- ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
+ if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
+ ibdesc->ctrl & KGSL_IBDESC_MEMLIST)
/* add to the memlist */
- list_add_tail(&mem->node, &cmdbatch->memlist);
- } else {
+ list_add_tail(&mem->node, &cmdobj->memlist);
+ else {
/* set the preamble flag if directed to */
- if (cmdbatch->context->flags & KGSL_CONTEXT_PREAMBLE &&
- list_empty(&cmdbatch->cmdlist))
+ if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE &&
+ list_empty(&cmdobj->cmdlist))
mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE;
/* add to the cmd list */
- list_add_tail(&mem->node, &cmdbatch->cmdlist);
+ list_add_tail(&mem->node, &cmdobj->cmdlist);
}
return 0;
}
+static inline int drawobj_init(struct kgsl_device *device,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj,
+ unsigned int type)
+{
+ /*
+ * Increase the reference count on the context so it doesn't disappear
+ * during the lifetime of this object
+ */
+ if (!_kgsl_context_get(context))
+ return -ENOENT;
+
+ kref_init(&drawobj->refcount);
+
+ drawobj->device = device;
+ drawobj->context = context;
+ drawobj->type = type;
+
+ return 0;
+}
+
/**
- * kgsl_cmdbatch_create() - Create a new cmdbatch structure
+ * kgsl_drawobj_sync_create() - Create a new sync obj
+ * structure
* @device: Pointer to a KGSL device struct
* @context: Pointer to a KGSL context struct
- * @flags: Flags for the cmdbatch
*
- * Allocate an new cmdbatch structure
+ * Allocate a new kgsl_drawobj_sync structure
*/
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int flags)
+struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
+ struct kgsl_context *context)
{
- struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
- if (cmdbatch == NULL)
+ struct kgsl_drawobj_sync *syncobj = kzalloc(sizeof(*syncobj),
+ GFP_KERNEL);
+ if (syncobj == NULL)
return ERR_PTR(-ENOMEM);
- /*
- * Increase the reference count on the context so it doesn't disappear
- * during the lifetime of this command batch
- */
+ if (drawobj_init(device, context, DRAWOBJ(syncobj), SYNCOBJ_TYPE)) {
+ kfree(syncobj);
+ return ERR_PTR(-ENOENT);
+ }
+
+ /* Add a timer to help debug sync deadlocks */
+ setup_timer(&syncobj->timer, syncobj_timer, (unsigned long) syncobj);
+
+ return syncobj;
+}
+
+/**
+ * kgsl_drawobj_cmd_create() - Create a new command obj
+ * structure
+ * @device: Pointer to a KGSL device struct
+ * @context: Pointer to a KGSL context struct
+ * @flags: Flags for the command obj
+ * @type: Type of the cmdobj (MARKER or CMD)
+ *
+ * Allocate a new kgsl_drawobj_cmd structure
+ */
+struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags,
+ unsigned int type)
+{
+ struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
+ struct kgsl_drawobj *drawobj;
+
+ if (cmdobj == NULL)
+ return ERR_PTR(-ENOMEM);
- if (!_kgsl_context_get(context)) {
- kfree(cmdbatch);
+ type &= CMDOBJ_TYPE | MARKEROBJ_TYPE;
+ if (type == 0) {
+ kfree(cmdobj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ drawobj = DRAWOBJ(cmdobj);
+
+ if (drawobj_init(device, context, drawobj, type)) {
+ kfree(cmdobj);
return ERR_PTR(-ENOENT);
}
- kref_init(&cmdbatch->refcount);
- INIT_LIST_HEAD(&cmdbatch->cmdlist);
- INIT_LIST_HEAD(&cmdbatch->memlist);
-
- cmdbatch->device = device;
- cmdbatch->context = context;
- /* sanitize our flags for cmdbatches */
- cmdbatch->flags = flags & (KGSL_CMDBATCH_CTX_SWITCH
- | KGSL_CMDBATCH_MARKER
- | KGSL_CMDBATCH_END_OF_FRAME
- | KGSL_CMDBATCH_SYNC
- | KGSL_CMDBATCH_PWR_CONSTRAINT
- | KGSL_CMDBATCH_MEMLIST
- | KGSL_CMDBATCH_PROFILING
- | KGSL_CMDBATCH_PROFILING_KTIME);
+ /* sanitize our flags for drawobjs */
+ drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
+ | KGSL_DRAWOBJ_MARKER
+ | KGSL_DRAWOBJ_END_OF_FRAME
+ | KGSL_DRAWOBJ_PWR_CONSTRAINT
+ | KGSL_DRAWOBJ_MEMLIST
+ | KGSL_DRAWOBJ_PROFILING
+ | KGSL_DRAWOBJ_PROFILING_KTIME);
- /* Add a timer to help debug sync deadlocks */
- setup_timer(&cmdbatch->timer, _kgsl_cmdbatch_timer,
- (unsigned long) cmdbatch);
+ INIT_LIST_HEAD(&cmdobj->cmdlist);
+ INIT_LIST_HEAD(&cmdobj->memlist);
- return cmdbatch;
+ return cmdobj;
}
#ifdef CONFIG_COMPAT
static int add_ibdesc_list_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
int i, ret = 0;
struct kgsl_ibdesc_compat ibdesc32;
@@ -654,7 +725,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device,
ibdesc.sizedwords = (size_t) ibdesc32.sizedwords;
ibdesc.ctrl = (unsigned int) ibdesc32.ctrl;
- ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
if (ret)
break;
@@ -665,7 +736,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device,
}
static int add_syncpoints_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
struct kgsl_cmd_syncpoint_compat sync32;
struct kgsl_cmd_syncpoint sync;
@@ -683,7 +754,7 @@ static int add_syncpoints_compat(struct kgsl_device *device,
sync.priv = compat_ptr(sync32.priv);
sync.size = (size_t) sync32.size;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
break;
@@ -694,26 +765,54 @@ static int add_syncpoints_compat(struct kgsl_device *device,
}
#else
static int add_ibdesc_list_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
return -EINVAL;
}
static int add_syncpoints_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
return -EINVAL;
}
#endif
-int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+/* Returns:
+ * -EINVAL: Bad data
+ * 0: All data fields are empty (nothing to do)
+ * 1: All list information is valid
+ */
+static int _verify_input_list(unsigned int count, void __user *ptr,
+ unsigned int size)
+{
+ /* Return early if nothing going on */
+ if (count == 0 && ptr == NULL && size == 0)
+ return 0;
+
+ /* Sanity check inputs */
+ if (count == 0 || ptr == NULL || size == 0)
+ return -EINVAL;
+
+ return 1;
+}
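+
+/*
+ * A minimal sketch (illustrative only) of how the tri-state return above
+ * is consumed by the list helpers below: the cmdlist/memlist callers
+ * forward a zero return as "nothing to do", while the ibdesc-list and
+ * synclist callers treat an empty list as -EINVAL:
+ *
+ *	ret = _verify_input_list(count, ptr, size);
+ *	if (ret <= 0)
+ *		return ret;
+ *
+ * with ret == 1 meaning count/ptr/size describe a readable user list.
+ */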
+
+int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
struct kgsl_ibdesc ibdesc;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
int i, ret;
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
+ return 0;
+
+ ret = _verify_input_list(count, ptr, sizeof(ibdesc));
+ if (ret <= 0)
+ return -EINVAL;
+
if (is_compat_task())
- return add_ibdesc_list_compat(device, cmdbatch, ptr, count);
+ return add_ibdesc_list_compat(device, cmdobj, ptr, count);
for (i = 0; i < count; i++) {
memset(&ibdesc, 0, sizeof(ibdesc));
@@ -721,7 +820,7 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc)))
return -EFAULT;
- ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
if (ret)
return ret;
@@ -731,8 +830,8 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
struct kgsl_cmd_syncpoint sync;
int i, ret;
@@ -740,17 +839,14 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
if (count == 0)
return 0;
- if (count > KGSL_MAX_SYNCPOINTS)
- return -EINVAL;
-
- cmdbatch->synclist = kcalloc(count,
- sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL);
+ syncobj->synclist = kcalloc(count,
+ sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);
- if (cmdbatch->synclist == NULL)
+ if (syncobj->synclist == NULL)
return -ENOMEM;
if (is_compat_task())
- return add_syncpoints_compat(device, cmdbatch, ptr, count);
+ return add_syncpoints_compat(device, syncobj, ptr, count);
for (i = 0; i < count; i++) {
memset(&sync, 0, sizeof(sync));
@@ -758,7 +854,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
if (copy_from_user(&sync, ptr, sizeof(sync)))
return -EFAULT;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
return ret;
@@ -768,7 +864,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
return 0;
}
-static int kgsl_cmdbatch_add_object(struct list_head *head,
+static int drawobj_add_object(struct list_head *head,
struct kgsl_command_object *obj)
{
struct kgsl_memobj_node *mem;
@@ -793,24 +889,22 @@ static int kgsl_cmdbatch_add_object(struct list_head *head,
KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
KGSL_CMDLIST_IB_PREAMBLE)
-int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+/* This can only accept MARKEROBJ_TYPE and CMDOBJ_TYPE */
+int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_object obj;
- int i, ret = 0;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
+ int i, ret;
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
return 0;
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
-
- /* Ignore all if SYNC or MARKER is specified */
- if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))
- return 0;
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
+ return ret;
for (i = 0; i < count; i++) {
memset(&obj, 0, sizeof(obj));
@@ -823,12 +917,12 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
if (!(obj.flags & CMDLIST_FLAGS)) {
KGSL_DRV_ERR(device,
"invalid cmdobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
- cmdbatch->context->id, obj.flags, obj.id,
+ baseobj->context->id, obj.flags, obj.id,
obj.offset, obj.gpuaddr, obj.size);
return -EINVAL;
}
- ret = kgsl_cmdbatch_add_object(&cmdbatch->cmdlist, &obj);
+ ret = drawobj_add_object(&cmdobj->cmdlist, &obj);
if (ret)
return ret;
@@ -838,20 +932,21 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_object obj;
- int i, ret = 0;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
+ int i, ret;
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
return 0;
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
+ return ret;
for (i = 0; i < count; i++) {
memset(&obj, 0, sizeof(obj));
@@ -863,17 +958,16 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) {
KGSL_DRV_ERR(device,
"invalid memobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
- cmdbatch->context->id, obj.flags, obj.id,
- obj.offset, obj.gpuaddr, obj.size);
+ DRAWOBJ(cmdobj)->context->id, obj.flags,
+ obj.id, obj.offset, obj.gpuaddr, obj.size);
return -EINVAL;
}
if (obj.flags & KGSL_OBJLIST_PROFILE)
- add_profiling_buffer(device, cmdbatch, obj.gpuaddr,
+ add_profiling_buffer(device, cmdobj, obj.gpuaddr,
obj.size, obj.id, obj.offset);
else {
- ret = kgsl_cmdbatch_add_object(&cmdbatch->memlist,
- &obj);
+ ret = drawobj_add_object(&cmdobj->memlist, &obj);
if (ret)
return ret;
}
@@ -884,29 +978,23 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_syncpoint syncpoint;
struct kgsl_cmd_syncpoint sync;
- int i, ret = 0;
-
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
- return 0;
-
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
+ int i, ret;
- if (count > KGSL_MAX_SYNCPOINTS)
+ /* When creating a sync obj, missing or malformed input data is an error */
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
return -EINVAL;
- cmdbatch->synclist = kcalloc(count,
- sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL);
+ syncobj->synclist = kcalloc(count,
+ sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);
- if (cmdbatch->synclist == NULL)
+ if (syncobj->synclist == NULL)
return -ENOMEM;
for (i = 0; i < count; i++) {
@@ -920,7 +1008,7 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
sync.priv = to_user_ptr(syncpoint.priv);
sync.size = syncpoint.size;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
return ret;
@@ -930,13 +1018,13 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
return 0;
}
-void kgsl_cmdbatch_exit(void)
+void kgsl_drawobj_exit(void)
{
if (memobjs_cache != NULL)
kmem_cache_destroy(memobjs_cache);
}
-int kgsl_cmdbatch_init(void)
+int kgsl_drawobj_init(void)
{
memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
if (memobjs_cache == NULL) {
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
new file mode 100644
index 000000000000..89ed944c539a
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -0,0 +1,198 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __KGSL_DRAWOBJ_H
+#define __KGSL_DRAWOBJ_H
+
+#define DRAWOBJ(obj) (&obj->base)
+#define SYNCOBJ(obj) \
+ container_of(obj, struct kgsl_drawobj_sync, base)
+#define CMDOBJ(obj) \
+ container_of(obj, struct kgsl_drawobj_cmd, base)
+
+#define CMDOBJ_TYPE BIT(0)
+#define MARKEROBJ_TYPE BIT(1)
+#define SYNCOBJ_TYPE BIT(2)
+
+/**
+ * struct kgsl_drawobj - KGSL drawobj descriptor
+ * @device: KGSL GPU device that the command was created for
+ * @context: KGSL context that created the command
+ * @type: Object type
+ * @timestamp: Timestamp assigned to the command
+ * @flags: KGSL_DRAWOBJ_* flags for the object
+ * @refcount: kref structure to maintain the reference count
+ */
+struct kgsl_drawobj {
+ struct kgsl_device *device;
+ struct kgsl_context *context;
+ uint32_t type;
+ uint32_t timestamp;
+ unsigned long flags;
+ struct kref refcount;
+};
+
+/**
+ * struct kgsl_drawobj_cmd - KGSL command obj. This also covers markers,
+ * since a marker is a special form of command that does not need its
+ * commands executed.
+ * @base: Base kgsl_drawobj
+ * @priv: Internal flags
+ * @global_ts: The ringbuffer timestamp corresponding to this
+ * command obj
+ * @fault_policy: Internal policy describing how to handle this command in case
+ * of a fault
+ * @fault_recovery: Recovery actions actually tried for this command obj
+ * @cmdlist: List of IBs to issue
+ * @memlist: List of all memory used in this command obj
+ * @marker_timestamp: For markers, the timestamp of the last "real" command that
+ * was queued
+ * @profiling_buf_entry: Mem entry containing the profiling buffer
+ * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here
+ * for easy access
+ * @profile_index: Index to store the start/stop ticks in the kernel profiling
+ * buffer
+ * @submit_ticks: Variable to hold ticks at the time of
+ * command obj submit.
+ */
+struct kgsl_drawobj_cmd {
+ struct kgsl_drawobj base;
+ unsigned long priv;
+ unsigned int global_ts;
+ unsigned long fault_policy;
+ unsigned long fault_recovery;
+ struct list_head cmdlist;
+ struct list_head memlist;
+ unsigned int marker_timestamp;
+ struct kgsl_mem_entry *profiling_buf_entry;
+ uint64_t profiling_buffer_gpuaddr;
+ unsigned int profile_index;
+ uint64_t submit_ticks;
+};
+
+/**
+ * struct kgsl_drawobj_sync - KGSL sync object
+ * @base: Base kgsl_drawobj, this needs to be the first entry
+ * @synclist: Array of context/timestamp tuples to wait for before issuing
+ * @numsyncs: Number of sync entries in the array
+ * @pending: Bitmask of sync events that are active
+ * @timer: a timer used to track possible sync timeouts for this
+ * sync obj
+ * @timeout_jiffies: For a sync obj the jiffies at
+ * which the timer will expire
+ */
+struct kgsl_drawobj_sync {
+ struct kgsl_drawobj base;
+ struct kgsl_drawobj_sync_event *synclist;
+ unsigned int numsyncs;
+ unsigned long pending;
+ struct timer_list timer;
+ unsigned long timeout_jiffies;
+};
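+
+/*
+ * The base-first layout above is what makes the SYNCOBJ()/CMDOBJ()
+ * container_of() macros safe; an illustrative downcast, assuming the
+ * caller checks ->type first:
+ *
+ *	if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
+ *		cmdobj = CMDOBJ(drawobj);
+ *	else if (drawobj->type & SYNCOBJ_TYPE)
+ *		syncobj = SYNCOBJ(drawobj);
+ */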
+
+/**
+ * struct kgsl_drawobj_sync_event - KGSL sync event
+ * @id: Identifier (position within the pending bitmap)
+ * @type: Syncpoint type
+ * @syncobj: Pointer to the syncobj that owns the sync event
+ * @context: KGSL context for whose timestamp we want to
+ * register this event
+ * @timestamp: Pending timestamp for the event
+ * @handle: Pointer to a sync fence handle
+ * @device: Pointer to the KGSL device
+ */
+struct kgsl_drawobj_sync_event {
+ unsigned int id;
+ int type;
+ struct kgsl_drawobj_sync *syncobj;
+ struct kgsl_context *context;
+ unsigned int timestamp;
+ struct kgsl_sync_fence_waiter *handle;
+ struct kgsl_device *device;
+};
+
+#define KGSL_DRAWOBJ_FLAGS \
+ { KGSL_DRAWOBJ_MARKER, "MARKER" }, \
+ { KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \
+ { KGSL_DRAWOBJ_SYNC, "SYNC" }, \
+ { KGSL_DRAWOBJ_END_OF_FRAME, "EOF" }, \
+ { KGSL_DRAWOBJ_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
+ { KGSL_DRAWOBJ_SUBMIT_IB_LIST, "IB_LIST" }
+
+/**
+ * enum kgsl_drawobj_cmd_priv - Internal command obj flags
+ * @CMDOBJ_SKIP - skip the entire command obj
+ * @CMDOBJ_FORCE_PREAMBLE - Force the preamble on for
+ * command obj
+ * @CMDOBJ_WFI - Force wait-for-idle for the submission
+ * @CMDOBJ_PROFILE - store the start / retire ticks for
+ * the command obj in the profiling buffer
+ */
+enum kgsl_drawobj_cmd_priv {
+ CMDOBJ_SKIP = 0,
+ CMDOBJ_FORCE_PREAMBLE,
+ CMDOBJ_WFI,
+ CMDOBJ_PROFILE,
+};
+
+struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags,
+ unsigned int type);
+int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc);
+int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count);
+int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+
+struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
+ struct kgsl_context *context);
+int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
+ int count);
+int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_cmd_syncpoint *sync);
+
+int kgsl_drawobj_init(void);
+void kgsl_drawobj_exit(void);
+
+void kgsl_dump_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj);
+
+void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj);
+
+static inline bool kgsl_drawobj_events_pending(
+ struct kgsl_drawobj_sync *syncobj)
+{
+ return !bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS);
+}
+
+static inline bool kgsl_drawobj_event_pending(
+ struct kgsl_drawobj_sync *syncobj, unsigned int bit)
+{
+ if (bit >= KGSL_MAX_SYNCPOINTS)
+ return false;
+
+ return test_bit(bit, &syncobj->pending);
+}
+#endif /* __KGSL_DRAWOBJ_H */
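+
+/*
+ * A rough end-to-end sketch, assuming a valid device and context, of how
+ * the declarations above compose (error handling elided; the ioctl layer
+ * in kgsl.c is the real caller):
+ *
+ *	struct kgsl_drawobj_cmd *cmdobj =
+ *		kgsl_drawobj_cmd_create(device, context, flags, CMDOBJ_TYPE);
+ *
+ *	kgsl_drawobj_cmd_add_cmdlist(device, cmdobj, ptr, size, count);
+ *	kgsl_drawobj_cmd_add_memlist(device, cmdobj, mptr, msize, mcount);
+ *	...
+ *	kgsl_drawobj_destroy(DRAWOBJ(cmdobj));
+ */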
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 71b6086423d6..9f35a3197a4c 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1118,7 +1118,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
unsigned int cb_num;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
@@ -1128,9 +1127,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
if (IS_ERR(iommu_pt))
return PTR_ERR(iommu_pt);
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
if (kgsl_mmu_is_perprocess(mmu)) {
ret = iommu_domain_set_attr(iommu_pt->domain,
DOMAIN_ATTR_PROCID, &pt->name);
@@ -1189,7 +1185,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
int secure_vmid = VMID_CP_PIXEL;
unsigned int cb_num;
@@ -1207,9 +1202,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
if (IS_ERR(iommu_pt))
return PTR_ERR(iommu_pt);
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
ret = iommu_domain_set_attr(iommu_pt->domain,
DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
if (ret) {
@@ -1251,7 +1243,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
int dynamic = 1;
unsigned int cb_num = ctx->cb_num;
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
@@ -1278,9 +1269,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
goto done;
}
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
ret = _attach_pt(iommu_pt, ctx);
if (ret)
goto done;
@@ -2492,7 +2480,6 @@ static const struct {
{ "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
{ "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
{ "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
- { "qcom,coherent-htw", KGSL_MMU_COHERENT_HTW },
};
static int _kgsl_iommu_probe(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 4371c9a1b87e..ba564b2851f9 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -450,24 +450,23 @@ int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
+ int ret = 0;
+
if (memdesc->size == 0)
return -EINVAL;
if (PT_OP_VALID(pagetable, mmu_unmap)) {
- int ret;
uint64_t size;
size = kgsl_memdesc_footprint(memdesc);
ret = pagetable->pt_ops->mmu_unmap(pagetable, memdesc);
- if (ret)
- return ret;
atomic_dec(&pagetable->stats.entries);
atomic_long_sub(size, &pagetable->stats.mapped);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index acbc0e784cf2..3e32c25b3dbe 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -130,8 +130,6 @@ struct kgsl_mmu_pt_ops {
#define KGSL_MMU_FORCE_32BIT BIT(5)
/* 64 bit address is live */
#define KGSL_MMU_64BIT BIT(6)
-/* MMU can do coherent hardware table walks */
-#define KGSL_MMU_COHERENT_HTW BIT(7)
/* The MMU supports non-contigious pages */
#define KGSL_MMU_PAGED BIT(8)
/* The device requires a guard page */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index ea760d9198ee..d71c6a63f2d3 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/msm_adreno_devfreq.h>
#include <linux/of_device.h>
+#include <linux/thermal.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
@@ -590,22 +591,10 @@ static ssize_t kgsl_pwrctrl_max_pwrlevel_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", pwr->max_pwrlevel);
}
-static ssize_t kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{ struct kgsl_device *device = kgsl_device_from_dev(dev);
- struct kgsl_pwrctrl *pwr;
- int ret;
- unsigned int level = 0;
-
- if (device == NULL)
- return 0;
-
- pwr = &device->pwrctrl;
-
- ret = kgsl_sysfs_store(buf, &level);
- if (ret)
- return ret;
+static void kgsl_pwrctrl_min_pwrlevel_set(struct kgsl_device *device,
+ int level)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
mutex_lock(&device->mutex);
if (level > pwr->num_pwrlevels - 2)
@@ -621,6 +610,24 @@ static ssize_t kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel);
mutex_unlock(&device->mutex);
+}
+
+static ssize_t kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ int ret;
+ unsigned int level = 0;
+
+ if (device == NULL)
+ return 0;
+
+ ret = kgsl_sysfs_store(buf, &level);
+ if (ret)
+ return ret;
+
+ kgsl_pwrctrl_min_pwrlevel_set(device, level);
return count;
}
@@ -664,24 +671,13 @@ static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
return -ERANGE;
}
-static ssize_t kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static void kgsl_pwrctrl_max_clock_set(struct kgsl_device *device, int val)
{
- struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
- unsigned int val = 0;
- int level, ret;
-
- if (device == NULL)
- return 0;
+ int level;
pwr = &device->pwrctrl;
- ret = kgsl_sysfs_store(buf, &val);
- if (ret)
- return ret;
-
mutex_lock(&device->mutex);
level = _get_nearest_pwrlevel(pwr, val);
/* If the requested power level is not supported by hw, try cycling */
@@ -715,21 +711,37 @@ static ssize_t kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
if (pwr->sysfs_pwr_limit)
kgsl_pwr_limits_set_freq(pwr->sysfs_pwr_limit,
pwr->pwrlevels[level].gpu_freq);
- return count;
+ return;
err:
mutex_unlock(&device->mutex);
- return count;
}
-static ssize_t kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
-
struct kgsl_device *device = kgsl_device_from_dev(dev);
+ unsigned int val = 0;
+ int ret;
+
+ if (device == NULL)
+ return 0;
+
+ ret = kgsl_sysfs_store(buf, &val);
+ if (ret)
+ return ret;
+
+ kgsl_pwrctrl_max_clock_set(device, val);
+
+ return count;
+}
+
+static unsigned int kgsl_pwrctrl_max_clock_get(struct kgsl_device *device)
+{
struct kgsl_pwrctrl *pwr;
unsigned int freq;
+
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
@@ -743,7 +755,17 @@ static ssize_t kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
(TH_HZ - pwr->thermal_timeout) * (hfreq / TH_HZ);
}
- return snprintf(buf, PAGE_SIZE, "%d\n", freq);
+ return freq;
+}
+
+static ssize_t kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ kgsl_pwrctrl_max_clock_get(device));
}
static ssize_t kgsl_pwrctrl_gpuclk_store(struct device *dev,
@@ -903,9 +925,14 @@ static ssize_t kgsl_pwrctrl_gpu_available_frequencies_show(
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
- for (index = 0; index < pwr->num_pwrlevels - 1; index++)
- num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
- pwr->pwrlevels[index].gpu_freq);
+ for (index = 0; index < pwr->num_pwrlevels - 1; index++) {
+ num_chars += scnprintf(buf + num_chars,
+ PAGE_SIZE - num_chars - 1,
+ "%d ", pwr->pwrlevels[index].gpu_freq);
+ /* One space for trailing null and another for the newline */
+ if (num_chars >= PAGE_SIZE - 2)
+ break;
+ }
buf[num_chars++] = '\n';
return num_chars;
}
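+/*
+ * The scnprintf() conversion above is the standard bounded-write pattern
+ * for sysfs show() handlers; a generic sketch, with 'items'/'nitems'
+ * standing in for the frequency table:
+ *
+ *	for (i = 0; i < nitems; i++) {
+ *		n += scnprintf(buf + n, PAGE_SIZE - n - 1, "%d ", items[i]);
+ *		if (n >= PAGE_SIZE - 2)
+ *			break;
+ *	}
+ *	buf[n++] = '\n';
+ */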
@@ -1171,6 +1198,195 @@ static ssize_t kgsl_popp_show(struct device *dev,
test_bit(POPP_ON, &device->pwrscale.popp_state));
}
+static ssize_t kgsl_pwrctrl_gpu_model_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ char model_str[32] = {0};
+
+ if (device == NULL)
+ return 0;
+
+ device->ftbl->gpu_model(device, model_str, sizeof(model_str));
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", model_str);
+}
+
+static ssize_t kgsl_pwrctrl_gpu_busy_percentage_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_clk_stats *stats;
+ unsigned int busy_percent = 0;
+
+ if (device == NULL)
+ return 0;
+ stats = &device->pwrctrl.clk_stats;
+
+ if (stats->total_old != 0)
+ busy_percent = (stats->busy_old * 100) / stats->total_old;
+
+ ret = snprintf(buf, PAGE_SIZE, "%d %%\n", busy_percent);
+
+ /* Reset the stats if GPU is OFF */
+ if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
+ stats->busy_old = 0;
+ stats->total_old = 0;
+ }
+ return ret;
+}
+
+static ssize_t kgsl_pwrctrl_min_clock_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pwr->pwrlevels[pwr->min_pwrlevel].gpu_freq / 1000000);
+}
+
+static ssize_t kgsl_pwrctrl_min_clock_mhz_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ int level, ret;
+ unsigned int freq;
+ struct kgsl_pwrctrl *pwr;
+
+ if (device == NULL)
+ return 0;
+
+ pwr = &device->pwrctrl;
+
+ ret = kgsl_sysfs_store(buf, &freq);
+ if (ret)
+ return ret;
+
+ freq *= 1000000;
+ level = _get_nearest_pwrlevel(pwr, freq);
+
+ if (level >= 0)
+ kgsl_pwrctrl_min_pwrlevel_set(device, level);
+
+ return count;
+}
+
+static ssize_t kgsl_pwrctrl_max_clock_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ unsigned int freq;
+
+ if (device == NULL)
+ return 0;
+
+ freq = kgsl_pwrctrl_max_clock_get(device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", freq / 1000000);
+}
+
+static ssize_t kgsl_pwrctrl_max_clock_mhz_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ unsigned int val = 0;
+ int ret;
+
+ if (device == NULL)
+ return 0;
+
+ ret = kgsl_sysfs_store(buf, &val);
+ if (ret)
+ return ret;
+
+ val *= 1000000;
+ kgsl_pwrctrl_max_clock_set(device, val);
+
+ return count;
+}
+
+static ssize_t kgsl_pwrctrl_clock_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+
+ if (device == NULL)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n",
+ kgsl_pwrctrl_active_freq(&device->pwrctrl) / 1000000);
+}
+
+static ssize_t kgsl_pwrctrl_freq_table_mhz_show(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int index, num_chars = 0;
+
+ if (device == NULL)
+ return 0;
+
+ pwr = &device->pwrctrl;
+ for (index = 0; index < pwr->num_pwrlevels - 1; index++) {
+ num_chars += scnprintf(buf + num_chars,
+ PAGE_SIZE - num_chars - 1,
+ "%d ", pwr->pwrlevels[index].gpu_freq / 1000000);
+ /* One space for trailing null and another for the newline */
+ if (num_chars >= PAGE_SIZE - 2)
+ break;
+ }
+
+ buf[num_chars++] = '\n';
+
+ return num_chars;
+}
+
+static ssize_t kgsl_pwrctrl_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int ret, id = 0, temperature = 0;
+
+ if (device == NULL)
+ goto done;
+
+ pwr = &device->pwrctrl;
+
+ if (!pwr->tsens_name)
+ goto done;
+
+ id = sensor_get_id((char *)pwr->tsens_name);
+ if (id < 0)
+ goto done;
+
+ ret = sensor_get_temp(id, &temperature);
+ if (ret)
+ goto done;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ temperature);
+done:
+ return 0;
+}
+
static DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show,
kgsl_pwrctrl_gpuclk_store);
static DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
@@ -1222,6 +1438,17 @@ static DEVICE_ATTR(popp, 0644, kgsl_popp_show, kgsl_popp_store);
static DEVICE_ATTR(force_no_nap, 0644,
kgsl_pwrctrl_force_no_nap_show,
kgsl_pwrctrl_force_no_nap_store);
+static DEVICE_ATTR(gpu_model, 0444, kgsl_pwrctrl_gpu_model_show, NULL);
+static DEVICE_ATTR(gpu_busy_percentage, 0444,
+ kgsl_pwrctrl_gpu_busy_percentage_show, NULL);
+static DEVICE_ATTR(min_clock_mhz, 0644, kgsl_pwrctrl_min_clock_mhz_show,
+ kgsl_pwrctrl_min_clock_mhz_store);
+static DEVICE_ATTR(max_clock_mhz, 0644, kgsl_pwrctrl_max_clock_mhz_show,
+ kgsl_pwrctrl_max_clock_mhz_store);
+static DEVICE_ATTR(clock_mhz, 0444, kgsl_pwrctrl_clock_mhz_show, NULL);
+static DEVICE_ATTR(freq_table_mhz, 0444,
+ kgsl_pwrctrl_freq_table_mhz_show, NULL);
+static DEVICE_ATTR(temp, 0444, kgsl_pwrctrl_temp_show, NULL);
static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
@@ -1243,12 +1470,50 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_bus_split,
&dev_attr_default_pwrlevel,
&dev_attr_popp,
+ &dev_attr_gpu_model,
+ &dev_attr_gpu_busy_percentage,
+ &dev_attr_min_clock_mhz,
+ &dev_attr_max_clock_mhz,
+ &dev_attr_clock_mhz,
+ &dev_attr_freq_table_mhz,
+ &dev_attr_temp,
NULL
};
+struct sysfs_link {
+ const char *src;
+ const char *dst;
+};
+
+static struct sysfs_link link_names[] = {
+ { "gpu_model", "gpu_model",},
+ { "gpu_busy_percentage", "gpu_busy",},
+ { "min_clock_mhz", "gpu_min_clock",},
+ { "max_clock_mhz", "gpu_max_clock",},
+ { "clock_mhz", "gpu_clock",},
+ { "freq_table_mhz", "gpu_freq_table",},
+ { "temp", "gpu_tmu",},
+};
+
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
- return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
+ int i, ret;
+
+ ret = kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
+ if (ret)
+ return ret;
+
+ device->gpu_sysfs_kobj = kobject_create_and_add("gpu", kernel_kobj);
+ if (IS_ERR_OR_NULL(device->gpu_sysfs_kobj))
+ return (device->gpu_sysfs_kobj == NULL) ?
+ -ENOMEM : PTR_ERR(device->gpu_sysfs_kobj);
+
+ for (i = 0; i < ARRAY_SIZE(link_names); i++)
+ kgsl_gpu_sysfs_add_link(device->gpu_sysfs_kobj,
+ &device->dev->kobj, link_names[i].src,
+ link_names[i].dst);
+
+ return 0;
}
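+
+/*
+ * With the table above, each device attribute should also be reachable
+ * under the global "gpu" kobject, e.g. (assuming kgsl_gpu_sysfs_add_link,
+ * defined elsewhere in kgsl, creates a symlink named dst pointing at src):
+ *
+ *	$ cat /sys/kernel/gpu/gpu_min_clock
+ */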
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
@@ -1655,6 +1920,10 @@ static int _get_clocks(struct kgsl_device *device)
if (!strcmp(name, "isense_clk"))
pwr->isense_clk_indx = i;
+
+ if (device->ftbl->clk_set_options)
+ device->ftbl->clk_set_options(device, name,
+ pwr->grp_clks[i]);
break;
}
}
@@ -1856,6 +2125,10 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
kgsl_pwrctrl_vbif_init();
+ /* temperature sensor name */
+ of_property_read_string(pdev->dev.of_node, "qcom,tsens-name",
+ &pwr->tsens_name);
+
return result;
}
@@ -1909,20 +2182,38 @@ void kgsl_idle_check(struct work_struct *work)
{
struct kgsl_device *device = container_of(work, struct kgsl_device,
idle_check_ws);
+ int ret = 0;
+ unsigned int requested_state;
+
WARN_ON(device == NULL);
if (device == NULL)
return;
mutex_lock(&device->mutex);
+ requested_state = device->requested_state;
+
if (device->state == KGSL_STATE_ACTIVE
|| device->state == KGSL_STATE_NAP) {
- if (!atomic_read(&device->active_cnt))
- kgsl_pwrctrl_change_state(device,
+ if (!atomic_read(&device->active_cnt)) {
+ ret = kgsl_pwrctrl_change_state(device,
device->requested_state);
+ if (ret == -EBUSY) {
+ /*
+ * If the GPU is currently busy, restore
+ * the requested state and reschedule
+ * idle work.
+ */
+ kgsl_pwrctrl_request_state(device,
+ requested_state);
+ kgsl_schedule_work(&device->idle_check_ws);
+ }
+ }
+
+ if (!ret)
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
if (device->state == KGSL_STATE_ACTIVE)
mod_timer(&device->idle_timer,
jiffies +
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index ae21a274fada..2de42d87bcbe 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -152,6 +152,7 @@ struct kgsl_regulator {
* @sysfs_pwr_limit - pointer to the sysfs limits node
* isense_clk_indx - index of isense clock, 0 if no isense
* isense_clk_on_level - isense clock rate is XO rate below this level.
+ * tsens_name - pointer to temperature sensor name of GPU temperature sensor
*/
struct kgsl_pwrctrl {
@@ -204,6 +205,7 @@ struct kgsl_pwrctrl {
struct kgsl_pwr_limit *sysfs_pwr_limit;
unsigned int gpu_bimc_int_clk_freq;
bool gpu_bimc_interface_enabled;
+ const char *tsens_name;
};
int kgsl_pwrctrl_init(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index d90aec42f30a..01d3b74c16fd 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -910,6 +910,14 @@ int kgsl_pwrscale_init(struct device *dev, const char *governor)
pwrscale->history[i].type = i;
}
+ /* Add links to the devfreq sysfs nodes */
+ kgsl_gpu_sysfs_add_link(device->gpu_sysfs_kobj,
+ &pwrscale->devfreqptr->dev.kobj, "governor",
+ "gpu_governor");
+ kgsl_gpu_sysfs_add_link(device->gpu_sysfs_kobj,
+ &pwrscale->devfreqptr->dev.kobj,
+ "available_governors", "gpu_available_governor");
+
return 0;
}
EXPORT_SYMBOL(kgsl_pwrscale_init);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 73edc3f7e146..72895c18119f 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -807,8 +807,16 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
return;
if (memdesc->gpuaddr) {
- kgsl_mmu_unmap(memdesc->pagetable, memdesc);
- kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
+ int ret = 0;
+
+ ret = kgsl_mmu_unmap(memdesc->pagetable, memdesc);
+ /*
+ * Do not free the gpuaddr/size if unmap fails. If we try to map
+ * this range again later, the IOMMU driver will BUG_ON() because
+ * it will treat the stale entry as an existing mapping being
+ * overwritten.
+ */
+ if (ret == 0)
+ kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
}
if (memdesc->ops && memdesc->ops->free)
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index dd004f9588e9..a2e4a909062f 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -100,8 +100,8 @@ static u8 *_ctxtptr;
static int snapshot_context_info(int id, void *ptr, void *data)
{
- struct kgsl_snapshot_linux_context *header =
- (struct kgsl_snapshot_linux_context *)_ctxtptr;
+ struct kgsl_snapshot_linux_context_v2 *header =
+ (struct kgsl_snapshot_linux_context_v2 *)_ctxtptr;
struct kgsl_context *context = ptr;
struct kgsl_device *device;
@@ -115,10 +115,12 @@ static int snapshot_context_info(int id, void *ptr, void *data)
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
&header->timestamp_queued);
+ kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED,
+ &header->timestamp_consumed);
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
&header->timestamp_retired);
- _ctxtptr += sizeof(struct kgsl_snapshot_linux_context);
+ _ctxtptr += sizeof(struct kgsl_snapshot_linux_context_v2);
return 0;
}
@@ -127,11 +129,11 @@ static int snapshot_context_info(int id, void *ptr, void *data)
static size_t snapshot_os(struct kgsl_device *device,
u8 *buf, size_t remain, void *priv)
{
- struct kgsl_snapshot_linux *header = (struct kgsl_snapshot_linux *)buf;
+ struct kgsl_snapshot_linux_v2 *header =
+ (struct kgsl_snapshot_linux_v2 *)buf;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int ctxtcount = 0;
size_t size = sizeof(*header);
- u64 temp_ptbase;
struct kgsl_context *context;
/* Figure out how many active contexts there are - these will
@@ -141,7 +143,7 @@ static size_t snapshot_os(struct kgsl_device *device,
idr_for_each(&device->context_idr, snapshot_context_count, &ctxtcount);
read_unlock(&device->context_lock);
- size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context);
+ size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context_v2);
/* Make sure there is enough room for the data */
if (remain < size) {
@@ -151,9 +153,7 @@ static size_t snapshot_os(struct kgsl_device *device,
memset(header, 0, sizeof(*header));
- header->osid = KGSL_SNAPSHOT_OS_LINUX;
-
- header->state = SNAPSHOT_STATE_HUNG;
+ header->osid = KGSL_SNAPSHOT_OS_LINUX_V3;
/* Get the kernel build information */
strlcpy(header->release, utsname()->release, sizeof(header->release));
@@ -178,9 +178,8 @@ static size_t snapshot_os(struct kgsl_device *device,
context = kgsl_context_get(device, header->current_context);
/* Get the current PT base */
- temp_ptbase = kgsl_mmu_get_current_ttbr0(&device->mmu);
- /* Truncate to 32 bits in case LPAE is used */
- header->ptbase = (__u32)temp_ptbase;
+ header->ptbase = kgsl_mmu_get_current_ttbr0(&device->mmu);
+
/* And the PID for the task leader */
if (context) {
header->pid = context->tid;
@@ -811,6 +810,29 @@ static ssize_t faultcount_store(struct kgsl_device *device, const char *buf,
return count;
}
+/* Show the force_panic request status */
+static ssize_t force_panic_show(struct kgsl_device *device, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", device->force_panic);
+}
+
+/* Store the panic request value to force_panic */
+static ssize_t force_panic_store(struct kgsl_device *device, const char *buf,
+ size_t count)
+{
+ unsigned int val = 0;
+ int ret;
+
+ if (device && count > 0)
+ device->force_panic = 0;
+
+ ret = kgsl_sysfs_store(buf, &val);
+
+ if (!ret && device)
+ device->force_panic = (bool)val;
+
+ return (ssize_t) ret < 0 ? ret : count;
+}
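+
+/*
+ * A hypothetical debug flow for the new attribute (the sysfs path assumes
+ * the usual kgsl-3d0 device name): arm the panic, trigger a GPU fault,
+ * and the snapshot worker will BUG_ON() once the dump has been saved:
+ *
+ *	echo 1 > /sys/class/kgsl/kgsl-3d0/snapshot/force_panic
+ */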
/* Show the timestamp of the last collected snapshot */
static ssize_t timestamp_show(struct kgsl_device *device, char *buf)
{
@@ -836,6 +858,7 @@ struct kgsl_snapshot_attribute attr_##_name = { \
static SNAPSHOT_ATTR(timestamp, 0444, timestamp_show, NULL);
static SNAPSHOT_ATTR(faultcount, 0644, faultcount_show, faultcount_store);
+static SNAPSHOT_ATTR(force_panic, 0644, force_panic_show, force_panic_store);
static ssize_t snapshot_sysfs_show(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -915,6 +938,7 @@ int kgsl_device_snapshot_init(struct kgsl_device *device)
device->snapshot = NULL;
device->snapshot_faultcount = 0;
+ device->force_panic = 0;
ret = kobject_init_and_add(&device->snapshot_kobj, &ktype_snapshot,
&device->dev->kobj, "snapshot");
@@ -930,7 +954,11 @@ int kgsl_device_snapshot_init(struct kgsl_device *device)
goto done;
ret = sysfs_create_file(&device->snapshot_kobj, &attr_faultcount.attr);
+ if (ret)
+ goto done;
+ ret = sysfs_create_file(&device->snapshot_kobj,
+ &attr_force_panic.attr);
done:
return ret;
}
@@ -955,6 +983,7 @@ void kgsl_device_snapshot_close(struct kgsl_device *device)
device->snapshot_memory.ptr = NULL;
device->snapshot_memory.size = 0;
device->snapshot_faultcount = 0;
+ device->force_panic = 0;
}
EXPORT_SYMBOL(kgsl_device_snapshot_close);
@@ -982,7 +1011,8 @@ int kgsl_snapshot_add_ib_obj_list(struct kgsl_snapshot *snapshot,
return 0;
}
-static size_t _mempool_add_object(u8 *data, struct kgsl_snapshot_object *obj)
+static size_t _mempool_add_object(struct kgsl_snapshot *snapshot, u8 *data,
+ struct kgsl_snapshot_object *obj)
{
struct kgsl_snapshot_section_header *section =
(struct kgsl_snapshot_section_header *)data;
@@ -1008,6 +1038,14 @@ static size_t _mempool_add_object(u8 *data, struct kgsl_snapshot_object *obj)
kgsl_mmu_pagetable_get_ttbr0(obj->entry->priv->pagetable);
header->type = obj->type;
+ if (kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
+ snapshot->ib1base, snapshot->ib1size))
+ snapshot->ib1dumped = true;
+
+ if (kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
+ snapshot->ib2base, snapshot->ib2size))
+ snapshot->ib2dumped = true;
+
memcpy(dest, obj->entry->memdesc.hostptr + obj->offset, size);
kgsl_memdesc_unmap(&obj->entry->memdesc);
@@ -1024,6 +1062,7 @@ void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
{
struct kgsl_snapshot *snapshot = container_of(work,
struct kgsl_snapshot, work);
+ struct kgsl_device *device = kgsl_get_device(KGSL_DEVICE_3D0);
struct kgsl_snapshot_object *obj, *tmp;
size_t size = 0;
void *ptr;
@@ -1049,7 +1088,7 @@ void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
/* even if vmalloc fails, make sure we clean up the obj_list */
list_for_each_entry_safe(obj, tmp, &snapshot->obj_list, node) {
if (snapshot->mempool) {
- size_t ret = _mempool_add_object(ptr, obj);
+ size_t ret = _mempool_add_object(snapshot, ptr, obj);
ptr += ret;
snapshot->mempool_size += ret;
}
@@ -1064,6 +1103,16 @@ done:
kgsl_process_private_put(snapshot->process);
snapshot->process = NULL;
+ if (snapshot->ib1base && !snapshot->ib1dumped)
+ KGSL_DRV_ERR(device,
+ "snapshot: Active IB1:%016llx not dumped\n",
+ snapshot->ib1base);
+ else if (snapshot->ib2base && !snapshot->ib2dumped)
+ KGSL_DRV_ERR(device,
+ "snapshot: Active IB2:%016llx not dumped\n",
+ snapshot->ib2base);
+
complete_all(&snapshot->dump_gate);
+ BUG_ON(device->force_panic);
return;
}
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
index 8167ff83a18b..e2ded87b7431 100644
--- a/drivers/gpu/msm/kgsl_snapshot.h
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -63,12 +63,9 @@ struct kgsl_snapshot_section_header {
/* OS sub-section header */
#define KGSL_SNAPSHOT_OS_LINUX 0x0001
+#define KGSL_SNAPSHOT_OS_LINUX_V3 0x00000202
/* Linux OS specific information */
-
-#define SNAPSHOT_STATE_HUNG 0
-#define SNAPSHOT_STATE_RUNNING 1
-
struct kgsl_snapshot_linux {
int osid; /* subsection OS identifier */
int state; /* 1 if the thread is running, 0 for hung */
@@ -87,6 +84,23 @@ struct kgsl_snapshot_linux {
unsigned char comm[16]; /* Name of the process that owns the PT */
} __packed;
+struct kgsl_snapshot_linux_v2 {
+ int osid; /* subsection OS identifier */
+ __u32 seconds; /* Unix timestamp for the snapshot */
+ __u32 power_flags; /* Current power flags */
+ __u32 power_level; /* Current power level */
+ __u32 power_interval_timeout; /* Power interval timeout */
+ __u32 grpclk; /* Current GP clock value */
+ __u32 busclk; /* Current busclk value */
+ __u64 ptbase; /* Current ptbase */
+ __u32 pid; /* PID of the process that owns the PT */
+ __u32 current_context; /* ID of the current context */
+ __u32 ctxtcount; /* Number of contexts appended to section */
+ unsigned char release[32]; /* kernel release */
+ unsigned char version[32]; /* kernel version */
+ unsigned char comm[16]; /* Name of the process that owns the PT */
+} __packed;
+
/*
* This structure contains a record of an active context.
* These are appended one after another in the OS section below
@@ -99,6 +113,12 @@ struct kgsl_snapshot_linux_context {
__u32 timestamp_retired; /* The last timestamp retired by HW */
};
+struct kgsl_snapshot_linux_context_v2 {
+ __u32 id; /* The context ID */
+ __u32 timestamp_queued; /* The last queued timestamp */
+ __u32 timestamp_consumed; /* The last timestamp consumed by HW */
+ __u32 timestamp_retired; /* The last timestamp retired by HW */
+};
/* Ringbuffer sub-section header */
struct kgsl_snapshot_rb {
int start; /* dword at the start of the dump */
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 4ef9f80177d6..6438c6e65b97 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -36,14 +36,13 @@ TRACE_EVENT(kgsl_issueibcmds,
TP_PROTO(struct kgsl_device *device,
int drawctxt_id,
- struct kgsl_cmdbatch *cmdbatch,
unsigned int numibs,
int timestamp,
int flags,
int result,
unsigned int type),
- TP_ARGS(device, drawctxt_id, cmdbatch, numibs, timestamp,
+ TP_ARGS(device, drawctxt_id, numibs, timestamp,
flags, result, type),
TP_STRUCT__entry(
@@ -74,7 +73,7 @@ TRACE_EVENT(kgsl_issueibcmds,
__entry->numibs,
__entry->timestamp,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "None",
+ KGSL_DRAWOBJ_FLAGS) : "None",
__entry->result,
__print_symbolic(__entry->drawctxt_type, KGSL_CONTEXT_TYPES)
)
@@ -1028,59 +1027,62 @@ TRACE_EVENT(kgsl_pagetable_destroy,
);
DECLARE_EVENT_CLASS(syncpoint_timestamp_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp),
+ TP_ARGS(syncobj, context, timestamp),
TP_STRUCT__entry(
- __field(unsigned int, cmdbatch_context_id)
+ __field(unsigned int, syncobj_context_id)
__field(unsigned int, context_id)
__field(unsigned int, timestamp)
),
TP_fast_assign(
- __entry->cmdbatch_context_id = cmdbatch->context->id;
+ __entry->syncobj_context_id = syncobj->base.context->id;
__entry->context_id = context->id;
__entry->timestamp = timestamp;
),
TP_printk("ctx=%d sync ctx=%d ts=%d",
- __entry->cmdbatch_context_id, __entry->context_id,
+ __entry->syncobj_context_id, __entry->context_id,
__entry->timestamp)
);
DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp)
+ TP_ARGS(syncobj, context, timestamp)
);
DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp_expire,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp)
+ TP_ARGS(syncobj, context, timestamp)
);
DECLARE_EVENT_CLASS(syncpoint_fence_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name),
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name),
TP_STRUCT__entry(
__string(fence_name, name)
- __field(unsigned int, cmdbatch_context_id)
+ __field(unsigned int, syncobj_context_id)
),
TP_fast_assign(
- __entry->cmdbatch_context_id = cmdbatch->context->id;
+ __entry->syncobj_context_id = syncobj->base.context->id;
__assign_str(fence_name, name);
),
TP_printk("ctx=%d fence=%s",
- __entry->cmdbatch_context_id, __get_str(fence_name))
+ __entry->syncobj_context_id, __get_str(fence_name))
);
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name)
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name)
);
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence_expire,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name)
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name)
);
TRACE_EVENT(kgsl_msg,
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 884d82f9190e..37e8b61b5c98 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -440,6 +440,9 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI),
.driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_ANSI),
+ .driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO),
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ec791e169f8f..9dc9f93f4e36 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1754,6 +1754,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index 3776e748647e..d8ef5f59edcb 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -39,6 +39,8 @@
#define PMI_CHG_SCALE_2 391750000000
#define QPNP_VADC_HC_VREF_CODE 0x4000
#define QPNP_VADC_HC_VDD_REFERENCE_MV 1875
+/* ADC codes above 0x7FFF are negative readings; clamp them to 0 */
+#define QPNP_VADC_HC_MAX_CODE 0x7FFF
/* Units for temperature below (on x axis) is in 0.1DegC as
required by the battery driver. Note the resolution used
@@ -752,6 +754,8 @@ int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
if (adc_properties->adc_hc) {
/* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+ if (adc_code > QPNP_VADC_HC_MAX_CODE)
+ adc_code = 0;
pmic_voltage = (int64_t) adc_code;
pmic_voltage *= (int64_t) (adc_properties->adc_vdd_reference
* 1000);
@@ -862,6 +866,8 @@ int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *chip,
if (adc_properties->adc_hc) {
/* (ADC code * vref_vadc (1.875V) * 1000) / (0x4000 * 1000) */
+ if (adc_code > QPNP_VADC_HC_MAX_CODE)
+ adc_code = 0;
xo_thm_voltage = (int64_t) adc_code;
xo_thm_voltage *= (int64_t) (adc_properties->adc_vdd_reference
* 1000);
@@ -1059,6 +1065,8 @@ int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *chip,
if (adc_properties->adc_hc) {
/* (ADC code * vref_vadc (1.875V) * 1000) / (0x4000 * 1000) */
+ if (adc_code > QPNP_VADC_HC_MAX_CODE)
+ adc_code = 0;
therm_voltage = (int64_t) adc_code;
therm_voltage *= (int64_t) (adc_properties->adc_vdd_reference
* 1000);
@@ -1094,6 +1102,8 @@ int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *chip,
if (adc_properties->adc_hc) {
/* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+ if (reg > QPNP_VADC_HC_MAX_CODE)
+ reg = 0;
adc_voltage = (int64_t) reg;
adc_voltage *= QPNP_VADC_HC_VDD_REFERENCE_MV;
adc_voltage = div64_s64(adc_voltage,
@@ -1228,6 +1238,8 @@ int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *vadc,
if (adc_properties->adc_hc) {
/* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+ if (adc_code > QPNP_VADC_HC_MAX_CODE)
+ adc_code = 0;
scale_voltage = (int64_t) adc_code;
scale_voltage *= (adc_properties->adc_vdd_reference * 1000);
scale_voltage = div64_s64(scale_voltage,
@@ -1820,7 +1832,7 @@ int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev,
struct qpnp_adc_amux *adc_channel_list;
struct qpnp_adc_properties *adc_prop;
struct qpnp_adc_amux_properties *amux_prop;
- int count_adc_channel_list = 0, decimation, rc = 0, i = 0;
+ int count_adc_channel_list = 0, decimation = 0, rc = 0, i = 0;
int decimation_tm_hc = 0, fast_avg_setup_tm_hc = 0, cal_val_hc = 0;
bool adc_hc;
@@ -1927,22 +1939,6 @@ int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev,
return -EINVAL;
}
- /*
- * ADC_TM_HC decimation setting is common across
- * channels.
- */
- if (!of_device_is_compatible(node,
- "qcom,qpnp-adc-tm-hc")) {
- rc = of_property_read_u32(child,
- "qcom,decimation", &decimation);
- if (rc) {
- pr_err("Invalid decimation\n");
- return -EINVAL;
- }
- } else {
- decimation = decimation_tm_hc;
- }
-
if (!strcmp(calibration_param, "absolute")) {
if (adc_hc)
calib_type = ADC_HC_ABS_CAL;
@@ -1980,6 +1976,18 @@ int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev,
fast_avg_setup = fast_avg_setup_tm_hc;
}
+ /* ADC_TM_HC decimation setting is common across channels */
+ if (!of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
+ rc = of_property_read_u32(child,
+ "qcom,decimation", &decimation);
+ if (rc) {
+ pr_err("Invalid decimation\n");
+ return -EINVAL;
+ }
+ } else {
+ decimation = decimation_tm_hc;
+ }
+
if (of_device_is_compatible(node, "qcom,qpnp-vadc-hc")) {
rc = of_property_read_u32(child, "qcom,cal-val",
&cal_val_hc);
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
index dfb2922b6f33..3c18d686091a 100644
--- a/drivers/hwtracing/coresight/coresight-csr.c
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -191,8 +191,6 @@ static int csr_probe(struct platform_device *pdev)
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- /* Store the driver data pointer for use in exported functions */
- csrdrvdata = drvdata;
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
@@ -220,6 +218,8 @@ static int csr_probe(struct platform_device *pdev)
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
+ /* Store the driver data pointer for use in exported functions */
+ csrdrvdata = drvdata;
dev_info(dev, "CSR initialized\n");
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index a234d61802ce..fb2f27299417 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -774,8 +774,6 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- /* Store the driver data pointer for use in exported functions */
- stmdrvdata = drvdata;
drvdata->dev = &adev->dev;
dev_set_drvdata(dev, drvdata);
@@ -846,6 +844,8 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
if (boot_enable)
coresight_enable(drvdata->csdev);
+ /* Store the driver data pointer for use in exported functions */
+ stmdrvdata = drvdata;
return 0;
err:
coresight_unregister(drvdata->csdev);
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index a0fcad198f62..ebb49230d4d7 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/qpnp/qpnp-revid.h>
#define FG_ADC_RR_EN_CTL 0x46
#define FG_ADC_RR_SKIN_TEMP_LSB 0x50
@@ -150,13 +151,18 @@
#define FG_ADC_RR_TEMP_FS_VOLTAGE_NUM 5000000
#define FG_ADC_RR_TEMP_FS_VOLTAGE_DEN 3
-#define FG_ADC_RR_DIE_TEMP_OFFSET 600000
+#define FG_ADC_RR_DIE_TEMP_OFFSET 601400
#define FG_ADC_RR_DIE_TEMP_SLOPE 2
#define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC 25000
-#define FG_ADC_RR_CHG_TEMP_OFFSET 1288000
-#define FG_ADC_RR_CHG_TEMP_SLOPE 4
-#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 27000
+#define FAB_ID_GF 0x30
+#define FAB_ID_SMIC 0x11
+#define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV 1296794
+#define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C 3858
+#define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV 1339518
+#define FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C 3598
+#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 25000
+#define FG_ADC_RR_CHG_THRESHOLD_SCALE 4
#define FG_ADC_RR_VOLT_INPUT_FACTOR 8
#define FG_ADC_RR_CURR_INPUT_FACTOR 2
@@ -201,6 +207,8 @@ struct rradc_chip {
struct iio_chan_spec *iio_chans;
unsigned int nchannels;
struct rradc_chan_prop *chan_props;
+ struct device_node *revid_dev_node;
+ struct pmic_revid_data *pmic_fab_id;
};
struct rradc_channels {
@@ -347,16 +355,34 @@ static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
{
- int64_t temp = 0;
+ int64_t uv = 0, offset = 0, slope = 0;
- temp = (int64_t) adc_code * 4;
- temp = temp * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
- temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+ if (chip->revid_dev_node) {
+ switch (chip->pmic_fab_id->fab_id) {
+ case FAB_ID_GF:
+ offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ break;
+ case FAB_ID_SMIC:
+ offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pr_err("No temperature scaling coefficients\n");
+ return -EINVAL;
+ }
+
+ uv = (int64_t) adc_code * FG_ADC_RR_CHG_THRESHOLD_SCALE;
+ uv = uv * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
+ uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
FG_MAX_ADC_READINGS));
- temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
- temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
- temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
- *result_millidegc = temp;
+ uv = offset - uv;
+ uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+ uv = uv + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = uv;
return 0;
}
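+
+/*
+ * Worked example of the scaling above for the GF fab (illustrative;
+ * FG_MAX_ADC_READINGS and FG_ADC_SCALE_MILLI_FACTOR are defined earlier
+ * in this file and are assumed to be the full-scale code count and 1000):
+ *
+ *	uv    = code * 4 * 5000000 / (3 * FG_MAX_ADC_READINGS)
+ *	mdegc = (1296794 - uv) * 1000 / 3858 + 25000
+ *
+ * so a code landing exactly on the 1296794 uV offset reads back as 25000
+ * millidegrees C.
+ */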
@@ -380,15 +406,33 @@ static int rradc_post_process_chg_temp(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
{
- int64_t temp = 0;
+ int64_t uv = 0, offset = 0, slope = 0;
- temp = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
- temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+ if (chip->revid_dev_node) {
+ switch (chip->pmic_fab_id->fab_id) {
+ case FAB_ID_GF:
+ offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ break;
+ case FAB_ID_SMIC:
+ offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pr_err("No temperature scaling coefficients\n");
+ return -EINVAL;
+ }
+
+ uv = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
+ uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
FG_MAX_ADC_READINGS));
- temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
- temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
- temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
- *result_millidegc = temp;
+ uv = offset - uv;
+ uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+ uv += FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = uv;
return 0;
}
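
/*
 * Both charger-temp post-processing paths above share the same math:
 * the raw code is scaled to microvolts against the 5 V / 3 full-scale
 * range, then converted to millidegrees Celsius through the
 * foundry-specific offset/slope pair selected by fab_id (the *_hot
 * variant first multiplies the single-register threshold code by
 * FG_ADC_RR_CHG_THRESHOLD_SCALE to restore the full code range). A
 * minimal standalone sketch, assuming FG_ADC_SCALE_MILLI_FACTOR is
 * 1000 and FG_MAX_ADC_READINGS is 1024 (both defined elsewhere in
 * this file):
 */
static s64 chg_code_to_mdegc(u16 adc_code, s64 offset_uv, s64 slope_uv_per_c)
{
	/* ADC code -> microvolts over the 5000000/3 uV full scale */
	s64 uv = div64_s64((s64)adc_code * 5000000, 3 * 1024);

	/* microvolts below the offset, over the slope, plus 25 degC */
	return div64_s64((offset_uv - uv) * 1000, slope_uv_per_c) + 25000;
}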
@@ -418,9 +462,9 @@ static int rradc_post_process_gpio(struct rradc_chip *chip,
.sts = _sts, \
}, \
-#define RR_ADC_CHAN_TEMP(_dname, _scale, _lsb, _msb, _sts) \
+#define RR_ADC_CHAN_TEMP(_dname, _scale, mask, _lsb, _msb, _sts) \
RR_ADC_CHAN(_dname, IIO_TEMP, \
- BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED), \
+ mask, \
_scale, _lsb, _msb, _sts) \
#define RR_ADC_CHAN_VOLT(_dname, _scale, _lsb, _msb, _sts) \
@@ -443,9 +487,11 @@ static const struct rradc_channels rradc_chans[] = {
FG_ADC_RR_BATT_ID_5_LSB, FG_ADC_RR_BATT_ID_5_MSB,
FG_ADC_RR_BATT_ID_STS)
RR_ADC_CHAN_TEMP("batt_therm", &rradc_post_process_therm,
+ BIT(IIO_CHAN_INFO_RAW),
FG_ADC_RR_BATT_THERM_LSB, FG_ADC_RR_BATT_THERM_MSB,
FG_ADC_RR_BATT_THERM_STS)
RR_ADC_CHAN_TEMP("skin_temp", &rradc_post_process_therm,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
FG_ADC_RR_SKIN_TEMP_LSB, FG_ADC_RR_SKIN_TEMP_MSB,
FG_ADC_RR_AUX_THERM_STS)
RR_ADC_CHAN_CURRENT("usbin_i", &rradc_post_process_curr,
@@ -461,24 +507,30 @@ static const struct rradc_channels rradc_chans[] = {
FG_ADC_RR_DC_IN_V_LSB, FG_ADC_RR_DC_IN_V_MSB,
FG_ADC_RR_DC_IN_V_STS)
RR_ADC_CHAN_TEMP("die_temp", &rradc_post_process_die_temp,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
FG_ADC_RR_PMI_DIE_TEMP_LSB, FG_ADC_RR_PMI_DIE_TEMP_MSB,
FG_ADC_RR_PMI_DIE_TEMP_STS)
RR_ADC_CHAN_TEMP("chg_temp", &rradc_post_process_chg_temp,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
FG_ADC_RR_CHARGER_TEMP_LSB, FG_ADC_RR_CHARGER_TEMP_MSB,
FG_ADC_RR_CHARGER_TEMP_STS)
RR_ADC_CHAN_VOLT("gpio", &rradc_post_process_gpio,
FG_ADC_RR_GPIO_LSB, FG_ADC_RR_GPIO_MSB,
FG_ADC_RR_GPIO_STS)
RR_ADC_CHAN_TEMP("chg_temp_hot", &rradc_post_process_chg_temp_hot,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
FG_ADC_RR_CHARGER_HOT, FG_ADC_RR_CHARGER_HOT,
FG_ADC_RR_CHARGER_TEMP_STS)
RR_ADC_CHAN_TEMP("chg_temp_too_hot", &rradc_post_process_chg_temp_hot,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
FG_ADC_RR_CHARGER_TOO_HOT, FG_ADC_RR_CHARGER_TOO_HOT,
FG_ADC_RR_CHARGER_TEMP_STS)
RR_ADC_CHAN_TEMP("skin_temp_hot", &rradc_post_process_skin_temp_hot,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
FG_ADC_RR_SKIN_HOT, FG_ADC_RR_SKIN_HOT,
FG_ADC_RR_AUX_THERM_STS)
RR_ADC_CHAN_TEMP("skin_temp_too_hot", &rradc_post_process_skin_temp_hot,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),
FG_ADC_RR_SKIN_TOO_HOT, FG_ADC_RR_SKIN_TOO_HOT,
FG_ADC_RR_AUX_THERM_STS)
};
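
/*
 * Note the effect of the new mask argument to RR_ADC_CHAN_TEMP: every
 * temperature channel except batt_therm keeps IIO_CHAN_INFO_PROCESSED
 * alongside the raw code, while batt_therm now advertises only
 * IIO_CHAN_INFO_RAW, so its consumers receive the unconverted ADC code.
 */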
@@ -508,7 +560,7 @@ static int rradc_do_conversion(struct rradc_chip *chip,
buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
if (buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) {
- pr_warn("%s is not ready; nothing to read\n",
+ pr_debug("%s is not ready; nothing to read\n",
rradc_chans[prop->channel].datasheet_name);
rc = -ENODATA;
goto fail;
@@ -645,6 +697,22 @@ static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node)
}
chip->base = base;
+ chip->revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+ if (chip->revid_dev_node) {
+ chip->pmic_fab_id = get_revid_data(chip->revid_dev_node);
+ if (IS_ERR(chip->pmic_fab_id)) {
+ rc = PTR_ERR(chip->pmic_fab_id);
+ if (rc != -EPROBE_DEFER)
+ pr_err("Unable to get pmic_revid rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->pmic_fab_id->fab_id == -EINVAL) {
+ rc = chip->pmic_fab_id->fab_id;
+ pr_debug("Unable to read fabid rc=%d\n", rc);
+ }
+ }
+
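/*
 * The qcom,pmic-revid phandle is optional: without it the driver keeps
 * revid_dev_node NULL and the fab-dependent charger-temp conversions
 * above bail out with -EINVAL. A hypothetical consumer node (node and
 * label names are illustrative, not taken from a real dtsi):
 *
 *	rradc {
 *		qcom,pmic-revid = <&pmic_revid>;
 *	};
 */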
iio_chan = chip->iio_chans;
for (i = 0; i < RR_ADC_MAX; i++) {
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 31369d8c0ef3..9c1380b65b77 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -823,4 +823,6 @@ config INPUT_DRV2667_HAPTICS
To compile this driver as a module, choose M here: the
module will be called drv2667-haptics.
+source "drivers/input/misc/ots_pat9125/Kconfig"
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 4019f19dd848..4e806ac056ce 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
obj-$(CONFIG_INPUT_IDEAPAD_SLIDEBAR) += ideapad_slidebar.o
+obj-$(CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH) += ots_pat9125/
diff --git a/drivers/input/misc/ots_pat9125/Kconfig b/drivers/input/misc/ots_pat9125/Kconfig
new file mode 100644
index 000000000000..af82edd0faae
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/Kconfig
@@ -0,0 +1,14 @@
+#
+# PixArt OTS switch driver configuration
+#
+
+config INPUT_PIXART_OTS_PAT9125_SWITCH
+ tristate "PixArt PAT9125 Rotating Switch driver"
+ depends on INPUT && I2C && GPIOLIB
+ help
+ Say Y to enable support for the PixArt OTS pat9125
+ rotating switch driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ots_pat9125.
+
+ If unsure, say N.
diff --git a/drivers/input/misc/ots_pat9125/Makefile b/drivers/input/misc/ots_pat9125/Makefile
new file mode 100644
index 000000000000..a697caf69644
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the PixArt OTS switch driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH) += pat9125_linux_driver.o pixart_ots.o
diff --git a/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
new file mode 100644
index 000000000000..e5edaf5f908d
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
@@ -0,0 +1,296 @@
+/* drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of_gpio.h>
+#include "pixart_ots.h"
+
+struct pixart_pat9125_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ int irq_gpio;
+ u32 irq_flags;
+};
+
+static int pat9125_i2c_write(struct i2c_client *client, u8 reg, u8 *data,
+ int len)
+{
+ u8 buf[MAX_BUF_SIZE];
+ int ret = 0, i;
+ struct device *dev = &client->dev;
+
+ buf[0] = reg;
+ if (len >= MAX_BUF_SIZE) {
+ dev_err(dev, "%s Failed: buffer size is %d [Max Limit is %d]\n",
+ __func__, len, MAX_BUF_SIZE);
+ return -ENODEV;
+ }
+ for (i = 0 ; i < len; i++)
+ buf[i+1] = data[i];
+ /* Returns negative errno, or else the number of bytes written. */
+ ret = i2c_master_send(client, buf, len+1);
+ if (ret != len+1)
+ dev_err(dev, "%s Failed: writing to reg 0x%x\n", __func__, reg);
+
+ return ret;
+}
+
+static int pat9125_i2c_read(struct i2c_client *client, u8 reg, u8 *data)
+{
+ u8 buf[MAX_BUF_SIZE];
+ int ret;
+ struct device *dev = &client->dev;
+
+ buf[0] = reg;
+ /*
+ * i2c_master_send() returns the number of bytes transferred on
+ * success, so this one-byte register-address write returns 1;
+ * on failure it returns a negative errno.
+ */
+ ret = i2c_master_send(client, buf, 1);
+ if (ret != 1) {
+ dev_err(dev, "%s Failed: writing to reg 0x%x\n", __func__, reg);
+ return ret;
+ }
+ /* returns negative errno, or else the number of bytes read */
+ ret = i2c_master_recv(client, buf, 1);
+ if (ret != 1) {
+ dev_err(dev, "%s Failed: reading reg 0x%x\n", __func__, reg);
+ return ret;
+ }
+ *data = buf[0];
+
+ return ret;
+}
+
+unsigned char read_data(struct i2c_client *client, u8 addr)
+{
+ u8 data = 0xff;
+
+ pat9125_i2c_read(client, addr, &data);
+ return data;
+}
+
+void write_data(struct i2c_client *client, u8 addr, u8 data)
+{
+ pat9125_i2c_write(client, addr, &data, 1);
+}
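
/*
 * read_data()/write_data() are the thin single-register accessors that
 * pixart_ots.c links against (declared extern in pixart_platform.h);
 * both swallow the underlying I2C error. A read-modify-write helper
 * built on top of them might look like this (a sketch, not part of the
 * driver):
 */
static inline void pat9125_update_bits(struct i2c_client *client, u8 addr,
				       u8 mask, u8 val)
{
	u8 cur = read_data(client, addr);	/* returns 0xff on error */

	write_data(client, addr, (cur & ~mask) | (val & mask));
}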
+
+static irqreturn_t pixart_pat9125_irq(int irq, void *data)
+{
+ return IRQ_HANDLED;
+}
+
+static ssize_t pat9125_test_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ char s[256], *p = s;
+ int reg_data = 0, i;
+ long rd_addr, wr_addr, wr_data;
+ struct pixart_pat9125_data *data =
+ (struct pixart_pat9125_data *)dev->driver_data;
+ struct i2c_client *client = data->client;
+
+ for (i = 0; i < sizeof(s); i++)
+ s[i] = buf[i];
+ *(s+1) = '\0';
+ *(s+4) = '\0';
+ *(s+7) = '\0';
+ /* example(in console): echo w 12 34 > rw_reg */
+ if (*p == 'w') {
+ p += 2;
+ if (!kstrtol(p, 16, &wr_addr)) {
+ p += 3;
+ if (!kstrtol(p, 16, &wr_data)) {
+ dev_dbg(dev, "w 0x%x 0x%x\n",
+ (u8)wr_addr, (u8)wr_data);
+ write_data(client, (u8)wr_addr, (u8)wr_data);
+ }
+ }
+ }
+ /* example(in console): echo r 12 > rw_reg */
+ else if (*p == 'r') {
+ p += 2;
+
+ if (!kstrtol(p, 16, &rd_addr)) {
+ reg_data = read_data(client, (u8)rd_addr);
+ dev_dbg(dev, "r 0x%x 0x%x\n",
+ (unsigned int)rd_addr, reg_data);
+ }
+ }
+ return count;
+}
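
/*
 * The store handler above parses a fixed-position command line: byte 0
 * is the opcode ('w' or 'r'), bytes 2-3 a hex register address, and
 * bytes 5-6 a hex data value; the NUL stores at s+1/s+4/s+7 split the
 * copied buffer in place so each kstrtol() sees its field as its own
 * string. The unconditional 256-byte copy relies on sysfs store
 * buffers being page-sized. Usage from a shell:
 *
 *	echo "w 12 34" > .../test	(write 0x34 to register 0x12)
 *	echo "r 12" > .../test		(read register 0x12, via dev_dbg)
 */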
+
+static ssize_t pat9125_test_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+static DEVICE_ATTR(test, S_IRUGO | S_IWUSR | S_IWGRP,
+ pat9125_test_show, pat9125_test_store);
+
+static struct attribute *pat9125_attr_list[] = {
+ &dev_attr_test.attr,
+ NULL,
+};
+
+static struct attribute_group pat9125_attr_grp = {
+ .attrs = pat9125_attr_list,
+};
+
+static int pat9125_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err = 0;
+ struct pixart_pat9125_data *data;
+ struct input_dev *input;
+ struct device_node *np;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+ dev_err(dev, "I2C not supported\n");
+ return -ENXIO;
+ }
+
+ if (client->dev.of_node) {
+ data = devm_kzalloc(dev, sizeof(struct pixart_pat9125_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ } else {
+ data = client->dev.platform_data;
+ if (!data) {
+ dev_err(dev, "Invalid pat9125 data\n");
+ return -EINVAL;
+ }
+ }
+ data->client = client;
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "Failed to alloc input device\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, data);
+ input_set_drvdata(input, data);
+ input->name = PAT9125_DEV_NAME;
+
+ data->input = input;
+ err = input_register_device(data->input);
+ if (err < 0) {
+ dev_err(dev, "Failed to register input device\n");
+ goto err_register_input_device;
+ }
+
+ if (!gpio_is_valid(data->irq_gpio)) {
+ dev_err(dev, "invalid irq_gpio: %d\n", data->irq_gpio);
+ return -EINVAL;
+ }
+
+ err = gpio_request(data->irq_gpio, "pixart_pat9125_irq_gpio");
+ if (err) {
+ dev_err(dev, "unable to request gpio %d\n", data->irq_gpio);
+ return err;
+ }
+
+ err = gpio_direction_input(data->irq_gpio);
+ if (err) {
+ dev_err(dev, "unable to set dir for gpio %d\n", data->irq_gpio);
+ goto free_gpio;
+ }
+
+ if (!ots_sensor_init(client)) {
+ err = -ENODEV;
+ goto err_sensor_init;
+ }
+
+ err = devm_request_threaded_irq(dev, client->irq, NULL,
+ pixart_pat9125_irq, (unsigned long)data->irq_flags,
+ "pixart_pat9125_irq", data);
+ if (err) {
+ dev_err(dev, "Req irq %d failed, errno:%d\n", client->irq, err);
+ goto err_request_threaded_irq;
+ }
+
+ err = sysfs_create_group(&(input->dev.kobj), &pat9125_attr_grp);
+ if (err) {
+ dev_err(dev, "Failed to create sysfs group, errno:%d\n", err);
+ goto err_sysfs_create;
+ }
+
+ return 0;
+
+err_sysfs_create:
+err_request_threaded_irq:
+err_sensor_init:
+free_gpio:
+ gpio_free(data->irq_gpio);
+err_register_input_device:
+ input_free_device(data->input);
+ return err;
+}
+
+static int pat9125_i2c_remove(struct i2c_client *client)
+{
+ struct pixart_pat9125_data *data = i2c_get_clientdata(client);
+
+ devm_free_irq(&client->dev, client->irq, data);
+ if (gpio_is_valid(data->irq_gpio))
+ gpio_free(data->irq_gpio);
+ input_unregister_device(data->input);
+ devm_kfree(&client->dev, data);
+ data = NULL;
+ return 0;
+}
+
+static int pat9125_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int pat9125_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct i2c_device_id pat9125_device_id[] = {
+ {PAT9125_DEV_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pat9125_device_id);
+
+static const struct dev_pm_ops pat9125_pm_ops = {
+ .suspend = pat9125_suspend,
+ .resume = pat9125_resume
+};
+
+static const struct of_device_id pixart_pat9125_match_table[] = {
+ { .compatible = "pixart,pat9125",},
+ { },
+};
+
+static struct i2c_driver pat9125_i2c_driver = {
+ .driver = {
+ .name = PAT9125_DEV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &pat9125_pm_ops,
+ .of_match_table = pixart_pat9125_match_table,
+ },
+ .probe = pat9125_i2c_probe,
+ .remove = pat9125_i2c_remove,
+ .id_table = pat9125_device_id,
+};
+module_i2c_driver(pat9125_i2c_driver);
+
+MODULE_AUTHOR("pixart");
+MODULE_DESCRIPTION("pixart pat9125 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ots_pat9125/pixart_ots.c b/drivers/input/misc/ots_pat9125/pixart_ots.c
new file mode 100644
index 000000000000..fa73ffe40985
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/pixart_ots.c
@@ -0,0 +1,77 @@
+/* drivers/input/misc/ots_pat9125/pixart_ots.c
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include "pixart_platform.h"
+#include "pixart_ots.h"
+
+static void ots_write_read(struct i2c_client *client, u8 address, u8 wdata)
+{
+ u8 read_value;
+
+ do {
+ write_data(client, address, wdata);
+ read_value = read_data(client, address);
+ } while (read_value != wdata);
+}
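
/*
 * ots_write_read() above spins until the register reads back the value
 * just written, with no retry cap, so a wedged bus keeps it looping
 * forever. A bounded variant (sketch only; the retry count is an
 * arbitrary assumption) would be:
 */
static bool ots_write_read_bounded(struct i2c_client *client, u8 address,
				   u8 wdata)
{
	int retry;

	for (retry = 0; retry < 5; retry++) {
		write_data(client, address, wdata);
		if (read_data(client, address) == wdata)
			return true;
	}
	return false;
}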
+
+bool ots_sensor_init(struct i2c_client *client)
+{
+ unsigned char sensor_pid = 0, read_id_ok = 0;
+
+ /*
+ * Read sensor_pid in address 0x00 to check if the
+ * serial link is valid, read value should be 0x31.
+ */
+ sensor_pid = read_data(client, PIXART_PAT9125_PRODUCT_ID1_REG);
+
+ if (sensor_pid == PIXART_PAT9125_SENSOR_ID) {
+ read_id_ok = 1;
+
+ /*
+ * PAT9125 sensor recommended settings:
+ * switch to bank0, not allowed to perform ots_write_read
+ */
+ write_data(client, PIXART_PAT9125_SELECT_BANK_REG,
+ PIXART_PAT9125_BANK0);
+ /*
+ * software reset (i.e. set bit7 to 1).
+ * It will reset to 0 automatically
+ * so perform OTS_RegWriteRead is not allowed.
+ */
+ write_data(client, PIXART_PAT9125_CONFIG_REG,
+ PIXART_PAT9125_RESET);
+
+ /* delay 1ms */
+ usleep_range(RESET_DELAY_US, RESET_DELAY_US + 1);
+
+ /* disable write protect */
+ ots_write_read(client, PIXART_PAT9125_WRITE_PROTECT_REG,
+ PIXART_PAT9125_DISABLE_WRITE_PROTECT);
+ /* set X-axis resolution (depends on application) */
+ ots_write_read(client, PIXART_PAT9125_SET_CPI_RES_X_REG,
+ PIXART_PAT9125_CPI_RESOLUTION_X);
+ /* set Y-axis resolution (depends on application) */
+ ots_write_read(client, PIXART_PAT9125_SET_CPI_RES_Y_REG,
+ PIXART_PAT9125_CPI_RESOLUTION_Y);
+ /* set 12-bit X/Y data format (depends on application) */
+ ots_write_read(client, PIXART_PAT9125_ORIENTATION_REG,
+ PIXART_PAT9125_MOTION_DATA_LENGTH);
+ /* ONLY for VDD=VDDA=1.7~1.9V: for power saving */
+ ots_write_read(client, PIXART_PAT9125_VOLTAGE_SEGMENT_SEL_REG,
+ PIXART_PAT9125_LOW_VOLTAGE_SEGMENT);
+
+ if (read_data(client, PIXART_PAT9125_MISC2_REG) == 0x04) {
+ ots_write_read(client, PIXART_PAT9125_MISC2_REG, 0x08);
+ if (read_data(client, PIXART_PAT9125_MISC1_REG) == 0x10)
+ ots_write_read(client, PIXART_PAT9125_MISC1_REG,
+ 0x19);
+ }
+ /* enable write protect */
+ ots_write_read(client, PIXART_PAT9125_WRITE_PROTECT_REG,
+ PIXART_PAT9125_ENABLE_WRITE_PROTECT);
+ }
+ return read_id_ok;
+}
diff --git a/drivers/input/misc/ots_pat9125/pixart_ots.h b/drivers/input/misc/ots_pat9125/pixart_ots.h
new file mode 100644
index 000000000000..a66ded5c9d08
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/pixart_ots.h
@@ -0,0 +1,45 @@
+/* drivers/input/misc/ots_pat9125/pixart_ots.h
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#ifndef __PIXART_OTS_H_
+#define __PIXART_OTS_H_
+
+#define PAT9125_DEV_NAME "pixart_pat9125"
+#define MAX_BUF_SIZE 20
+#define RESET_DELAY_US 1000
+
+/* Register addresses */
+#define PIXART_PAT9125_PRODUCT_ID1_REG 0x00
+#define PIXART_PAT9125_PRODUCT_ID2_REG 0x01
+#define PIXART_PAT9125_MOTION_STATUS_REG 0x02
+#define PIXART_PAT9125_DELTA_X_LO_REG 0x03
+#define PIXART_PAT9125_DELTA_Y_LO_REG 0x04
+#define PIXART_PAT9125_CONFIG_REG 0x06
+#define PIXART_PAT9125_WRITE_PROTECT_REG 0x09
+#define PIXART_PAT9125_SET_CPI_RES_X_REG 0x0D
+#define PIXART_PAT9125_SET_CPI_RES_Y_REG 0x0E
+#define PIXART_PAT9125_DELTA_XY_HI_REG 0x12
+#define PIXART_PAT9125_ORIENTATION_REG 0x19
+#define PIXART_PAT9125_VOLTAGE_SEGMENT_SEL_REG 0x4B
+#define PIXART_PAT9125_SELECT_BANK_REG 0x7F
+#define PIXART_PAT9125_MISC1_REG 0x5D
+#define PIXART_PAT9125_MISC2_REG 0x5E
+/*Register configuration data */
+#define PIXART_PAT9125_SENSOR_ID 0x31
+#define PIXART_PAT9125_RESET 0x97
+#define PIXART_PAT9125_MOTION_DATA_LENGTH 0x04
+#define PIXART_PAT9125_BANK0 0x00
+#define PIXART_PAT9125_DISABLE_WRITE_PROTECT 0x5A
+#define PIXART_PAT9125_ENABLE_WRITE_PROTECT 0x00
+#define PIXART_PAT9125_CPI_RESOLUTION_X 0x65
+#define PIXART_PAT9125_CPI_RESOLUTION_Y 0xFF
+#define PIXART_PAT9125_LOW_VOLTAGE_SEGMENT 0x04
+#define PIXART_PAT9125_VALID_MOTION_DATA 0x80
+
+/* Export functions */
+bool ots_sensor_init(struct i2c_client *);
+
+#endif
diff --git a/drivers/input/misc/ots_pat9125/pixart_platform.h b/drivers/input/misc/ots_pat9125/pixart_platform.h
new file mode 100644
index 000000000000..1fe448fdc2cb
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/pixart_platform.h
@@ -0,0 +1,17 @@
+/* drivers/input/misc/ots_pat9125/pixart_platform.h
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#ifndef __PIXART_PLATFORM_H_
+#define __PIXART_PLATFORM_H_
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+
+/* extern functions */
+extern unsigned char read_data(struct i2c_client *, u8 addr);
+extern void write_data(struct i2c_client *, u8 addr, u8 data);
+
+#endif
diff --git a/drivers/input/touchscreen/gt9xx/goodix_tool.c b/drivers/input/touchscreen/gt9xx/goodix_tool.c
index 63fad6c46836..99a29401b36f 100644
--- a/drivers/input/touchscreen/gt9xx/goodix_tool.c
+++ b/drivers/input/touchscreen/gt9xx/goodix_tool.c
@@ -23,16 +23,15 @@
#include "gt9xx.h"
#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
#define DATA_LENGTH_UINT 512
-#define CMD_HEAD_LENGTH (sizeof(st_cmd_head) - sizeof(u8 *))
+#define CMD_HEAD_LENGTH (sizeof(struct st_cmd_head) - sizeof(u8 *))
static char procname[20] = {0};
-#define UPDATE_FUNCTIONS
-
-#pragma pack(1)
-struct {
- u8 wr; /* write read flag，0:R 1:W 2:PID 3: */
+struct st_cmd_head {
+ u8 wr; /* write read flag 0:R 1:W 2:PID 3: */
u8 flag; /* 0:no need flag/int 1: need flag 2:need int */
u8 flag_addr[2];/* flag address */
u8 flag_val; /* flag val */
@@ -46,9 +45,9 @@ struct {
u8 addr[2]; /* address */
u8 res[3]; /* reserved */
u8 *data; /* data pointer */
-} st_cmd_head;
-#pragma pack()
-st_cmd_head cmd_head;
+} __packed;
+
+static struct st_cmd_head cmd_head;
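
/*
 * CMD_HEAD_LENGTH (sizeof(struct st_cmd_head) - sizeof(u8 *)) is the
 * number of header bytes userspace actually writes; the trailing data
 * pointer is kernel-side scratch and never crosses the user boundary.
 * Because the struct is __packed and data is its last member, this is
 * the same as the pointer's offset, which could be asserted as:
 *
 *	BUILD_BUG_ON(CMD_HEAD_LENGTH != offsetof(struct st_cmd_head, data));
 */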
static struct i2c_client *gt_client;
@@ -56,15 +55,11 @@ static struct proc_dir_entry *goodix_proc_entry;
static struct mutex lock;
-static s32 goodix_tool_write(struct file *filp, const char __user *buff,
- unsigned long len, void *data);
-static s32 goodix_tool_read(char *page, char **start, off_t off, int count,
- int *eof, void *data);
static s32 (*tool_i2c_read)(u8 *, u16);
static s32 (*tool_i2c_write)(u8 *, u16);
-s32 DATA_LENGTH;
-s8 IC_TYPE[16] = {0};
+s32 data_length;
+s8 ic_type[16] = {0};
static void tool_set_proc_name(char *procname)
{
@@ -113,7 +108,7 @@ static s32 tool_i2c_read_no_extra(u8 *buf, u16 len)
}
if (i == cmd_head.retry) {
- dev_err(&client->dev, "I2C read retry limit over.\n");
+ dev_err(&gt_client->dev, "I2C read retry limit over\n");
ret = -EIO;
}
@@ -138,7 +133,7 @@ static s32 tool_i2c_write_no_extra(u8 *buf, u16 len)
}
if (i == cmd_head.retry) {
- dev_err(&client->dev, "I2C write retry limit over.\n");
+ dev_err(&gt_client->dev, "I2C write retry limit over\n");
ret = -EIO;
}
@@ -173,17 +168,17 @@ static s32 tool_i2c_write_with_extra(u8 *buf, u16 len)
static void register_i2c_func(void)
{
- if (strcmp(IC_TYPE, "GT8110") && strcmp(IC_TYPE, "GT8105")
- && strcmp(IC_TYPE, "GT801") && strcmp(IC_TYPE, "GT800")
- && strcmp(IC_TYPE, "GT801PLUS") && strcmp(IC_TYPE, "GT811")
- && strcmp(IC_TYPE, "GTxxx")) {
+ if (strcmp(ic_type, "GT8110") && strcmp(ic_type, "GT8105")
+ && strcmp(ic_type, "GT801") && strcmp(ic_type, "GT800")
+ && strcmp(ic_type, "GT801PLUS") && strcmp(ic_type, "GT811")
+ && strcmp(ic_type, "GTxxx")) {
tool_i2c_read = tool_i2c_read_with_extra;
tool_i2c_write = tool_i2c_write_with_extra;
- pr_debug("I2C function: with pre and end cmd!\n");
+ pr_debug("I2C function: with pre and end cmd\n");
} else {
tool_i2c_read = tool_i2c_read_no_extra;
tool_i2c_write = tool_i2c_write_no_extra;
- pr_info("I2C function: without pre and end cmd!\n");
+ pr_info("I2C function: without pre and end cmd\n");
}
}
@@ -191,57 +186,14 @@ static void unregister_i2c_func(void)
{
tool_i2c_read = NULL;
tool_i2c_write = NULL;
- pr_info("I2C function: unregister i2c transfer function!\n");
-}
-
-s32 init_wr_node(struct i2c_client *client)
-{
- u8 i;
-
- gt_client = client;
- memset(&cmd_head, 0, sizeof(cmd_head));
- cmd_head.data = NULL;
-
- i = 5;
- while ((!cmd_head.data) && i) {
- cmd_head.data = devm_kzalloc(&client->dev,
- i * DATA_LENGTH_UINT, GFP_KERNEL);
- if (cmd_head.data)
- break;
- i--;
- }
- if (i) {
- DATA_LENGTH = i * DATA_LENGTH_UINT;
- dev_dbg(&client->dev, "Applied memory size:%d.", DATA_LENGTH);
- } else {
- pr_err("Apply for memory failed.\n");
- return FAIL;
- }
-
- cmd_head.addr_len = 2;
- cmd_head.retry = 5;
-
- register_i2c_func();
-
- mutex_init(&lock);
- tool_set_proc_name(procname);
- goodix_proc_entry = create_proc_entry(procname, 0660, NULL);
- if (goodix_proc_entry == NULL) {
- pr_err("Couldn't create proc entry!\n");
- return FAIL;
- }
- GTP_INFO("Create proc entry success!");
- goodix_proc_entry->write_proc = goodix_tool_write;
- goodix_proc_entry->read_proc = goodix_tool_read;
-
- return SUCCESS;
+ pr_info("I2C function: unregister i2c transfer function\n");
}
void uninit_wr_node(void)
{
cmd_head.data = NULL;
unregister_i2c_func();
- remove_proc_entry(procname, NULL);
+ proc_remove(goodix_proc_entry);
}
static u8 relation(u8 src, u8 dst, u8 rlt)
@@ -256,7 +208,7 @@ static u8 relation(u8 src, u8 dst, u8 rlt)
case 1:
ret = (src == dst) ? true : false;
- pr_debug("equal:src:0x%02x dst:0x%02x ret:%d.\n",
+ pr_debug("equal:src:0x%02x dst:0x%02x ret:%d\n",
src, dst, (s32)ret);
break;
@@ -298,23 +250,18 @@ static u8 comfirm(void)
s32 i = 0;
u8 buf[32];
-/* memcpy(&buf[GTP_ADDR_LENGTH - cmd_head.addr_len],
- * &cmd_head.flag_addr, cmd_head.addr_len);
- * memcpy(buf, &cmd_head.flag_addr, cmd_head.addr_len);
- * //Modified by Scott, 2012-02-17
- */
memcpy(buf, cmd_head.flag_addr, cmd_head.addr_len);
for (i = 0; i < cmd_head.times; i++) {
if (tool_i2c_read(buf, 1) <= 0) {
- pr_err("Read flag data failed!\n");
+ dev_err(&gt_client->dev, "Read flag data failed");
return FAIL;
}
if (true == relation(buf[GTP_ADDR_LENGTH], cmd_head.flag_val,
cmd_head.flag_relation)) {
- pr_debug("value at flag addr:0x%02x.\n",
+ pr_debug("value at flag addr:0x%02x\n",
buf[GTP_ADDR_LENGTH]);
- pr_debug("flag value:0x%02x.\n", cmd_head.flag_val);
+ pr_debug("flag value:0x%02x\n", cmd_head.flag_val);
break;
}
@@ -322,89 +269,99 @@ static u8 comfirm(void)
}
if (i >= cmd_head.times) {
- pr_debug("Didn't get the flag to continue!\n");
+ dev_err(&gt_client->dev, "Didn't get the flag to continue");
return FAIL;
}
return SUCCESS;
}
-/********************************************************
+#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
+static s32 fill_update_info(char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ u8 buf[4];
+
+ buf[0] = show_len >> 8;
+ buf[1] = show_len & 0xff;
+ buf[2] = total_len >> 8;
+ buf[3] = total_len & 0xff;
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, sizeof(buf));
+}
+#else
+static s32 fill_update_info(char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+#endif
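
/*
 * fill_update_info() packs the firmware-update progress counters kept
 * by gt9xx_update.c into four big-endian bytes: show_len in bytes 0-1
 * and total_len in bytes 2-3. When the update code is compiled out,
 * the stub answers -ENODEV rather than inventing progress values.
 */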
+
+/*
* Function:
* Goodix tool write function.
* Input:
* standard proc write function param.
* Output:
* Return write length.
- *******************************************************
*/
-static s32 goodix_tool_write(struct file *filp, const char __user *buff,
- unsigned long len, void *data)
+static s32 goodix_tool_write(struct file *filp, const char __user *userbuf,
+ size_t count, loff_t *ppos)
{
s32 ret = 0;
mutex_lock(&lock);
- ret = copy_from_user(&cmd_head, buff, CMD_HEAD_LENGTH);
+ ret = copy_from_user(&cmd_head, userbuf, CMD_HEAD_LENGTH);
if (ret) {
- pr_err("copy_from_user failed.\n");
- ret = -EACCES;
+ dev_err(&gt_client->dev, "copy_from_user failed");
+ ret = -EFAULT;
goto exit;
}
- pr_debug("wr :0x%02x.\n", cmd_head.wr);
- pr_debug("flag:0x%02x.\n", cmd_head.flag);
- pr_debug("flag addr:0x%02x%02x.\n", cmd_head.flag_addr[0],
- cmd_head.flag_addr[1]);
- pr_debug("flag val:0x%02x.\n", cmd_head.flag_val);
- pr_debug("flag rel:0x%02x.\n", cmd_head.flag_relation);
- pr_debug("circle :%d.\n", (s32)cmd_head.circle);
- pr_debug("times :%d.\n", (s32)cmd_head.times);
- pr_debug("retry :%d.\n", (s32)cmd_head.retry);
- pr_debug("delay :%d.\n", (s32)cmd_head.delay);
- pr_debug("data len:%d.\n", (s32)cmd_head.data_len);
- pr_debug("addr len:%d.\n", (s32)cmd_head.addr_len);
- pr_debug("addr:0x%02x%02x.\n", cmd_head.addr[0], cmd_head.addr[1]);
- pr_debug("len:%d.\n", (s32)len);
- pr_debug("buf[20]:0x%02x.\n", buff[CMD_HEAD_LENGTH]);
-
- if (cmd_head.data_len > (DATA_LENGTH - GTP_ADDR_LENGTH)) {
- pr_debug("data len %d > data buff %d, rejected!\n",
- cmd_head.data_len, (DATA_LENGTH - GTP_ADDR_LENGTH));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_head.addr_len > GTP_ADDR_LENGTH) {
- pr_debug(" addr len %d > data buff %d, rejected!\n",
- cmd_head.addr_len, GTP_ADDR_LENGTH);
+ dev_dbg(&gt_client->dev, "wr:0x%02x, flag:0x%02x, flag addr:0x%02x%02x,
+ flag val:0x%02x, flag rel:0x%02x,", cmd_headd.wr,
+ cmd_head.flag, cmd_head.flag_addr[0],
+ cmd_head.flag_addr[1], cmd_head.flag_val,
+ cmd_head.flag_relation);
+ dev_dbg(&gt_client->dev, "circle:%d, times:%d, retry:%d, delay:%d,
+ data len:%d, addr len:%d, addr:0x%02x%02x, write len: %d",
+ (s32)cmd_head.circle, (s32)cmd_head.times, (s32)cmd_head.retry,
+ (s32)cmd_head.delay, (s32)cmd_head.data_len,
+ (s32)cmd_head.addr_len, cmd_head.addr[0], cmd_head.addr[1],
+ (s32)count);
+
+ if (cmd_head.data_len > (data_length - GTP_ADDR_LENGTH)) {
+ dev_err(&gt_client->dev, "data len %d > data buff %d, rejected\n",
+ cmd_head.data_len, (data_length - GTP_ADDR_LENGTH));
ret = -EINVAL;
goto exit;
}
- if (cmd_head.wr == 1) {
- /* copy_from_user(&cmd_head.data[cmd_head.addr_len],
- * &buff[CMD_HEAD_LENGTH], cmd_head.data_len);
- */
+ if (cmd_head.wr == GTP_RW_WRITE) {
ret = copy_from_user(&cmd_head.data[GTP_ADDR_LENGTH],
- &buff[CMD_HEAD_LENGTH], cmd_head.data_len);
- if (ret)
- pr_err("copy_from_user failed.\n");
+ &userbuf[CMD_HEAD_LENGTH], cmd_head.data_len);
+ if (ret) {
+ dev_err(&gt_client->dev, "copy_from_user failed");
+ ret = -EFAULT;
+ goto exit;
+ }
memcpy(&cmd_head.data[GTP_ADDR_LENGTH - cmd_head.addr_len],
cmd_head.addr, cmd_head.addr_len);
- if (cmd_head.flag == 1) {
- if (comfirm() == FAIL) {
- pr_err("[WRITE]Comfirm fail!\n");
+ if (cmd_head.flag == GTP_NEED_FLAG) {
+ if (comfirm() == FAIL) {
+ dev_err(&gt_client->dev, "Confirm fail");
ret = -EINVAL;
goto exit;
}
- } else if (cmd_head.flag == 2) {
+ } else if (cmd_head.flag == GTP_NEED_INTERRUPT) {
/* Need interrupt! */
}
if (tool_i2c_write(
&cmd_head.data[GTP_ADDR_LENGTH - cmd_head.addr_len],
cmd_head.data_len + cmd_head.addr_len) <= 0) {
- pr_err("[WRITE]Write data failed!\n");
+ dev_err(&gt_client->dev, "Write data failed");
ret = -EIO;
goto exit;
}
@@ -414,32 +371,33 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
ret = cmd_head.data_len + CMD_HEAD_LENGTH;
goto exit;
- } else if (cmd_head.wr == 3) { /* Write ic type */
-
- ret = copy_from_user(&cmd_head.data[0], &buff[CMD_HEAD_LENGTH],
+ } else if (cmd_head.wr == GTP_RW_WRITE_IC_TYPE) { /* Write ic type */
+ ret = copy_from_user(&cmd_head.data[0],
+ &userbuf[CMD_HEAD_LENGTH],
cmd_head.data_len);
- if (ret)
- pr_err("copy_from_user failed.\n");
+ if (ret) {
+ dev_err(&gt_client->dev, "copy_from_user failed");
+ ret = -EFAULT;
+ goto exit;
+ }
- if (cmd_head.data_len > sizeof(IC_TYPE)) {
- pr_debug("<<-GTP->> data len %d > data buff %d, rejected!\n",
- cmd_head.data_len, sizeof(IC_TYPE));
+ if (cmd_head.data_len > sizeof(ic_type)) {
+ dev_err(&gt_client->dev,
+ "data len %d > data buff %d, rejected\n",
+ cmd_head.data_len, sizeof(ic_type));
ret = -EINVAL;
goto exit;
}
- memcpy(IC_TYPE, cmd_head.data, cmd_head.data_len);
+ memcpy(ic_type, cmd_head.data, cmd_head.data_len);
register_i2c_func();
ret = cmd_head.data_len + CMD_HEAD_LENGTH;
goto exit;
- } else if (cmd_head.wr == 5) {
-
- /* memcpy(IC_TYPE, cmd_head.data, cmd_head.data_len); */
-
+ } else if (cmd_head.wr == GTP_RW_NO_WRITE) {
ret = cmd_head.data_len + CMD_HEAD_LENGTH;
goto exit;
- } else if (cmd_head.wr == 7) { /* disable irq! */
+ } else if (cmd_head.wr == GTP_RW_DISABLE_IRQ) { /* disable irq! */
gtp_irq_disable(i2c_get_clientdata(gt_client));
#if GTP_ESD_PROTECT
@@ -447,7 +405,7 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
#endif
ret = CMD_HEAD_LENGTH;
goto exit;
- } else if (cmd_head.wr == 9) { /* enable irq! */
+ } else if (cmd_head.wr == GTP_RW_ENABLE_IRQ) { /* enable irq! */
gtp_irq_enable(i2c_get_clientdata(gt_client));
#if GTP_ESD_PROTECT
@@ -455,41 +413,45 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
#endif
ret = CMD_HEAD_LENGTH;
goto exit;
- } else if (cmd_head.wr == 17) {
+ } else if (cmd_head.wr == GTP_RW_CHECK_RAWDIFF_MODE) {
struct goodix_ts_data *ts = i2c_get_clientdata(gt_client);
ret = copy_from_user(&cmd_head.data[GTP_ADDR_LENGTH],
- &buff[CMD_HEAD_LENGTH], cmd_head.data_len);
- if (ret)
- pr_debug("copy_from_user failed.\n");
+ &userbuf[CMD_HEAD_LENGTH], cmd_head.data_len);
+ if (ret) {
+ dev_err(&gt_client->dev, "copy_from_user failed");
+ goto exit;
+ }
if (cmd_head.data[GTP_ADDR_LENGTH]) {
- pr_debug("gtp enter rawdiff.\n");
+ pr_debug("gtp enter rawdiff\n");
ts->gtp_rawdiff_mode = true;
} else {
ts->gtp_rawdiff_mode = false;
- pr_debug("gtp leave rawdiff.\n");
+ pr_debug("gtp leave rawdiff\n");
}
ret = CMD_HEAD_LENGTH;
goto exit;
- }
-#ifdef UPDATE_FUNCTIONS
- else if (cmd_head.wr == 11) { /* Enter update mode! */
- if (gup_enter_update_mode(gt_client) == FAIL)
+ } else if (cmd_head.wr == GTP_RW_ENTER_UPDATE_MODE) {
+ /* Enter update mode! */
+ if (gup_enter_update_mode(gt_client) == FAIL) {
ret = -EBUSY;
goto exit;
- } else if (cmd_head.wr == 13) { /* Leave update mode! */
- gup_leave_update_mode();
- } else if (cmd_head.wr == 15) { /* Update firmware! */
+ }
+ } else if (cmd_head.wr == GTP_RW_LEAVE_UPDATE_MODE) {
+ /* Leave update mode! */
+ gup_leave_update_mode(gt_client);
+ } else if (cmd_head.wr == GTP_RW_UPDATE_FW) {
+ /* Update firmware! */
show_len = 0;
total_len = 0;
- if (cmd_head.data_len + 1 > DATA_LENGTH) {
- pr_debug("<<-GTP->> data len %d > data buff %d, rejected!\n",
- cmd_head.data_len + 1, DATA_LENGTH);
+ if (cmd_head.data_len + 1 > data_length) {
+ dev_err(&gt_client->dev, "data len %d > data buff %d, rejected\n",
+ cmd_head.data_len + 1, data_length);
ret = -EINVAL;
goto exit;
}
memset(cmd_head.data, 0, cmd_head.data_len + 1);
- memcpy(cmd_head.data, &buff[CMD_HEAD_LENGTH],
+ memcpy(cmd_head.data, &userbuf[CMD_HEAD_LENGTH],
cmd_head.data_len);
if (gup_update_proc((void *)cmd_head.data) == FAIL) {
@@ -497,7 +459,6 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
goto exit;
}
}
-#endif
ret = CMD_HEAD_LENGTH;
exit:
@@ -505,37 +466,37 @@ exit:
return ret;
}
-/*******************************************************
+/*
* Function:
* Goodix tool read function.
* Input:
* standard proc read function param.
* Output:
* Return read length.
- *******************************************************
-*/
-static s32 goodix_tool_read(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+ */
+static s32 goodix_tool_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
{
+ u16 data_len = 0;
s32 ret;
+ u8 buf[32];
mutex_lock(&lock);
- if (cmd_head.wr % 2) {
- pr_err("<< [READ]command head wrong\n");
+ if (cmd_head.wr & 0x1) {
+ dev_err(&gt_client->dev, "command head wrong\n");
ret = -EINVAL;
goto exit;
- } else if (!cmd_head.wr) {
- u16 len = 0;
- s16 data_len = 0;
- u16 loc = 0;
+ }
- if (cmd_head.flag == 1) {
+ switch (cmd_head.wr) {
+ case GTP_RW_READ:
+ if (cmd_head.flag == GTP_NEED_FLAG) {
if (comfirm() == FAIL) {
- pr_err("[READ]Comfirm fail!\n");
+ dev_err(&gt_client->dev, "Confirm fail");
ret = -EINVAL;
goto exit;
}
- } else if (cmd_head.flag == 2) {
+ } else if (cmd_head.flag == GTP_NEED_INTERRUPT) {
/* Need interrupt! */
}
@@ -550,54 +511,86 @@ static s32 goodix_tool_read(char *page, char **start, off_t off, int count,
msleep(cmd_head.delay);
data_len = cmd_head.data_len;
- while (data_len > 0) {
- if (data_len > DATA_LENGTH)
- len = DATA_LENGTH;
- else
- len = data_len;
-
- data_len -= len;
+ if (data_len <= 0 || (data_len > data_length)) {
+ dev_err(&gt_client->dev, "Invalid data length %d\n",
+ data_len);
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (data_len > count)
+ data_len = count;
- if (tool_i2c_read(cmd_head.data, len) <= 0) {
- pr_err("[READ]Read data failed!\n");
- ret = -EINVAL;
- goto exit;
- }
- memcpy(&page[loc], &cmd_head.data[GTP_ADDR_LENGTH],
- len);
- loc += len;
+ if (tool_i2c_read(cmd_head.data, data_len) <= 0) {
+ dev_err(&gt_client->dev, "Read data failed\n");
+ ret = -EIO;
+ goto exit;
}
- } else if (cmd_head.wr == 2) {
- /* memcpy(page, "gt8", cmd_head.data_len);
- * memcpy(page, "GT818", 5);
- * page[5] = 0;
- */
-
- pr_debug("Return ic type:%s len:%d.\n", page,
- (s32)cmd_head.data_len);
- ret = cmd_head.data_len;
- goto exit;
- /* return sizeof(IC_TYPE_NAME); */
- } else if (cmd_head.wr == 4) {
- page[0] = show_len >> 8;
- page[1] = show_len & 0xff;
- page[2] = total_len >> 8;
- page[3] = total_len & 0xff;
- } else if (cmd_head.wr == 6) {
- /* Read error code! */
- } else if (cmd_head.wr == 8) { /*Read driver version */
- /* memcpy(page, GTP_DRIVER_VERSION,
- * strlen(GTP_DRIVER_VERSION));
- */
- s32 tmp_len;
-
- tmp_len = strlen(GTP_DRIVER_VERSION);
- memcpy(page, GTP_DRIVER_VERSION, tmp_len);
- page[tmp_len] = 0;
+ ret = simple_read_from_buffer(user_buf, count, ppos,
+ &cmd_head.data[GTP_ADDR_LENGTH], data_len);
+ break;
+ case GTP_RW_FILL_INFO:
+ ret = fill_update_info(user_buf, count, ppos);
+ break;
+ case GTP_RW_READ_VERSION:
+ /* Read driver version */
+ data_len = scnprintf(buf, sizeof(buf), "%s\n",
+ GTP_DRIVER_VERSION);
+ ret = simple_read_from_buffer(user_buf, count, ppos,
+ buf, data_len);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- ret = cmd_head.data_len;
exit:
mutex_unlock(&lock);
return ret;
}
+
+static const struct file_operations goodix_proc_fops = {
+ .write = goodix_tool_write,
+ .read = goodix_tool_read,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+};
+
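/*
 * create_proc_entry() and the write_proc/read_proc hooks were removed
 * from the kernel around 3.10; this file_operations table plus
 * proc_create() below is the modern replacement, which is why both
 * handlers were reworked to the (file, user buffer, count, ppos)
 * signatures.
 */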
+s32 init_wr_node(struct i2c_client *client)
+{
+ u8 i;
+
+ gt_client = client;
+ memset(&cmd_head, 0, sizeof(cmd_head));
+ cmd_head.data = NULL;
+
+ i = GTP_I2C_RETRY_5;
+ while ((!cmd_head.data) && i) {
+ cmd_head.data = devm_kzalloc(&client->dev,
+ i * DATA_LENGTH_UINT, GFP_KERNEL);
+ if (cmd_head.data)
+ break;
+ i--;
+ }
+ if (i) {
+ data_length = i * DATA_LENGTH_UINT;
+ dev_dbg(&client->dev, "Applied memory size:%d", data_length);
+ }
+
+ cmd_head.addr_len = 2;
+ cmd_head.retry = GTP_I2C_RETRY_5;
+
+ register_i2c_func();
+
+ mutex_init(&lock);
+ tool_set_proc_name(procname);
+ goodix_proc_entry = proc_create(procname,
+ S_IWUSR | S_IWGRP | S_IRUSR | S_IRGRP,
+ goodix_proc_entry,
+ &goodix_proc_fops);
+ if (goodix_proc_entry == NULL) {
+ dev_err(&client->dev, "Couldn't create proc entry");
+ return FAIL;
+ }
+
+ return SUCCESS;
+}
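
/*
 * The allocation loop walks down from 5 x DATA_LENGTH_UINT (2560
 * bytes) to a single 512-byte unit and keeps the largest buffer it
 * gets. Unlike the deleted version, total failure no longer returns
 * FAIL here: data_length stays 0, and the data-carrying commands are
 * then refused by the length checks in the read and write handlers.
 */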
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.c b/drivers/input/touchscreen/gt9xx/gt9xx.c
index bc0ff0e4e7ac..a9d7666a6d6f 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.c
@@ -44,18 +44,16 @@
#include "gt9xx.h"
#include <linux/of_gpio.h>
-
+#include <linux/irq.h>
+#include <linux/module.h>
#include <linux/input/mt.h>
+#include <linux/debugfs.h>
#define GOODIX_DEV_NAME "Goodix-CTP"
#define CFG_MAX_TOUCH_POINTS 5
#define GOODIX_COORDS_ARR_SIZE 4
#define MAX_BUTTONS 4
-/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
-#define GTP_I2C_ADDRESS_HIGH 0x14
-#define GTP_I2C_ADDRESS_LOW 0x5D
-
#define GOODIX_VTG_MIN_UV 2600000
#define GOODIX_VTG_MAX_UV 3300000
#define GOODIX_I2C_VTG_MIN_UV 1800000
@@ -79,7 +77,6 @@ static const u16 touch_key_array[] = {KEY_MENU, KEY_HOMEPAGE, KEY_BACK};
#endif
-static void gtp_reset_guitar(struct goodix_ts_data *ts, int ms);
static void gtp_int_sync(struct goodix_ts_data *ts, int ms);
static int gtp_i2c_test(struct i2c_client *client);
static int goodix_power_off(struct goodix_ts_data *ts);
@@ -87,7 +84,9 @@ static int goodix_power_on(struct goodix_ts_data *ts);
#if defined(CONFIG_FB)
static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data);
+ unsigned long event, void *data);
+static int goodix_ts_suspend(struct device *dev);
+static int goodix_ts_resume(struct device *dev);
#elif defined(CONFIG_HAS_EARLYSUSPEND)
static void goodix_ts_early_suspend(struct early_suspend *h);
static void goodix_ts_late_resume(struct early_suspend *h);
@@ -98,7 +97,6 @@ static struct delayed_work gtp_esd_check_work;
static struct workqueue_struct *gtp_esd_check_workqueue;
static void gtp_esd_check_func(struct work_struct *work);
static int gtp_init_ext_watchdog(struct i2c_client *client);
-struct i2c_client *i2c_connect_client;
#endif
#if GTP_SLIDE_WAKEUP
@@ -113,6 +111,10 @@ static s8 gtp_enter_doze(struct goodix_ts_data *ts);
bool init_done;
static u8 chip_gt9xxs; /* true if ic is gt9xxs, like gt915s */
u8 grp_cfg_version;
+struct i2c_client *i2c_connect_client;
+
+#define GTP_DEBUGFS_DIR "ts_debug"
+#define GTP_DEBUGFS_FILE_SUSPEND "suspend"
/*******************************************************
Function:
@@ -264,7 +266,7 @@ Output:
result of i2c write operation.
> 0: succeed, otherwise: failed
*********************************************************/
-static int gtp_send_cfg(struct goodix_ts_data *ts)
+int gtp_send_cfg(struct goodix_ts_data *ts)
{
int ret;
#if GTP_DRIVER_SEND_CFG
@@ -595,26 +597,6 @@ exit_work_func:
/*******************************************************
Function:
- Timer interrupt service routine for polling mode.
-Input:
- timer: timer struct pointer
-Output:
- Timer work mode.
- HRTIMER_NORESTART: no restart mode
-*********************************************************/
-static enum hrtimer_restart goodix_ts_timer_handler(struct hrtimer *timer)
-{
- struct goodix_ts_data
- *ts = container_of(timer, struct goodix_ts_data, timer);
-
- queue_work(ts->goodix_wq, &ts->work);
- hrtimer_start(&ts->timer, ktime_set(0, (GTP_POLL_TIME + 6) * 1000000),
- HRTIMER_MODE_REL);
- return HRTIMER_NORESTART;
-}
-
-/*******************************************************
-Function:
External interrupt service routine for interrupt mode.
Input:
irq: interrupt number.
@@ -656,7 +638,7 @@ Input:
Output:
None.
*******************************************************/
-static void gtp_reset_guitar(struct goodix_ts_data *ts, int ms)
+void gtp_reset_guitar(struct goodix_ts_data *ts, int ms)
{
/* This reset sequence will select the I2C slave address */
gpio_direction_output(ts->pdata->reset_gpio, 0);
@@ -730,16 +712,13 @@ static s8 gtp_enter_doze(struct goodix_ts_data *ts)
return ret;
}
#else
-/*******************************************************
-Function:
- Enter sleep mode.
-Input:
- ts: private data.
-Output:
- Executive outcomes.
- >0: succeed, otherwise failed.
-*******************************************************/
-static s8 gtp_enter_sleep(struct goodix_ts_data *ts)
+/**
+ * gtp_enter_sleep - Enter sleep mode
+ * @ts: driver private data
+ *
+ * Returns zero on success, else an error.
+ */
+static u8 gtp_enter_sleep(struct goodix_ts_data *ts)
{
int ret = -1;
s8 retry = 0;
@@ -761,16 +740,16 @@ static s8 gtp_enter_sleep(struct goodix_ts_data *ts)
ret = goodix_power_off(ts);
if (ret) {
dev_err(&ts->client->dev, "GTP power off failed.\n");
- return 0;
+ return ret;
}
- return 1;
+ return 0;
}
usleep(5000);
while (retry++ < GTP_I2C_RETRY_5) {
ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
if (ret == 1) {
dev_dbg(&ts->client->dev, "GTP enter sleep!");
- return ret;
+ return 0;
}
msleep(20);
}
@@ -1196,19 +1175,14 @@ static int gtp_request_irq(struct goodix_ts_data *ts)
int ret;
const u8 irq_table[] = GTP_IRQ_TAB;
- ret = request_irq(ts->client->irq, goodix_ts_irq_handler,
+ GTP_DEBUG("INT trigger type:%x, irq=%d", ts->int_trigger_type,
+ ts->client->irq);
+
+ ret = request_threaded_irq(ts->client->irq, NULL,
+ goodix_ts_irq_handler,
irq_table[ts->int_trigger_type],
ts->client->name, ts);
if (ret) {
- dev_err(&ts->client->dev, "Request IRQ failed!ERRNO:%d.\n",
- ret);
- gpio_direction_input(ts->pdata->irq_gpio);
-
- hrtimer_init(&ts->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ts->timer.function = goodix_ts_timer_handler;
- hrtimer_start(&ts->timer, ktime_set(1, 0),
- HRTIMER_MODE_REL);
ts->use_irq = false;
return ret;
}
@@ -1560,6 +1534,56 @@ static const struct attribute_group gtp_attr_grp = {
.attrs = gtp_attrs,
};
+static int gtp_debug_suspend_set(void *_data, u64 val)
+{
+ struct goodix_ts_data *ts = _data;
+
+ mutex_lock(&ts->input_dev->mutex);
+ if (val)
+ goodix_ts_suspend(&ts->client->dev);
+ else
+ goodix_ts_resume(&ts->client->dev);
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return 0;
+}
+
+static int gtp_debug_suspend_get(void *_data, u64 *val)
+{
+ struct goodix_ts_data *ts = _data;
+
+ mutex_lock(&ts->input_dev->mutex);
+ *val = ts->gtp_is_suspend;
+ mutex_unlock(&ts->input_dev->mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_suspend_fops, gtp_debug_suspend_get,
+ gtp_debug_suspend_set, "%lld\n");
+
+static int gtp_debugfs_init(struct goodix_ts_data *data)
+{
+ data->debug_base = debugfs_create_dir(GTP_DEBUGFS_DIR, NULL);
+
+ if (IS_ERR_OR_NULL(data->debug_base)) {
+ pr_err("Failed to create debugfs dir\n");
+ return -EINVAL;
+ }
+
+ if ((IS_ERR_OR_NULL(debugfs_create_file(GTP_DEBUGFS_FILE_SUSPEND,
+ S_IWUSR | S_IWGRP | S_IRUSR | S_IRGRP,
+ data->debug_base,
+ data,
+ &debug_suspend_fops)))) {
+ pr_err("Failed to create suspend file\n");
+ debugfs_remove_recursive(data->debug_base);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
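/*
 * With the node in place, suspend/resume can be exercised by hand,
 * assuming debugfs is mounted at the usual location:
 *
 *	echo 1 > /sys/kernel/debug/ts_debug/suspend	(suspend)
 *	echo 0 > /sys/kernel/debug/ts_debug/suspend	(resume)
 *	cat /sys/kernel/debug/ts_debug/suspend		(query)
 */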
static int goodix_ts_get_dt_coords(struct device *dev, char *name,
struct goodix_ts_platform_data *pdata)
{
@@ -1694,7 +1718,7 @@ static int goodix_parse_dt(struct device *dev,
prop->value, pdata->config_data_len[i]);
read_cfg_num++;
}
- dev_dbg(dev, "%d config data read from device tree.\n", read_cfg_num);
+ dev_dbg(dev, "%d config data read from device tree\n", read_cfg_num);
return 0;
}
@@ -1783,7 +1807,7 @@ static int goodix_ts_probe(struct i2c_client *client,
ret = gtp_i2c_test(client);
if (ret != 2) {
- dev_err(&client->dev, "I2C communication ERROR!\n");
+ dev_err(&client->dev, "I2C communication ERROR\n");
goto exit_power_off;
}
@@ -1791,7 +1815,7 @@ static int goodix_ts_probe(struct i2c_client *client,
strlcpy(ts->fw_name, pdata->fw_name,
strlen(pdata->fw_name) + 1);
-#if GTP_AUTO_UPDATE
+#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
ret = gup_init_update_proc(ts);
if (ret < 0) {
dev_err(&client->dev,
@@ -1834,24 +1858,24 @@ static int goodix_ts_probe(struct i2c_client *client,
INIT_WORK(&ts->work, goodix_ts_work_func);
ret = gtp_request_irq(ts);
- if (ret < 0)
- dev_info(&client->dev, "GTP works in polling mode.\n");
+ if (ret)
+ dev_info(&client->dev, "GTP request irq failed %d\n", ret);
else
- dev_info(&client->dev, "GTP works in interrupt mode.\n");
+ dev_info(&client->dev, "GTP works in interrupt mode\n");
ret = gtp_read_fw_version(client, &version_info);
if (ret != 2)
- dev_err(&client->dev, "GTP firmware version read failed.\n");
+ dev_err(&client->dev, "GTP firmware version read failed\n");
ret = gtp_check_product_id(client);
if (ret != 0) {
- dev_err(&client->dev, "GTP Product id doesn't match.\n");
+ dev_err(&client->dev, "GTP Product id doesn't match\n");
goto exit_free_irq;
}
if (ts->use_irq)
gtp_irq_enable(ts);
-#if GTP_CREATE_WR_NODE
+#ifdef CONFIG_GT9XX_TOUCHPANEL_DEBUG
init_wr_node(client);
#endif
@@ -1860,10 +1884,14 @@ static int goodix_ts_probe(struct i2c_client *client,
#endif
ret = sysfs_create_group(&client->dev.kobj, &gtp_attr_grp);
if (ret < 0) {
- dev_err(&client->dev, "sys file creation failed.\n");
+ dev_err(&client->dev, "sys file creation failed\n");
goto exit_free_irq;
}
+ ret = gtp_debugfs_init(ts);
+ if (ret != 0)
+ goto exit_remove_sysfs;
+
init_done = true;
return 0;
exit_free_irq:
@@ -1871,7 +1899,7 @@ exit_free_irq:
#if defined(CONFIG_FB)
if (fb_unregister_client(&ts->fb_notif))
dev_err(&client->dev,
- "Error occurred while unregistering fb_notifier.\n");
+ "Error occurred while unregistering fb_notifier\n");
#elif defined(CONFIG_HAS_EARLYSUSPEND)
unregister_early_suspend(&ts->early_suspend);
#endif
@@ -1888,6 +1916,8 @@ exit_free_irq:
input_free_device(ts->input_dev);
ts->input_dev = NULL;
}
+exit_remove_sysfs:
+ sysfs_remove_group(&client->dev.kobj, &gtp_attr_grp);
exit_free_inputdev:
kfree(ts->config_data);
exit_power_off:
@@ -1921,13 +1951,13 @@ static int goodix_ts_remove(struct i2c_client *client)
#if defined(CONFIG_FB)
if (fb_unregister_client(&ts->fb_notif))
dev_err(&client->dev,
- "Error occurred while unregistering fb_notifier.\n");
+ "Error occurred while unregistering fb_notifier\n");
#elif defined(CONFIG_HAS_EARLYSUSPEND)
unregister_early_suspend(&ts->early_suspend);
#endif
mutex_destroy(&ts->lock);
-#if GTP_CREATE_WR_NODE
+#ifdef CONFIG_GT9XX_TOUCHPANEL_DEBUG
uninit_wr_node();
#endif
@@ -1962,6 +1992,7 @@ static int goodix_ts_remove(struct i2c_client *client)
goodix_power_deinit(ts);
i2c_set_clientdata(client, NULL);
}
+ debugfs_remove_recursive(ts->debug_base);
return 0;
}
@@ -1980,9 +2011,13 @@ static int goodix_ts_suspend(struct device *dev)
struct goodix_ts_data *ts = dev_get_drvdata(dev);
int ret = 0, i;
+ if (ts->gtp_is_suspend) {
+ dev_dbg(&ts->client->dev, "Already in suspend state\n");
+ return 0;
+ }
+
mutex_lock(&ts->lock);
#if GTP_ESD_PROTECT
- ts->gtp_is_suspend = 1;
gtp_esd_switch(ts->client, SWITCH_OFF);
#endif
@@ -2001,13 +2036,14 @@ static int goodix_ts_suspend(struct device *dev)
ret = gtp_enter_sleep(ts);
#endif
- if (ret <= 0)
- dev_err(&ts->client->dev, "GTP early suspend failed.\n");
+ if (ret < 0)
+ dev_err(&ts->client->dev, "GTP early suspend failed\n");
/* to avoid waking up while not sleeping,
* delay 48 + 10ms to ensure reliability
*/
msleep(58);
mutex_unlock(&ts->lock);
+ ts->gtp_is_suspend = 1;
return ret;
}
@@ -2025,6 +2061,11 @@ static int goodix_ts_resume(struct device *dev)
struct goodix_ts_data *ts = dev_get_drvdata(dev);
int ret = 0;
+ if (!ts->gtp_is_suspend) {
+ dev_dbg(&ts->client->dev, "Already in awake state\n");
+ return 0;
+ }
+
mutex_lock(&ts->lock);
ret = gtp_wakeup_sleep(ts);
@@ -2033,7 +2074,7 @@ static int goodix_ts_resume(struct device *dev)
#endif
if (ret <= 0)
- dev_err(&ts->client->dev, "GTP resume failed.\n");
+ dev_err(&ts->client->dev, "GTP resume failed\n");
if (ts->use_irq)
gtp_irq_enable(ts);
@@ -2042,10 +2083,10 @@ static int goodix_ts_resume(struct device *dev)
ktime_set(1, 0), HRTIMER_MODE_REL);
#if GTP_ESD_PROTECT
- ts->gtp_is_suspend = 0;
gtp_esd_switch(ts->client, SWITCH_ON);
#endif
mutex_unlock(&ts->lock);
+ ts->gtp_is_suspend = 0;
return ret;
}
@@ -2236,8 +2277,15 @@ static void gtp_esd_check_func(struct work_struct *work)
}
#endif
-static SIMPLE_DEV_PM_OPS(goodix_ts_dev_pm_ops, goodix_ts_suspend,
- goodix_ts_resume);
+#if (!defined(CONFIG_FB) && !defined(CONFIG_HAS_EARLYSUSPEND))
+static const struct dev_pm_ops goodix_ts_dev_pm_ops = {
+ .suspend = goodix_ts_suspend,
+ .resume = goodix_ts_resume,
+};
+#else
+static const struct dev_pm_ops goodix_ts_dev_pm_ops = {
+};
+#endif
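
/*
 * When CONFIG_FB (or the early-suspend hook) is in play, suspend and
 * resume are driven from the fb notifier callback instead, so the
 * dev_pm_ops table is intentionally left empty to keep the PM core
 * from suspending the panel a second time.
 */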
static const struct i2c_device_id goodix_ts_id[] = {
{ GTP_I2C_NAME, 0 },
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.h b/drivers/input/touchscreen/gt9xx/gt9xx.h
index 56e561ab3925..38487eea7b10 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.h
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.h
@@ -22,18 +22,9 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/irq.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/gpio.h>
-#include <linux/regulator/consumer.h>
-#include <linux/firmware.h>
-#include <linux/debugfs.h>
-#include <linux/mutex.h>
+#include <linux/uaccess.h>
#if defined(CONFIG_FB)
#include <linux/notifier.h>
@@ -105,6 +96,7 @@ struct goodix_ts_data {
#elif defined(CONFIG_HAS_EARLYSUSPEND)
struct early_suspend early_suspend;
#endif
+ struct dentry *debug_base;
};
extern u16 show_len;
@@ -116,14 +108,6 @@ extern u16 total_len;
#define GTP_DRIVER_SEND_CFG 1
#define GTP_HAVE_TOUCH_KEY 1
-/* auto updated by .bin file as default */
-#define GTP_AUTO_UPDATE 0
-/* auto updated by head_fw_array in gt9xx_firmware.h,
- * function together with GTP_AUTO_UPDATE
- */
-#define GTP_HEADER_FW_UPDATE 0
-
-#define GTP_CREATE_WR_NODE 0
#define GTP_ESD_PROTECT 0
#define GTP_WITH_PEN 0
@@ -132,26 +116,14 @@ extern u16 total_len;
/* double-click wakeup, function together with GTP_SLIDE_WAKEUP */
#define GTP_DBL_CLK_WAKEUP 0
-/*************************** PART2:TODO define *******************************/
-/* STEP_1(REQUIRED): Define Configuration Information Group(s) */
-/* Sensor_ID Map: */
-/* sensor_opt1 sensor_opt2 Sensor_ID
- * GND GND 0
- * VDDIO GND 1
- * NC GND 2
- * GND NC/300K 3
- * VDDIO NC/300K 4
- * NC NC/300K 5
- */
-
-#define GTP_IRQ_TAB {\
+#define GTP_IRQ_TAB {\
IRQ_TYPE_EDGE_RISING,\
IRQ_TYPE_EDGE_FALLING,\
IRQ_TYPE_LEVEL_LOW,\
IRQ_TYPE_LEVEL_HIGH\
}
-/* STEP_3(optional): Specify your special config info if needed */
+
#define GTP_IRQ_TAB_RISING 0
#define GTP_IRQ_TAB_FALLING 1
#if GTP_CUSTOM_CFG
@@ -197,16 +169,52 @@ extern u16 total_len;
#define RESOLUTION_LOC 3
#define TRIGGER_LOC 8
+/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
+#define GTP_I2C_ADDRESS_HIGH 0x14
+#define GTP_I2C_ADDRESS_LOW 0x5D
+
+/* GTP CM_HEAD RW flags */
+#define GTP_RW_READ 0
+#define GTP_RW_WRITE 1
+#define GTP_RW_READ_IC_TYPE 2
+#define GTP_RW_WRITE_IC_TYPE 3
+#define GTP_RW_FILL_INFO 4
+#define GTP_RW_NO_WRITE 5
+#define GTP_RW_READ_ERROR 6
+#define GTP_RW_DISABLE_IRQ 7
+#define GTP_RW_READ_VERSION 8
+#define GTP_RW_ENABLE_IRQ 9
+#define GTP_RW_ENTER_UPDATE_MODE 11
+#define GTP_RW_LEAVE_UPDATE_MODE 13
+#define GTP_RW_UPDATE_FW 15
+#define GTP_RW_CHECK_RAWDIFF_MODE 17
+
+/* GTP need flag or interrupt */
+#define GTP_NO_NEED 0
+#define GTP_NEED_FLAG 1
+#define GTP_NEED_INTERRUPT 2
+
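/*
 * The GTP_RW_* codes follow a parity convention: odd values are
 * commands consumed by goodix_tool_write(), even values are queries
 * served by goodix_tool_read(), which is why the read path rejects
 * any request with (cmd_head.wr & 0x1) set.
 */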
/*****************************End of Part III********************************/
void gtp_esd_switch(struct i2c_client *client, int on);
-#if GTP_CREATE_WR_NODE
-extern s32 init_wr_node(struct i2c_client *client);
-extern void uninit_wr_node(void);
+int gtp_i2c_read_dbl_check(struct i2c_client *client, u16 addr,
+ u8 *rxbuf, int len);
+int gtp_send_cfg(struct goodix_ts_data *ts);
+void gtp_reset_guitar(struct goodix_ts_data *ts, int ms);
+void gtp_irq_disable(struct goodix_ts_data *ts);
+void gtp_irq_enable(struct goodix_ts_data *ts);
+
+#ifdef CONFIG_GT9XX_TOUCHPANEL_DEBUG
+s32 init_wr_node(struct i2c_client *client);
+void uninit_wr_node(void);
#endif
-#if GTP_AUTO_UPDATE
+#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
extern u8 gup_init_update_proc(struct goodix_ts_data *ts);
+s32 gup_enter_update_mode(struct i2c_client *client);
+void gup_leave_update_mode(struct i2c_client *client);
+s32 gup_update_proc(void *dir);
+extern struct i2c_client *i2c_connect_client;
#endif
#endif /* _GOODIX_GT9XX_H_ */
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx_firmware.h b/drivers/input/touchscreen/gt9xx/gt9xx_firmware.h
deleted file mode 100644
index 81e3affe62e9..000000000000
--- a/drivers/input/touchscreen/gt9xx/gt9xx_firmware.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/*
- * make sense only when GTP_HEADER_FW_UPDATE & GTP_AUTO_UPDATE are enabled
- * define your own firmware array here
-*/
-const unsigned char header_fw_array[] = {
-};
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx_update.c b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
index c991bfd3ffdf..4660b27d156c 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx_update.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
@@ -32,23 +32,18 @@
* By Meta, 2013/03/11
*/
#include "gt9xx.h"
+#include <linux/firmware.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
-#if GTP_HEADER_FW_UPDATE
-#include <linux/namei.h>
-#include <linux/mount.h>
-#include "gt9xx_firmware.h"
-#endif
+#define FIRMWARE_NAME_LEN_MAX 256
#define GUP_REG_HW_INFO 0x4220
#define GUP_REG_FW_MSG 0x41E4
#define GUP_REG_PID_VID 0x8140
-#define GUP_SEARCH_FILE_TIMES 50
-#define UPDATE_FILE_PATH_2 "/data/_goodix_update_.bin"
-#define UPDATE_FILE_PATH_1 "/sdcard/_goodix_update_.bin"
-
-#define CONFIG_FILE_PATH_1 "/data/_goodix_config_.cfg"
-#define CONFIG_FILE_PATH_2 "/sdcard/_goodix_config_.cfg"
+#define GOODIX_FIRMWARE_FILE_NAME "_goodix_update_.bin"
+#define GOODIX_CONFIG_FILE_NAME "_goodix_config_.cfg"
#define FW_HEAD_LENGTH 14
#define FW_SECTION_LENGTH 0x2000
@@ -72,24 +67,22 @@
#define FAIL 0
#define SUCCESS 1
-#pragma pack(1)
-struct {
+struct st_fw_head {
u8 hw_info[4]; /* hardware info */
u8 pid[8]; /* product id */
u16 vid; /* version id */
-} st_fw_head;
-#pragma pack()
+} __packed;
-struct {
+struct st_update_msg {
u8 force_update;
u8 fw_flag;
- struct file *file;
- struct file *cfg_file;
- st_fw_head ic_fw_msg;
- mm_segment_t old_fs;
-} st_update_msg;
+ bool need_free;
+ u8 *fw_data;
+ u32 fw_len;
+ struct st_fw_head ic_fw_msg;
+};
-st_update_msg update_msg;
+static struct st_update_msg update_msg;
u16 show_len;
u16 total_len;
u8 got_file_flag;
@@ -106,7 +99,7 @@ Output:
numbers of i2c_msgs to transfer:
2: succeed, otherwise: failed
*********************************************************/
-s32 gup_i2c_read(struct i2c_client *client, u8 *buf, s32 len)
+static s32 gup_i2c_read(struct i2c_client *client, u8 *buf, s32 len)
{
s32 ret = -1;
u8 retries = 0;
@@ -171,7 +164,7 @@ s32 gup_i2c_write(struct i2c_client *client, u8 *buf, s32 len)
}
if (retries == 5) {
- dev_err(&client->dev, "I2C write retry limit over.\n");
+ dev_err(&client->dev, "I2C write retry limit over\n");
ret = -EIO;
}
@@ -180,112 +173,92 @@ s32 gup_i2c_write(struct i2c_client *client, u8 *buf, s32 len)
static s32 gup_init_panel(struct goodix_ts_data *ts)
{
+ struct i2c_client *client = ts->client;
+ u8 *config_data;
s32 ret = 0;
s32 i = 0;
u8 check_sum = 0;
u8 opr_buf[16];
u8 sensor_id = 0;
- u8 cfg_info_group1[] = CTP_CFG_GROUP1;
- u8 cfg_info_group2[] = CTP_CFG_GROUP2;
- u8 cfg_info_group3[] = CTP_CFG_GROUP3;
- u8 cfg_info_group4[] = CTP_CFG_GROUP4;
- u8 cfg_info_group5[] = CTP_CFG_GROUP5;
- u8 cfg_info_group6[] = CTP_CFG_GROUP6;
- u8 *send_cfg_buf[] = {cfg_info_group1, cfg_info_group2, cfg_info_group3,
- cfg_info_group4, cfg_info_group5, cfg_info_group6};
- u8 cfg_info_len[] = { CFG_GROUP_LEN(cfg_info_group1),
- CFG_GROUP_LEN(cfg_info_group2),
- CFG_GROUP_LEN(cfg_info_group3),
- CFG_GROUP_LEN(cfg_info_group4),
- CFG_GROUP_LEN(cfg_info_group5),
- CFG_GROUP_LEN(cfg_info_group6)};
-
- if ((!cfg_info_len[1]) && (!cfg_info_len[2]) &&
- (!cfg_info_len[3]) && (!cfg_info_len[4]) &&
- (!cfg_info_len[5])) {
+ for (i = 0; i < GOODIX_MAX_CFG_GROUP; i++)
+ if (ts->pdata->config_data_len[i])
+ break;
+
+ if (i == GOODIX_MAX_CFG_GROUP) {
sensor_id = 0;
} else {
- ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_SENSOR_ID,
+ ret = gtp_i2c_read_dbl_check(client, GTP_REG_SENSOR_ID,
&sensor_id, 1);
if (ret == SUCCESS) {
- if (sensor_id >= 0x06) {
- pr_err("Invalid sensor_id(0x%02X), No Config Sent!\n",
+ if (sensor_id >= GOODIX_MAX_CFG_GROUP) {
+ pr_err("Invalid sensor_id(0x%02X), No Config Sent",
sensor_id);
return -EINVAL;
}
} else {
- pr_err("Failed to get sensor_id, No config sent!\n");
+ pr_err("Failed to get sensor_id, No config sent\n");
return -EINVAL;
}
}
- pr_debug("Sensor_ID: %d\n", sensor_id);
+ pr_debug("Sensor ID selected: %d", sensor_id);
- ts->gtp_cfg_len = cfg_info_len[sensor_id];
-
- if (ts->gtp_cfg_len < GTP_CONFIG_MIN_LENGTH) {
- pr_err("Sensor_ID(%d) matches with NULL or INVALID CONFIG GROUP! NO Config Sent! You need to check you header file CFG_GROUP section!\n",
- sensor_id);
+ if (ts->pdata->config_data_len[sensor_id] < GTP_CONFIG_MIN_LENGTH ||
+ !ts->pdata->config_data_len[sensor_id]) {
+ pr_err("Sensor_ID(%d) matches with NULL or INVALID CONFIG GROUP",
+ sensor_id);
return -EINVAL;
}
- ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_CONFIG_DATA,
+ ret = gtp_i2c_read_dbl_check(client, GTP_REG_CONFIG_DATA,
&opr_buf[0], 1);
-
if (ret == SUCCESS) {
- pr_debug("CFG_GROUP%d Config Version: %d, IC Config Version: %d\n",
- sensor_id+1, send_cfg_buf[sensor_id][0], opr_buf[0]);
+ pr_debug("CFG_GROUP%d Config Version: %d, IC Config Version: %d",
+ sensor_id + 1,
+ ts->pdata->config_data[sensor_id][0],
+ opr_buf[0]);
- send_cfg_buf[sensor_id][0] = opr_buf[0];
+ ts->pdata->config_data[sensor_id][0] = opr_buf[0];
ts->fixed_cfg = 0;
- /*
- * if (opr_buf[0] < 90) {
- * grp_cfg_version = send_cfg_buf[sensor_id][0];
- * *** backup group config version ***
- * send_cfg_buf[sensor_id][0] = 0x00;
- * ts->fixed_cfg = 0;
- * } else { *** treated as fixed config, not send config ***
- * pr_info("Ic fixed config with config version(%d)",
- * opr_buf[0]);
- * ts->fixed_cfg = 1;
- * }
- */
+ } else {
+ pr_err("Failed to get ic config version. No config sent");
return -EINVAL;
}
- memset(&config[GTP_ADDR_LENGTH], 0, GTP_CONFIG_MAX_LENGTH);
- memcpy(&config[GTP_ADDR_LENGTH], send_cfg_buf[sensor_id],
- ts->gtp_cfg_len);
+ config_data = ts->pdata->config_data[sensor_id];
+ ts->config_data = ts->pdata->config_data[sensor_id];
+ ts->gtp_cfg_len = ts->pdata->config_data_len[sensor_id];
pr_debug("X_MAX = %d, Y_MAX = %d, TRIGGER = 0x%02x\n",
ts->abs_x_max, ts->abs_y_max, ts->int_trigger_type);
- config[RESOLUTION_LOC] = (u8)GTP_MAX_WIDTH;
- config[RESOLUTION_LOC + 1] = (u8)(GTP_MAX_WIDTH>>8);
- config[RESOLUTION_LOC + 2] = (u8)GTP_MAX_HEIGHT;
- config[RESOLUTION_LOC + 3] = (u8)(GTP_MAX_HEIGHT>>8);
+ config_data[RESOLUTION_LOC] = (u8)GTP_MAX_WIDTH;
+ config_data[RESOLUTION_LOC + 1] = (u8)(GTP_MAX_WIDTH>>8);
+ config_data[RESOLUTION_LOC + 2] = (u8)GTP_MAX_HEIGHT;
+ config_data[RESOLUTION_LOC + 3] = (u8)(GTP_MAX_HEIGHT>>8);
if (GTP_INT_TRIGGER == 0) /* RISING */
- config[TRIGGER_LOC] &= 0xfe;
+ config_data[TRIGGER_LOC] &= 0xfe;
else if (GTP_INT_TRIGGER == 1) /* FALLING */
- config[TRIGGER_LOC] |= 0x01;
+ config_data[TRIGGER_LOC] |= 0x01;
check_sum = 0;
for (i = GTP_ADDR_LENGTH; i < ts->gtp_cfg_len; i++)
- check_sum += config[i];
+ check_sum += config_data[i];
- config[ts->gtp_cfg_len] = (~check_sum) + 1;
+ config_data[ts->gtp_cfg_len] = (~check_sum) + 1;
- ret = gtp_send_cfg(ts->client);
+ ret = gtp_send_cfg(ts);
if (ret < 0)
- pr_err("Send config error.\n");
+ pr_err("Send config error\n");
+ ts->config_data = NULL;
+ ts->gtp_cfg_len = 0;
msleep(20);
return 0;
}
-
static u8 gup_get_ic_msg(struct i2c_client *client, u16 addr, u8 *msg, s32 len)
{
u8 i = 0;
@@ -298,7 +271,7 @@ static u8 gup_get_ic_msg(struct i2c_client *client, u16 addr, u8 *msg, s32 len)
break;
if (i >= 5) {
- pr_err("Read data from 0x%02x%02x failed!\n", msg[0], msg[1]);
+ pr_err("Read data from 0x%02x%02x failed\n", msg[0], msg[1]);
return FAIL;
}
@@ -319,7 +292,7 @@ static u8 gup_set_ic_msg(struct i2c_client *client, u16 addr, u8 val)
break;
if (i >= 5) {
- pr_err("Set data to 0x%02x%02x failed!\n", msg[0], msg[1]);
+ pr_err("Set data to 0x%02x%02x failed\n", msg[0], msg[1]);
return FAIL;
}
@@ -337,7 +310,7 @@ static u8 gup_get_ic_fw_msg(struct i2c_client *client)
ret = gtp_i2c_read_dbl_check(client, GUP_REG_HW_INFO,
&buf[GTP_ADDR_LENGTH], 4);
if (ret == FAIL) {
- pr_err("[get_ic_fw_msg]get hw_info failed,exit\n");
+		pr_err("get hw_info failed, exit");
return FAIL;
}
@@ -356,14 +329,14 @@ static u8 gup_get_ic_fw_msg(struct i2c_client *client)
for (retry = 0; retry < 2; retry++) {
ret = gup_get_ic_msg(client, GUP_REG_FW_MSG, buf, 1);
if (ret == FAIL) {
- pr_err("Read firmware message fail.\n");
+ pr_err("Read firmware message fail\n");
return ret;
}
update_msg.force_update = buf[GTP_ADDR_LENGTH];
if ((update_msg.force_update != 0xBE) && (!retry)) {
- pr_info("The check sum in ic is error.\n");
- pr_info("The IC will be updated by force.\n");
+		pr_info("The checksum in the IC is wrong\n");
+ pr_info("The IC will be updated by force\n");
continue;
}
break;
@@ -374,7 +347,7 @@ static u8 gup_get_ic_fw_msg(struct i2c_client *client)
ret = gtp_i2c_read_dbl_check(client, GUP_REG_PID_VID,
&buf[GTP_ADDR_LENGTH], 6);
if (ret == FAIL) {
- pr_err("[get_ic_fw_msg]get pid & vid failed,exit\n");
+		pr_err("get pid & vid failed, exit");
return FAIL;
}
@@ -413,17 +386,19 @@ s32 gup_enter_update_mode(struct i2c_client *client)
s32 ret = -1;
u8 retry = 0;
u8 rd_buf[3];
+ struct goodix_ts_data *ts = i2c_get_clientdata(client);
/* step1:RST output low last at least 2ms */
- GTP_GPIO_OUTPUT(GTP_RST_PORT, 0);
- msleep(20);
+ gpio_direction_output(ts->pdata->reset_gpio, 0);
+ usleep(20000);
/* step2:select I2C slave addr,INT:0--0xBA;1--0x28. */
- GTP_GPIO_OUTPUT(GTP_INT_PORT, (client->addr == 0x14));
+ gpio_direction_output(ts->pdata->irq_gpio,
+ (client->addr == GTP_I2C_ADDRESS_HIGH));
msleep(20);
/* step3:RST output high reset guitar */
- GTP_GPIO_OUTPUT(GTP_RST_PORT, 1);
+ gpio_direction_output(ts->pdata->reset_gpio, 1);
/* 20121211 modify start */
msleep(20);
@@ -449,7 +424,7 @@ s32 gup_enter_update_mode(struct i2c_client *client)
rd_buf[GTP_ADDR_LENGTH]);
}
if (retry >= 200) {
- pr_err("Enter update Hold ss51 failed.\n");
+		pr_err("Enter update: hold ss51 failed\n");
return FAIL;
}
@@ -460,12 +435,13 @@ s32 gup_enter_update_mode(struct i2c_client *client)
return ret;
}
-void gup_leave_update_mode(void)
+void gup_leave_update_mode(struct i2c_client *client)
{
- GTP_GPIO_AS_INT(GTP_INT_PORT);
+ struct goodix_ts_data *ts = i2c_get_clientdata(client);
- pr_debug("[leave_update_mode]reset chip.\n");
- gtp_reset_guitar(i2c_connect_client, 20);
+ gpio_direction_input(ts->pdata->irq_gpio);
+ pr_debug("reset chip");
+ gtp_reset_guitar(ts, 20);
}
/* Get the correct nvram data
@@ -486,7 +462,8 @@ void gup_leave_update_mode(void)
* 3. IC PID == 91XX || File PID == 91XX
*/
-static u8 gup_enter_update_judge(st_fw_head *fw_head)
+static u8 gup_enter_update_judge(struct i2c_client *client,
+ struct st_fw_head *fw_head)
{
u16 u16_tmp;
s32 i = 0;
@@ -510,9 +487,9 @@ static u8 gup_enter_update_judge(st_fw_head *fw_head)
/* First two conditions */
if (!memcmp(fw_head->hw_info, update_msg.ic_fw_msg.hw_info,
sizeof(update_msg.ic_fw_msg.hw_info))) {
- pr_debug("Get the same hardware info.\n");
+ pr_debug("Get the same hardware info\n");
if (update_msg.force_update != 0xBE) {
- pr_info("FW chksum error,need enter update.\n");
+			pr_info("FW chksum error, need to enter update\n");
return SUCCESS;
}
@@ -535,67 +512,48 @@ static u8 gup_enter_update_judge(st_fw_head *fw_head)
(!memcmp(update_msg.ic_fw_msg.pid, "91XX", 4)) ||
(!memcmp(fw_head->pid, "91XX", 4))) {
if (!memcmp(fw_head->pid, "91XX", 4))
- pr_debug("Force none same pid update mode.\n");
+ pr_debug("Force none same pid update mode\n");
else
- pr_debug("Get the same pid.\n");
+ pr_debug("Get the same pid\n");
/* The third condition */
if (fw_head->vid > update_msg.ic_fw_msg.vid) {
- pr_info("Need enter update.");
+ pr_info("Need enter update");
return SUCCESS;
}
- pr_err("Don't meet the third condition.\n");
- pr_err("File VID <= Ic VID, update aborted!\n");
+ pr_err("Don't meet the third condition\n");
+ pr_err("File VID <= Ic VID, update aborted\n");
} else {
- pr_err("File PID != Ic PID, update aborted!\n");
+ pr_err("File PID != Ic PID, update aborted\n");
}
} else {
- pr_err("Different Hardware, update aborted!\n");
+ pr_err("Different Hardware, update aborted\n");
}
return FAIL;
}
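The enter-update policy spelled out in the comment block above collapses into one predicate: identical hardware, then a broken IC checksum forces an update, then matching or wildcard "91XX" PIDs, then a strictly newer version. A hedged sketch of that predicate; the helper name is illustrative and the PID-truncation handling elided by this hunk is omitted:

/* Sketch only: the update decision, reduced to a single predicate. */
static bool example_should_update(const struct st_fw_head *file,
				  const struct st_fw_head *ic, u8 force)
{
	if (memcmp(file->hw_info, ic->hw_info, sizeof(ic->hw_info)))
		return false;		/* different hardware, abort */
	if (force != 0xBE)
		return true;		/* IC checksum broken, force update */
	if (memcmp(file->pid, ic->pid, 4) &&
	    memcmp(file->pid, "91XX", 4) && memcmp(ic->pid, "91XX", 4))
		return false;		/* PID mismatch, no wildcard */
	return file->vid > ic->vid;	/* upgrade only, never downgrade */
}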
-static u8 ascii2hex(u8 a)
-{
- s8 value = 0;
-
- if (a >= '0' && a <= '9')
- value = a - '0';
- else if (a >= 'A' && a <= 'F')
- value = a - 'A' + 0x0A;
- else if (a >= 'a' && a <= 'f')
- value = a - 'a' + 0x0A;
- else
- value = 0xff;
-
- return value;
-}
-
-static s8 gup_update_config(struct i2c_client *client)
+static s8 gup_update_config(struct i2c_client *client,
+ const struct firmware *cfg)
{
- u32 file_len = 0;
s32 ret = 0;
s32 i = 0;
s32 file_cfg_len = 0;
u32 chip_cfg_len = 0;
s32 count = 0;
u8 *buf;
- u8 *pre_buf;
u8 *file_config;
- /* u8 checksum = 0; */
u8 pid[8];
- if (update_msg.cfg_file == NULL) {
- pr_err("[update_cfg]No need to upgrade config!\n");
+ if (!cfg || !cfg->data) {
+ pr_err("No need to upgrade config");
return FAIL;
}
- file_len = update_msg.cfg_file->f_op->llseek(update_msg.cfg_file,
- 0, SEEK_END);
ret = gup_get_ic_msg(client, GUP_REG_PID_VID, pid, 6);
if (ret == FAIL) {
- pr_err("[update_cfg]Read product id & version id fail.\n");
+ pr_err("Read product id & version id fail");
return FAIL;
}
pid[5] = '\0';
@@ -603,338 +561,185 @@ static s8 gup_update_config(struct i2c_client *client)
chip_cfg_len = 186;
if (!memcmp(&pid[GTP_ADDR_LENGTH], "968", 3) ||
- !memcmp(&pid[GTP_ADDR_LENGTH], "910", 3) ||
- !memcmp(&pid[GTP_ADDR_LENGTH], "960", 3)) {
+ !memcmp(&pid[GTP_ADDR_LENGTH], "910", 3) ||
+ !memcmp(&pid[GTP_ADDR_LENGTH], "960", 3)) {
chip_cfg_len = 228;
}
- pr_debug("[update_cfg]config file len:%d\n", file_len);
- pr_debug("[update_cfg]need config len:%d\n", chip_cfg_len);
- if ((file_len+5) < chip_cfg_len*5) {
+	pr_debug("config file ASCII len:%zu", cfg->size);
+	pr_debug("need config binary len:%u", chip_cfg_len);
+ if ((cfg->size + 5) < chip_cfg_len * 5) {
pr_err("Config length error");
return -EINVAL;
}
- buf = devm_kzalloc(&client->dev, file_len, GFP_KERNEL);
+ buf = devm_kzalloc(&client->dev, cfg->size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- pre_buf = devm_kzalloc(&client->dev, file_len, GFP_KERNEL);
- if (!pre_buf)
- return -ENOMEM;
-
file_config = devm_kzalloc(&client->dev, chip_cfg_len + GTP_ADDR_LENGTH,
GFP_KERNEL);
if (!file_config)
return -ENOMEM;
- update_msg.cfg_file->f_op->llseek(update_msg.cfg_file, 0, SEEK_SET);
-
- pr_debug("[update_cfg]Read config from file.\n");
- ret = update_msg.cfg_file->f_op->read(update_msg.cfg_file,
- (char *)pre_buf, file_len, &update_msg.cfg_file->f_pos);
- if (ret < 0) {
- pr_err("[update_cfg]Read config file failed.\n");
- return ret;
- }
-
- pr_debug("[update_cfg]Delete illegal character.\n");
- for (i = 0, count = 0; i < file_len; i++) {
- if (pre_buf[i] == ' ' || pre_buf[i] == '\r'
- || pre_buf[i] == '\n')
+ pr_debug("Delete illegal character");
+ for (i = 0, count = 0; i < cfg->size; i++) {
+ if (cfg->data[i] == ' ' || cfg->data[i] == '\r'
+ || cfg->data[i] == '\n')
continue;
- buf[count++] = pre_buf[i];
+ buf[count++] = cfg->data[i];
}
- pr_debug("[update_cfg]Ascii to hex.\n");
+ pr_debug("Ascii to hex");
file_config[0] = GTP_REG_CONFIG_DATA >> 8;
file_config[1] = GTP_REG_CONFIG_DATA & 0xff;
-	for (i = 0, file_cfg_len = GTP_ADDR_LENGTH; i < count; i += 5) {
- if ((buf[i] == '0') && ((buf[i+1] == 'x') ||
- (buf[i+1] == 'X'))) {
- u8 high, low;
-
- high = ascii2hex(buf[i+2]);
- low = ascii2hex(buf[i+3]);
+	for (i = 0, file_cfg_len = GTP_ADDR_LENGTH; i < count; i += 5) {
+		if ((buf[i] == '0') && ((buf[i + 1] == 'x') ||
+				(buf[i + 1] == 'X'))) {
+			/*
+			 * hex2bin() consumes two ASCII hex digits per
+			 * output byte, so one call converts the "XY" of
+			 * an "0xXY" token; a second call at buf[i + 3]
+			 * would read into the separator character.
+			 */
+			ret = hex2bin(&file_config[file_cfg_len], &buf[i + 2], 1);
+			if (ret) {
+				pr_err("Illegal config file");
+				return ret;
+			}
+			file_cfg_len++;
-			if ((high == 0xFF) || (low == 0xFF)) {
-				ret = 0;
-				pr_err("[update_cfg]Illegal config file.\n");
-				return ret;
-			}
-			file_config[file_cfg_len++] = (high<<4) + low;
} else {
ret = 0;
- pr_err("[update_cfg]Illegal config file.\n");
+ pr_err("Illegal config file");
return ret;
}
}
- /* cal checksum */
- /* for (i=GTP_ADDR_LENGTH; i<chip_cfg_len; i++)
- * checksum += file_config[i];
- * file_config[chip_cfg_len] = (~checksum) + 1;
- * file_config[chip_cfg_len+1] = 0x01;
- */
-
i = 0;
while (i++ < 5) {
ret = gup_i2c_write(client, file_config, file_cfg_len);
- if (ret > 0) {
- pr_info("[update_cfg]Send config SUCCESS.\n");
+ if (ret > 0)
break;
- }
- pr_err("[update_cfg]Send config i2c error.\n");
+ pr_err("Send config i2c error");
}
return ret;
}
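The move from the hand-rolled ascii2hex() to the kernel's hex2bin() rests on one property: hex2bin() converts two ASCII digits into each output byte and returns non-zero on a bad digit. A sketch of how a single "0xXY" token from the .cfg file parses under that assumption (helper name illustrative):

/* Sketch only: parse one "0xXY" token into a binary config byte. */
static int example_parse_cfg_token(const char *token, u8 *out)
{
	if (token[0] != '0' || (token[1] != 'x' && token[1] != 'X'))
		return -EINVAL;
	return hex2bin(out, &token[2], 1);	/* "41" -> 0x41, or error */
}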
-#if GTP_HEADER_FW_UPDATE
-static u8 gup_check_fs_mounted(char *path_name)
+static s32 gup_get_firmware_file(struct i2c_client *client,
+ struct st_update_msg *msg, u8 *path)
{
- struct path root_path;
- struct path path;
- int err;
+ s32 ret;
+ const struct firmware *fw = NULL;
- err = kern_path("/", LOOKUP_FOLLOW, &root_path);
+ ret = request_firmware(&fw, path, &client->dev);
+ if (ret < 0) {
+ dev_info(&client->dev, "Cannot get firmware - %s (%d)\n",
+ path, ret);
+ return -EEXIST;
+ }
- if (err) {
- pr_debug("\"/\" NOT Mounted: %d\n", err);
- return FAIL;
+	dev_dbg(&client->dev, "Firmware file: %s size=%zu", path, fw->size);
+ msg->fw_data =
+ devm_kzalloc(&client->dev, fw->size, GFP_KERNEL);
+ if (!msg->fw_data) {
+ release_firmware(fw);
+ return -ENOMEM;
}
- err = kern_path(path_name, LOOKUP_FOLLOW, &path);
- if (err) {
- pr_debug("/data/ NOT Mounted: %d\n", err);
- return FAIL;
+ memcpy(msg->fw_data, fw->data, fw->size);
+ msg->fw_len = fw->size;
+ msg->need_free = true;
+ release_firmware(fw);
+ return 0;
+}
+
+static s32 gup_check_firmware_name(struct i2c_client *client,
+ u8 **path_p)
+{
+	size_t len;
+ u8 *fname;
+
+ if (!(*path_p)) {
+ *path_p = GOODIX_FIRMWARE_FILE_NAME;
+ return 0;
}
- return SUCCESS;
+ len = strnlen(*path_p, FIRMWARE_NAME_LEN_MAX);
+ if (len >= FIRMWARE_NAME_LEN_MAX) {
+ dev_err(&client->dev, "firmware name too long");
+ return -EINVAL;
+ }
- /* if (path.mnt->mnt_sb == root_path.mnt->mnt_sb)
- * return FAIL;
- * else
- * return SUCCESS;
- */
+ fname = strrchr(*path_p, '/');
+ if (fname) {
+ fname = fname + 1;
+ *path_p = fname;
+ }
+ return 0;
}
-#endif
-static u8 gup_check_update_file(struct i2c_client *client, st_fw_head *fw_head,
- u8 *path)
+static u8 gup_check_update_file(struct i2c_client *client,
+ struct st_fw_head *fw_head, u8 *path)
{
s32 ret = 0;
s32 i = 0;
s32 fw_checksum = 0;
- u8 buf[FW_HEAD_LENGTH];
-
- if (path) {
- pr_debug("Update File path:%s, %d\n", path, strlen(path));
- update_msg.file = file_open(path, O_RDONLY, 0);
+ u16 temp;
+ const struct firmware *fw = NULL;
- if (IS_ERR(update_msg.file)) {
- pr_err("Open update file(%s) error!\n", path);
- return FAIL;
- }
+ ret = request_firmware(&fw, GOODIX_CONFIG_FILE_NAME, &client->dev);
+ if (ret < 0) {
+ dev_info(&client->dev, "Cannot get config file - %s (%d)\n",
+ GOODIX_CONFIG_FILE_NAME, ret);
} else {
-#if GTP_HEADER_FW_UPDATE
- for (i = 0; i < (GUP_SEARCH_FILE_TIMES); i++) {
- pr_debug("Waiting for /data mounted [%d]\n", i);
-
- if (gup_check_fs_mounted("/data") == SUCCESS) {
- pr_debug("/data Mounted!\n");
- break;
- }
- msleep(3000);
- }
- if (i >= (GUP_SEARCH_FILE_TIMES)) {
- pr_err("Wait for /data mounted timeout!\n");
- return FAIL;
- }
-
- /* update config */
- update_msg.cfg_file = file_open(CONFIG_FILE_PATH_1,
- O_RDONLY, 0);
-
- if (IS_ERR(update_msg.cfg_file)) {
- pr_debug("%s is unavailable\n", CONFIG_FILE_PATH_1);
- } else {
- pr_info("Update Config File: %s\n", CONFIG_FILE_PATH_1);
- ret = gup_update_config(client);
- if (ret <= 0)
- pr_err("Update config failed.\n");
- filp_close(update_msg.cfg_file, NULL);
- }
-
- if (sizeof(header_fw_array) < (FW_HEAD_LENGTH+FW_SECTION_LENGTH
- *4 + FW_DSP_ISP_LENGTH+FW_DSP_LENGTH + FW_BOOT_LENGTH)) {
- pr_err("INVALID header_fw_array, check your gt9xx_firmware.h file!\n");
- return FAIL;
- }
- update_msg.file = file_open(UPDATE_FILE_PATH_2, O_CREAT |
- O_RDWR, 0666);
- if ((IS_ERR(update_msg.file))) {
- pr_err("Failed to Create file: %s for fw_header!\n",
- UPDATE_FILE_PATH_2);
- return FAIL;
- }
- update_msg.file->f_op->llseek(update_msg.file, 0, SEEK_SET);
- update_msg.file->f_op->write(update_msg.file,
- (char *)header_fw_array, sizeof(header_fw_array),
- &update_msg.file->f_pos);
- file_close(update_msg.file, NULL);
- update_msg.file = file_open(UPDATE_FILE_PATH_2, O_RDONLY, 0);
-#else
- u8 fp_len = max(sizeof(UPDATE_FILE_PATH_1),
- sizeof(UPDATE_FILE_PATH_2));
- u8 cfp_len = max(sizeof(CONFIG_FILE_PATH_1),
- sizeof(CONFIG_FILE_PATH_2));
-
- u8 *search_update_path = devm_kzalloc(&client->dev, fp_len,
- GFP_KERNEL);
- if (!search_update_path)
- goto load_failed;
-
- u8 *search_cfg_path = devm_kzalloc(&client->dev, cfp_len,
- GFP_KERNEL);
- if (!search_cfg_path)
- goto load_failed;
- /* Begin to search update file,the config file & firmware
- * file must be in the same path,single or double.
- */
- searching_file = 1;
- for (i = 0; i < GUP_SEARCH_FILE_TIMES; i++) {
- if (searching_file == 0) {
- pr_info(".bin/.cfg update file search forcely terminated!\n");
- return FAIL;
- }
- if (i % 2) {
- memcpy(search_update_path, UPDATE_FILE_PATH_1,
- sizeof(UPDATE_FILE_PATH_1));
- memcpy(search_cfg_path, CONFIG_FILE_PATH_1,
- sizeof(CONFIG_FILE_PATH_1));
- } else {
- memcpy(search_update_path, UPDATE_FILE_PATH_2,
- sizeof(UPDATE_FILE_PATH_2));
- memcpy(search_cfg_path, CONFIG_FILE_PATH_2,
- sizeof(CONFIG_FILE_PATH_2));
- }
-
- if (!(got_file_flag&0x0F)) {
- update_msg.file = file_open(search_update_path,
- O_RDONLY, 0);
- if (!IS_ERR(update_msg.file)) {
- pr_debug("Find the bin file\n");
- got_file_flag |= 0x0F;
- }
- }
- if (!(got_file_flag & 0xF0)) {
- update_msg.cfg_file = file_open(search_cfg_path,
- O_RDONLY, 0);
- if (!IS_ERR(update_msg.cfg_file)) {
- pr_debug("Find the cfg file\n");
- got_file_flag |= 0xF0;
- }
- }
-
- if (got_file_flag) {
- if (got_file_flag == 0xFF)
- break;
- i += 4;
- }
- pr_debug("%3d:Searching %s %s file...\n", i,
- (got_file_flag & 0x0F) ? "" : "bin",
- (got_file_flag & 0xF0) ? "" : "cfg");
-
- msleep(3000);
- }
-
- searching_file = 0;
-
- if (!got_file_flag) {
- pr_err("Can't find update file.\n");
- goto load_failed;
- }
-
- if (got_file_flag & 0xF0) {
- pr_debug("Got the update config file.\n");
- ret = gup_update_config(client);
- if (ret <= 0)
- pr_err("Update config failed.\n");
- filp_close(update_msg.cfg_file, NULL);
- msleep(500); /* waiting config to be stored in FLASH. */
- }
- if (got_file_flag & 0x0F) {
- pr_debug("Got the update firmware file.\n");
- } else {
- pr_err("No need to upgrade firmware.\n");
- goto load_failed;
- }
-#endif
+ dev_dbg(&client->dev,
+			"Update config file: %s", GOODIX_CONFIG_FILE_NAME);
+ ret = gup_update_config(client, fw);
+ if (ret <= 0)
+ dev_err(&client->dev, "Update config failed");
+ release_firmware(fw);
}
- update_msg.old_fs = get_fs();
- set_fs(KERNEL_DS);
+ update_msg.need_free = false;
+ update_msg.fw_len = 0;
- update_msg.file->f_op->llseek(update_msg.file, 0, SEEK_SET);
- /* update_msg.file->f_pos = 0; */
+ if (gup_check_firmware_name(client, &path))
+ goto load_failed;
- ret = update_msg.file->f_op->read(update_msg.file, (char *)buf,
- FW_HEAD_LENGTH, &update_msg.file->f_pos);
- if (ret < 0) {
- pr_err("Read firmware head in update file error.\n");
+ if (gup_get_firmware_file(client, &update_msg, path))
goto load_failed;
- }
- memcpy(fw_head, buf, FW_HEAD_LENGTH);
+
+ memcpy(fw_head, update_msg.fw_data, FW_HEAD_LENGTH);
/* check firmware legality */
fw_checksum = 0;
for (i = 0; i < FW_SECTION_LENGTH * 4 + FW_DSP_ISP_LENGTH +
-			FW_DSP_LENGTH + FW_BOOT_LENGTH; i += 2) {
- u16 temp;
-
- ret = update_msg.file->f_op->read(update_msg.file, (char *)buf,
- 2, &update_msg.file->f_pos);
- if (ret < 0) {
- pr_err("Read firmware file error.\n");
- goto load_failed;
- }
- temp = (buf[0]<<8) + buf[1];
+ FW_DSP_LENGTH + FW_BOOT_LENGTH; i += 2) {
+ temp = (update_msg.fw_data[FW_HEAD_LENGTH + i] << 8) +
+ update_msg.fw_data[FW_HEAD_LENGTH + i + 1];
fw_checksum += temp;
}
- pr_debug("firmware checksum:%x\n", fw_checksum&0xFFFF);
+ pr_debug("firmware checksum:%x", fw_checksum & 0xFFFF);
if (fw_checksum & 0xFFFF) {
- pr_err("Illegal firmware file.\n");
+ dev_err(&client->dev, "Illegal firmware file");
goto load_failed;
}
return SUCCESS;
load_failed:
- set_fs(update_msg.old_fs);
- return FAIL;
-}
-
-#if 0
-static u8 gup_check_update_header(struct i2c_client *client,
- st_fw_head *fw_head)
-{
- const u8 *pos;
- int i = 0;
- u8 mask_num = 0;
- s32 ret = 0;
-
- pos = HEADER_UPDATE_DATA;
-
- memcpy(fw_head, pos, FW_HEAD_LENGTH);
- pos += FW_HEAD_LENGTH;
-
- ret = gup_enter_update_judge(fw_head);
- if (ret == SUCCESS)
- return SUCCESS;
+ if (update_msg.need_free) {
+ devm_kfree(&client->dev, update_msg.fw_data);
+ update_msg.need_free = false;
+ }
return FAIL;
}
-#endif
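The legality check above sums everything after the 14-byte head as big-endian 16-bit words; a well-formed image carries a trailing correction word so the total is zero modulo 0x10000. The check in isolation (helper name illustrative):

/* Sketch only: firmware image checksum as used above. */
static bool example_fw_checksum_ok(const u8 *img, u32 body_len)
{
	u32 sum = 0;
	u32 i;

	for (i = 0; i + 1 < body_len; i += 2)
		sum += (img[FW_HEAD_LENGTH + i] << 8) |
			img[FW_HEAD_LENGTH + i + 1];
	return !(sum & 0xFFFF);		/* low 16 bits must cancel out */
}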
static u8 gup_burn_proc(struct i2c_client *client, u8 *burn_buf, u16 start_addr,
u16 total_length)
@@ -947,7 +752,7 @@ static u8 gup_burn_proc(struct i2c_client *client, u8 *burn_buf, u16 start_addr,
u8 rd_buf[PACK_SIZE + GTP_ADDR_LENGTH];
u8 retry = 0;
- pr_debug("Begin burn %dk data to addr 0x%x\n", (total_length/1024),
+ pr_debug("Begin burn %dk data to addr 0x%x", (total_length / 1024),
start_addr);
while (burn_length < total_length) {
pr_debug("B/T:%04d/%04d", burn_length, total_length);
@@ -964,26 +769,26 @@ static u8 gup_burn_proc(struct i2c_client *client, u8 *burn_buf, u16 start_addr,
ret = gup_i2c_write(client, wr_buf,
GTP_ADDR_LENGTH + frame_length);
if (ret <= 0) {
- pr_err("Write frame data i2c error.\n");
+ pr_err("Write frame data i2c error\n");
continue;
}
ret = gup_i2c_read(client, rd_buf, GTP_ADDR_LENGTH +
frame_length);
if (ret <= 0) {
- pr_err("Read back frame data i2c error.\n");
+ pr_err("Read back frame data i2c error\n");
continue;
}
if (memcmp(&wr_buf[GTP_ADDR_LENGTH],
&rd_buf[GTP_ADDR_LENGTH], frame_length)) {
- pr_err("Check frame data fail,not equal.\n");
+				pr_err("Check frame data fail, not equal\n");
continue;
} else {
break;
}
}
if (retry >= MAX_FRAME_CHECK_TIME) {
- pr_err("Burn frame data time out,exit.\n");
+			pr_err("Burn frame data time out, exit\n");
return FAIL;
}
burn_length += frame_length;
@@ -994,20 +799,15 @@ static u8 gup_burn_proc(struct i2c_client *client, u8 *burn_buf, u16 start_addr,
static u8 gup_load_section_file(u8 *buf, u16 offset, u16 length)
{
- s32 ret = 0;
-
- if (update_msg.file == NULL) {
- pr_err("cannot find update file,load section file fail.\n");
- return FAIL;
- }
- update_msg.file->f_pos = FW_HEAD_LENGTH + offset;
-
- ret = update_msg.file->f_op->read(update_msg.file, (char *)buf, length,
- &update_msg.file->f_pos);
- if (ret < 0) {
- pr_err("Read update file fail.\n");
+ if (!update_msg.fw_data ||
+ update_msg.fw_len < FW_HEAD_LENGTH + offset + length) {
+ pr_err(
+		"<<-GTP->> cannot load section data. fw_len=%u read end=%d\n",
+ update_msg.fw_len,
+ FW_HEAD_LENGTH + offset + length);
return FAIL;
}
+ memcpy(buf, &update_msg.fw_data[FW_HEAD_LENGTH + offset], length);
return SUCCESS;
}
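The offsets handed to gup_load_section_file() by the burn routines below imply a fixed image layout. Only FW_HEAD_LENGTH (14) and FW_SECTION_LENGTH (0x2000) are visible in this patch; the DSP, boot and ISP lengths are defined in gt9xx.h, so treat their sizes as assumptions:

/*
 * Assumed image layout, offsets relative to the end of the head:
 *   0                          ss51 section 1 (FW_SECTION_LENGTH)
 *   1 * FW_SECTION_LENGTH      ss51 section 2
 *   2 * FW_SECTION_LENGTH      ss51 section 3
 *   3 * FW_SECTION_LENGTH      ss51 section 4
 *   4 * FW_SECTION_LENGTH      dsp (FW_DSP_LENGTH)
 *   ... + FW_DSP_LENGTH        bootloader (FW_BOOT_LENGTH)
 *   ... + FW_BOOT_LENGTH       dsp isp (FW_DSP_ISP_LENGTH)
 */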
@@ -1032,20 +832,20 @@ static u8 gup_recall_check(struct i2c_client *client, u8 *chk_src,
if (memcmp(&rd_buf[GTP_ADDR_LENGTH], &chk_src[recall_length],
frame_length)) {
- pr_err("Recall frame data fail,not equal.\n");
+		pr_err("Recall frame data fail, not equal\n");
return FAIL;
}
recall_length += frame_length;
recall_addr += frame_length;
}
- pr_debug("Recall check %dk firmware success.\n", (chk_length/1024));
+ pr_debug("Recall check %dk firmware success\n", (chk_length/1024));
return SUCCESS;
}
static u8 gup_burn_fw_section(struct i2c_client *client, u8 *fw_section,
- u16 start_addr, u8 bank_cmdi)
+ u16 start_addr, u8 bank_cmd)
{
s32 ret = 0;
u8 rd_buf[5];
@@ -1053,14 +853,14 @@ static u8 gup_burn_fw_section(struct i2c_client *client, u8 *fw_section,
/* step1:hold ss51 & dsp */
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
if (ret <= 0) {
- pr_err("[burn_fw_section]hold ss51 & dsp fail.\n");
+ pr_err("hold ss51 & dsp fail");
return FAIL;
}
/* step2:set scramble */
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
if (ret <= 0) {
- pr_err("[burn_fw_section]set scramble fail.\n");
+ pr_err("set scramble fail");
return FAIL;
}
@@ -1068,7 +868,7 @@ static u8 gup_burn_fw_section(struct i2c_client *client, u8 *fw_section,
ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK,
(bank_cmd >> 4)&0x0F);
if (ret <= 0) {
- pr_err("[burn_fw_section]select bank %d fail.\n",
+ pr_err("select bank %d fail",
(bank_cmd >> 4)&0x0F);
return FAIL;
}
@@ -1076,21 +876,21 @@ static u8 gup_burn_fw_section(struct i2c_client *client, u8 *fw_section,
/* step4:enable accessing code */
ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x01);
if (ret <= 0) {
- pr_err("[burn_fw_section]enable accessing code fail.\n");
+ pr_err("enable accessing code fail");
return FAIL;
}
/* step5:burn 8k fw section */
ret = gup_burn_proc(client, fw_section, start_addr, FW_SECTION_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_section]burn fw_section fail.\n");
+ pr_err("burn fw_section fail");
return FAIL;
}
/* step6:hold ss51 & release dsp */
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04);
if (ret <= 0) {
- pr_err("[burn_fw_section]hold ss51 & release dsp fail.\n");
+ pr_err("hold ss51 & release dsp fail");
return FAIL;
}
/* must delay */
@@ -1099,14 +899,14 @@ static u8 gup_burn_fw_section(struct i2c_client *client, u8 *fw_section,
/* step7:send burn cmd to move data to flash from sram */
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, bank_cmd&0x0f);
if (ret <= 0) {
- pr_err("[burn_fw_section]send burn cmd fail.\n");
+ pr_err("send burn cmd fail");
return FAIL;
}
- pr_debug("[burn_fw_section]Wait for the burn is complete......\n");
+	pr_debug("Wait for the burn to complete");
do {
ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
if (ret <= 0) {
- pr_err("[burn_fw_section]Get burn state fail\n");
+ pr_err("Get burn state fail");
return FAIL;
}
msleep(20);
@@ -1116,7 +916,7 @@ static u8 gup_burn_fw_section(struct i2c_client *client, u8 *fw_section,
ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK,
(bank_cmd >> 4)&0x0F);
if (ret <= 0) {
- pr_err("[burn_fw_section]select bank %d fail.\n",
+ pr_err("select bank %d fail",
(bank_cmd >> 4)&0x0F);
return FAIL;
}
@@ -1124,7 +924,7 @@ static u8 gup_burn_fw_section(struct i2c_client *client, u8 *fw_section,
/* step9:enable accessing code */
ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x01);
if (ret <= 0) {
- pr_err("[burn_fw_section]enable accessing code fail.\n");
+ pr_err("enable accessing code fail");
return FAIL;
}
@@ -1132,14 +932,14 @@ static u8 gup_burn_fw_section(struct i2c_client *client, u8 *fw_section,
ret = gup_recall_check(client, fw_section, start_addr,
FW_SECTION_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_section]recall check 8k firmware fail.\n");
+ pr_err("recall check 8k firmware fail");
return FAIL;
}
/* step11:disable accessing code */
ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x00);
if (ret <= 0) {
- pr_err("[burn_fw_section]disable accessing code fail.\n");
+ pr_err("disable accessing code fail");
return FAIL;
}
@@ -1152,101 +952,97 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
u8 *fw_dsp_isp = NULL;
u8 retry = 0;
- pr_debug("[burn_dsp_isp]Begin burn dsp isp---->>\n");
+ pr_debug("Begin burn dsp isp");
/* step1:alloc memory */
- pr_debug("[burn_dsp_isp]step1:alloc memory\n");
+ pr_debug("step1:alloc memory");
while (retry++ < 5) {
fw_dsp_isp = devm_kzalloc(&client->dev, FW_DSP_ISP_LENGTH,
GFP_KERNEL);
if (fw_dsp_isp == NULL) {
continue;
} else {
- pr_info("[burn_dsp_isp]Alloc %dk byte memory success.\n",
- (FW_DSP_ISP_LENGTH/1024));
break;
}
}
- if (retry == 5) {
- pr_err("[burn_dsp_isp]Alloc memory fail,exit.\n");
+ if (retry == 5)
return FAIL;
- }
/* step2:load dsp isp file data */
- pr_debug("[burn_dsp_isp]step2:load dsp isp file data\n");
+ pr_debug("step2:load dsp isp file data");
ret = gup_load_section_file(fw_dsp_isp, (4 * FW_SECTION_LENGTH +
FW_DSP_LENGTH + FW_BOOT_LENGTH), FW_DSP_ISP_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_dsp_isp]load firmware dsp_isp fail.\n");
+ pr_err("load firmware dsp_isp fail");
return FAIL;
}
/* step3:disable wdt,clear cache enable */
- pr_debug("[burn_dsp_isp]step3:disable wdt,clear cache enable\n");
+ pr_debug("step3:disable wdt,clear cache enable");
ret = gup_set_ic_msg(client, _bRW_MISCTL__TMR0_EN, 0x00);
if (ret <= 0) {
- pr_err("[burn_dsp_isp]disable wdt fail.\n");
+ pr_err("disable wdt fail");
return FAIL;
}
ret = gup_set_ic_msg(client, _bRW_MISCTL__CACHE_EN, 0x00);
if (ret <= 0) {
- pr_err("[burn_dsp_isp]clear cache enable fail.\n");
+ pr_err("clear cache enable fail");
return FAIL;
}
/* step4:hold ss51 & dsp */
- pr_debug("[burn_dsp_isp]step4:hold ss51 & dsp\n");
+ pr_debug("step4:hold ss51 & dsp");
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
if (ret <= 0) {
- pr_err("[burn_dsp_isp]hold ss51 & dsp fail.\n");
+ pr_err("hold ss51 & dsp fail");
return FAIL;
}
/* step5:set boot from sram */
- pr_debug("[burn_dsp_isp]step5:set boot from sram\n");
+ pr_debug("step5:set boot from sram");
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOTCTL_B0_, 0x02);
if (ret <= 0) {
- pr_err("[burn_dsp_isp]set boot from sram fail.\n");
+ pr_err("set boot from sram fail");
return FAIL;
}
/* step6:software reboot */
- pr_debug("[burn_dsp_isp]step6:software reboot\n");
+ pr_debug("step6:software reboot");
ret = gup_set_ic_msg(client, _bWO_MISCTL__CPU_SWRST_PULSE, 0x01);
if (ret <= 0) {
- pr_err("[burn_dsp_isp]software reboot fail.\n");
+ pr_err("software reboot fail");
return FAIL;
}
/* step7:select bank2 */
- pr_debug("[burn_dsp_isp]step7:select bank2\n");
+ pr_debug("step7:select bank2");
ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x02);
if (ret <= 0) {
- pr_err("[burn_dsp_isp]select bank2 fail.\n");
+ pr_err("select bank2 fail");
return FAIL;
}
/* step8:enable accessing code */
- pr_debug("[burn_dsp_isp]step8:enable accessing code\n");
+ pr_debug("step8:enable accessing code");
ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x01);
if (ret <= 0) {
- pr_err("[burn_dsp_isp]enable accessing code fail.\n");
+ pr_err("enable accessing code fail");
return FAIL;
}
/* step9:burn 4k dsp_isp */
- pr_debug("[burn_dsp_isp]step9:burn 4k dsp_isp\n");
+ pr_debug("step9:burn 4k dsp_isp");
ret = gup_burn_proc(client, fw_dsp_isp, 0xC000, FW_DSP_ISP_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_dsp_isp]burn dsp_isp fail.\n");
+ pr_err("burn dsp_isp fail");
return FAIL;
}
/* step10:set scramble */
- pr_debug("[burn_dsp_isp]step10:set scramble\n");
+ pr_debug("step10:set scramble");
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
if (ret <= 0) {
- pr_err("[burn_dsp_isp]set scramble fail.\n");
+ pr_err("set scramble fail");
return FAIL;
}
@@ -1259,98 +1055,94 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
u8 retry = 0;
s32 ret = 0;
- pr_debug("[burn_fw_ss51]Begin burn ss51 firmware---->>\n");
+ pr_debug("Begin burn ss51 firmware");
/* step1:alloc memory */
- pr_debug("[burn_fw_ss51]step1:alloc memory\n");
+ pr_debug("step1:alloc memory");
while (retry++ < 5) {
fw_ss51 = devm_kzalloc(&client->dev, FW_SECTION_LENGTH,
GFP_KERNEL);
if (fw_ss51 == NULL) {
continue;
} else {
- pr_info("[burn_fw_ss51]Alloc %dk byte memory success.\n",
- (FW_SECTION_LENGTH/1024));
break;
}
}
- if (retry == 5) {
- pr_err("[burn_fw_ss51]Alloc memory fail,exit.\n");
+ if (retry == 5)
return FAIL;
- }
/* step2:load ss51 firmware section 1 file data */
- pr_debug("[burn_fw_ss51]step2:load ss51 firmware section 1 file data\n");
+ pr_debug("step2:load ss51 firmware section 1 file data");
ret = gup_load_section_file(fw_ss51, 0, FW_SECTION_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_ss51]load ss51 firmware section 1 fail.\n");
+ pr_err("load ss51 firmware section 1 fail");
return FAIL;
}
/* step3:clear control flag */
- pr_debug("[burn_fw_ss51]step3:clear control flag\n");
+ pr_debug("step3:clear control flag");
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x00);
if (ret <= 0) {
- pr_err("[burn_fw_ss51]clear control flag fail.\n");
+ pr_err("clear control flag fail");
return FAIL;
}
/* step4:burn ss51 firmware section 1 */
- pr_debug("[burn_fw_ss51]step4:burn ss51 firmware section 1\n");
+ pr_debug("step4:burn ss51 firmware section 1");
ret = gup_burn_fw_section(client, fw_ss51, 0xC000, 0x01);
if (ret == FAIL) {
- pr_err("[burn_fw_ss51]burn ss51 firmware section 1 fail.\n");
+ pr_err("burn ss51 firmware section 1 fail");
return FAIL;
}
/* step5:load ss51 firmware section 2 file data */
- pr_debug("[burn_fw_ss51]step5:load ss51 firmware section 2 file data\n");
+ pr_debug("step5:load ss51 firmware section 2 file data");
ret = gup_load_section_file(fw_ss51, FW_SECTION_LENGTH,
FW_SECTION_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_ss51]load ss51 firmware section 2 fail.\n");
+		pr_err("load ss51 firmware section 2 fail");
return FAIL;
}
/* step6:burn ss51 firmware section 2 */
- pr_debug("[burn_fw_ss51]step6:burn ss51 firmware section 2\n");
+ pr_debug("step6:burn ss51 firmware section 2");
ret = gup_burn_fw_section(client, fw_ss51, 0xE000, 0x02);
if (ret == FAIL) {
- pr_err("[burn_fw_ss51]burn ss51 firmware section 2 fail.\n");
+ pr_err("burn ss51 firmware section 2 fail");
return FAIL;
}
/* step7:load ss51 firmware section 3 file data */
- pr_debug("[burn_fw_ss51]step7:load ss51 firmware section 3 file data\n");
+ pr_debug("step7:load ss51 firmware section 3 file data");
ret = gup_load_section_file(fw_ss51, 2*FW_SECTION_LENGTH,
FW_SECTION_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_ss51]load ss51 firmware section 3 fail.\n");
+ pr_err("load ss51 firmware section 3 fail");
return FAIL;
}
/* step8:burn ss51 firmware section 3 */
- pr_debug("[burn_fw_ss51]step8:burn ss51 firmware section 3\n");
+ pr_debug("step8:burn ss51 firmware section 3");
ret = gup_burn_fw_section(client, fw_ss51, 0xC000, 0x13);
if (ret == FAIL) {
- pr_err("[burn_fw_ss51]burn ss51 firmware section 3 fail.\n");
+ pr_err("burn ss51 firmware section 3 fail");
return FAIL;
}
/* step9:load ss51 firmware section 4 file data */
- pr_debug("[burn_fw_ss51]step9:load ss51 firmware section 4 file data\n");
+ pr_debug("step9:load ss51 firmware section 4 file data");
ret = gup_load_section_file(fw_ss51, 3*FW_SECTION_LENGTH,
FW_SECTION_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_ss51]load ss51 firmware section 4 fail.\n");
+ pr_err("load ss51 firmware section 4 fail");
return FAIL;
}
/* step10:burn ss51 firmware section 4 */
- pr_debug("[burn_fw_ss51]step10:burn ss51 firmware section 4\n");
+ pr_debug("step10:burn ss51 firmware section 4");
ret = gup_burn_fw_section(client, fw_ss51, 0xE000, 0x14);
if (ret == FAIL) {
- pr_err("[burn_fw_ss51]burn ss51 firmware section 4 fail.\n");
+ pr_err("burn ss51 firmware section 4 fail");
return FAIL;
}
@@ -1364,101 +1156,97 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
u8 retry = 0;
u8 rd_buf[5];
- pr_debug("[burn_fw_dsp]Begin burn dsp firmware---->>\n");
+ pr_debug("Begin burn dsp firmware");
/* step1:alloc memory */
- pr_debug("[burn_fw_dsp]step1:alloc memory\n");
+ pr_debug("step1:alloc memory");
while (retry++ < 5) {
fw_dsp = devm_kzalloc(&client->dev, FW_DSP_LENGTH,
GFP_KERNEL);
if (fw_dsp == NULL) {
continue;
} else {
- pr_info("[burn_fw_dsp]Alloc %dk byte memory success.\n",
- (FW_SECTION_LENGTH/1024));
break;
}
}
- if (retry == 5) {
- pr_err("[burn_fw_dsp]Alloc memory fail,exit.\n");
+ if (retry == 5)
return FAIL;
- }
/* step2:load firmware dsp */
- pr_debug("[burn_fw_dsp]step2:load firmware dsp\n");
+ pr_debug("step2:load firmware dsp");
ret = gup_load_section_file(fw_dsp, 4*FW_SECTION_LENGTH, FW_DSP_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_dsp]load firmware dsp fail.\n");
+ pr_err("load firmware dsp fail");
return ret;
}
/* step3:select bank3 */
- pr_debug("[burn_fw_dsp]step3:select bank3\n");
+ pr_debug("step3:select bank3");
ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x03);
if (ret <= 0) {
- pr_err("[burn_fw_dsp]select bank3 fail.\n");
+ pr_err("select bank3 fail");
return FAIL;
}
/* Step4:hold ss51 & dsp */
- pr_debug("[burn_fw_dsp]step4:hold ss51 & dsp\n");
+ pr_debug("step4:hold ss51 & dsp");
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
if (ret <= 0) {
- pr_err("[burn_fw_dsp]hold ss51 & dsp fail.\n");
+ pr_err("hold ss51 & dsp fail");
return FAIL;
}
/* step5:set scramble */
- pr_debug("[burn_fw_dsp]step5:set scramble\n");
+ pr_debug("step5:set scramble");
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
if (ret <= 0) {
- pr_err("[burn_fw_dsp]set scramble fail.\n");
+ pr_err("set scramble fail");
return FAIL;
}
/* step6:release ss51 & dsp */
- pr_debug("[burn_fw_dsp]step6:release ss51 & dsp\n");
+ pr_debug("step6:release ss51 & dsp");
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04);
if (ret <= 0) {
- pr_err("[burn_fw_dsp]release ss51 & dsp fail.\n");
+ pr_err("release ss51 & dsp fail");
return FAIL;
}
/* must delay */
msleep(20);
/* step7:burn 4k dsp firmware */
- pr_debug("[burn_fw_dsp]step7:burn 4k dsp firmware\n");
+ pr_debug("step7:burn 4k dsp firmware");
ret = gup_burn_proc(client, fw_dsp, 0x9000, FW_DSP_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_dsp]burn fw_section fail.\n");
+		pr_err("burn fw_section fail");
return ret;
}
/* step8:send burn cmd to move data to flash from sram */
- pr_debug("[burn_fw_dsp]step8:send burn cmd to move data to flash from sram\n");
+ pr_debug("step8:send burn cmd to move data to flash from sram");
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x05);
if (ret <= 0) {
- pr_err("[burn_fw_dsp]send burn cmd fail.\n");
+ pr_err("send burn cmd fail");
return ret;
}
- pr_debug("[burn_fw_dsp]Wait for the burn is complete......\n");
+	pr_debug("Wait for the burn to complete");
do {
ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
if (ret <= 0) {
- pr_err("[burn_fw_dsp]Get burn state fail\n");
+ pr_err("Get burn state fail");
return ret;
}
msleep(20);
} while (rd_buf[GTP_ADDR_LENGTH]);
/* step9:recall check 4k dsp firmware */
- pr_debug("[burn_fw_dsp]step9:recall check 4k dsp firmware\n");
+ pr_debug("step9:recall check 4k dsp firmware");
ret = gup_recall_check(client, fw_dsp, 0x9000, FW_DSP_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_dsp]recall check 4k dsp firmware fail.\n");
+ pr_err("recall check 4k dsp firmware fail");
return ret;
}
- ret = SUCCESS;
+ return SUCCESS;
}
static u8 gup_burn_fw_boot(struct i2c_client *client)
@@ -1468,115 +1256,111 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
u8 retry = 0;
u8 rd_buf[5];
- pr_debug("[burn_fw_boot]Begin burn bootloader firmware---->>\n");
+ pr_debug("Begin burn bootloader firmware");
/* step1:Alloc memory */
- pr_debug("[burn_fw_boot]step1:Alloc memory\n");
+ pr_debug("step1:Alloc memory");
while (retry++ < 5) {
fw_boot = devm_kzalloc(&client->dev, FW_BOOT_LENGTH,
GFP_KERNEL);
if (fw_boot == NULL) {
continue;
} else {
- pr_info("[burn_fw_boot]Alloc %dk byte memory success.\n",
- (FW_BOOT_LENGTH/1024));
break;
}
}
- if (retry == 5) {
- pr_err("[burn_fw_boot]Alloc memory fail,exit.\n");
+ if (retry == 5)
return FAIL;
- }
/* step2:load firmware bootloader */
- pr_debug("[burn_fw_boot]step2:load firmware bootloader\n");
+ pr_debug("step2:load firmware bootloader");
ret = gup_load_section_file(fw_boot, (4 * FW_SECTION_LENGTH +
FW_DSP_LENGTH), FW_BOOT_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_boot]load firmware dsp fail.\n");
+ pr_err("load firmware dsp fail");
return ret;
}
/* step3:hold ss51 & dsp */
- pr_debug("[burn_fw_boot]step3:hold ss51 & dsp\n");
+ pr_debug("step3:hold ss51 & dsp");
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
if (ret <= 0) {
- pr_err("[burn_fw_boot]hold ss51 & dsp fail.\n");
+ pr_err("hold ss51 & dsp fail");
return FAIL;
}
/* step4:set scramble */
- pr_debug("[burn_fw_boot]step4:set scramble\n");
+ pr_debug("step4:set scramble");
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
if (ret <= 0) {
- pr_err("[burn_fw_boot]set scramble fail.\n");
+ pr_err("set scramble fail");
return FAIL;
}
/* step5:release ss51 & dsp */
- pr_debug("[burn_fw_boot]step5:release ss51 & dsp\n");
+ pr_debug("step5:release ss51 & dsp");
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04);
if (ret <= 0) {
- pr_err("[burn_fw_boot]release ss51 & dsp fail.\n");
+ pr_err("release ss51 & dsp fail");
return FAIL;
}
/* must delay */
msleep(20);
/* step6:select bank3 */
- pr_debug("[burn_fw_boot]step6:select bank3\n");
+ pr_debug("step6:select bank3");
ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x03);
if (ret <= 0) {
- pr_err("[burn_fw_boot]select bank3 fail.\n");
+ pr_err("select bank3 fail");
return FAIL;
}
/* step7:burn 2k bootloader firmware */
- pr_debug("[burn_fw_boot]step7:burn 2k bootloader firmware\n");
+ pr_debug("step7:burn 2k bootloader firmware");
ret = gup_burn_proc(client, fw_boot, 0x9000, FW_BOOT_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_boot]burn fw_section fail.\n");
+ pr_err("burn fw_section fail");
return ret;
}
/* step7:send burn cmd to move data to flash from sram */
- pr_debug("[burn_fw_boot]step7:send burn cmd to move data to flash from sram\n");
+ pr_debug("step7:send burn cmd to flash data from sram");
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x06);
if (ret <= 0) {
- pr_err("[burn_fw_boot]send burn cmd fail.\n");
+ pr_err("send burn cmd fail");
return ret;
}
- pr_debug("[burn_fw_boot]Wait for the burn is complete......\n");
+	pr_debug("Wait for the burn to complete");
do {
ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
if (ret <= 0) {
- pr_err("[burn_fw_boot]Get burn state fail\n");
+ pr_err("Get burn state fail");
return ret;
}
msleep(20);
} while (rd_buf[GTP_ADDR_LENGTH]);
/* step8:recall check 2k bootloader firmware */
- pr_debug("[burn_fw_boot]step8:recall check 2k bootloader firmware\n");
+ pr_debug("step8:recall check 2k bootloader firmware");
ret = gup_recall_check(client, fw_boot, 0x9000, FW_BOOT_LENGTH);
if (ret == FAIL) {
- pr_err("[burn_fw_boot]recall check 4k dsp firmware fail.\n");
+ pr_err("recall check 4k dsp firmware fail");
return ret;
}
/* step9:enable download DSP code */
- pr_debug("[burn_fw_boot]step9:enable download DSP code\n");
+	pr_debug("step9:enable download DSP code");
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x99);
if (ret <= 0) {
- pr_err("[burn_fw_boot]enable download DSP code fail.\n");
+ pr_err("enable download DSP code fail");
return FAIL;
}
/* step10:release ss51 & hold dsp */
- pr_debug("[burn_fw_boot]step10:release ss51 & hold dsp\n");
+ pr_debug("step10:release ss51 & hold dsp");
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x08);
if (ret <= 0) {
- pr_err("[burn_fw_boot]release ss51 & hold dsp fail.\n");
+ pr_err("release ss51 & hold dsp fail");
return FAIL;
}
@@ -1586,46 +1370,47 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
s32 gup_update_proc(void *dir)
{
s32 ret = 0;
- u8 retry = 0;
- st_fw_head fw_head;
+ u8 retry = 0;
+ struct st_fw_head fw_head;
struct goodix_ts_data *ts = NULL;
- pr_debug("[update_proc]Begin update ......\n");
+	pr_debug("Begin update");
+
+ if (!i2c_connect_client) {
+ pr_err("No i2c connect client for %s\n", __func__);
+ return -EIO;
+ }
show_len = 1;
total_len = 100;
- if (dir == NULL)
- /* wait main thread to be completed */
- msleep(3000);
ts = i2c_get_clientdata(i2c_connect_client);
if (searching_file) {
/* exit .bin update file searching */
searching_file = 0;
- pr_info("Exiting searching .bin update file...\n");
+		pr_info("Exiting searching .bin update file");
/* wait for auto update quitted completely */
while ((show_len != 200) && (show_len != 100))
msleep(100);
}
- update_msg.file = NULL;
ret = gup_check_update_file(i2c_connect_client, &fw_head, (u8 *)dir);
if (ret == FAIL) {
- pr_err("[update_proc]check update file fail.\n");
+ pr_err("check update file fail");
goto file_fail;
}
/* gtp_reset_guitar(i2c_connect_client, 20); */
ret = gup_get_ic_fw_msg(i2c_connect_client);
if (ret == FAIL) {
- pr_err("[update_proc]get ic message fail.\n");
+ pr_err("get ic message fail");
goto file_fail;
}
- ret = gup_enter_update_judge(&fw_head);
+ ret = gup_enter_update_judge(ts->client, &fw_head);
if (ret == FAIL) {
- pr_err("[update_proc]Check *.bin file fail.\n");
+ pr_err("Check *.bin file fail");
goto file_fail;
}
@@ -1636,7 +1421,7 @@ s32 gup_update_proc(void *dir)
#endif
ret = gup_enter_update_mode(i2c_connect_client);
if (ret == FAIL) {
- pr_err("[update_proc]enter update mode fail.\n");
+ pr_err("enter update mode fail");
goto update_fail;
}
@@ -1645,53 +1430,46 @@ s32 gup_update_proc(void *dir)
total_len = 100;
ret = gup_burn_dsp_isp(i2c_connect_client);
if (ret == FAIL) {
- pr_err("[update_proc]burn dsp isp fail.\n");
+ pr_err("burn dsp isp fail");
continue;
}
show_len += 10;
ret = gup_burn_fw_ss51(i2c_connect_client);
if (ret == FAIL) {
- pr_err("[update_proc]burn ss51 firmware fail.\n");
+ pr_err("burn ss51 firmware fail");
continue;
}
show_len += 40;
ret = gup_burn_fw_dsp(i2c_connect_client);
if (ret == FAIL) {
- pr_err("[update_proc]burn dsp firmware fail.\n");
+ pr_err("burn dsp firmware fail");
continue;
}
show_len += 20;
ret = gup_burn_fw_boot(i2c_connect_client);
if (ret == FAIL) {
- pr_err("[update_proc]burn bootloader fw fail.\n");
+ pr_err("burn bootloader fw fail");
continue;
}
show_len += 10;
- pr_info("[update_proc]UPDATE SUCCESS.\n");
+ pr_info("UPDATE SUCCESS");
break;
}
if (retry >= 5) {
- pr_err("[update_proc]retry timeout,UPDATE FAIL.\n");
+		pr_err("retry timeout, UPDATE FAIL");
goto update_fail;
}
- pr_debug("[update_proc]leave update mode.\n");
- gup_leave_update_mode();
+ pr_debug("leave update mode");
+ gup_leave_update_mode(i2c_connect_client);
msleep(100);
- /*
- * ret = gtp_send_cfg(i2c_connect_client);
- * if(ret < 0) {
- * pr_err("[update_proc]send config fail.");
- * }
- */
-
if (ts->fw_error) {
- pr_info("firmware error auto update, resent config!\n");
+		pr_info("firmware error auto update, resend config\n");
gup_init_panel(ts);
}
show_len = 100;
@@ -1702,7 +1480,11 @@ s32 gup_update_proc(void *dir)
#if GTP_ESD_PROTECT
gtp_esd_switch(ts->client, SWITCH_ON);
#endif
- filp_close(update_msg.file, NULL);
+ if (update_msg.need_free) {
+ devm_kfree(&ts->client->dev, update_msg.fw_data);
+ update_msg.need_free = false;
+ }
+
return SUCCESS;
update_fail:
@@ -1714,11 +1496,12 @@ update_fail:
#endif
file_fail:
- if (update_msg.file && !IS_ERR(update_msg.file))
- filp_close(update_msg.file, NULL);
-
show_len = 200;
total_len = 100;
+ if (update_msg.need_free) {
+ devm_kfree(&ts->client->dev, update_msg.fw_data);
+ update_msg.need_free = false;
+ }
return FAIL;
}
@@ -1728,7 +1511,6 @@ static void gup_update_work(struct work_struct *work)
pr_err("Goodix update work fail\n");
}
-#if GTP_AUTO_UPDATE
u8 gup_init_update_proc(struct goodix_ts_data *ts)
{
dev_dbg(&ts->client->dev, "Ready to run update work\n");
@@ -1739,4 +1521,3 @@ u8 gup_init_update_proc(struct goodix_ts_data *ts)
return 0;
}
-#endif
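With the kthread and the /data polling gone, the update now runs once from a workqueue item scheduled at probe time. A sketch of the pattern gup_init_update_proc() relies on; the work item here is illustrative, the real one is embedded in struct goodix_ts_data:

/* Sketch only: deferred firmware update via the shared workqueue. */
static struct work_struct example_fw_work;

static void example_fw_work_fn(struct work_struct *work)
{
	if (gup_update_proc(NULL) == FAIL)
		pr_err("deferred firmware update failed\n");
}

static void example_schedule_fw_update(void)
{
	INIT_WORK(&example_fw_work, example_fw_work_fn);
	schedule_work(&example_fw_work);	/* runs in process context */
}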
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
index e8229216fcd3..206941708141 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
@@ -492,33 +492,25 @@ static int synaptics_i2c_change_pipe_owner(
static void synaptics_secure_touch_init(struct synaptics_rmi4_data *data)
{
- int ret = 0;
data->st_initialized = 0;
init_completion(&data->st_powerdown);
init_completion(&data->st_irq_processed);
/* Get clocks */
data->core_clk = clk_get(data->pdev->dev.parent, "core_clk");
if (IS_ERR(data->core_clk)) {
- ret = PTR_ERR(data->core_clk);
- dev_err(data->pdev->dev.parent,
- "%s: error on clk_get(core_clk):%d\n", __func__, ret);
- return;
+ data->core_clk = NULL;
+ dev_warn(data->pdev->dev.parent,
+ "%s: core_clk is not defined\n", __func__);
}
data->iface_clk = clk_get(data->pdev->dev.parent, "iface_clk");
if (IS_ERR(data->iface_clk)) {
- ret = PTR_ERR(data->iface_clk);
- dev_err(data->pdev->dev.parent,
- "%s: error on clk_get(iface_clk):%d\n", __func__, ret);
- goto err_iface_clk;
+ data->iface_clk = NULL;
+ dev_warn(data->pdev->dev.parent,
+ "%s: iface_clk is not defined\n", __func__);
}
data->st_initialized = 1;
- return;
-
-err_iface_clk:
- clk_put(data->core_clk);
- data->core_clk = NULL;
}
static void synaptics_secure_touch_notify(struct synaptics_rmi4_data *rmi4_data)
{
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
index 4c341ffb6094..9d61eb110e2f 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
@@ -347,7 +347,7 @@ static ssize_t rmidev_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
{
ssize_t retval;
- unsigned char tmpbuf[count + 1];
+ unsigned char *tmpbuf;
struct rmidev_data *dev_data = filp->private_data;
if (IS_ERR(dev_data)) {
@@ -361,6 +361,10 @@ static ssize_t rmidev_read(struct file *filp, char __user *buf,
if (count > (REG_ADDR_LIMIT - *f_pos))
count = REG_ADDR_LIMIT - *f_pos;
+ tmpbuf = kzalloc(count + 1, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
mutex_lock(&(dev_data->file_mutex));
retval = synaptics_rmi4_reg_read(rmidev->rmi4_data,
@@ -377,7 +381,7 @@ static ssize_t rmidev_read(struct file *filp, char __user *buf,
clean_up:
mutex_unlock(&(dev_data->file_mutex));
-
+ kfree(tmpbuf);
return retval;
}
@@ -393,7 +397,7 @@ static ssize_t rmidev_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
ssize_t retval;
- unsigned char tmpbuf[count + 1];
+ unsigned char *tmpbuf;
struct rmidev_data *dev_data = filp->private_data;
if (IS_ERR(dev_data)) {
@@ -407,9 +411,14 @@ static ssize_t rmidev_write(struct file *filp, const char __user *buf,
if (count > (REG_ADDR_LIMIT - *f_pos))
count = REG_ADDR_LIMIT - *f_pos;
- if (copy_from_user(tmpbuf, buf, count))
- return -EFAULT;
+ tmpbuf = kzalloc(count + 1, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+ if (copy_from_user(tmpbuf, buf, count)) {
+ kfree(tmpbuf);
+ return -EFAULT;
+ }
mutex_lock(&(dev_data->file_mutex));
retval = synaptics_rmi4_reg_write(rmidev->rmi4_data,
@@ -420,7 +429,7 @@ static ssize_t rmidev_write(struct file *filp, const char __user *buf,
*f_pos += retval;
mutex_unlock(&(dev_data->file_mutex));
-
+ kfree(tmpbuf);
return retval;
}
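Both rmidev paths replace a user-sized on-stack VLA with a bounded heap allocation: clamp the user-controlled count first, then allocate, then copy. The idiom in isolation (names illustrative):

/* Sketch only: bounded allocation for a user-controlled length. */
static ssize_t example_bounded_copy(const char __user *buf, size_t count,
				    size_t limit)
{
	u8 *tmp;

	count = min(count, limit);		/* clamp before sizing */
	tmp = kzalloc(count + 1, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	if (copy_from_user(tmp, buf, count)) {
		kfree(tmp);
		return -EFAULT;
	}
	/* ... consume tmp ... */
	kfree(tmp);
	return count;
}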
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c69927bd4ff2..afa519aa8203 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1888,8 +1888,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
return NULL;
smmu_domain->secure_vmid = VMID_INVAL;
- /* disable coherent htw by default */
- smmu_domain->attributes = (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
INIT_LIST_HEAD(&smmu_domain->pte_info_list);
INIT_LIST_HEAD(&smmu_domain->unassign_list);
INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
@@ -2263,15 +2261,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto err_destroy_domain_context;
}
- if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE))
- && !(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)) {
- dev_err(dev,
- "Can't attach: this domain wants coherent htw but %s doesn't support it\n",
- dev_name(smmu_domain->smmu->dev));
- ret = -EINVAL;
- goto err_destroy_domain_context;
- }
-
/* Looks ok, so add the device to the domain */
ret = arm_smmu_domain_add_master(smmu_domain, cfg);
if (ret)
@@ -2977,11 +2966,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
ret = 0;
break;
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- *((int *)data) = !!(smmu_domain->attributes &
- (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE));
- ret = 0;
- break;
case DOMAIN_ATTR_SECURE_VMID:
*((int *)data) = smmu_domain->secure_vmid;
ret = 0;
@@ -3083,29 +3067,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
break;
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- {
- struct arm_smmu_device *smmu;
- int htw_disable = *((int *)data);
-
- smmu = smmu_domain->smmu;
-
- if (smmu && !(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- && !htw_disable) {
- dev_err(smmu->dev,
- "Can't enable coherent htw on this domain: this SMMU doesn't support it\n");
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (htw_disable)
- smmu_domain->attributes |=
- (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
- else
- smmu_domain->attributes &=
- ~(1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
- break;
- }
case DOMAIN_ATTR_SECURE_VMID:
BUG_ON(smmu_domain->secure_vmid != VMID_INVAL);
smmu_domain->secure_vmid = *((int *)data);
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index ea8db1a431d0..266f7065fca4 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -649,7 +649,7 @@ err:
int fast_smmu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
{
- int htw_disable = 1, atomic_domain = 1;
+ int atomic_domain = 1;
struct iommu_domain *domain = mapping->domain;
struct iommu_pgtbl_info info;
size_t size = mapping->bits << PAGE_SHIFT;
@@ -657,10 +657,6 @@ int fast_smmu_attach_device(struct device *dev,
if (mapping->base + size > (SZ_1G * 4ULL))
return -EINVAL;
- if (iommu_domain_set_attr(domain, DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &htw_disable))
- return -EINVAL;
-
if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC,
&atomic_domain))
return -EINVAL;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index a892b73a4288..3333f15f7f16 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -363,7 +363,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
else
pte |= ARM_LPAE_PTE_TYPE_BLOCK;
- pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_OS;
pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
*ptep = pte;
@@ -935,9 +935,14 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
/* TCR */
- reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
- (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
- (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
+ if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent)
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+ else
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
switch (1 << data->pg_shift) {
case SZ_4K:
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index a0227fd05939..3b54fd4a77e6 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -48,8 +48,6 @@ static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
return "DOMAIN_ATTR_FSL_PAMUV1";
case DOMAIN_ATTR_NESTING:
return "DOMAIN_ATTR_NESTING";
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- return "DOMAIN_ATTR_COHERENT_HTW_DISABLE";
case DOMAIN_ATTR_PT_BASE_ADDR:
return "DOMAIN_ATTR_PT_BASE_ADDR";
case DOMAIN_ATTR_SECURE_VMID:
@@ -96,7 +94,6 @@ static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
{
struct iommu_debug_attachment *attach = s->private;
phys_addr_t pt_phys;
- int coherent_htw_disable;
int secure_vmid;
seq_printf(s, "Domain: 0x%p\n", attach->domain);
@@ -110,14 +107,6 @@ static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
pt_virt, &pt_phys);
}
- seq_puts(s, "COHERENT_HTW_DISABLE: ");
- if (iommu_domain_get_attr(attach->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &coherent_htw_disable))
- seq_puts(s, "(Unknown)\n");
- else
- seq_printf(s, "%d\n", coherent_htw_disable);
-
seq_puts(s, "SECURE_VMID: ");
if (iommu_domain_get_attr(attach->domain,
DOMAIN_ATTR_SECURE_VMID,
@@ -733,7 +722,6 @@ static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
SZ_1M * 20, 0 };
enum iommu_attr attrs[] = {
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
};
int htw_disable = 1, atomic = 1;
@@ -764,7 +752,6 @@ static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
SZ_1M * 20, 0 };
enum iommu_attr attrs[] = {
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
DOMAIN_ATTR_SECURE_VMID,
};
@@ -797,7 +784,6 @@ static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
enum iommu_attr attrs[] = {
DOMAIN_ATTR_FAST,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
};
int one = 1;
@@ -1507,7 +1493,6 @@ static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
int val, bool is_secure)
{
- int htw_disable = 1;
struct bus_type *bus;
bus = msm_iommu_get_bus(ddev->dev);
@@ -1520,13 +1505,6 @@ static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
return -ENOMEM;
}
- if (iommu_domain_set_attr(ddev->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &htw_disable)) {
- pr_err("Couldn't disable coherent htw\n");
- goto out_domain_free;
- }
-
if (is_secure && iommu_domain_set_attr(ddev->domain,
DOMAIN_ATTR_SECURE_VMID,
&val)) {
diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c
index 334f4e95c068..25fe36ab6339 100644
--- a/drivers/iommu/msm_dma_iommu_mapping.c
+++ b/drivers/iommu/msm_dma_iommu_mapping.c
@@ -17,6 +17,7 @@
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>
+#include <asm/barrier.h>
#include <linux/msm_dma_iommu_mapping.h>
@@ -216,10 +217,13 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
sg->dma_length = iommu_map->sgl.dma_length;
kref_get(&iommu_map->ref);
- /*
- * Need to do cache operations here based on "dir" in the
- * future if we go with coherent mappings.
- */
+ if (is_device_dma_coherent(dev))
+ /*
+ * Ensure all outstanding changes for coherent
+ * buffers are applied to the cache before any
+ * DMA occurs.
+ */
+ dmb(ish);
ret = nents;
}
mutex_unlock(&iommu_meta->lock);
@@ -376,6 +380,7 @@ int msm_dma_unmap_all_for_dev(struct device *dev)
return ret;
}
+EXPORT_SYMBOL(msm_dma_unmap_all_for_dev);
/*
* Only to be called by ION code when a buffer is freed
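
With msm_dma_unmap_all_for_dev() now exported, a loadable driver can release all of its cached IOMMU mappings on teardown. A minimal sketch (driver name illustrative only):

#include <linux/msm_dma_iommu_mapping.h>

/* Sketch: module teardown path using the newly exported helper. */
static void example_driver_remove(struct device *dev)
{
	if (msm_dma_unmap_all_for_dev(dev))
		dev_warn(dev, "failed to release cached IOMMU mappings\n");
}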
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index ccc1f43cb9a9..cd02396abbe7 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -7,6 +7,8 @@
* Copyright (c) 2000 Nokia Research Center
* Tampere, FINLAND
*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
@@ -36,6 +38,8 @@
* Common definitions
*/
+#define DMX_EVENT_QUEUE_SIZE 500 /* number of events */
+
/*
* DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter.
*/
@@ -57,6 +61,104 @@
#endif
/*
+ * enum dmx_success: Success codes for the Demux Callback API.
+ */
+enum dmx_success {
+	DMX_OK = 0, /* Received OK */
+ DMX_OK_PES_END, /* Received OK, data reached end of PES packet */
+ DMX_OK_PCR, /* Received OK, data with new PCR/STC pair */
+ DMX_OK_EOS, /* Received OK, reached End-of-Stream (EOS) */
+ DMX_OK_MARKER, /* Received OK, reached a data Marker */
+ DMX_LENGTH_ERROR, /* Incorrect length */
+ DMX_OVERRUN_ERROR, /* Receiver ring buffer overrun */
+ DMX_CRC_ERROR, /* Incorrect CRC */
+ DMX_FRAME_ERROR, /* Frame alignment error */
+ DMX_FIFO_ERROR, /* Receiver FIFO overrun */
+ DMX_MISSED_ERROR, /* Receiver missed packet */
+ DMX_OK_DECODER_BUF, /* Received OK, new ES data in decoder buffer */
+ DMX_OK_IDX, /* Received OK, new index event */
+ DMX_OK_SCRAMBLING_STATUS, /* Received OK, new scrambling status */
+};
+
+
+/*
+ * struct dmx_data_ready: Parameters for event notification callback.
+ * The event notifies the demux device either that data was written
+ * and is available in the device's output buffer, or reports errors
+ * and other events; in the latter case data_length is zero.
+ */
+struct dmx_data_ready {
+ enum dmx_success status;
+
+ /*
+	 * data_length may be 0 for DMX_OK_PES_END or DMX_OK_EOS
+	 * and for non-DMX_OK_XXX events. For DMX_OK_PES_END,
+	 * data_length counts the data arriving after the end of
+	 * the PES packet.
+ */
+ int data_length;
+
+ union {
+ struct {
+ int start_gap;
+ int actual_length;
+ int disc_indicator_set;
+ int pes_length_mismatch;
+ u64 stc;
+ u32 tei_counter;
+ u32 cont_err_counter;
+ u32 ts_packets_num;
+ } pes_end;
+
+ struct {
+ u64 pcr;
+ u64 stc;
+ int disc_indicator_set;
+ } pcr;
+
+ struct {
+ int handle;
+ int cookie;
+ u32 offset;
+ u32 len;
+ int pts_exists;
+ u64 pts;
+ int dts_exists;
+ u64 dts;
+ u32 tei_counter;
+ u32 cont_err_counter;
+ u32 ts_packets_num;
+ u32 ts_dropped_bytes;
+ u64 stc;
+ } buf;
+
+ struct {
+ u64 id;
+ } marker;
+
+ struct dmx_index_event_info idx_event;
+ struct dmx_scrambling_status_event_info scrambling_bits;
+ };
+};
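
A client consumes these parameters through the data-ready callbacks declared further below. A minimal sketch of a TS handler (handler name illustrative, matching the dmx_ts_data_ready_cb typedef):

/* Sketch: a demux client callback consuming dmx_data_ready events. */
static int my_ts_data_ready(struct dmx_ts_feed *source,
			    struct dmx_data_ready *rdy)
{
	switch (rdy->status) {
	case DMX_OK_PCR:
		/* new PCR/STC pair in rdy->pcr.pcr / rdy->pcr.stc */
		break;
	case DMX_OVERRUN_ERROR:
		/* output ring buffer overrun; rdy->data_length is 0 */
		break;
	default:
		break;
	}
	return 0;
}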
+
+/*
+ * struct data_buffer: Parameters of the buffer allocated by the
+ * demux device for input/output. Can be used to map the demux-device
+ * buffer directly to HW output if the HW supports it.
+ */
+struct data_buffer {
+ /* dvb_ringbuffer managed by demux-device */
+ const struct dvb_ringbuffer *ringbuff;
+
+	/*
+	 * Private handle returned by the kernel demux when
+	 * map_buffer is called, in case an external buffer
+	 * is used. NULL if the buffer is allocated internally.
+ */
+ void *priv_handle;
+};
+/*
* TS packet reception
*/
@@ -91,10 +193,18 @@ enum ts_filter_type {
* Using this API, the client can set the filtering properties to start/stop
* filtering TS packets on a particular TS feed.
*/
+struct dmx_ts_feed;
+
+typedef int (*dmx_ts_data_ready_cb)(
+ struct dmx_ts_feed *source,
+ struct dmx_data_ready *dmx_data_ready);
+
struct dmx_ts_feed {
int is_filtering;
struct dmx_demux *parent;
+ struct data_buffer buffer;
void *priv;
+ struct dmx_decoder_buffers *decoder_buffers;
int (*set)(struct dmx_ts_feed *feed,
u16 pid,
int type,
@@ -103,6 +213,34 @@ struct dmx_ts_feed {
struct timespec timeout);
int (*start_filtering)(struct dmx_ts_feed *feed);
int (*stop_filtering)(struct dmx_ts_feed *feed);
+ int (*set_video_codec)(struct dmx_ts_feed *feed,
+ enum dmx_video_codec video_codec);
+ int (*set_idx_params)(struct dmx_ts_feed *feed,
+ struct dmx_indexing_params *idx_params);
+ int (*get_decoder_buff_status)(
+ struct dmx_ts_feed *feed,
+ struct dmx_buffer_status *dmx_buffer_status);
+ int (*reuse_decoder_buffer)(
+ struct dmx_ts_feed *feed,
+ int cookie);
+ int (*data_ready_cb)(struct dmx_ts_feed *feed,
+ dmx_ts_data_ready_cb callback);
+ int (*notify_data_read)(struct dmx_ts_feed *feed,
+ u32 bytes_num);
+ int (*set_tsp_out_format)(struct dmx_ts_feed *feed,
+ enum dmx_tsp_format_t tsp_format);
+ int (*set_secure_mode)(struct dmx_ts_feed *feed,
+ struct dmx_secure_mode *sec_mode);
+ int (*set_cipher_ops)(struct dmx_ts_feed *feed,
+ struct dmx_cipher_operations *cipher_ops);
+ int (*oob_command)(struct dmx_ts_feed *feed,
+ struct dmx_oob_command *cmd);
+ int (*ts_insertion_init)(struct dmx_ts_feed *feed);
+ int (*ts_insertion_terminate)(struct dmx_ts_feed *feed);
+ int (*ts_insertion_insert_buffer)(struct dmx_ts_feed *feed,
+ char *data, size_t size);
+ int (*get_scrambling_bits)(struct dmx_ts_feed *feed, u8 *value);
+ int (*flush_buffer)(struct dmx_ts_feed *feed, size_t length);
};
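
The handler sketched earlier would be installed through the new data_ready_cb op; for example (feed allocation elided):

/* Sketch: install a data-ready callback on a TS feed. */
static int example_install_cb(struct dmx_ts_feed *feed)
{
	if (!feed->data_ready_cb)
		return -EINVAL;

	return feed->data_ready_cb(feed, my_ts_data_ready);
}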
/*
@@ -127,14 +265,21 @@ struct dmx_ts_feed {
* corresponding bits are compared. The filter only accepts sections that are
* equal to filter_value in all the tested bit positions.
*/
+
+struct dmx_section_feed;
struct dmx_section_filter {
u8 filter_value[DMX_MAX_FILTER_SIZE];
u8 filter_mask[DMX_MAX_FILTER_SIZE];
u8 filter_mode[DMX_MAX_FILTER_SIZE];
struct dmx_section_feed *parent; /* Back-pointer */
+ struct data_buffer buffer;
void *priv; /* Pointer to private data of the API client */
};
+typedef int (*dmx_section_data_ready_cb)(
+ struct dmx_section_filter *source,
+ struct dmx_data_ready *dmx_data_ready);
+
/**
* struct dmx_section_feed - Structure that contains a section feed filter
*
@@ -185,6 +330,18 @@ struct dmx_section_feed {
struct dmx_section_filter *filter);
int (*start_filtering)(struct dmx_section_feed *feed);
int (*stop_filtering)(struct dmx_section_feed *feed);
+ int (*data_ready_cb)(struct dmx_section_feed *feed,
+ dmx_section_data_ready_cb callback);
+ int (*notify_data_read)(struct dmx_section_filter *filter,
+ u32 bytes_num);
+ int (*set_secure_mode)(struct dmx_section_feed *feed,
+ struct dmx_secure_mode *sec_mode);
+ int (*set_cipher_ops)(struct dmx_section_feed *feed,
+ struct dmx_cipher_operations *cipher_ops);
+ int (*oob_command)(struct dmx_section_feed *feed,
+ struct dmx_oob_command *cmd);
+ int (*get_scrambling_bits)(struct dmx_section_feed *feed, u8 *value);
+ int (*flush_buffer)(struct dmx_section_feed *feed, size_t length);
};
/*
@@ -250,7 +407,8 @@ typedef int (*dmx_ts_cb)(const u8 *buffer1,
size_t buffer1_length,
const u8 *buffer2,
size_t buffer2_length,
- struct dmx_ts_feed *source);
+ struct dmx_ts_feed *source,
+ enum dmx_success success);
/**
* typedef dmx_section_cb - DVB demux section filter callback function prototype
@@ -291,7 +449,18 @@ typedef int (*dmx_section_cb)(const u8 *buffer1,
size_t buffer1_len,
const u8 *buffer2,
size_t buffer2_len,
- struct dmx_section_filter *source);
+ struct dmx_section_filter *source,
+ enum dmx_success success);
+
+typedef int (*dmx_ts_fullness) (
+ struct dmx_ts_feed *source,
+ int required_space,
+ int wait);
+
+typedef int (*dmx_section_fullness) (
+ struct dmx_section_filter *source,
+ int required_space,
+ int wait);
/*--------------------------------------------------------------------------*/
/* DVB Front-End */
@@ -310,6 +479,13 @@ typedef int (*dmx_section_cb)(const u8 *buffer1,
enum dmx_frontend_source {
DMX_MEMORY_FE,
DMX_FRONTEND_0,
+ DMX_FRONTEND_1,
+ DMX_FRONTEND_2,
+ DMX_FRONTEND_3,
+ DMX_STREAM_0, /* external stream input, e.g. LVDS */
+ DMX_STREAM_1,
+ DMX_STREAM_2,
+ DMX_STREAM_3
};
/**
@@ -343,8 +519,11 @@ struct dmx_frontend {
*/
enum dmx_demux_caps {
DMX_TS_FILTERING = 1,
+ DMX_PES_FILTERING = 2,
DMX_SECTION_FILTERING = 4,
DMX_MEMORY_BASED_FILTERING = 8,
+ DMX_CRC_CHECKING = 16,
+ DMX_TS_DESCRAMBLING = 32
};
/*
@@ -556,6 +735,10 @@ struct dmx_demux {
enum dmx_demux_caps capabilities;
struct dmx_frontend *frontend;
void *priv;
+ struct data_buffer dvr_input; /* DVR input buffer */
+ int dvr_input_protected;
+ struct dentry *debugfs_demux_dir; /* debugfs dir */
+
int (*open)(struct dmx_demux *demux);
int (*close)(struct dmx_demux *demux);
int (*write)(struct dmx_demux *demux, const char __user *buf,
@@ -581,18 +764,31 @@ struct dmx_demux {
int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids);
- /* private: Not used upstream and never documented */
-#if 0
int (*get_caps)(struct dmx_demux *demux, struct dmx_caps *caps);
+
int (*set_source)(struct dmx_demux *demux, const dmx_source_t *src);
-#endif
- /*
- * private: Only used at av7110, to read some data from firmware.
- * As this was never documented, we have no clue about what's
- * there, and its usage on other drivers aren't encouraged.
- */
+
+ int (*set_tsp_format)(struct dmx_demux *demux,
+ enum dmx_tsp_format_t tsp_format);
+
+ int (*set_playback_mode)(struct dmx_demux *demux,
+ enum dmx_playback_mode_t mode,
+ dmx_ts_fullness ts_fullness_callback,
+ dmx_section_fullness sec_fullness_callback);
+
+ int (*write_cancel)(struct dmx_demux *demux);
+
int (*get_stc)(struct dmx_demux *demux, unsigned int num,
u64 *stc, unsigned int *base);
+
+ int (*map_buffer)(struct dmx_demux *demux,
+ struct dmx_buffer *dmx_buffer,
+ void **priv_handle, void **mem);
+
+ int (*unmap_buffer)(struct dmx_demux *demux,
+ void *priv_handle);
+
+ int (*get_tsp_size)(struct dmx_demux *demux);
};
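
Of the new ops, set_playback_mode() is the entry point for the PULL-mode flow used by dmxdev below. A minimal sketch of enabling it (callback arguments as in the fullness typedefs above; DMX_PB_MODE_PULL is assumed from the dmxdev changes below):

/* Sketch: select PULL-mode playback so the demux can block until the
 * client frees output space, instead of overflowing.
 */
static int example_enable_pull(struct dmx_demux *demux,
			       dmx_ts_fullness ts_cb,
			       dmx_section_fullness sec_cb)
{
	if (!demux->set_playback_mode)
		return -EINVAL;

	return demux->set_playback_mode(demux, DMX_PB_MODE_PULL,
					ts_cb, sec_cb);
}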
#endif /* #ifndef __DEMUX_H */
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index ea9abde902e9..63becfd57eaa 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -28,15 +28,74 @@
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/mm.h>
#include "dmxdev.h"
-static int debug;
+static int overflow_auto_flush = 1;
+module_param(overflow_auto_flush, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(overflow_auto_flush,
+ "Automatically flush buffer on overflow (default: on)");
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+#define DMX_DEFAULT_DECODER_BUFFER_SIZE (32768)
-#define dprintk if (debug) printk
+static inline int dvb_dmxdev_verify_buffer_size(u32 size, u32 max_size,
+ u32 size_align)
+{
+ if (size_align)
+ return size <= max_size && !(size % size_align);
+ else
+ return size <= max_size;
+}
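
For example, with size_align = 188 (one TS packet) a 940-byte buffer passes while a 1000-byte one is rejected; a zero alignment enforces only the maximum size.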
+
+static int dvb_filter_verify_buffer_size(struct dmxdev_filter *filter)
+{
+ struct dmx_caps caps;
+ size_t size = filter->buffer.size;
+
+ /*
+	 * For backward compatibility, if the demux capabilities cannot
+	 * be retrieved, assume the size is OK.
+	 * The decoder filter buffer size is verified when the decoder
+	 * buffer is set.
+ */
+ if (filter->dev->demux->get_caps) {
+ filter->dev->demux->get_caps(filter->dev->demux, &caps);
+
+ if (filter->type == DMXDEV_TYPE_SEC)
+ return dvb_dmxdev_verify_buffer_size(
+ size,
+ caps.section.max_size,
+ caps.section.size_alignment);
+
+ if (filter->params.pes.output == DMX_OUT_TAP)
+ return dvb_dmxdev_verify_buffer_size(
+ size,
+ caps.pes.max_size,
+ caps.pes.size_alignment);
+
+ size = (filter->params.pes.output == DMX_OUT_TS_TAP) ?
+ filter->dev->dvr_buffer.size : size;
+
+ if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP ||
+ filter->params.pes.output == DMX_OUT_TS_TAP) {
+ if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+ return dvb_dmxdev_verify_buffer_size(
+ size,
+ caps.recording_188_tsp.max_size,
+ caps.recording_188_tsp.size_alignment);
+
+ return dvb_dmxdev_verify_buffer_size(
+ size,
+ caps.recording_192_tsp.max_size,
+ caps.recording_192_tsp.size_alignment);
+ }
+ }
+
+ return 1;
+}
static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
const u8 *src, size_t len)
@@ -50,16 +109,400 @@ static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
free = dvb_ringbuffer_free(buf);
if (len > free) {
- dprintk("dmxdev: buffer overflow\n");
+ pr_debug("dmxdev: buffer overflow\n");
return -EOVERFLOW;
}
return dvb_ringbuffer_write(buf, src, len);
}
-static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
- int non_blocking, char __user *buf,
- size_t count, loff_t *ppos)
+static inline void dvb_dmxdev_notify_data_read(struct dmxdev_filter *filter,
+ int bytes_read)
+{
+ if (!filter)
+ return;
+
+ if (filter->type == DMXDEV_TYPE_SEC) {
+ if (filter->feed.sec.feed->notify_data_read)
+ filter->feed.sec.feed->notify_data_read(
+ filter->filter.sec,
+ bytes_read);
+ } else {
+ struct dmxdev_feed *feed;
+
+ /*
+		 * All feeds of the same demux handle share the same
+		 * output buffer, so it is enough to notify the buffer
+		 * status on one of the feeds.
+ */
+ feed = list_first_entry(&filter->feed.ts,
+ struct dmxdev_feed, next);
+
+ if (feed->ts->notify_data_read)
+ feed->ts->notify_data_read(
+ feed->ts,
+ bytes_read);
+ }
+}
+
+static inline u32 dvb_dmxdev_advance_event_idx(u32 index)
+{
+ index++;
+ if (index >= DMX_EVENT_QUEUE_SIZE)
+ index = 0;
+
+ return index;
+}
+
+static inline int dvb_dmxdev_events_is_full(struct dmxdev_events_queue *events)
+{
+ int new_write_index;
+
+ new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
+ if (new_write_index == events->read_index)
+ return 1;
+
+ return 0;
+}
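
Note that, as with any ring-index scheme that leaves one slot empty to distinguish a full queue from an empty one, a queue of DMX_EVENT_QUEUE_SIZE = 500 entries holds at most 499 pending events.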
+
+static inline void dvb_dmxdev_flush_events(struct dmxdev_events_queue *events)
+{
+ events->read_index = 0;
+ events->write_index = 0;
+ events->notified_index = 0;
+ events->bytes_read_no_event = 0;
+ events->current_event_data_size = 0;
+ events->wakeup_events_counter = 0;
+}
+
+static inline void dvb_dmxdev_flush_output(struct dvb_ringbuffer *buffer,
+ struct dmxdev_events_queue *events)
+{
+ dvb_dmxdev_flush_events(events);
+ dvb_ringbuffer_flush(buffer);
+}
+
+static int dvb_dmxdev_update_pes_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ int start_delta;
+
+ if (event->params.pes.total_length <= bytes_read)
+ return event->params.pes.total_length;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+ event->params.pes.total_length -= bytes_read;
+
+ start_delta = event->params.pes.start_offset -
+ event->params.pes.base_offset;
+
+ if (bytes_read <= start_delta) {
+ event->params.pes.base_offset +=
+ bytes_read;
+ } else {
+ start_delta =
+ bytes_read - start_delta;
+
+ event->params.pes.start_offset += start_delta;
+ event->params.pes.actual_length -= start_delta;
+
+ event->params.pes.base_offset =
+ event->params.pes.start_offset;
+ }
+
+ return 0;
+}
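
Worked example: with total_length = 100, a 10-byte gap between base_offset and start_offset, and bytes_read = 30, total_length drops to 70; since the read crossed the 10-byte gap, start_offset advances by the 20 bytes read past it, actual_length shrinks by the same 20, and base_offset snaps to the new start_offset.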
+
+static int dvb_dmxdev_update_section_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ int start_delta;
+
+ if (event->params.section.total_length <= bytes_read)
+ return event->params.section.total_length;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+
+ event->params.section.total_length -= bytes_read;
+
+ start_delta = event->params.section.start_offset -
+ event->params.section.base_offset;
+
+ if (bytes_read <= start_delta) {
+ event->params.section.base_offset +=
+ bytes_read;
+ } else {
+ start_delta =
+ bytes_read - start_delta;
+
+ event->params.section.start_offset += start_delta;
+ event->params.section.actual_length -= start_delta;
+
+ event->params.section.base_offset =
+ event->params.section.start_offset;
+ }
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_rec_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ if (event->params.recording_chunk.size <= bytes_read)
+ return event->params.recording_chunk.size;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+ event->params.recording_chunk.size -= bytes_read;
+ event->params.recording_chunk.offset += bytes_read;
+
+ return 0;
+}
+
+static int dvb_dmxdev_add_event(struct dmxdev_events_queue *events,
+ struct dmx_filter_event *event)
+{
+ int res;
+ int new_write_index;
+ int data_event;
+
+ /* Check if the event is disabled */
+ if (events->event_mask.disable_mask & event->type)
+ return 0;
+
+	/* Check if we are adding an event whose data the user already read */
+ if (events->bytes_read_no_event) {
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event,
+ events->bytes_read_no_event);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ events->bytes_read_no_event);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event,
+ events->bytes_read_no_event);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+ * Data relevant to this event was fully
+ * consumed already, discard event.
+ */
+ events->bytes_read_no_event -= res;
+ return 0;
+ }
+ events->bytes_read_no_event = 0;
+ } else {
+ /*
+			 * data was read beyond the non-data event,
+			 * making it no longer relevant
+ */
+ return 0;
+ }
+ }
+
+ new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
+ if (new_write_index == events->read_index) {
+ pr_err("dmxdev: events overflow\n");
+ return -EOVERFLOW;
+ }
+
+ events->queue[events->write_index] = *event;
+ events->write_index = new_write_index;
+
+ if (!(events->event_mask.no_wakeup_mask & event->type))
+ events->wakeup_events_counter++;
+
+ return 0;
+}
+
+static int dvb_dmxdev_remove_event(struct dmxdev_events_queue *events,
+ struct dmx_filter_event *event)
+{
+ if (events->notified_index == events->write_index)
+ return -ENODATA;
+
+ *event = events->queue[events->notified_index];
+
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(events->notified_index);
+
+ if (!(events->event_mask.no_wakeup_mask & event->type))
+ events->wakeup_events_counter--;
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_events(struct dmxdev_events_queue *events,
+ int bytes_read)
+{
+ struct dmx_filter_event *event;
+ int res;
+ int data_event;
+
+ /*
+ * If data events are not enabled on this filter,
+ * there's nothing to update.
+ */
+ if (events->data_read_event_masked)
+ return 0;
+
+ /*
+ * Go through all events that were notified and
+ * remove them from the events queue if their respective
+ * data was read.
+ */
+ while ((events->read_index != events->notified_index) &&
+ (bytes_read)) {
+ event = events->queue + events->read_index;
+
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event, bytes_read);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ bytes_read);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event, bytes_read);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+ * Data relevant to this event was
+ * fully consumed, remove it from the queue.
+ */
+ bytes_read -= res;
+ events->read_index =
+ dvb_dmxdev_advance_event_idx(
+ events->read_index);
+ } else {
+ bytes_read = 0;
+ }
+ } else {
+ /*
+ * non-data event was already notified,
+ * no need to keep it
+ */
+ events->read_index = dvb_dmxdev_advance_event_idx(
+ events->read_index);
+ }
+ }
+
+ if (!bytes_read)
+ return 0;
+
+ /*
+	 * If we reached here it means that bytes_read != 0 and
+	 * events->read_index == events->notified_index.
+	 * Check whether the queue holds pending events that the user
+	 * has not read yet even though their data was already
+	 * consumed.
+ */
+ while ((events->notified_index != events->write_index) &&
+ (bytes_read)) {
+ event = events->queue + events->notified_index;
+
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event, bytes_read);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ bytes_read);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event, bytes_read);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+ * Data relevant to this event was
+ * fully consumed, remove it from the queue.
+ */
+ bytes_read -= res;
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(
+ events->notified_index);
+ if (!(events->event_mask.no_wakeup_mask &
+ event->type))
+ events->wakeup_events_counter--;
+ } else {
+ bytes_read = 0;
+ }
+ } else {
+			if (bytes_read) {
+				/*
+				 * data was read beyond the non-data event,
+				 * making it no longer relevant
+				 */
+				events->notified_index =
+					dvb_dmxdev_advance_event_idx(
+						events->notified_index);
+				if (!(events->event_mask.no_wakeup_mask &
+					event->type))
+					events->wakeup_events_counter--;
+			}
+		}
+
+ events->read_index = events->notified_index;
+ }
+
+ /*
+ * Check if data was read without having a respective
+ * event in the events-queue
+ */
+ if (bytes_read)
+ events->bytes_read_no_event += bytes_read;
+
+ return 0;
+}
+
+static inline int dvb_dmxdev_check_data(struct dmxdev_filter *filter,
+ struct dvb_ringbuffer *src)
+{
+ int data_status_change;
+
+ if (filter)
+ if (mutex_lock_interruptible(&filter->mutex))
+ return -ERESTARTSYS;
+
+ if (!src->data ||
+ !dvb_ringbuffer_empty(src) ||
+ src->error ||
+ (filter &&
+ (filter->state != DMXDEV_STATE_GO) &&
+ (filter->state != DMXDEV_STATE_DONE)))
+ data_status_change = 1;
+ else
+ data_status_change = 0;
+
+ if (filter)
+ mutex_unlock(&filter->mutex);
+
+ return data_status_change;
+}
+
+static ssize_t dvb_dmxdev_buffer_read(struct dmxdev_filter *filter,
+ struct dvb_ringbuffer *src,
+ int non_blocking, char __user *buf,
+ size_t count, loff_t *ppos)
{
size_t todo;
ssize_t avail;
@@ -70,7 +513,7 @@ static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
if (src->error) {
ret = src->error;
- dvb_ringbuffer_flush(src);
+ src->error = 0;
return ret;
}
@@ -80,15 +523,35 @@ static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
break;
}
+ if (filter) {
+ if ((filter->state == DMXDEV_STATE_DONE) &&
+ dvb_ringbuffer_empty(src))
+ break;
+
+ mutex_unlock(&filter->mutex);
+ }
+
ret = wait_event_interruptible(src->queue,
- !dvb_ringbuffer_empty(src) ||
- (src->error != 0));
+ dvb_dmxdev_check_data(filter, src));
+
+ if (filter) {
+ if (mutex_lock_interruptible(&filter->mutex))
+ return -ERESTARTSYS;
+
+ if ((filter->state != DMXDEV_STATE_GO) &&
+ (filter->state != DMXDEV_STATE_DONE))
+ return -ENODEV;
+ }
+
if (ret < 0)
break;
+ if (!src->data)
+ return 0;
+
if (src->error) {
ret = src->error;
- dvb_ringbuffer_flush(src);
+ src->error = 0;
break;
}
@@ -103,6 +566,9 @@ static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
buf += ret;
}
+ if (count - todo) /* some data was read? */
+ wake_up_all(&src->queue);
+
return (count - todo) ? (count - todo) : ret;
}
@@ -120,13 +586,238 @@ static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type)
return NULL;
}
+static void dvb_dvr_oob_cmd(struct dmxdev *dmxdev, struct dmx_oob_command *cmd)
+{
+ int i;
+ struct dmxdev_filter *filter;
+ struct dmxdev_feed *feed;
+
+ for (i = 0; i < dmxdev->filternum; i++) {
+ filter = &dmxdev->filter[i];
+ if (!filter || filter->state != DMXDEV_STATE_GO)
+ continue;
+
+ switch (filter->type) {
+ case DMXDEV_TYPE_SEC:
+ filter->feed.sec.feed->oob_command(
+ filter->feed.sec.feed, cmd);
+ break;
+ case DMXDEV_TYPE_PES:
+ feed = list_first_entry(&filter->feed.ts,
+ struct dmxdev_feed, next);
+ feed->ts->oob_command(feed->ts, cmd);
+ break;
+ case DMXDEV_TYPE_NONE:
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int dvb_dvr_feed_cmd(struct dmxdev *dmxdev, struct dvr_command *dvr_cmd)
+{
+ int ret = 0;
+ size_t todo;
+ int bytes_written = 0;
+ size_t split;
+ size_t tsp_size;
+ u8 *data_start;
+ struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
+
+ todo = dvr_cmd->cmd.data_feed_count;
+
+ if (dmxdev->demux->get_tsp_size)
+ tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
+ else
+ tsp_size = 188;
+
+ while (todo >= tsp_size) {
+ /* wait for input */
+ ret = wait_event_interruptible(
+ src->queue,
+ (dvb_ringbuffer_avail(src) >= tsp_size) ||
+ dmxdev->dvr_in_exit || src->error);
+
+ if (ret < 0)
+ break;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ if (dmxdev->exit || dmxdev->dvr_in_exit) {
+ spin_unlock(&dmxdev->dvr_in_lock);
+ ret = -ENODEV;
+ break;
+ }
+
+ if (src->error) {
+ spin_unlock(&dmxdev->dvr_in_lock);
+ wake_up_all(&src->queue);
+ ret = -EINVAL;
+ break;
+ }
+
+ dmxdev->dvr_processing_input = 1;
+
+ split = (src->pread + todo > src->size) ?
+ src->size - src->pread : 0;
+
+ /*
+		 * In DVR PULL mode, write might block.
+		 * The lock on the DVR buffer is released before calling
+		 * write; if the DVR is released in the meantime,
+		 * dvr_in_exit is raised. The lock is re-acquired when
+		 * updating the read pointer, to keep the read/write
+		 * pointers consistent.
+		 *
+		 * In protected input mode, the DVR input buffer is not
+		 * mapped to kernel memory. The underlying demux
+		 * implementation should trigger HW to read from the DVR
+		 * input buffer based on the current read offset.
+ */
+ if (split > 0) {
+ data_start = (dmxdev->demux->dvr_input_protected) ?
+ NULL : (src->data + src->pread);
+
+ spin_unlock(&dmxdev->dvr_in_lock);
+ ret = dmxdev->demux->write(dmxdev->demux,
+ data_start,
+ split);
+
+ if (ret < 0) {
+ pr_err("dmxdev: dvr write error %d\n", ret);
+ continue;
+ }
+
+ if (dmxdev->dvr_in_exit) {
+ ret = -ENODEV;
+ break;
+ }
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ todo -= ret;
+ bytes_written += ret;
+ DVB_RINGBUFFER_SKIP(src, ret);
+ if (ret < split) {
+ dmxdev->dvr_processing_input = 0;
+ spin_unlock(&dmxdev->dvr_in_lock);
+ wake_up_all(&src->queue);
+ continue;
+ }
+ }
+
+ data_start = (dmxdev->demux->dvr_input_protected) ?
+ NULL : (src->data + src->pread);
+
+ spin_unlock(&dmxdev->dvr_in_lock);
+ ret = dmxdev->demux->write(dmxdev->demux,
+ data_start, todo);
+
+ if (ret < 0) {
+ pr_err("dmxdev: dvr write error %d\n", ret);
+ continue;
+ }
+
+ if (dmxdev->dvr_in_exit) {
+ ret = -ENODEV;
+ break;
+ }
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ todo -= ret;
+ bytes_written += ret;
+ DVB_RINGBUFFER_SKIP(src, ret);
+ dmxdev->dvr_processing_input = 0;
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ wake_up_all(&src->queue);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return bytes_written;
+}
+
+static int dvr_input_thread_entry(void *arg)
+{
+ struct dmxdev *dmxdev = arg;
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ struct dvr_command dvr_cmd;
+ int leftover = 0;
+ int ret;
+
+ while (1) {
+ /* wait for input */
+ ret = wait_event_interruptible(
+ cmdbuf->queue,
+ (!cmdbuf->data) ||
+ (dvb_ringbuffer_avail(cmdbuf) >= sizeof(dvr_cmd)) ||
+ (dmxdev->dvr_in_exit));
+
+ if (ret < 0)
+ break;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ if (!cmdbuf->data || dmxdev->exit || dmxdev->dvr_in_exit) {
+ spin_unlock(&dmxdev->dvr_in_lock);
+ break;
+ }
+
+ dvb_ringbuffer_read(cmdbuf, (u8 *)&dvr_cmd, sizeof(dvr_cmd));
+
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ if (dvr_cmd.type == DVR_DATA_FEED_CMD) {
+ dvr_cmd.cmd.data_feed_count += leftover;
+
+ ret = dvb_dvr_feed_cmd(dmxdev, &dvr_cmd);
+ if (ret < 0) {
+ pr_debug("%s: DVR data feed failed, ret=%d\n",
+ __func__, ret);
+ continue;
+ }
+
+ leftover = dvr_cmd.cmd.data_feed_count - ret;
+ } else {
+ /*
+ * For EOS, try to process leftover data in the input
+ * buffer.
+ */
+ if (dvr_cmd.cmd.oobcmd.type == DMX_OOB_CMD_EOS) {
+ struct dvr_command feed_cmd;
+
+ feed_cmd.type = DVR_DATA_FEED_CMD;
+ feed_cmd.cmd.data_feed_count =
+ dvb_ringbuffer_avail(
+ &dmxdev->dvr_input_buffer);
+ dvb_dvr_feed_cmd(dmxdev, &feed_cmd);
+ }
+
+ dvb_dvr_oob_cmd(dmxdev, &dvr_cmd.cmd.oobcmd);
+ }
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+
+ return 0;
+}
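
For example, if the underlying demux write consumes only whole 188-byte packets, a FEED command for 1000 bytes ends with bytes_written = 940; the 60-byte leftover is folded into the next FEED command rather than dropped.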
+
static int dvb_dvr_open(struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
struct dmx_frontend *front;
+ void *mem;
- dprintk("function : %s\n", __func__);
+ pr_debug("function : %s(%X)\n", __func__, (file->f_flags & O_ACCMODE));
if (mutex_lock_interruptible(&dmxdev->mutex))
return -ERESTARTSYS;
@@ -144,21 +835,28 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
}
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
- void *mem;
if (!dvbdev->readers) {
mutex_unlock(&dmxdev->mutex);
return -EBUSY;
}
- mem = vmalloc(DVR_BUFFER_SIZE);
+ mem = vmalloc_user(DVR_BUFFER_SIZE);
if (!mem) {
mutex_unlock(&dmxdev->mutex);
return -ENOMEM;
}
dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
- dvbdev->readers--;
- }
+ dvb_dmxdev_flush_events(&dmxdev->dvr_output_events);
+ dmxdev->dvr_output_events.event_mask.disable_mask = 0;
+ dmxdev->dvr_output_events.event_mask.no_wakeup_mask = 0;
+ dmxdev->dvr_output_events.event_mask.wakeup_threshold = 1;
+ dmxdev->dvr_feeds_count = 0;
+ dmxdev->dvr_buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+ dmxdev->dvr_priv_buff_handle = NULL;
- if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+ dvbdev->readers--;
+ } else if (!dvbdev->writers) {
+ dmxdev->dvr_in_exit = 0;
+ dmxdev->dvr_processing_input = 0;
dmxdev->dvr_orig_fe = dmxdev->demux->frontend;
if (!dmxdev->demux->write) {
@@ -172,9 +870,51 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
mutex_unlock(&dmxdev->mutex);
return -EINVAL;
}
+
+ mem = vmalloc_user(DVR_BUFFER_SIZE);
+ if (!mem) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ENOMEM;
+ }
+
dmxdev->demux->disconnect_frontend(dmxdev->demux);
dmxdev->demux->connect_frontend(dmxdev->demux, front);
+ dmxdev->dvr_input_buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+
+ dvb_ringbuffer_init(&dmxdev->dvr_input_buffer,
+ mem,
+ DVR_BUFFER_SIZE);
+
+ dmxdev->demux->dvr_input.priv_handle = NULL;
+ dmxdev->demux->dvr_input.ringbuff = &dmxdev->dvr_input_buffer;
+ dmxdev->demux->dvr_input_protected = 0;
+ mem = vmalloc(DVR_CMDS_BUFFER_SIZE);
+ if (!mem) {
+ vfree(dmxdev->dvr_input_buffer.data);
+ dmxdev->dvr_input_buffer.data = NULL;
+ mutex_unlock(&dmxdev->mutex);
+ return -ENOMEM;
+ }
+ dvb_ringbuffer_init(&dmxdev->dvr_cmd_buffer, mem,
+ DVR_CMDS_BUFFER_SIZE);
+ dvbdev->writers--;
+
+ dmxdev->dvr_input_thread =
+ kthread_run(
+ dvr_input_thread_entry,
+ (void *)dmxdev,
+ "dvr_input");
+
+ if (IS_ERR(dmxdev->dvr_input_thread)) {
+ vfree(dmxdev->dvr_input_buffer.data);
+ vfree(dmxdev->dvr_cmd_buffer.data);
+ dmxdev->dvr_input_buffer.data = NULL;
+ dmxdev->dvr_cmd_buffer.data = NULL;
+ mutex_unlock(&dmxdev->mutex);
+ return -ENOMEM;
+ }
}
+
dvbdev->users++;
mutex_unlock(&dmxdev->mutex);
return 0;
@@ -187,11 +927,6 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
mutex_lock(&dmxdev->mutex);
- if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
- dmxdev->demux->disconnect_frontend(dmxdev->demux);
- dmxdev->demux->connect_frontend(dmxdev->demux,
- dmxdev->dvr_orig_fe);
- }
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
dvbdev->readers++;
if (dmxdev->dvr_buffer.data) {
@@ -200,12 +935,100 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
spin_lock_irq(&dmxdev->lock);
dmxdev->dvr_buffer.data = NULL;
spin_unlock_irq(&dmxdev->lock);
+ wake_up_all(&dmxdev->dvr_buffer.queue);
+
+ if (dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_INTERNAL)
+ vfree(mem);
+ }
+
+ if ((dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
+ dmxdev->dvr_priv_buff_handle) {
+ dmxdev->demux->unmap_buffer(dmxdev->demux,
+ dmxdev->dvr_priv_buff_handle);
+ dmxdev->dvr_priv_buff_handle = NULL;
+ }
+ } else {
+ int i;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+ dmxdev->dvr_in_exit = 1;
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ wake_up_all(&dmxdev->dvr_cmd_buffer.queue);
+
+ /*
+		 * There might be dmx filters currently reading from the
+		 * DVR device; in PULL mode they might also be stalled
+		 * on output. Signal to them that the DVR is exiting.
+ */
+ if (dmxdev->playback_mode == DMX_PB_MODE_PULL) {
+ wake_up_all(&dmxdev->dvr_buffer.queue);
+
+ for (i = 0; i < dmxdev->filternum; i++)
+ if (dmxdev->filter[i].state == DMXDEV_STATE_GO)
+ wake_up_all(
+ &dmxdev->filter[i].buffer.queue);
+ }
+
+ /* notify kernel demux that we are canceling */
+ if (dmxdev->demux->write_cancel)
+ dmxdev->demux->write_cancel(dmxdev->demux);
+
+ /*
+		 * Now stop the dvr-input thread so that no one can
+		 * still be processing data from the dvr input buffer
+		 * when it gets freed.
+ */
+ kthread_stop(dmxdev->dvr_input_thread);
+
+ dvbdev->writers++;
+ dmxdev->demux->disconnect_frontend(dmxdev->demux);
+ dmxdev->demux->connect_frontend(dmxdev->demux,
+ dmxdev->dvr_orig_fe);
+
+ if (dmxdev->dvr_input_buffer.data) {
+ void *mem = dmxdev->dvr_input_buffer.data;
+ /*
+ * Ensure all the operations on the DVR input buffer
+ * are completed before it gets freed.
+ */
+ mb();
+ spin_lock_irq(&dmxdev->dvr_in_lock);
+ dmxdev->dvr_input_buffer.data = NULL;
+ spin_unlock_irq(&dmxdev->dvr_in_lock);
+
+ if (dmxdev->dvr_input_buffer_mode ==
+ DMX_BUFFER_MODE_INTERNAL)
+ vfree(mem);
+ }
+
+ if ((dmxdev->dvr_input_buffer_mode ==
+ DMX_BUFFER_MODE_EXTERNAL) &&
+ (dmxdev->demux->dvr_input.priv_handle)) {
+ if (!dmxdev->demux->dvr_input_protected)
+ dmxdev->demux->unmap_buffer(dmxdev->demux,
+ dmxdev->demux->dvr_input.priv_handle);
+ dmxdev->demux->dvr_input.priv_handle = NULL;
+ }
+
+ if (dmxdev->dvr_cmd_buffer.data) {
+ void *mem = dmxdev->dvr_cmd_buffer.data;
+ /*
+ * Ensure all the operations on the DVR command buffer
+ * are completed before it gets freed.
+ */
+ mb();
+ spin_lock_irq(&dmxdev->dvr_in_lock);
+ dmxdev->dvr_cmd_buffer.data = NULL;
+ spin_unlock_irq(&dmxdev->dvr_in_lock);
vfree(mem);
}
}
/* TODO */
dvbdev->users--;
if (dvbdev->users == 1 && dmxdev->exit == 1) {
+ fops_put(file->f_op);
+ file->f_op = NULL;
mutex_unlock(&dmxdev->mutex);
wake_up(&dvbdev->wait_queue);
} else
@@ -214,17 +1037,21 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+
+static int dvb_dvr_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct dvb_device *dvbdev = file->private_data;
+ struct dvb_device *dvbdev = filp->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
+ struct dvb_ringbuffer *buffer;
+ enum dmx_buffer_mode buffer_mode;
+ int vma_size;
+ int buffer_size;
int ret;
- if (!dmxdev->demux->write)
- return -EOPNOTSUPP;
- if ((file->f_flags & O_ACCMODE) != O_WRONLY)
+ if (((filp->f_flags & O_ACCMODE) == O_RDONLY) &&
+ (vma->vm_flags & VM_WRITE))
return -EINVAL;
+
if (mutex_lock_interruptible(&dmxdev->mutex))
return -ERESTARTSYS;
@@ -232,58 +1059,693 @@ static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
mutex_unlock(&dmxdev->mutex);
return -ENODEV;
}
- ret = dmxdev->demux->write(dmxdev->demux, buf, count);
+
+ if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
+ buffer = &dmxdev->dvr_buffer;
+ buffer_mode = dmxdev->dvr_buffer_mode;
+ } else {
+ buffer = &dmxdev->dvr_input_buffer;
+ buffer_mode = dmxdev->dvr_input_buffer_mode;
+ }
+
+ if (buffer_mode == DMX_BUFFER_MODE_EXTERNAL) {
+ mutex_unlock(&dmxdev->mutex);
+ return -EINVAL;
+ }
+
+ vma_size = vma->vm_end - vma->vm_start;
+
+	/* The requested mapping must exactly match the page-aligned buffer size */
+ buffer_size = buffer->size + (PAGE_SIZE-1);
+ buffer_size = buffer_size & ~(PAGE_SIZE-1);
+
+ if (vma_size != buffer_size) {
+ mutex_unlock(&dmxdev->mutex);
+ return -EINVAL;
+ }
+
+ ret = remap_vmalloc_range(vma, buffer->data, 0);
+ if (ret) {
+ mutex_unlock(&dmxdev->mutex);
+ return ret;
+ }
+
+ vma->vm_flags |= VM_DONTDUMP;
+ vma->vm_flags |= VM_DONTEXPAND;
+
mutex_unlock(&dmxdev->mutex);
return ret;
}
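
From user space, the mapping must be sized to the page-aligned buffer and, for the read-only DVR output side, requested without PROT_WRITE. A minimal sketch (fd obtained from an O_RDONLY open of the DVR device):

#include <sys/mman.h>

/* Sketch: map the DVR output ring buffer read-only. */
static void *example_map_dvr(int fd, size_t aligned_buffer_size)
{
	return mmap(NULL, aligned_buffer_size, PROT_READ, MAP_SHARED,
		    fd, 0);
}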
+static void dvb_dvr_queue_data_feed(struct dmxdev *dmxdev, size_t count)
+{
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ struct dvr_command *dvr_cmd;
+ int last_dvr_cmd;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ /* Peek at the last DVR command queued, try to coalesce FEED commands */
+ if (dvb_ringbuffer_avail(cmdbuf) >= sizeof(*dvr_cmd)) {
+ last_dvr_cmd = cmdbuf->pwrite - sizeof(*dvr_cmd);
+ if (last_dvr_cmd < 0)
+ last_dvr_cmd += cmdbuf->size;
+
+ dvr_cmd = (struct dvr_command *)&cmdbuf->data[last_dvr_cmd];
+ if (dvr_cmd->type == DVR_DATA_FEED_CMD) {
+ dvr_cmd->cmd.data_feed_count += count;
+ spin_unlock(&dmxdev->dvr_in_lock);
+ return;
+ }
+ }
+
+ /*
+	 * We assume the command buffer is large enough that overflow
+	 * should not happen. Overflowing the command buffer means data
+	 * previously written to the input buffer is orphaned - it has
+	 * no matching FEED command - so issue a warning if this ever
+	 * happens. Orphaned data may still be processed if an EOS is
+	 * issued.
+ */
+ if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd)) {
+ pr_err("%s: DVR command buffer overflow\n", __func__);
+ spin_unlock(&dmxdev->dvr_in_lock);
+ return;
+ }
+
+ dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
+ dvr_cmd->type = DVR_DATA_FEED_CMD;
+ dvr_cmd->cmd.data_feed_count = count;
+ DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ wake_up_all(&cmdbuf->queue);
+}
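
As a result of the coalescing above, two back-to-back writes of 376 bytes each produce a single DVR_DATA_FEED_CMD with data_feed_count = 752 rather than two queued commands.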
+
+static int dvb_dvr_external_input_only(struct dmxdev *dmxdev)
+{
+ struct dmx_caps caps;
+ int is_external_only;
+ int flags;
+ size_t tsp_size;
+
+ if (dmxdev->demux->get_tsp_size)
+ tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
+ else
+ tsp_size = 188;
+
+ /*
+	 * For backward compatibility, the default assumes that
+	 * external-only buffers are not supported.
+ */
+ flags = 0;
+ if (dmxdev->demux->get_caps) {
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+
+ if (tsp_size == 188)
+ flags = caps.playback_188_tsp.flags;
+ else
+ flags = caps.playback_192_tsp.flags;
+ }
+
+ if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) &&
+ (flags & DMX_BUFFER_EXTERNAL_SUPPORT))
+ is_external_only = 1;
+ else
+ is_external_only = 0;
+
+ return is_external_only;
+}
+
+static int dvb_dvr_verify_buffer_size(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ unsigned long size)
+{
+ struct dmx_caps caps;
+ int tsp_size;
+
+ if (!dmxdev->demux->get_caps)
+ return 1;
+
+ if (dmxdev->demux->get_tsp_size)
+ tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
+ else
+ tsp_size = 188;
+
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+ if ((f_flags & O_ACCMODE) == O_RDONLY)
+ return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size,
+ caps.recording_188_tsp.max_size,
+ caps.recording_188_tsp.size_alignment)) ||
+ (tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size,
+ caps.recording_192_tsp.max_size,
+ caps.recording_192_tsp.size_alignment));
+
+ return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size,
+ caps.playback_188_tsp.max_size,
+ caps.playback_188_tsp.size_alignment)) ||
+ (tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size,
+ caps.playback_192_tsp.max_size,
+ caps.playback_192_tsp.size_alignment));
+}
+
+static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct dmxdev *dmxdev = dvbdev->priv;
+ struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ int ret;
+ size_t todo;
+ ssize_t free_space;
+
+ if (!dmxdev->demux->write)
+ return -EOPNOTSUPP;
+
+ if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags, src->size) ||
+ ((file->f_flags & O_ACCMODE) == O_RDONLY) ||
+ !src->data || !cmdbuf->data ||
+ (dvb_dvr_external_input_only(dmxdev) &&
+ (dmxdev->dvr_input_buffer_mode == DMX_BUFFER_MODE_INTERNAL)))
+ return -EINVAL;
+
+ if ((file->f_flags & O_NONBLOCK) &&
+ (dvb_ringbuffer_free(src) == 0))
+ return -EWOULDBLOCK;
+
+ ret = 0;
+ for (todo = count; todo > 0; todo -= ret) {
+ ret = wait_event_interruptible(src->queue,
+ (dvb_ringbuffer_free(src)) ||
+ !src->data || !cmdbuf->data ||
+ (src->error != 0) || dmxdev->dvr_in_exit);
+
+ if (ret < 0)
+ return ret;
+
+ if (mutex_lock_interruptible(&dmxdev->mutex))
+ return -ERESTARTSYS;
+
+ if ((!src->data) || (!cmdbuf->data)) {
+ mutex_unlock(&dmxdev->mutex);
+ return 0;
+ }
+
+ if (dmxdev->exit || dmxdev->dvr_in_exit) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ENODEV;
+ }
+
+ if (src->error) {
+ ret = src->error;
+ dvb_ringbuffer_flush(src);
+ mutex_unlock(&dmxdev->mutex);
+ wake_up_all(&src->queue);
+ return ret;
+ }
+
+ free_space = dvb_ringbuffer_free(src);
+
+ if (free_space > todo)
+ free_space = todo;
+
+ ret = dvb_ringbuffer_write_user(src, buf, free_space);
+
+ if (ret < 0) {
+ mutex_unlock(&dmxdev->mutex);
+ return ret;
+ }
+
+ buf += ret;
+
+ dvb_dvr_queue_data_feed(dmxdev, ret);
+
+ mutex_unlock(&dmxdev->mutex);
+ }
+
+ return (count - todo) ? (count - todo) : ret;
+}
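
A minimal user-space sketch of this write path (device path illustrative; opening the DVR device for write selects the input buffer):

#include <fcntl.h>
#include <unistd.h>

/* Sketch: feed TS packets into the DVR input buffer. */
static ssize_t example_feed(const void *ts_packets, size_t len)
{
	int fd = open("/dev/dvb/adapter0/dvr0", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;

	n = write(fd, ts_packets, len); /* queues DVR_DATA_FEED_CMDs */
	close(fd);
	return n;
}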
+
+static int dvb_dmxdev_flush_data(struct dmxdev_filter *filter, size_t length)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ struct dvb_ringbuffer *buffer = &filter->buffer;
+ struct dmxdev_events_queue *events = &filter->events;
+
+ if (filter->type == DMXDEV_TYPE_PES &&
+ filter->params.pes.output == DMX_OUT_TS_TAP) {
+ buffer = &filter->dev->dvr_buffer;
+ events = &filter->dev->dvr_output_events;
+ }
+
+ /*
+ * Drop 'length' pending data bytes from the ringbuffer and update
+ * event queue accordingly, similarly to dvb_dmxdev_release_data().
+ */
+ spin_lock_irqsave(&filter->dev->lock, flags);
+ DVB_RINGBUFFER_SKIP(buffer, length);
+ buffer->error = 0;
+ dvb_dmxdev_flush_events(events);
+ events->current_event_start_offset = buffer->pwrite;
+ spin_unlock_irqrestore(&filter->dev->lock, flags);
+
+ if (filter->type == DMXDEV_TYPE_PES) {
+ struct dmxdev_feed *feed;
+
+ feed = list_first_entry(&filter->feed.ts,
+ struct dmxdev_feed, next);
+
+ if (feed->ts->flush_buffer)
+ return feed->ts->flush_buffer(feed->ts, length);
+ } else if (filter->type == DMXDEV_TYPE_SEC &&
+ filter->feed.sec.feed->flush_buffer) {
+ return filter->feed.sec.feed->flush_buffer(
+ filter->feed.sec.feed, length);
+ }
+
+ return ret;
+}
+
+static inline void dvb_dmxdev_auto_flush_buffer(struct dmxdev_filter *filter,
+ struct dvb_ringbuffer *buf)
+{
+ size_t flush_len;
+
+ /*
+	 * When the buffer overflows, demux-dev marks the buffer as
+	 * being in an error state. If auto-flush is enabled, discard
+	 * the pending data in the buffer.
+ */
+ if (overflow_auto_flush) {
+ flush_len = dvb_ringbuffer_avail(buf);
+ dvb_dmxdev_flush_data(filter, flush_len);
+ }
+}
+
static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
+ ssize_t res;
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
+ unsigned long flags;
if (dmxdev->exit)
return -ENODEV;
- return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
- file->f_flags & O_NONBLOCK,
- buf, count, ppos);
+ if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags,
+ dmxdev->dvr_buffer.size))
+ return -EINVAL;
+
+ res = dvb_dmxdev_buffer_read(NULL, &dmxdev->dvr_buffer,
+ file->f_flags & O_NONBLOCK,
+ buf, count, ppos);
+
+ if (res > 0) {
+ dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, res);
+ spin_lock_irqsave(&dmxdev->lock, flags);
+ dvb_dmxdev_update_events(&dmxdev->dvr_output_events, res);
+ spin_unlock_irqrestore(&dmxdev->lock, flags);
+
+ /*
+		 * in PULL mode we might be stalled on the
+		 * event queue, so wake up any waiters
+ */
+ if (dmxdev->playback_mode == DMX_PB_MODE_PULL)
+ wake_up_all(&dmxdev->dvr_buffer.queue);
+ } else if (res == -EOVERFLOW) {
+ dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed,
+ &dmxdev->dvr_buffer);
+ }
+
+ return res;
+}
+
+/*
+ * dvb_dvr_push_oob_cmd
+ *
+ * Note: this function assumes dmxdev->mutex was taken, so the command
+ * buffer cannot be released during its operation.
+ */
+static int dvb_dvr_push_oob_cmd(struct dmxdev *dmxdev, unsigned int f_flags,
+ struct dmx_oob_command *cmd)
+{
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ struct dvr_command *dvr_cmd;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY ||
+ dmxdev->source < DMX_SOURCE_DVR0)
+ return -EPERM;
+
+ if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd))
+ return -ENOMEM;
+
+ dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
+ dvr_cmd->type = DVR_OOB_CMD;
+ dvr_cmd->cmd.oobcmd = *cmd;
+ DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
+ wake_up_all(&cmdbuf->queue);
+
+ return 0;
+}
+
+static int dvb_dvr_flush_buffer(struct dmxdev *dmxdev, unsigned int f_flags)
+{
+ size_t flush_len;
+ int ret;
+
+ if ((f_flags & O_ACCMODE) != O_RDONLY)
+ return -EINVAL;
+
+ flush_len = dvb_ringbuffer_avail(&dmxdev->dvr_buffer);
+ ret = dvb_dmxdev_flush_data(dmxdev->dvr_feed, flush_len);
+
+ return ret;
}
static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
- unsigned long size)
+ unsigned int f_flags,
+ unsigned long size)
{
- struct dvb_ringbuffer *buf = &dmxdev->dvr_buffer;
+ struct dvb_ringbuffer *buf;
void *newmem;
void *oldmem;
-
- dprintk("function : %s\n", __func__);
+ spinlock_t *lock;
+ enum dmx_buffer_mode buffer_mode;
+
+ pr_debug("function : %s\n", __func__);
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY) {
+ buf = &dmxdev->dvr_buffer;
+ lock = &dmxdev->lock;
+ buffer_mode = dmxdev->dvr_buffer_mode;
+ } else {
+ buf = &dmxdev->dvr_input_buffer;
+ lock = &dmxdev->dvr_in_lock;
+ buffer_mode = dmxdev->dvr_input_buffer_mode;
+ }
if (buf->size == size)
return 0;
- if (!size)
+ if (!size || (buffer_mode == DMX_BUFFER_MODE_EXTERNAL))
return -EINVAL;
- newmem = vmalloc(size);
+ newmem = vmalloc_user(size);
if (!newmem)
return -ENOMEM;
oldmem = buf->data;
- spin_lock_irq(&dmxdev->lock);
+ spin_lock_irq(lock);
+
+ if (((f_flags & O_ACCMODE) != O_RDONLY) &&
+ (dmxdev->dvr_processing_input)) {
+ spin_unlock_irq(lock);
+ vfree(oldmem);
+ return -EBUSY;
+ }
+
buf->data = newmem;
buf->size = size;
/* reset and not flush in case the buffer shrinks */
dvb_ringbuffer_reset(buf);
- spin_unlock_irq(&dmxdev->lock);
+
+ spin_unlock_irq(lock);
vfree(oldmem);
return 0;
}
+static int dvb_dvr_set_buffer_mode(struct dmxdev *dmxdev,
+ unsigned int f_flags, enum dmx_buffer_mode mode)
+{
+ struct dvb_ringbuffer *buf;
+ spinlock_t *lock;
+ enum dmx_buffer_mode *buffer_mode;
+ void **buff_handle;
+ void *oldmem;
+ int *is_protected;
+
+ if ((mode != DMX_BUFFER_MODE_INTERNAL) &&
+ (mode != DMX_BUFFER_MODE_EXTERNAL))
+ return -EINVAL;
+
+ if ((mode == DMX_BUFFER_MODE_EXTERNAL) &&
+ (!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer))
+ return -EINVAL;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY) {
+ buf = &dmxdev->dvr_buffer;
+ lock = &dmxdev->lock;
+ buffer_mode = &dmxdev->dvr_buffer_mode;
+ buff_handle = &dmxdev->dvr_priv_buff_handle;
+ is_protected = NULL;
+ } else {
+ buf = &dmxdev->dvr_input_buffer;
+ lock = &dmxdev->dvr_in_lock;
+ buffer_mode = &dmxdev->dvr_input_buffer_mode;
+ buff_handle = &dmxdev->demux->dvr_input.priv_handle;
+ is_protected = &dmxdev->demux->dvr_input_protected;
+ }
+
+ if (mode == *buffer_mode)
+ return 0;
+
+ oldmem = buf->data;
+ spin_lock_irq(lock);
+ buf->data = NULL;
+ spin_unlock_irq(lock);
+
+ *buffer_mode = mode;
+
+ if (mode == DMX_BUFFER_MODE_INTERNAL) {
+ /* switched from external to internal */
+ if (*buff_handle) {
+ dmxdev->demux->unmap_buffer(dmxdev->demux,
+ *buff_handle);
+ *buff_handle = NULL;
+ }
+
+ if (is_protected)
+ *is_protected = 0;
+
+ /* set default internal buffer */
+ dvb_dvr_set_buffer_size(dmxdev, f_flags, DVR_BUFFER_SIZE);
+ } else if (oldmem) {
+ /* switched from internal to external */
+ vfree(oldmem);
+ }
+
+ return 0;
+}
+
+static int dvb_dvr_set_buffer(struct dmxdev *dmxdev,
+ unsigned int f_flags, struct dmx_buffer *dmx_buffer)
+{
+ struct dvb_ringbuffer *buf;
+ spinlock_t *lock;
+ enum dmx_buffer_mode buffer_mode;
+ void **buff_handle;
+ void *newmem;
+ void *oldmem;
+ int *is_protected;
+ struct dmx_caps caps;
+
+ if (dmxdev->demux->get_caps)
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+ else
+ caps.caps = 0;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY) {
+ buf = &dmxdev->dvr_buffer;
+ lock = &dmxdev->lock;
+ buffer_mode = dmxdev->dvr_buffer_mode;
+ buff_handle = &dmxdev->dvr_priv_buff_handle;
+ is_protected = NULL;
+ } else {
+ buf = &dmxdev->dvr_input_buffer;
+ lock = &dmxdev->dvr_in_lock;
+ buffer_mode = dmxdev->dvr_input_buffer_mode;
+ buff_handle = &dmxdev->demux->dvr_input.priv_handle;
+ is_protected = &dmxdev->demux->dvr_input_protected;
+ if (!(caps.caps & DMX_CAP_SECURED_INPUT_PLAYBACK) &&
+ dmx_buffer->is_protected)
+ return -EINVAL;
+ }
+
+ if (!dmx_buffer->size ||
+ (buffer_mode == DMX_BUFFER_MODE_INTERNAL))
+ return -EINVAL;
+
+ oldmem = *buff_handle;
+
+ /*
+	 * A protected buffer is relevant only for the DVR input buffer
+	 * when the DVR device is opened for write. In that case, the
+	 * buffer is mapped only if it is not a protected one.
+ */
+ if (!is_protected || !dmx_buffer->is_protected) {
+ if (dmxdev->demux->map_buffer(dmxdev->demux, dmx_buffer,
+ buff_handle, &newmem))
+ return -ENOMEM;
+ } else {
+ newmem = NULL;
+ *buff_handle = NULL;
+ }
+
+ spin_lock_irq(lock);
+ buf->data = newmem;
+ buf->size = dmx_buffer->size;
+ if (is_protected)
+ *is_protected = dmx_buffer->is_protected;
+ dvb_ringbuffer_reset(buf);
+ spin_unlock_irq(lock);
+
+ if (oldmem)
+ dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem);
+
+ return 0;
+}
+
+static int dvb_dvr_get_event(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ struct dmx_filter_event *event)
+{
+	int res = 0;
+
+ if (!((f_flags & O_ACCMODE) == O_RDONLY))
+ return -EINVAL;
+
+ spin_lock_irq(&dmxdev->lock);
+
+ if (dmxdev->dvr_buffer.error == -EOVERFLOW) {
+ event->type = DMX_EVENT_BUFFER_OVERFLOW;
+ dmxdev->dvr_buffer.error = 0;
+ } else {
+ res = dvb_dmxdev_remove_event(&dmxdev->dvr_output_events,
+ event);
+ if (res) {
+ spin_unlock_irq(&dmxdev->lock);
+ return res;
+ }
+ }
+
+ spin_unlock_irq(&dmxdev->lock);
+
+ if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
+ dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed,
+ &dmxdev->dvr_buffer);
+
+ /*
+	 * in PULL mode we might be stalled on the
+	 * event queue, so wake up any waiters
+ */
+ if (dmxdev->playback_mode == DMX_PB_MODE_PULL)
+ wake_up_all(&dmxdev->dvr_buffer.queue);
+
+ return res;
+}
+
+static int dvb_dvr_get_buffer_status(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ struct dmx_buffer_status *dmx_buffer_status)
+{
+ struct dvb_ringbuffer *buf;
+ spinlock_t *lock;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY) {
+ buf = &dmxdev->dvr_buffer;
+ lock = &dmxdev->lock;
+ } else {
+ buf = &dmxdev->dvr_input_buffer;
+ lock = &dmxdev->dvr_in_lock;
+ }
+
+ spin_lock_irq(lock);
+
+ dmx_buffer_status->error = buf->error;
+ dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
+ dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
+ dmx_buffer_status->read_offset = buf->pread;
+ dmx_buffer_status->write_offset = buf->pwrite;
+ dmx_buffer_status->size = buf->size;
+ buf->error = 0;
+
+ spin_unlock_irq(lock);
+
+ if (dmx_buffer_status->error == -EOVERFLOW)
+ dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed, buf);
+
+ return 0;
+}
+
+static int dvb_dvr_release_data(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ u32 bytes_count)
+{
+ ssize_t buff_fullness;
+
+ if (!((f_flags & O_ACCMODE) == O_RDONLY))
+ return -EINVAL;
+
+ if (!bytes_count)
+ return 0;
+
+ buff_fullness = dvb_ringbuffer_avail(&dmxdev->dvr_buffer);
+
+ if (bytes_count > buff_fullness)
+ return -EINVAL;
+
+ DVB_RINGBUFFER_SKIP(&dmxdev->dvr_buffer, bytes_count);
+
+ dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, bytes_count);
+ spin_lock_irq(&dmxdev->lock);
+ dvb_dmxdev_update_events(&dmxdev->dvr_output_events, bytes_count);
+ spin_unlock_irq(&dmxdev->lock);
+
+ wake_up_all(&dmxdev->dvr_buffer.queue);
+ return 0;
+}
+
+/*
+ * dvb_dvr_feed_data - Notify new data in DVR input buffer
+ *
+ * @dmxdev - demux device instance
+ * @f_flags - demux device file flag (access mode)
+ * @bytes_count - how many bytes were written to the input buffer
+ *
+ * Note: this function assumes dmxdev->mutex was taken, so the buffer
+ * cannot be released during its operation.
+ */
+static int dvb_dvr_feed_data(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ u32 bytes_count)
+{
+ ssize_t free_space;
+ struct dvb_ringbuffer *buffer = &dmxdev->dvr_input_buffer;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY)
+ return -EINVAL;
+
+ if (!bytes_count)
+ return 0;
+
+ free_space = dvb_ringbuffer_free(buffer);
+
+ if (bytes_count > free_space)
+ return -EINVAL;
+
+ DVB_RINGBUFFER_PUSH(buffer, bytes_count);
+
+ dvb_dvr_queue_data_feed(dmxdev, bytes_count);
+
+ return 0;
+}
+
static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
*dmxdevfilter, int state)
{
@@ -301,12 +1763,13 @@ static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
if (buf->size == size)
return 0;
- if (!size)
+ if (!size ||
+ (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL))
return -EINVAL;
if (dmxdevfilter->state >= DMXDEV_STATE_GO)
return -EBUSY;
- newmem = vmalloc(size);
+ newmem = vmalloc_user(size);
if (!newmem)
return -ENOMEM;
@@ -325,15 +1788,803 @@ static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
return 0;
}
+static int dvb_dmxdev_set_buffer_mode(struct dmxdev_filter *dmxdevfilter,
+ enum dmx_buffer_mode mode)
+{
+ struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
+ struct dmxdev *dmxdev = dmxdevfilter->dev;
+ void *oldmem;
+
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ if ((mode != DMX_BUFFER_MODE_INTERNAL) &&
+ (mode != DMX_BUFFER_MODE_EXTERNAL))
+ return -EINVAL;
+
+ if ((mode == DMX_BUFFER_MODE_EXTERNAL) &&
+ (!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer))
+ return -EINVAL;
+
+ if (mode == dmxdevfilter->buffer_mode)
+ return 0;
+
+ oldmem = buf->data;
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ buf->data = NULL;
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ dmxdevfilter->buffer_mode = mode;
+
+ if (mode == DMX_BUFFER_MODE_INTERNAL) {
+ /* switched from external to internal */
+ if (dmxdevfilter->priv_buff_handle) {
+ dmxdev->demux->unmap_buffer(dmxdev->demux,
+ dmxdevfilter->priv_buff_handle);
+ dmxdevfilter->priv_buff_handle = NULL;
+ }
+ } else if (oldmem) {
+ /* switched from internal to external */
+ vfree(oldmem);
+ }
+
+ return 0;
+}
+
+static int dvb_dmxdev_set_buffer(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_buffer *buffer)
+{
+ struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
+ struct dmxdev *dmxdev = dmxdevfilter->dev;
+ void *newmem;
+ void *oldmem;
+
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ if ((!buffer->size) ||
+ (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL))
+ return -EINVAL;
+
+ oldmem = dmxdevfilter->priv_buff_handle;
+ if (dmxdev->demux->map_buffer(dmxdev->demux, buffer,
+ &dmxdevfilter->priv_buff_handle, &newmem))
+ return -ENOMEM;
+
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ buf->data = newmem;
+ buf->size = buffer->size;
+ dvb_ringbuffer_reset(buf);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ if (oldmem)
+ dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem);
+
+ return 0;
+}
+
+static int dvb_dmxdev_set_tsp_out_format(struct dmxdev_filter *dmxdevfilter,
+ enum dmx_tsp_format_t dmx_tsp_format)
+{
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ if ((dmx_tsp_format > DMX_TSP_FORMAT_192_HEAD) ||
+ (dmx_tsp_format < DMX_TSP_FORMAT_188))
+ return -EINVAL;
+
+ dmxdevfilter->dmx_tsp_format = dmx_tsp_format;
+
+ return 0;
+}
+
+static int dvb_dmxdev_set_decoder_buffer_size(
+ struct dmxdev_filter *dmxdevfilter,
+ unsigned long size)
+{
+ struct dmx_caps caps;
+ struct dmx_demux *demux = dmxdevfilter->dev->demux;
+
+ if (demux->get_caps) {
+ demux->get_caps(demux, &caps);
+ if (!dvb_dmxdev_verify_buffer_size(size, caps.decoder.max_size,
+ caps.decoder.size_alignment))
+ return -EINVAL;
+ }
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (dmxdevfilter->decoder_buffers.buffers_size == size)
+ return 0;
+
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ /*
+ * If decoder buffers were previously set to external buffers,
+ * setting the decoder buffer size alone implies a transition
+ * back to internal buffer mode.
+ */
+ dmxdevfilter->decoder_buffers.buffers_size = size;
+ dmxdevfilter->decoder_buffers.buffers_num = 0;
+ dmxdevfilter->decoder_buffers.is_linear = 0;
+ return 0;
+}
+
+static int dvb_dmxdev_set_source(struct dmxdev_filter *dmxdevfilter,
+ dmx_source_t *source)
+{
+ int ret = 0;
+ struct dmxdev *dev;
+
+ if (dmxdevfilter->state == DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ dev = dmxdevfilter->dev;
+ if (dev->demux->set_source)
+ ret = dev->demux->set_source(dev->demux, source);
+
+ if (!ret)
+ dev->source = *source;
+
+ return ret;
+}
+
+static int dvb_dmxdev_reuse_decoder_buf(struct dmxdev_filter *dmxdevfilter,
+ int cookie)
+{
+ struct dmxdev_feed *feed;
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO ||
+ (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+ (dmxdevfilter->params.pes.output != DMX_OUT_DECODER) ||
+ (dmxdevfilter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_ES_DATA))
+ return -EPERM;
+
+ /* Only one feed should be in the list in case of decoder */
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+ if (feed && feed->ts && feed->ts->reuse_decoder_buffer)
+ return feed->ts->reuse_decoder_buffer(feed->ts, cookie);
+
+ return -ENODEV;
+}
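The cookie passed here is the one reported to userspace in a DMX_EVENT_NEW_ES_DATA event; returning it lets the decoder path reuse the buffer. A hedged sketch of the userspace side, with the ioctl name and direct-argument convention assumed from this series:

#include <sys/ioctl.h>
#include <linux/dmx.h>

/*
 * Sketch only: after consuming an ES buffer reported via
 * DMX_EVENT_NEW_ES_DATA, return it to the demux so the decoder
 * path can reuse it. Fails with -EPERM unless the filter is a
 * running decoder filter with ES-data events enabled.
 */
static int release_es_buffer(int dmx_fd, struct dmx_filter_event *ev)
{
	int cookie = ev->params.es_data.cookie;

	return ioctl(dmx_fd, DMX_REUSE_DECODER_BUFFER, cookie);
}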
+
+static int dvb_dmxdev_set_event_mask(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_events_mask *event_mask)
+{
+ if (!event_mask ||
+ (event_mask->wakeup_threshold >= DMX_EVENT_QUEUE_SIZE))
+ return -EINVAL;
+
+ if (dmxdevfilter->state == DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ /*
+ * The overflow event is not allowed to be masked. If an overflow
+ * occurs, the demux stops outputting data until the user is
+ * notified. If the user relies on events to read the data, the
+ * overflow event must therefore always be enabled; otherwise
+ * there would be no way to recover from the overflow state.
+ */
+ event_mask->disable_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
+ event_mask->no_wakeup_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
+
+ dmxdevfilter->events.event_mask = *event_mask;
+
+ return 0;
+}
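Because the driver force-clears the overflow bit from both masks, userspace cannot accidentally mask it. A sketch of configuring the mask, assuming the DMX_SET_EVENTS_MASK ioctl added by this series:

#include <sys/ioctl.h>
#include <linux/dmx.h>

/*
 * Sketch only: disable PCR events and wake the reader only once
 * two events are pending. DMX_EVENT_BUFFER_OVERFLOW stays enabled
 * regardless of what is requested here (the driver clears it from
 * both masks).
 */
static int quiet_pcr_events(int dmx_fd)
{
	struct dmx_events_mask mask = {
		.disable_mask = DMX_EVENT_NEW_PCR,
		.no_wakeup_mask = 0,
		.wakeup_threshold = 2,	/* must be < DMX_EVENT_QUEUE_SIZE */
	};

	return ioctl(dmx_fd, DMX_SET_EVENTS_MASK, &mask);
}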
+
+static int dvb_dmxdev_get_event_mask(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_events_mask *event_mask)
+{
+ if (!event_mask)
+ return -EINVAL;
+
+ *event_mask = dmxdevfilter->events.event_mask;
+
+ return 0;
+}
+
+static int dvb_dmxdev_set_indexing_params(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_indexing_params *idx_params)
+{
+ int found_pid;
+ struct dmxdev_feed *feed;
+ struct dmxdev_feed *ts_feed = NULL;
+ struct dmx_caps caps;
+ int ret = 0;
+
+ if (!dmxdevfilter->dev->demux->get_caps)
+ return -EINVAL;
+
+ dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
+
+ if (!idx_params ||
+ !(caps.caps & DMX_CAP_VIDEO_INDEXING) ||
+ (dmxdevfilter->state < DMXDEV_STATE_SET) ||
+ (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+ ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
+ (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
+ return -EINVAL;
+
+ if (idx_params->enable && !idx_params->types)
+ return -EINVAL;
+
+ found_pid = 0;
+ list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
+ if (feed->pid == idx_params->pid) {
+ found_pid = 1;
+ ts_feed = feed;
+ ts_feed->idx_params = *idx_params;
+ if ((dmxdevfilter->state == DMXDEV_STATE_GO) &&
+ ts_feed->ts->set_idx_params)
+ ret = ts_feed->ts->set_idx_params(
+ ts_feed->ts, idx_params);
+ break;
+ }
+ }
+
+ if (!found_pid)
+ return -EINVAL;
+
+ return ret;
+}
+
+static int dvb_dmxdev_get_scrambling_bits(struct dmxdev_filter *filter,
+ struct dmx_scrambling_bits *scrambling_bits)
+{
+ struct dmxdev_feed *feed;
+
+ if (!scrambling_bits ||
+ (filter->state != DMXDEV_STATE_GO))
+ return -EINVAL;
+
+ if (filter->type == DMXDEV_TYPE_SEC) {
+ if (filter->feed.sec.feed->get_scrambling_bits)
+ return filter->feed.sec.feed->get_scrambling_bits(
+ filter->feed.sec.feed,
+ &scrambling_bits->value);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(feed, &filter->feed.ts, next) {
+ if (feed->pid == scrambling_bits->pid) {
+ if (feed->ts->get_scrambling_bits)
+ return feed->ts->get_scrambling_bits(feed->ts,
+ &scrambling_bits->value);
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
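A sketch of querying the per-PID scrambling-control bits from userspace, assuming the DMX_GET_SCRAMBLING_BITS ioctl that pairs with this handler:

#include <sys/ioctl.h>
#include <linux/dmx.h>

/*
 * Sketch only: query the TS scrambling-control bits last seen on
 * a PID of a running filter. The filter must be in the GO state,
 * otherwise the driver returns -EINVAL.
 */
static int get_scrambling(int dmx_fd, __u16 pid, __u32 *value)
{
	struct dmx_scrambling_bits sb = { .pid = pid };

	if (ioctl(dmx_fd, DMX_GET_SCRAMBLING_BITS, &sb))
		return -1;
	*value = sb.value;
	return 0;
}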
+
+static void dvb_dmxdev_ts_insertion_work(struct work_struct *worker)
+{
+ struct ts_insertion_buffer *ts_buffer =
+ container_of(to_delayed_work(worker),
+ struct ts_insertion_buffer, dwork);
+ struct dmxdev_feed *feed;
+ size_t free_bytes;
+ struct dmx_ts_feed *ts;
+
+ mutex_lock(&ts_buffer->dmxdevfilter->mutex);
+
+ if (ts_buffer->abort ||
+ (ts_buffer->dmxdevfilter->state != DMXDEV_STATE_GO)) {
+ mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
+ return;
+ }
+
+ feed = list_first_entry(&ts_buffer->dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+ ts = feed->ts;
+ free_bytes = dvb_ringbuffer_free(&ts_buffer->dmxdevfilter->buffer);
+
+ mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
+
+ if (ts_buffer->size < free_bytes)
+ ts->ts_insertion_insert_buffer(ts,
+ ts_buffer->buffer, ts_buffer->size);
+
+ if (ts_buffer->repetition_time && !ts_buffer->abort)
+ schedule_delayed_work(&ts_buffer->dwork,
+ msecs_to_jiffies(ts_buffer->repetition_time));
+}
+
+static void dvb_dmxdev_queue_ts_insertion(
+ struct ts_insertion_buffer *ts_buffer)
+{
+ size_t tsp_size;
+
+ if (ts_buffer->dmxdevfilter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+ tsp_size = 188;
+ else
+ tsp_size = 192;
+
+ if (ts_buffer->size % tsp_size) {
+ pr_err("%s: Wrong buffer alignment, size=%zu, tsp_size=%zu\n",
+ __func__, ts_buffer->size, tsp_size);
+ return;
+ }
+
+ ts_buffer->abort = 0;
+ schedule_delayed_work(&ts_buffer->dwork, 0);
+}
+
+static void dvb_dmxdev_cancel_ts_insertion(
+ struct ts_insertion_buffer *ts_buffer)
+{
+ /*
+ * This function assumes it is called with the demux filter's
+ * mutex held. Since the work queued on the workqueue also takes
+ * the filter's mutex to protect the filter's data, the mutex
+ * must be released before waiting for the work to finish;
+ * otherwise the queued work would never complete.
+ */
+ if (!mutex_is_locked(&ts_buffer->dmxdevfilter->mutex)) {
+ pr_err("%s: mutex is not locked!\n", __func__);
+ return;
+ }
+
+ ts_buffer->abort = 1;
+
+ mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
+ cancel_delayed_work_sync(&ts_buffer->dwork);
+ mutex_lock(&ts_buffer->dmxdevfilter->mutex);
+}
+
+static int dvb_dmxdev_set_ts_insertion(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_set_ts_insertion *params)
+{
+ int ret = 0;
+ int first_buffer;
+ struct dmxdev_feed *feed;
+ struct ts_insertion_buffer *ts_buffer;
+ struct dmx_caps caps;
+
+ if (!dmxdevfilter->dev->demux->get_caps)
+ return -EINVAL;
+
+ dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
+
+ if (!params ||
+ !params->size ||
+ !(caps.caps & DMX_CAP_TS_INSERTION) ||
+ (dmxdevfilter->state < DMXDEV_STATE_SET) ||
+ (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+ ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
+ (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
+ return -EINVAL;
+
+ ts_buffer = vmalloc(sizeof(struct ts_insertion_buffer));
+ if (!ts_buffer)
+ return -ENOMEM;
+
+ ts_buffer->buffer = vmalloc(params->size);
+ if (!ts_buffer->buffer) {
+ vfree(ts_buffer);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(ts_buffer->buffer,
+ params->ts_packets, params->size)) {
+ vfree(ts_buffer->buffer);
+ vfree(ts_buffer);
+ return -EFAULT;
+ }
+
+ if (params->repetition_time &&
+ params->repetition_time < DMX_MIN_INSERTION_REPETITION_TIME)
+ params->repetition_time = DMX_MIN_INSERTION_REPETITION_TIME;
+
+ ts_buffer->size = params->size;
+ ts_buffer->identifier = params->identifier;
+ ts_buffer->repetition_time = params->repetition_time;
+ ts_buffer->dmxdevfilter = dmxdevfilter;
+ INIT_DELAYED_WORK(&ts_buffer->dwork, dvb_dmxdev_ts_insertion_work);
+
+ first_buffer = list_empty(&dmxdevfilter->insertion_buffers);
+ list_add_tail(&ts_buffer->next, &dmxdevfilter->insertion_buffers);
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO)
+ return 0;
+
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+
+ if (first_buffer && feed->ts->ts_insertion_init)
+ ret = feed->ts->ts_insertion_init(feed->ts);
+
+ if (!ret) {
+ dvb_dmxdev_queue_ts_insertion(ts_buffer);
+ } else {
+ list_del(&ts_buffer->next);
+ vfree(ts_buffer->buffer);
+ vfree(ts_buffer);
+ }
+
+ return ret;
+}
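Insertion buffers must hold whole TS packets (188 or 192 bytes, depending on the filter's output format) and are re-queued every repetition_time milliseconds. A sketch of setting one up and later aborting it; the ioctl names and struct layouts are assumptions consistent with this series:

#include <sys/ioctl.h>
#include <linux/dmx.h>

/*
 * Sketch only: periodically insert a single null TS packet into a
 * recording filter. The buffer size must be a multiple of the TS
 * packet size, and repetition_time is clamped upward by the driver
 * to DMX_MIN_INSERTION_REPETITION_TIME.
 */
static int insert_null_packets(int dmx_fd)
{
	static __u8 null_pkt[188] = { 0x47, 0x1f, 0xff, 0x10 };
	struct dmx_set_ts_insertion ins = {
		.identifier = 1,
		.repetition_time = 100,	/* ms */
		.ts_packets = null_pkt,
		.size = sizeof(null_pkt),
	};
	struct dmx_abort_ts_insertion stop = { .identifier = 1 };
	int ret;

	ret = ioctl(dmx_fd, DMX_SET_TS_INSERTION, &ins);
	if (ret)
		return ret;
	/* ... later, stop the periodic insertion ... */
	return ioctl(dmx_fd, DMX_ABORT_TS_INSERTION, &stop);
}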
+
+static int dvb_dmxdev_abort_ts_insertion(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_abort_ts_insertion *params)
+{
+ int ret = 0;
+ int found_buffer;
+ struct dmxdev_feed *feed;
+ struct ts_insertion_buffer *ts_buffer, *tmp;
+ struct dmx_caps caps;
+
+ if (!dmxdevfilter->dev->demux->get_caps)
+ return -EINVAL;
+
+ dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
+
+ if (!params ||
+ !(caps.caps & DMX_CAP_TS_INSERTION) ||
+ (dmxdevfilter->state < DMXDEV_STATE_SET) ||
+ (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+ ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
+ (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
+ return -EINVAL;
+
+ found_buffer = 0;
+ list_for_each_entry_safe(ts_buffer, tmp,
+ &dmxdevfilter->insertion_buffers, next) {
+ if (ts_buffer->identifier == params->identifier) {
+ list_del(&ts_buffer->next);
+ found_buffer = 1;
+ break;
+ }
+ }
+
+ if (!found_buffer)
+ return -EINVAL;
+
+ if (dmxdevfilter->state == DMXDEV_STATE_GO) {
+ dvb_dmxdev_cancel_ts_insertion(ts_buffer);
+ if (list_empty(&dmxdevfilter->insertion_buffers)) {
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+ if (feed->ts->ts_insertion_terminate)
+ ret = feed->ts->ts_insertion_terminate(
+ feed->ts);
+ }
+ }
+
+ vfree(ts_buffer->buffer);
+ vfree(ts_buffer);
+
+ return ret;
+}
+
+static int dvb_dmxdev_ts_fullness_callback(struct dmx_ts_feed *filter,
+ int required_space, int wait)
+{
+ struct dmxdev_filter *dmxdevfilter = filter->priv;
+ struct dvb_ringbuffer *src;
+ struct dmxdev_events_queue *events;
+ int ret;
+
+ if (!dmxdevfilter) {
+ pr_err("%s: NULL demux filter object!\n", __func__);
+ return -ENODEV;
+ }
+
+ if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
+ src = &dmxdevfilter->buffer;
+ events = &dmxdevfilter->events;
+ } else {
+ src = &dmxdevfilter->dev->dvr_buffer;
+ events = &dmxdevfilter->dev->dvr_output_events;
+ }
+
+ do {
+ ret = 0;
+
+ if (dmxdevfilter->dev->dvr_in_exit)
+ return -ENODEV;
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if ((!src->data) ||
+ (dmxdevfilter->state != DMXDEV_STATE_GO))
+ ret = -EINVAL;
+ else if (src->error)
+ ret = src->error;
+
+ if (ret) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return ret;
+ }
+
+ if ((required_space <= dvb_ringbuffer_free(src)) &&
+ (!dvb_dmxdev_events_is_full(events))) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+
+ if (!wait)
+ return -ENOSPC;
+
+ ret = wait_event_interruptible(src->queue,
+ (!src->data) ||
+ ((dvb_ringbuffer_free(src) >= required_space) &&
+ (!dvb_dmxdev_events_is_full(events))) ||
+ (src->error != 0) ||
+ (dmxdevfilter->state != DMXDEV_STATE_GO) ||
+ dmxdevfilter->dev->dvr_in_exit);
+
+ if (ret < 0)
+ return ret;
+ } while (1);
+}
+
+static int dvb_dmxdev_sec_fullness_callback(
+ struct dmx_section_filter *filter,
+ int required_space, int wait)
+{
+ struct dmxdev_filter *dmxdevfilter = filter->priv;
+ struct dvb_ringbuffer *src;
+ struct dmxdev_events_queue *events;
+ int ret;
+
+ if (!dmxdevfilter) {
+ pr_err("%s: NULL demux filter object!\n", __func__);
+ return -ENODEV;
+ }
+
+ src = &dmxdevfilter->buffer;
+ events = &dmxdevfilter->events;
+
+ do {
+ ret = 0;
+
+ if (dmxdevfilter->dev->dvr_in_exit)
+ return -ENODEV;
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if ((!src->data) ||
+ (dmxdevfilter->state != DMXDEV_STATE_GO))
+ ret = -EINVAL;
+ else if (src->error)
+ ret = src->error;
+
+ if (ret) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return ret;
+ }
+
+ if ((required_space <= dvb_ringbuffer_free(src)) &&
+ (!dvb_dmxdev_events_is_full(events))) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+
+ if (!wait)
+ return -ENOSPC;
+
+ ret = wait_event_interruptible(src->queue,
+ (!src->data) ||
+ ((dvb_ringbuffer_free(src) >= required_space) &&
+ (!dvb_dmxdev_events_is_full(events))) ||
+ (src->error != 0) ||
+ (dmxdevfilter->state != DMXDEV_STATE_GO) ||
+ dmxdevfilter->dev->dvr_in_exit);
+
+ if (ret < 0)
+ return ret;
+ } while (1);
+}
+
+static int dvb_dmxdev_set_playback_mode(struct dmxdev_filter *dmxdevfilter,
+ enum dmx_playback_mode_t playback_mode)
+{
+ struct dmxdev *dmxdev = dmxdevfilter->dev;
+ struct dmx_caps caps;
+
+ if (dmxdev->demux->get_caps)
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+ else
+ caps.caps = 0;
+
+ if ((playback_mode != DMX_PB_MODE_PUSH) &&
+ (playback_mode != DMX_PB_MODE_PULL))
+ return -EINVAL;
+
+ if (((dmxdev->source < DMX_SOURCE_DVR0) ||
+ !dmxdev->demux->set_playback_mode ||
+ !(caps.caps & DMX_CAP_PULL_MODE)) &&
+ (playback_mode == DMX_PB_MODE_PULL))
+ return -EPERM;
+
+ if (dmxdevfilter->state == DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ dmxdev->playback_mode = playback_mode;
+
+ /* Demuxes without set_playback_mode support push mode only */
+ if (!dmxdev->demux->set_playback_mode)
+ return 0;
+
+ return dmxdev->demux->set_playback_mode(
+ dmxdev->demux,
+ dmxdev->playback_mode,
+ dvb_dmxdev_ts_fullness_callback,
+ dvb_dmxdev_sec_fullness_callback);
+}
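Pull mode only makes sense for DVR playback sources and requires DMX_CAP_PULL_MODE; in that mode the two fullness callbacks above block the demux until the reader frees buffer and event-queue space. A sketch of selecting it, with the ioctl name assumed from this series:

#include <sys/ioctl.h>
#include <linux/dmx.h>

/*
 * Sketch only: switch a DVR-fed demux to pull mode so the demux
 * back-pressures instead of overflowing. Must be issued while no
 * filter is in the GO state and after the demux source was set to
 * a DVR input.
 */
static int enable_pull_mode(int dmx_fd)
{
	enum dmx_playback_mode_t mode = DMX_PB_MODE_PULL;

	return ioctl(dmx_fd, DMX_SET_PLAYBACK_MODE, &mode);
}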
+
+static int dvb_dmxdev_flush_buffer(struct dmxdev_filter *filter)
+{
+ size_t flush_len;
+ int ret;
+
+ if (filter->state != DMXDEV_STATE_GO)
+ return -EINVAL;
+
+ flush_len = dvb_ringbuffer_avail(&filter->buffer);
+ ret = dvb_dmxdev_flush_data(filter, flush_len);
+
+ return ret;
+}
+
+static int dvb_dmxdev_get_buffer_status(
+ struct dmxdev_filter *dmxdevfilter,
+ struct dmx_buffer_status *dmx_buffer_status)
+{
+ struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
+
+ /*
+ * Note: Taking the dmxdevfilter->dev->lock spinlock is required
+ * only when getting the status of the demux-to-userspace data
+ * ringbuffer. When getting the status of a decoder buffer, taking
+ * this spinlock is not required and might in fact lead to a
+ * deadlock.
+ */
+ if ((dmxdevfilter->type == DMXDEV_TYPE_PES) &&
+ (dmxdevfilter->params.pes.output == DMX_OUT_DECODER)) {
+ struct dmxdev_feed *feed;
+ int ret;
+
+ /* Only one feed should be in the list in case of decoder */
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+
+ /* Ask for status of decoder's buffer from underlying HW */
+ if (feed->ts->get_decoder_buff_status)
+ ret = feed->ts->get_decoder_buff_status(
+ feed->ts,
+ dmx_buffer_status);
+ else
+ ret = -ENODEV;
+
+ return ret;
+ }
+
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+
+ if (!buf->data) {
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+ return -EINVAL;
+ }
+
+ dmx_buffer_status->error = buf->error;
+ dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
+ dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
+ dmx_buffer_status->read_offset = buf->pread;
+ dmx_buffer_status->write_offset = buf->pwrite;
+ dmx_buffer_status->size = buf->size;
+ buf->error = 0;
+
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ if (dmx_buffer_status->error == -EOVERFLOW)
+ dvb_dmxdev_auto_flush_buffer(dmxdevfilter, buf);
+
+ return 0;
+}
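A sketch of polling the output buffer from userspace, assuming the DMX_GET_BUFFER_STATUS ioctl; note that reading the status also clears a pending error, and a reported -EOVERFLOW triggers an automatic flush:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dmx.h>

/*
 * Sketch only: report how full a filter's output buffer is. For
 * decoder filters the numbers come from the underlying HW; for
 * regular filters they describe the demux-to-userspace ringbuffer.
 */
static int print_fullness(int dmx_fd)
{
	struct dmx_buffer_status st;

	if (ioctl(dmx_fd, DMX_GET_BUFFER_STATUS, &st))
		return -1;
	printf("used %u / %u bytes, last error %d\n",
	       (unsigned int)st.fullness, (unsigned int)st.size, st.error);
	return 0;
}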
+
+static int dvb_dmxdev_release_data(struct dmxdev_filter *dmxdevfilter,
+ u32 bytes_count)
+{
+ ssize_t buff_fullness;
+
+ if (!dmxdevfilter->buffer.data)
+ return -EINVAL;
+
+ if (!bytes_count)
+ return 0;
+
+ buff_fullness = dvb_ringbuffer_avail(&dmxdevfilter->buffer);
+
+ if (bytes_count > buff_fullness)
+ return -EINVAL;
+
+ DVB_RINGBUFFER_SKIP(&dmxdevfilter->buffer, bytes_count);
+
+ dvb_dmxdev_notify_data_read(dmxdevfilter, bytes_count);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_update_events(&dmxdevfilter->events, bytes_count);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ wake_up_all(&dmxdevfilter->buffer.queue);
+
+ return 0;
+}
+
+static int dvb_dmxdev_get_event(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_filter_event *event)
+{
+ int res = 0;
+
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+
+ /* Check first for filter overflow */
+ if (dmxdevfilter->buffer.error == -EOVERFLOW) {
+ event->type = DMX_EVENT_BUFFER_OVERFLOW;
+ } else {
+ res = dvb_dmxdev_remove_event(&dmxdevfilter->events, event);
+ if (res) {
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+ return res;
+ }
+ }
+
+ /* clear buffer error now that user was notified */
+ if (event->type == DMX_EVENT_BUFFER_OVERFLOW ||
+ event->type == DMX_EVENT_SECTION_TIMEOUT)
+ dmxdevfilter->buffer.error = 0;
+
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
+ dvb_dmxdev_auto_flush_buffer(dmxdevfilter,
+ &dmxdevfilter->buffer);
+
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+
+ /*
+ * If data events are disabled on this filter, events can be
+ * removed from the queue as soon as the user reads them.
+ * For filters with data events enabled, an event is removed
+ * from the queue only when its respective data is read.
+ */
+ if (event->type != DMX_EVENT_BUFFER_OVERFLOW &&
+ dmxdevfilter->events.data_read_event_masked)
+ dmxdevfilter->events.read_index =
+ dvb_dmxdev_advance_event_idx(
+ dmxdevfilter->events.read_index);
+
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ /*
+ * In PULL mode, the demux might be stalled waiting for space
+ * in the event queue, so waiters need to be woken up.
+ */
+ if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL)
+ wake_up_all(&dmxdevfilter->buffer.queue);
+
+ return res;
+}
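Putting the pieces together, an event-driven reader might loop as sketched below. DMX_GET_EVENT and DMX_RELEASE_DATA, and the convention of passing the byte count directly, are assumptions consistent with this patch:

#include <sys/ioctl.h>
#include <linux/dmx.h>

/*
 * Sketch only: event-driven read loop. With data events enabled,
 * an event stays queued until its data is consumed, so the reader
 * releases the bytes explicitly after processing them.
 */
static void event_loop(int dmx_fd)
{
	struct dmx_filter_event ev;

	for (;;) {
		if (ioctl(dmx_fd, DMX_GET_EVENT, &ev))
			break;	/* would block, or a real error */
		if (ev.type == DMX_EVENT_BUFFER_OVERFLOW)
			continue;	/* driver already flushed the buffer */
		if (ev.type == DMX_EVENT_NEW_SECTION)
			ioctl(dmx_fd, DMX_RELEASE_DATA,
			      ev.params.section.total_length);
	}
}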
+
static void dvb_dmxdev_filter_timeout(unsigned long data)
{
struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
+ struct dmx_filter_event event;
dmxdevfilter->buffer.error = -ETIMEDOUT;
spin_lock_irq(&dmxdevfilter->dev->lock);
dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
+ event.type = DMX_EVENT_SECTION_TIMEOUT;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
spin_unlock_irq(&dmxdevfilter->dev->lock);
- wake_up(&dmxdevfilter->buffer.queue);
+ wake_up_all(&dmxdevfilter->buffer.queue);
}
static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
@@ -352,68 +2603,519 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_section_filter *filter)
+ struct dmx_section_filter *filter,
+ enum dmx_success success)
{
struct dmxdev_filter *dmxdevfilter = filter->priv;
- int ret;
+ struct dmx_filter_event event;
+ ssize_t free;
- if (dmxdevfilter->buffer.error) {
- wake_up(&dmxdevfilter->buffer.queue);
- return 0;
+ if (!dmxdevfilter) {
+ pr_err("%s: null filter. status=%d\n", __func__, success);
+ return -EINVAL;
}
+
spin_lock(&dmxdevfilter->dev->lock);
- if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+
+ if (dmxdevfilter->buffer.error ||
+ dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ /* Discard section data if the event cannot be queued */
+ if (!(dmxdevfilter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_SECTION) &&
+ dvb_dmxdev_events_is_full(&dmxdevfilter->events)) {
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
+
+ if ((buffer1_len + buffer2_len) == 0) {
+ if (success == DMX_CRC_ERROR) {
+ /* Section was dropped due to CRC error */
+ event.type = DMX_EVENT_SECTION_CRC_ERROR;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ }
+
+ return 0;
+ }
+
+ event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
+
del_timer(&dmxdevfilter->timer);
- dprintk("dmxdev: section callback %*ph\n", 6, buffer1);
- ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
- buffer1_len);
- if (ret == buffer1_len) {
- ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
- buffer2_len);
+
+ /* Verify output buffer has sufficient space, or report overflow */
+ free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
+ if (free < (buffer1_len + buffer2_len)) {
+ pr_debug("%s: section filter overflow (pid=%u)\n",
+ __func__, dmxdevfilter->params.sec.pid);
+ dmxdevfilter->buffer.error = -EOVERFLOW;
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ return 0;
}
- if (ret < 0)
- dmxdevfilter->buffer.error = ret;
+
+ dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1, buffer1_len);
+ dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2, buffer2_len);
+
+ event.type = DMX_EVENT_NEW_SECTION;
+ event.params.section.total_length = buffer1_len + buffer2_len;
+ event.params.section.actual_length =
+ event.params.section.total_length;
+
+ if (success == DMX_MISSED_ERROR)
+ event.params.section.flags = DMX_FILTER_CC_ERROR;
+ else
+ event.params.section.flags = 0;
+
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
dmxdevfilter->state = DMXDEV_STATE_DONE;
spin_unlock(&dmxdevfilter->dev->lock);
- wake_up(&dmxdevfilter->buffer.queue);
+ wake_up_all(&dmxdevfilter->buffer.queue);
return 0;
}
static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_ts_feed *feed)
+ struct dmx_ts_feed *feed,
+ enum dmx_success success)
{
struct dmxdev_filter *dmxdevfilter = feed->priv;
struct dvb_ringbuffer *buffer;
- int ret;
+ struct dmxdev_events_queue *events;
+ struct dmx_filter_event event;
+ ssize_t free;
+
+ if (!dmxdevfilter) {
+ pr_err("%s: null filter (feed->is_filtering=%d) status=%d\n",
+ __func__, feed->is_filtering, success);
+ return -EINVAL;
+ }
spin_lock(&dmxdevfilter->dev->lock);
- if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER ||
+ dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
- if (dmxdevfilter->params.pes.output == DMX_OUT_TAP
- || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
buffer = &dmxdevfilter->buffer;
- else
+ events = &dmxdevfilter->events;
+ } else {
buffer = &dmxdevfilter->dev->dvr_buffer;
+ events = &dmxdevfilter->dev->dvr_output_events;
+ }
+
if (buffer->error) {
spin_unlock(&dmxdevfilter->dev->lock);
- wake_up(&buffer->queue);
+ wake_up_all(&buffer->queue);
+ return buffer->error;
+ }
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
+ if (success == DMX_OK && !events->current_event_data_size) {
+ events->current_event_start_offset = buffer->pwrite;
+ } else if (success == DMX_OK_PES_END) {
+ event.type = DMX_EVENT_NEW_PES;
+
+ event.params.pes.actual_length =
+ events->current_event_data_size;
+ event.params.pes.total_length =
+ events->current_event_data_size;
+
+ event.params.pes.base_offset =
+ events->current_event_start_offset;
+ event.params.pes.start_offset =
+ events->current_event_start_offset;
+
+ event.params.pes.flags = 0;
+ event.params.pes.stc = 0;
+ event.params.pes.transport_error_indicator_counter = 0;
+ event.params.pes.continuity_error_counter = 0;
+ event.params.pes.ts_packets_num = 0;
+
+ /* Do not report zero length PES */
+ if (event.params.pes.total_length)
+ dvb_dmxdev_add_event(events, &event);
+ events->current_event_data_size = 0;
+ }
+ } else if (!events->current_event_data_size) {
+ events->current_event_start_offset = buffer->pwrite;
+ }
+
+ /* Verify output buffer has sufficient space, or report overflow */
+ free = dvb_ringbuffer_free(buffer);
+ if (free < (buffer1_len + buffer2_len)) {
+ pr_debug("%s: buffer overflow error, pid=%u\n",
+ __func__, dmxdevfilter->params.pes.pid);
+ buffer->error = -EOVERFLOW;
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+
+ return -EOVERFLOW;
+ }
+
+ if (buffer1_len + buffer2_len) {
+ dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
+ dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
+
+ events->current_event_data_size += (buffer1_len + buffer2_len);
+
+ if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP ||
+ dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ && events->current_event_data_size >=
+ dmxdevfilter->params.pes.rec_chunk_size) {
+ event.type = DMX_EVENT_NEW_REC_CHUNK;
+ event.params.recording_chunk.offset =
+ events->current_event_start_offset;
+ event.params.recording_chunk.size =
+ events->current_event_data_size;
+
+ dvb_dmxdev_add_event(events, &event);
+ events->current_event_data_size = 0;
+ }
+ }
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+}
+
+static int dvb_dmxdev_section_event_cb(struct dmx_section_filter *filter,
+ struct dmx_data_ready *dmx_data_ready)
+{
+ int res = 0;
+ struct dmxdev_filter *dmxdevfilter = filter->priv;
+ struct dmx_filter_event event;
+ ssize_t free;
+
+ if (!dmxdevfilter) {
+ pr_err("%s: null filter. event type=%d (length=%d) will be discarded\n",
+ __func__, dmx_data_ready->status,
+ dmx_data_ready->data_length);
+ return -EINVAL;
+ }
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if (dmxdevfilter->buffer.error == -ETIMEDOUT ||
+ dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
+ spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
- ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
- if (ret == buffer1_len)
- ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
- if (ret < 0)
- buffer->error = ret;
+
+ if (dmx_data_ready->data_length == 0) {
+ if (dmx_data_ready->status == DMX_CRC_ERROR) {
+ /* Section was dropped due to CRC error */
+ event.type = DMX_EVENT_SECTION_CRC_ERROR;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (dmx_data_ready->status == DMX_OK_EOS) {
+ event.type = DMX_EVENT_EOS;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (dmx_data_ready->status == DMX_OK_MARKER) {
+ event.type = DMX_EVENT_MARKER;
+ event.params.marker.id = dmx_data_ready->marker.id;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) {
+ event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE;
+ event.params.scrambling_status =
+ dmx_data_ready->scrambling_bits;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (dmx_data_ready->status == DMX_OVERRUN_ERROR) {
+ pr_debug("dmxdev: section filter overflow (pid=%u)\n",
+ dmxdevfilter->params.sec.pid);
+ /* Set buffer error to notify user overflow occurred */
+ dmxdevfilter->buffer.error = -EOVERFLOW;
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ }
+ return 0;
+ }
+
+ event.type = DMX_EVENT_NEW_SECTION;
+ event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.total_length = dmx_data_ready->data_length;
+ event.params.section.actual_length = dmx_data_ready->data_length;
+
+ if (dmx_data_ready->status == DMX_MISSED_ERROR)
+ event.params.section.flags = DMX_FILTER_CC_ERROR;
+ else
+ event.params.section.flags = 0;
+
+ free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
+ if (free < dmx_data_ready->data_length) {
+ pr_err("%s: invalid data length: data_length=%d > free=%zd\n",
+ __func__, dmx_data_ready->data_length, free);
+ } else {
+ res = dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ DVB_RINGBUFFER_PUSH(&dmxdevfilter->buffer,
+ dmx_data_ready->data_length);
+ }
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+
+ return res;
+}
+
+static int dvb_dmxdev_ts_event_cb(struct dmx_ts_feed *feed,
+ struct dmx_data_ready *dmx_data_ready)
+{
+ struct dmxdev_filter *dmxdevfilter = feed->priv;
+ struct dvb_ringbuffer *buffer;
+ struct dmxdev_events_queue *events;
+ struct dmx_filter_event event;
+ ssize_t free;
+
+ if (!dmxdevfilter) {
+ pr_err("%s: null filter (feed->is_filtering=%d) event type=%d (length=%d) will be discarded\n",
+ __func__, feed->is_filtering,
+ dmx_data_ready->status,
+ dmx_data_ready->data_length);
+ return -EINVAL;
+ }
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
+ buffer = &dmxdevfilter->buffer;
+ events = &dmxdevfilter->events;
+ } else {
+ buffer = &dmxdevfilter->dev->dvr_buffer;
+ events = &dmxdevfilter->dev->dvr_output_events;
+ }
+
+ if (!buffer->error && dmx_data_ready->status == DMX_OVERRUN_ERROR) {
+ pr_debug("dmxdev: %s filter buffer overflow (pid=%u)\n",
+ dmxdevfilter->params.pes.output == DMX_OUT_DECODER ?
+ "decoder" : "",
+ dmxdevfilter->params.pes.pid);
+ /* Set buffer error to notify user overflow occurred */
+ buffer->error = -EOVERFLOW;
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_EOS) {
+ /* Report partial recording chunk */
+ if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP ||
+ dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ && events->current_event_data_size) {
+ event.type = DMX_EVENT_NEW_REC_CHUNK;
+ event.params.recording_chunk.offset =
+ events->current_event_start_offset;
+ event.params.recording_chunk.size =
+ events->current_event_data_size;
+ events->current_event_start_offset =
+ (events->current_event_start_offset +
+ events->current_event_data_size) %
+ buffer->size;
+ events->current_event_data_size = 0;
+ dvb_dmxdev_add_event(events, &event);
+ }
+
+ dmxdevfilter->eos_state = 1;
+ pr_debug("dmxdev: DMX_OK_EOS - entering EOS state\n");
+ event.type = DMX_EVENT_EOS;
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_MARKER) {
+ pr_debug("dmxdev: DMX_OK_MARKER - id=%llu\n",
+ dmx_data_ready->marker.id);
+ event.type = DMX_EVENT_MARKER;
+ event.params.marker.id = dmx_data_ready->marker.id;
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_PCR) {
+ pr_debug("dmxdev: event callback DMX_OK_PCR\n");
+ event.type = DMX_EVENT_NEW_PCR;
+ event.params.pcr.pcr = dmx_data_ready->pcr.pcr;
+ event.params.pcr.stc = dmx_data_ready->pcr.stc;
+ if (dmx_data_ready->pcr.disc_indicator_set)
+ event.params.pcr.flags =
+ DMX_FILTER_DISCONTINUITY_INDICATOR;
+ else
+ event.params.pcr.flags = 0;
+
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_IDX) {
+ pr_debug("dmxdev: event callback DMX_OK_IDX\n");
+ event.type = DMX_EVENT_NEW_INDEX_ENTRY;
+ event.params.index = dmx_data_ready->idx_event;
+
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) {
+ event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE;
+ event.params.scrambling_status =
+ dmx_data_ready->scrambling_bits;
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_DECODER_BUF) {
+ event.type = DMX_EVENT_NEW_ES_DATA;
+ event.params.es_data.buf_handle = dmx_data_ready->buf.handle;
+ event.params.es_data.cookie = dmx_data_ready->buf.cookie;
+ event.params.es_data.offset = dmx_data_ready->buf.offset;
+ event.params.es_data.data_len = dmx_data_ready->buf.len;
+ event.params.es_data.pts_valid = dmx_data_ready->buf.pts_exists;
+ event.params.es_data.pts = dmx_data_ready->buf.pts;
+ event.params.es_data.dts_valid = dmx_data_ready->buf.dts_exists;
+ event.params.es_data.dts = dmx_data_ready->buf.dts;
+ event.params.es_data.stc = dmx_data_ready->buf.stc;
+ event.params.es_data.transport_error_indicator_counter =
+ dmx_data_ready->buf.tei_counter;
+ event.params.es_data.continuity_error_counter =
+ dmx_data_ready->buf.cont_err_counter;
+ event.params.es_data.ts_packets_num =
+ dmx_data_ready->buf.ts_packets_num;
+ event.params.es_data.ts_dropped_bytes =
+ dmx_data_ready->buf.ts_dropped_bytes;
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ free = dvb_ringbuffer_free(buffer);
+ if (free < dmx_data_ready->data_length) {
+ pr_err("%s: invalid data length: data_length=%d > free=%zd\n",
+ __func__, dmx_data_ready->data_length, free);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
+ if (dmx_data_ready->status == DMX_OK &&
+ !events->current_event_data_size) {
+ events->current_event_start_offset = buffer->pwrite;
+ } else if (dmx_data_ready->status == DMX_OK_PES_END) {
+ event.type = DMX_EVENT_NEW_PES;
+
+ event.params.pes.base_offset =
+ events->current_event_start_offset;
+ event.params.pes.start_offset =
+ (events->current_event_start_offset +
+ dmx_data_ready->pes_end.start_gap) %
+ buffer->size;
+
+ event.params.pes.actual_length =
+ dmx_data_ready->pes_end.actual_length;
+ event.params.pes.total_length =
+ events->current_event_data_size;
+
+ event.params.pes.flags = 0;
+ if (dmx_data_ready->pes_end.disc_indicator_set)
+ event.params.pes.flags |=
+ DMX_FILTER_DISCONTINUITY_INDICATOR;
+ if (dmx_data_ready->pes_end.pes_length_mismatch)
+ event.params.pes.flags |=
+ DMX_FILTER_PES_LENGTH_ERROR;
+
+ event.params.pes.stc = dmx_data_ready->pes_end.stc;
+ event.params.pes.transport_error_indicator_counter =
+ dmx_data_ready->pes_end.tei_counter;
+ event.params.pes.continuity_error_counter =
+ dmx_data_ready->pes_end.cont_err_counter;
+ event.params.pes.ts_packets_num =
+ dmx_data_ready->pes_end.ts_packets_num;
+
+ /* Do not report zero length PES */
+ if (event.params.pes.total_length)
+ dvb_dmxdev_add_event(events, &event);
+
+ events->current_event_data_size = 0;
+ }
+ } else if (!events->current_event_data_size) {
+ events->current_event_start_offset = buffer->pwrite;
+ }
+
+ events->current_event_data_size += dmx_data_ready->data_length;
+ DVB_RINGBUFFER_PUSH(buffer, dmx_data_ready->data_length);
+
+ if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) ||
+ (dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)) {
+ while (events->current_event_data_size >=
+ dmxdevfilter->params.pes.rec_chunk_size) {
+ event.type = DMX_EVENT_NEW_REC_CHUNK;
+ event.params.recording_chunk.offset =
+ events->current_event_start_offset;
+ event.params.recording_chunk.size =
+ dmxdevfilter->params.pes.rec_chunk_size;
+ events->current_event_data_size =
+ events->current_event_data_size -
+ dmxdevfilter->params.pes.rec_chunk_size;
+ events->current_event_start_offset =
+ (events->current_event_start_offset +
+ dmxdevfilter->params.pes.rec_chunk_size) %
+ buffer->size;
+
+ dvb_dmxdev_add_event(events, &event);
+ }
+ }
spin_unlock(&dmxdevfilter->dev->lock);
- wake_up(&buffer->queue);
+ wake_up_all(&buffer->queue);
return 0;
}
@@ -427,11 +3129,18 @@ static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter)
switch (dmxdevfilter->type) {
case DMXDEV_TYPE_SEC:
del_timer(&dmxdevfilter->timer);
- dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec);
+ dmxdevfilter->feed.sec.feed->stop_filtering(
+ dmxdevfilter->feed.sec.feed);
break;
case DMXDEV_TYPE_PES:
- list_for_each_entry(feed, &dmxdevfilter->feed.ts, next)
+ list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) {
+ dmxdevfilter->dev->dvr_feeds_count--;
+ if (!dmxdevfilter->dev->dvr_feeds_count)
+ dmxdevfilter->dev->dvr_feed = NULL;
+ }
feed->ts->stop_filtering(feed->ts);
+ }
break;
default:
return -EINVAL;
@@ -449,7 +3158,8 @@ static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter)
switch (filter->type) {
case DMXDEV_TYPE_SEC:
- return filter->feed.sec->start_filtering(filter->feed.sec);
+ return filter->feed.sec.feed->start_filtering(
+ filter->feed.sec.feed);
case DMXDEV_TYPE_PES:
list_for_each_entry(feed, &filter->feed.ts, next) {
ret = feed->ts->start_filtering(feed->ts);
@@ -483,7 +3193,7 @@ static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter)
}
filter->dev->demux->release_section_feed(dmxdev->demux,
- filter->feed.sec);
+ filter->feed.sec.feed);
return 0;
}
@@ -492,25 +3202,38 @@ static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
{
struct dmxdev_feed *feed;
struct dmx_demux *demux;
+ struct ts_insertion_buffer *ts_buffer;
if (dmxdevfilter->state < DMXDEV_STATE_GO)
return 0;
switch (dmxdevfilter->type) {
case DMXDEV_TYPE_SEC:
- if (!dmxdevfilter->feed.sec)
+ if (!dmxdevfilter->feed.sec.feed)
break;
dvb_dmxdev_feed_stop(dmxdevfilter);
if (dmxdevfilter->filter.sec)
- dmxdevfilter->feed.sec->
- release_filter(dmxdevfilter->feed.sec,
+ dmxdevfilter->feed.sec.feed->
+ release_filter(dmxdevfilter->feed.sec.feed,
dmxdevfilter->filter.sec);
dvb_dmxdev_feed_restart(dmxdevfilter);
- dmxdevfilter->feed.sec = NULL;
+ dmxdevfilter->feed.sec.feed = NULL;
break;
case DMXDEV_TYPE_PES:
dvb_dmxdev_feed_stop(dmxdevfilter);
demux = dmxdevfilter->dev->demux;
+
+ if (!list_empty(&dmxdevfilter->insertion_buffers)) {
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+
+ list_for_each_entry(ts_buffer,
+ &dmxdevfilter->insertion_buffers, next)
+ dvb_dmxdev_cancel_ts_insertion(ts_buffer);
+ if (feed->ts->ts_insertion_terminate)
+ feed->ts->ts_insertion_terminate(feed->ts);
+ }
+
list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
demux->release_ts_feed(demux, feed->ts);
feed->ts = NULL;
@@ -522,7 +3245,13 @@ static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
return -EINVAL;
}
- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_flush_output(&dmxdevfilter->buffer, &dmxdevfilter->events);
+ dvb_ringbuffer_reset(&dmxdevfilter->buffer);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ wake_up_all(&dmxdevfilter->buffer.queue);
+
return 0;
}
@@ -589,12 +3318,76 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
tsfeed = feed->ts;
tsfeed->priv = filter;
- ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, 32768, timeout);
+ if (filter->params.pes.output == DMX_OUT_TS_TAP) {
+ tsfeed->buffer.ringbuff = &dmxdev->dvr_buffer;
+ tsfeed->buffer.priv_handle = dmxdev->dvr_priv_buff_handle;
+ if (!dmxdev->dvr_feeds_count)
+ dmxdev->dvr_feed = filter;
+ dmxdev->dvr_feeds_count++;
+ } else if (filter->params.pes.output == DMX_OUT_DECODER) {
+ tsfeed->buffer.ringbuff = &filter->buffer;
+ tsfeed->decoder_buffers = &filter->decoder_buffers;
+ tsfeed->buffer.priv_handle = filter->priv_buff_handle;
+ } else {
+ tsfeed->buffer.ringbuff = &filter->buffer;
+ tsfeed->buffer.priv_handle = filter->priv_buff_handle;
+ }
+
+ if (tsfeed->data_ready_cb) {
+ ret = tsfeed->data_ready_cb(tsfeed, dvb_dmxdev_ts_event_cb);
+
+ if (ret < 0) {
+ dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
+ return ret;
+ }
+ }
+
+ ret = tsfeed->set(tsfeed, feed->pid,
+ ts_type, ts_pes,
+ filter->decoder_buffers.buffers_size,
+ timeout);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
return ret;
}
+ if (tsfeed->set_tsp_out_format)
+ tsfeed->set_tsp_out_format(tsfeed, filter->dmx_tsp_format);
+
+ if (tsfeed->set_secure_mode)
+ tsfeed->set_secure_mode(tsfeed, &filter->sec_mode);
+
+ if (tsfeed->set_cipher_ops)
+ tsfeed->set_cipher_ops(tsfeed, &feed->cipher_ops);
+
+ if ((para->pes_type == DMX_PES_VIDEO0) ||
+ (para->pes_type == DMX_PES_VIDEO1) ||
+ (para->pes_type == DMX_PES_VIDEO2) ||
+ (para->pes_type == DMX_PES_VIDEO3)) {
+ if (tsfeed->set_video_codec) {
+ ret = tsfeed->set_video_codec(tsfeed,
+ para->video_codec);
+
+ if (ret < 0) {
+ dmxdev->demux->release_ts_feed(dmxdev->demux,
+ tsfeed);
+ return ret;
+ }
+ }
+ }
+
+ if ((filter->params.pes.output == DMX_OUT_TS_TAP) ||
+ (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP))
+ if (tsfeed->set_idx_params) {
+ ret = tsfeed->set_idx_params(
+ tsfeed, &feed->idx_params);
+ if (ret) {
+ dmxdev->demux->release_ts_feed(dmxdev->demux,
+ tsfeed);
+ return ret;
+ }
+ }
+
ret = tsfeed->start_filtering(tsfeed);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
@@ -604,12 +3397,50 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
return 0;
}
+static int dvb_filter_external_buffer_only(struct dmxdev *dmxdev,
+ struct dmxdev_filter *filter)
+{
+ struct dmx_caps caps;
+ int is_external_only;
+ int flags;
+
+ /*
+ * For backward compatibility, the default assumes that
+ * external-only buffers are not supported.
+ */
+ flags = 0;
+ if (dmxdev->demux->get_caps) {
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+
+ if (filter->type == DMXDEV_TYPE_SEC)
+ flags = caps.section.flags;
+ else if (filter->params.pes.output == DMX_OUT_DECODER)
+ /* For decoder filters dmxdev buffer is not required */
+ flags = 0;
+ else if (filter->params.pes.output == DMX_OUT_TAP)
+ flags = caps.pes.flags;
+ else if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+ flags = caps.recording_188_tsp.flags;
+ else
+ flags = caps.recording_192_tsp.flags;
+ }
+
+ if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) &&
+ (flags & DMX_BUFFER_EXTERNAL_SUPPORT))
+ is_external_only = 1;
+ else
+ is_external_only = 0;
+
+ return is_external_only;
+}
+
static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
{
struct dmxdev *dmxdev = filter->dev;
struct dmxdev_feed *feed;
void *mem;
int ret, i;
+ size_t tsp_size;
if (filter->state < DMXDEV_STATE_SET)
return -EINVAL;
@@ -617,34 +3448,64 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
if (filter->state >= DMXDEV_STATE_GO)
dvb_dmxdev_filter_stop(filter);
+ if (!dvb_filter_verify_buffer_size(filter))
+ return -EINVAL;
+
if (!filter->buffer.data) {
- mem = vmalloc(filter->buffer.size);
+ /*
+ * The dmxdev buffer of decoder filters is not really used
+ * to exchange data with applications. Decoder buffers can
+ * be set using DMX_SET_DECODER_BUFFER, which does not
+ * update filter->buffer.data at all. Therefore this filter
+ * should not be treated like other regular filters, and
+ * starting it must not fail here even if the user set the
+ * buffer of a decoder filter as an external buffer.
+ */
+ if (filter->type == DMXDEV_TYPE_PES &&
+ (filter->params.pes.output == DMX_OUT_DECODER ||
+ filter->params.pes.output == DMX_OUT_TS_TAP))
+ filter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+
+ if (!(filter->type == DMXDEV_TYPE_PES &&
+ filter->params.pes.output == DMX_OUT_TS_TAP) &&
+ (filter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL ||
+ dvb_filter_external_buffer_only(dmxdev, filter)))
+ return -ENOMEM;
+
+ mem = vmalloc_user(filter->buffer.size);
if (!mem)
return -ENOMEM;
spin_lock_irq(&filter->dev->lock);
filter->buffer.data = mem;
spin_unlock_irq(&filter->dev->lock);
+ } else if ((filter->buffer_mode == DMX_BUFFER_MODE_INTERNAL) &&
+ dvb_filter_external_buffer_only(dmxdev, filter)) {
+ return -ENOMEM;
}
- dvb_ringbuffer_flush(&filter->buffer);
+ filter->eos_state = 0;
+
+ spin_lock_irq(&filter->dev->lock);
+ dvb_dmxdev_flush_output(&filter->buffer, &filter->events);
+ spin_unlock_irq(&filter->dev->lock);
switch (filter->type) {
case DMXDEV_TYPE_SEC:
{
struct dmx_sct_filter_params *para = &filter->params.sec;
struct dmx_section_filter **secfilter = &filter->filter.sec;
- struct dmx_section_feed **secfeed = &filter->feed.sec;
+ struct dmx_section_feed **secfeed = &filter->feed.sec.feed;
*secfilter = NULL;
*secfeed = NULL;
-
/* find active filter/feed with same PID */
for (i = 0; i < dmxdev->filternum; i++) {
if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
dmxdev->filter[i].params.sec.pid == para->pid) {
- *secfeed = dmxdev->filter[i].feed.sec;
+ *secfeed = dmxdev->filter[i].feed.sec.feed;
break;
}
}
@@ -652,22 +3513,44 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
/* if no feed found, try to allocate new one */
if (!*secfeed) {
ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
- secfeed,
- dvb_dmxdev_section_callback);
+ secfeed,
+ dvb_dmxdev_section_callback);
if (ret < 0) {
- printk("DVB (%s): could not alloc feed\n",
+ pr_err("DVB (%s): could not alloc feed\n",
__func__);
return ret;
}
+ if ((*secfeed)->data_ready_cb) {
+ ret = (*secfeed)->data_ready_cb(
+ *secfeed,
+ dvb_dmxdev_section_event_cb);
+
+ if (ret < 0) {
+ pr_err(
+ "DVB (%s): could not set event cb\n",
+ __func__);
+ dvb_dmxdev_feed_restart(filter);
+ return ret;
+ }
+ }
+
ret = (*secfeed)->set(*secfeed, para->pid, 32768,
(para->flags & DMX_CHECK_CRC) ? 1 : 0);
if (ret < 0) {
- printk("DVB (%s): could not set feed\n",
- __func__);
+ pr_err("DVB (%s): could not set feed\n",
+ __func__);
dvb_dmxdev_feed_restart(filter);
return ret;
}
+
+ if ((*secfeed)->set_secure_mode)
+ (*secfeed)->set_secure_mode(*secfeed,
+ &filter->sec_mode);
+
+ if ((*secfeed)->set_cipher_ops)
+ (*secfeed)->set_cipher_ops(*secfeed,
+ &filter->feed.sec.cipher_ops);
} else {
dvb_dmxdev_feed_stop(filter);
}
@@ -675,12 +3558,14 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
if (ret < 0) {
dvb_dmxdev_feed_restart(filter);
- filter->feed.sec->start_filtering(*secfeed);
- dprintk("could not get filter\n");
+ filter->feed.sec.feed->start_filtering(*secfeed);
+ pr_debug("could not get filter\n");
return ret;
}
(*secfilter)->priv = filter;
+ (*secfilter)->buffer.ringbuff = &filter->buffer;
+ (*secfilter)->buffer.priv_handle = filter->priv_buff_handle;
memcpy(&((*secfilter)->filter_value[3]),
&(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
@@ -696,8 +3581,12 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
(*secfilter)->filter_mask[2] = 0;
filter->todo = 0;
+ filter->events.data_read_event_masked =
+ filter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_SECTION;
- ret = filter->feed.sec->start_filtering(filter->feed.sec);
+ ret = filter->feed.sec.feed->start_filtering(
+ filter->feed.sec.feed);
if (ret < 0)
return ret;
@@ -705,19 +3594,93 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
break;
}
case DMXDEV_TYPE_PES:
+ if (filter->params.pes.rec_chunk_size <
+ DMX_REC_BUFF_CHUNK_MIN_SIZE)
+ filter->params.pes.rec_chunk_size =
+ DMX_REC_BUFF_CHUNK_MIN_SIZE;
+
+ if (filter->params.pes.rec_chunk_size >=
+ filter->buffer.size)
+ filter->params.pes.rec_chunk_size =
+ filter->buffer.size >> 2;
+
+ /* Align rec-chunk based on output format */
+ if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+ tsp_size = 188;
+ else
+ tsp_size = 192;
+
+ filter->params.pes.rec_chunk_size /= tsp_size;
+ filter->params.pes.rec_chunk_size *= tsp_size;
+
+ if (filter->params.pes.output == DMX_OUT_TS_TAP)
+ dmxdev->dvr_output_events.data_read_event_masked =
+ dmxdev->dvr_output_events.event_mask.disable_mask &
+ DMX_EVENT_NEW_REC_CHUNK;
+ else if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ filter->events.data_read_event_masked =
+ filter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_REC_CHUNK;
+ else if (filter->params.pes.output == DMX_OUT_TAP)
+ filter->events.data_read_event_masked =
+ filter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_PES;
+ else
+ filter->events.data_read_event_masked = 1;
+
+ ret = 0;
list_for_each_entry(feed, &filter->feed.ts, next) {
ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
- if (ret < 0) {
- dvb_dmxdev_filter_stop(filter);
- return ret;
+ if (ret)
+ break;
+ }
+
+ if (!ret)
+ break;
+
+ /* cleanup feeds that were started before the failure */
+ list_for_each_entry(feed, &filter->feed.ts, next) {
+ if (!feed->ts)
+ continue;
+ feed->ts->stop_filtering(feed->ts);
+ dmxdev->demux->release_ts_feed(dmxdev->demux, feed->ts);
+ feed->ts = NULL;
+
+ if (filter->params.pes.output == DMX_OUT_TS_TAP) {
+ filter->dev->dvr_feeds_count--;
+ if (!filter->dev->dvr_feeds_count)
+ filter->dev->dvr_feed = NULL;
}
}
- break;
+ return ret;
+
default:
return -EINVAL;
}
dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
+
+ if ((filter->type == DMXDEV_TYPE_PES) &&
+ !list_empty(&filter->insertion_buffers)) {
+ struct ts_insertion_buffer *ts_buffer;
+
+ feed = list_first_entry(&filter->feed.ts,
+ struct dmxdev_feed, next);
+
+ ret = 0;
+ if (feed->ts->ts_insertion_init)
+ ret = feed->ts->ts_insertion_init(feed->ts);
+ if (!ret) {
+ list_for_each_entry(ts_buffer,
+ &filter->insertion_buffers, next)
+ dvb_dmxdev_queue_ts_insertion(
+ ts_buffer);
+ } else {
+ pr_err("%s: ts_insertion_init failed, err %d\n",
+ __func__, ret);
+ }
+ }
+
return 0;
}
@@ -747,11 +3710,28 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
mutex_init(&dmxdevfilter->mutex);
file->private_data = dmxdevfilter;
+ memset(&dmxdevfilter->decoder_buffers,
+ 0,
+ sizeof(dmxdevfilter->decoder_buffers));
+ dmxdevfilter->decoder_buffers.buffers_size =
+ DMX_DEFAULT_DECODER_BUFFER_SIZE;
+ dmxdevfilter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+ dmxdevfilter->priv_buff_handle = NULL;
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
+ dvb_dmxdev_flush_events(&dmxdevfilter->events);
+ dmxdevfilter->events.event_mask.disable_mask = DMX_EVENT_NEW_ES_DATA;
+ dmxdevfilter->events.event_mask.no_wakeup_mask = 0;
+ dmxdevfilter->events.event_mask.wakeup_threshold = 1;
+
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
init_timer(&dmxdevfilter->timer);
+ dmxdevfilter->sec_mode.is_secured = 0;
+
+ INIT_LIST_HEAD(&dmxdevfilter->insertion_buffers);
+
+ dmxdevfilter->dmx_tsp_format = DMX_TSP_FORMAT_188;
dvbdev->users++;
mutex_unlock(&dmxdev->mutex);
@@ -761,23 +3741,40 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
struct dmxdev_filter *dmxdevfilter)
{
+ struct ts_insertion_buffer *ts_buffer, *tmp;
+
mutex_lock(&dmxdev->mutex);
mutex_lock(&dmxdevfilter->mutex);
dvb_dmxdev_filter_stop(dmxdevfilter);
dvb_dmxdev_filter_reset(dmxdevfilter);
+ list_for_each_entry_safe(ts_buffer, tmp,
+ &dmxdevfilter->insertion_buffers, next) {
+ list_del(&ts_buffer->next);
+ vfree(ts_buffer->buffer);
+ vfree(ts_buffer);
+ }
+
if (dmxdevfilter->buffer.data) {
void *mem = dmxdevfilter->buffer.data;
spin_lock_irq(&dmxdev->lock);
dmxdevfilter->buffer.data = NULL;
spin_unlock_irq(&dmxdev->lock);
- vfree(mem);
+ if (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL)
+ vfree(mem);
+ }
+
+ if ((dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
+ dmxdevfilter->priv_buff_handle) {
+ dmxdev->demux->unmap_buffer(dmxdev->demux,
+ dmxdevfilter->priv_buff_handle);
+ dmxdevfilter->priv_buff_handle = NULL;
}
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
- wake_up(&dmxdevfilter->buffer.queue);
+ wake_up_all(&dmxdevfilter->buffer.queue);
mutex_unlock(&dmxdevfilter->mutex);
mutex_unlock(&dmxdev->mutex);
return 0;
@@ -795,6 +3792,7 @@ static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev,
struct dmxdev_filter *filter, u16 pid)
{
struct dmxdev_feed *feed;
+ int ret = 0;
if ((filter->type != DMXDEV_TYPE_PES) ||
(filter->state < DMXDEV_STATE_SET))
@@ -810,28 +3808,45 @@ static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev,
return -ENOMEM;
feed->pid = pid;
- list_add(&feed->next, &filter->feed.ts);
+ feed->cipher_ops.operations_count = 0;
+ feed->idx_params.enable = 0;
if (filter->state >= DMXDEV_STATE_GO)
- return dvb_dmxdev_start_feed(dmxdev, filter, feed);
+ ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
- return 0;
+ if (!ret)
+ list_add(&feed->next, &filter->feed.ts);
+ else
+ kfree(feed);
+
+ return ret;
}
static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev,
struct dmxdev_filter *filter, u16 pid)
{
+ int feed_count;
struct dmxdev_feed *feed, *tmp;
if ((filter->type != DMXDEV_TYPE_PES) ||
(filter->state < DMXDEV_STATE_SET))
return -EINVAL;
+ feed_count = 0;
+ list_for_each_entry(tmp, &filter->feed.ts, next)
+ feed_count++;
+
+ if (feed_count <= 1)
+ return -EINVAL;
+
list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) {
- if ((feed->pid == pid) && (feed->ts != NULL)) {
- feed->ts->stop_filtering(feed->ts);
- filter->dev->demux->release_ts_feed(filter->dev->demux,
- feed->ts);
+ if (feed->pid == pid) {
+ if (feed->ts != NULL) {
+ feed->ts->stop_filtering(feed->ts);
+ filter->dev->demux->release_ts_feed(
+ filter->dev->demux,
+ feed->ts);
+ }
list_del(&feed->next);
kfree(feed);
}
@@ -844,7 +3859,7 @@ static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
struct dmxdev_filter *dmxdevfilter,
struct dmx_sct_filter_params *params)
{
- dprintk("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
+ pr_debug("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
__func__, params->pid, params->flags, params->timeout);
dvb_dmxdev_filter_stop(dmxdevfilter);
@@ -853,6 +3868,7 @@ static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
memcpy(&dmxdevfilter->params.sec,
params, sizeof(struct dmx_sct_filter_params));
invert_mode(&dmxdevfilter->params.sec.filter);
+ dmxdevfilter->feed.sec.cipher_ops.operations_count = 0;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
if (params->flags & DMX_IMMEDIATE_START)
@@ -861,6 +3877,99 @@ static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
return 0;
}
+static int dvb_dmxdev_set_secure_mode(
+ struct dmxdev *dmxdev,
+ struct dmxdev_filter *filter,
+ struct dmx_secure_mode *sec_mode)
+{
+ if (!dmxdev || !filter || !sec_mode)
+ return -EINVAL;
+
+ if (filter->state == DMXDEV_STATE_GO) {
+ pr_err("%s: invalid filter state\n", __func__);
+ return -EBUSY;
+ }
+
+ pr_debug("%s: secure=%d\n", __func__, sec_mode->is_secured);
+
+ filter->sec_mode = *sec_mode;
+
+ return 0;
+}
+
+static int dvb_dmxdev_set_cipher(struct dmxdev *dmxdev,
+ struct dmxdev_filter *filter,
+ struct dmx_cipher_operations *cipher_ops)
+{
+ struct dmxdev_feed *feed;
+ struct dmxdev_feed *ts_feed = NULL;
+ struct dmxdev_sec_feed *sec_feed = NULL;
+ struct dmx_caps caps;
+
+ if (!dmxdev || !dmxdev->demux->get_caps)
+ return -EINVAL;
+
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+
+ if (!filter || !cipher_ops ||
+ (cipher_ops->operations_count > caps.num_cipher_ops) ||
+ (cipher_ops->operations_count >
+ DMX_MAX_CIPHER_OPERATIONS_COUNT))
+ return -EINVAL;
+
+ pr_debug("%s: pid=%d, operations=%d\n", __func__,
+ cipher_ops->pid, cipher_ops->operations_count);
+
+ if (filter->state < DMXDEV_STATE_SET ||
+ filter->state > DMXDEV_STATE_GO) {
+ pr_err("%s: invalid filter state\n", __func__);
+ return -EPERM;
+ }
+
+ if (!filter->sec_mode.is_secured && cipher_ops->operations_count) {
+ pr_err("%s: secure mode must be enabled to set cipher ops\n",
+ __func__);
+ return -EPERM;
+ }
+
+ switch (filter->type) {
+ case DMXDEV_TYPE_PES:
+ list_for_each_entry(feed, &filter->feed.ts, next) {
+ if (feed->pid == cipher_ops->pid) {
+ ts_feed = feed;
+ ts_feed->cipher_ops = *cipher_ops;
+ if (filter->state == DMXDEV_STATE_GO &&
+ ts_feed->ts->set_cipher_ops)
+ ts_feed->ts->set_cipher_ops(
+ ts_feed->ts, cipher_ops);
+ break;
+ }
+ }
+ break;
+ case DMXDEV_TYPE_SEC:
+ if (filter->params.sec.pid == cipher_ops->pid) {
+ sec_feed = &filter->feed.sec;
+ sec_feed->cipher_ops = *cipher_ops;
+ if (filter->state == DMXDEV_STATE_GO &&
+ sec_feed->feed->set_cipher_ops)
+ sec_feed->feed->set_cipher_ops(sec_feed->feed,
+ cipher_ops);
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!ts_feed && !sec_feed) {
+ pr_err("%s: pid %d is undefined for this filter\n",
+ __func__, cipher_ops->pid);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
struct dmxdev_filter *dmxdevfilter,
struct dmx_pes_filter_params *params)
@@ -891,6 +4000,55 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
return 0;
}
+static int dvb_dmxdev_set_decoder_buffer(struct dmxdev *dmxdev,
+ struct dmxdev_filter *filter,
+ struct dmx_decoder_buffers *buffs)
+{
+ int i;
+ struct dmx_decoder_buffers *dec_buffs;
+ struct dmx_caps caps;
+
+ if (!dmxdev || !filter || !buffs)
+ return -EINVAL;
+
+ dec_buffs = &filter->decoder_buffers;
+ if (!dmxdev->demux->get_caps)
+ return -EINVAL;
+
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+ if (!dvb_dmxdev_verify_buffer_size(buffs->buffers_size,
+ caps.decoder.max_size, caps.decoder.size_alignment))
+ return -EINVAL;
+
+ if ((buffs->buffers_size == 0) ||
+ (buffs->is_linear &&
+ ((buffs->buffers_num <= 1) ||
+ (buffs->buffers_num > DMX_MAX_DECODER_BUFFER_NUM))))
+ return -EINVAL;
+
+ if (buffs->buffers_num == 0) {
+ /* Internal mode - linear buffers not supported in this mode */
+ if (!(caps.decoder.flags & DMX_BUFFER_INTERNAL_SUPPORT) ||
+ buffs->is_linear)
+ return -EINVAL;
+ } else {
+ /* External buffer(s) mode */
+ if ((!(caps.decoder.flags & DMX_BUFFER_LINEAR_GROUP_SUPPORT) &&
+ buffs->buffers_num > 1) ||
+ !(caps.decoder.flags & DMX_BUFFER_EXTERNAL_SUPPORT) ||
+ buffs->buffers_num > caps.decoder.max_buffer_num)
+ return -EINVAL;
+
+ dec_buffs->is_linear = buffs->is_linear;
+ dec_buffs->buffers_num = buffs->buffers_num;
+ dec_buffs->buffers_size = buffs->buffers_size;
+ for (i = 0; i < dec_buffs->buffers_num; i++)
+ dec_buffs->handles[i] = buffs->handles[i];
+ }
+
+ return 0;
+}
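+
+/*
+ * The decoder-buffer configurations accepted above, in summary:
+ *
+ *	buffers_num == 0   internal allocation; requires
+ *	                   DMX_BUFFER_INTERNAL_SUPPORT and is_linear == 0
+ *	buffers_num == 1   a single external buffer; requires
+ *	                   DMX_BUFFER_EXTERNAL_SUPPORT
+ *	buffers_num  > 1   a (linear) group of up to
+ *	                   caps.decoder.max_buffer_num external buffers;
+ *	                   additionally requires
+ *	                   DMX_BUFFER_LINEAR_GROUP_SUPPORT
+ */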
+
static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -902,7 +4060,7 @@ static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
hcount = 3 + dfil->todo;
if (hcount > count)
hcount = count;
- result = dvb_dmxdev_buffer_read(&dfil->buffer,
+ result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
file->f_flags & O_NONBLOCK,
buf, hcount, ppos);
if (result < 0) {
@@ -923,7 +4081,7 @@ static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
}
if (count > dfil->todo)
count = dfil->todo;
- result = dvb_dmxdev_buffer_read(&dfil->buffer,
+ result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
file->f_flags & O_NONBLOCK,
buf, count, ppos);
if (result < 0)
@@ -942,12 +4100,36 @@ dvb_demux_read(struct file *file, char __user *buf, size_t count,
if (mutex_lock_interruptible(&dmxdevfilter->mutex))
return -ERESTARTSYS;
+ if (dmxdevfilter->eos_state &&
+ dvb_ringbuffer_empty(&dmxdevfilter->buffer)) {
+ mutex_unlock(&dmxdevfilter->mutex);
+ return 0;
+ }
+
if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
else
- ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
- file->f_flags & O_NONBLOCK,
- buf, count, ppos);
+ ret = dvb_dmxdev_buffer_read(dmxdevfilter,
+ &dmxdevfilter->buffer,
+ file->f_flags & O_NONBLOCK,
+ buf, count, ppos);
+
+ if (ret > 0) {
+ dvb_dmxdev_notify_data_read(dmxdevfilter, ret);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ /*
+		 * In PULL mode we might be stalled on the
+		 * event queue, so wake up any waiters.
+ */
+ if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL)
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (ret == -EOVERFLOW) {
+ dvb_dmxdev_auto_flush_buffer(dmxdevfilter,
+ &dmxdevfilter->buffer);
+ }
mutex_unlock(&dmxdevfilter->mutex);
return ret;
@@ -1013,6 +4195,43 @@ static int dvb_demux_do_ioctl(struct file *file,
mutex_unlock(&dmxdevfilter->mutex);
break;
+ case DMX_SET_BUFFER_MODE:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_buffer_mode(dmxdevfilter,
+ *(enum dmx_buffer_mode *)parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_BUFFER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_buffer(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_GET_BUFFER_STATUS:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_buffer_status(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_RELEASE_DATA:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_release_data(dmxdevfilter, arg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
case DMX_GET_PES_PIDS:
if (!dmxdev->demux->get_pes_pids) {
ret = -EINVAL;
@@ -1021,9 +4240,6 @@ static int dvb_demux_do_ioctl(struct file *file,
dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
break;
-#if 0
- /* Not used upstream and never documented */
-
case DMX_GET_CAPS:
if (!dmxdev->demux->get_caps) {
ret = -EINVAL;
@@ -1033,13 +4249,65 @@ static int dvb_demux_do_ioctl(struct file *file,
break;
case DMX_SET_SOURCE:
- if (!dmxdev->demux->set_source) {
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_source(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_TS_PACKET_FORMAT:
+ if (!dmxdev->demux->set_tsp_format) {
ret = -EINVAL;
break;
}
- ret = dmxdev->demux->set_source(dmxdev->demux, parg);
+
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO) {
+ ret = -EBUSY;
+ break;
+ }
+ ret = dmxdev->demux->set_tsp_format(
+ dmxdev->demux,
+ *(enum dmx_tsp_format_t *)parg);
+ break;
+
+ case DMX_SET_TS_OUT_FORMAT:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+
+ ret = dvb_dmxdev_set_tsp_out_format(dmxdevfilter,
+ *(enum dmx_tsp_format_t *)parg);
+
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_DECODER_BUFFER_SIZE:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+
+ ret = dvb_dmxdev_set_decoder_buffer_size(dmxdevfilter, arg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_PLAYBACK_MODE:
+ ret = dvb_dmxdev_set_playback_mode(
+ dmxdevfilter,
+ *(enum dmx_playback_mode_t *)parg);
+ break;
+
+ case DMX_GET_EVENT:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_event(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
break;
-#endif
case DMX_GET_STC:
if (!dmxdev->demux->get_stc) {
@@ -1070,8 +4338,109 @@ static int dvb_demux_do_ioctl(struct file *file,
mutex_unlock(&dmxdevfilter->mutex);
break;
+ case DMX_SET_DECODER_BUFFER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ ret = dvb_dmxdev_set_decoder_buffer(dmxdev, dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_SECURE_MODE:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ ret = dvb_dmxdev_set_secure_mode(dmxdev, dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_CIPHER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ ret = dvb_dmxdev_set_cipher(dmxdev, dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_REUSE_DECODER_BUFFER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_reuse_decoder_buf(dmxdevfilter, arg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_EVENTS_MASK:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_event_mask(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_GET_EVENTS_MASK:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_event_mask(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_INDEXING_PARAMS:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_indexing_params(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_TS_INSERTION:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_ts_insertion(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_ABORT_TS_INSERTION:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_abort_ts_insertion(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_GET_SCRAMBLING_BITS:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_scrambling_bits(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_FLUSH_BUFFER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_flush_buffer(dmxdevfilter);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
default:
- ret = -EINVAL;
+ pr_err("%s: unknown ioctl code (0x%x)\n",
+ __func__, cmd);
+ ret = -ENOIOCTLCMD;
break;
}
mutex_unlock(&dmxdev->mutex);
@@ -1084,13 +4453,78 @@ static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
}
+#ifdef CONFIG_COMPAT
+
+struct dmx_set_ts_insertion32 {
+ __u32 identifier;
+ __u32 repetition_time;
+ compat_uptr_t ts_packets;
+ compat_size_t size;
+};
+
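+/*
+ * On a 64-bit kernel, struct dmx_set_ts_insertion holds a native pointer
+ * and size_t, so its layout differs from what a 32-bit process passes in.
+ * The wrapper below copies the 32-bit layout and widens ts_packets with
+ * compat_ptr() before reusing the regular ioctl path.
+ */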
+static long dmx_set_ts_insertion32_wrapper(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct dmx_set_ts_insertion32 dmx_ts_insert32;
+ struct dmx_set_ts_insertion dmx_ts_insert;
+
+ ret = copy_from_user(&dmx_ts_insert32, (void __user *)arg,
+ sizeof(dmx_ts_insert32));
+ if (ret) {
+ pr_err(
+ "%s: copy dmx_set_ts_insertion32 from user failed, ret=%d\n",
+ __func__, ret);
+ return -EFAULT;
+ }
+
+ memset(&dmx_ts_insert, 0, sizeof(dmx_ts_insert));
+ dmx_ts_insert.identifier = dmx_ts_insert32.identifier;
+ dmx_ts_insert.repetition_time = dmx_ts_insert32.repetition_time;
+ dmx_ts_insert.ts_packets = compat_ptr(dmx_ts_insert32.ts_packets);
+ dmx_ts_insert.size = dmx_ts_insert32.size;
+
+ ret = dvb_demux_do_ioctl(file, DMX_SET_TS_INSERTION, &dmx_ts_insert);
+
+ return ret;
+}
+
+#define DMX_SET_TS_INSERTION32 _IOW('o', 70, struct dmx_set_ts_insertion32)
+
+/*
+ * The compat ioctl is called whenever compatibility is required, i.e. when a
+ * 32-bit process issues an ioctl to a 64-bit kernel.
+ */
+static long dvb_demux_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+
+ switch (cmd) {
+ case DMX_SET_TS_INSERTION32:
+ ret = dmx_set_ts_insertion32_wrapper(file, cmd, arg);
+ break;
+ case DMX_SET_TS_INSERTION:
+		pr_err("%s: 64bit ioctl code (0x%x) used by 32bit userspace\n",
+ __func__, DMX_SET_TS_INSERTION);
+ ret = -ENOIOCTLCMD;
+ break;
+ default:
+ /* use regular ioctl */
+ ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
+ }
+
+ return ret;
+}
+#endif
+
static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
unsigned int mask = 0;
- if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
- return POLLERR;
+ if (!dmxdevfilter)
+ return -EINVAL;
poll_wait(file, &dmxdevfilter->buffer.queue, wait);
@@ -1099,20 +4533,80 @@ static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
return 0;
- if (dmxdevfilter->buffer.error)
- mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+ if (dmxdevfilter->buffer.error) {
+ mask |= (POLLIN | POLLRDNORM | POLLERR);
+ if (dmxdevfilter->buffer.error == -EOVERFLOW)
+ mask |= POLLPRI;
+ }
if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
- mask |= (POLLIN | POLLRDNORM | POLLPRI);
+ mask |= (POLLIN | POLLRDNORM);
+
+ if (dmxdevfilter->events.wakeup_events_counter >=
+ dmxdevfilter->events.event_mask.wakeup_threshold)
+ mask |= POLLPRI;
return mask;
}
+static int dvb_demux_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct dmxdev_filter *dmxdevfilter = filp->private_data;
+ struct dmxdev *dmxdev = dmxdevfilter->dev;
+ int ret;
+ int vma_size;
+ int buffer_size;
+
+ vma_size = vma->vm_end - vma->vm_start;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&dmxdev->mutex))
+ return -ERESTARTSYS;
+
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+
+ if ((!dmxdevfilter->buffer.data) ||
+ (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL)) {
+ mutex_unlock(&dmxdevfilter->mutex);
+ mutex_unlock(&dmxdev->mutex);
+ return -EINVAL;
+ }
+
+	/* The requested mapping must match the page-aligned buffer size */
+ buffer_size = dmxdevfilter->buffer.size + (PAGE_SIZE-1);
+ buffer_size = buffer_size & ~(PAGE_SIZE-1);
+
+ if (vma_size != buffer_size) {
+ mutex_unlock(&dmxdevfilter->mutex);
+ mutex_unlock(&dmxdev->mutex);
+ return -EINVAL;
+ }
+
+ ret = remap_vmalloc_range(vma, dmxdevfilter->buffer.data, 0);
+ if (ret) {
+ mutex_unlock(&dmxdevfilter->mutex);
+ mutex_unlock(&dmxdev->mutex);
+ return ret;
+ }
+
+ vma->vm_flags |= VM_DONTDUMP;
+ vma->vm_flags |= VM_DONTEXPAND;
+
+ mutex_unlock(&dmxdevfilter->mutex);
+ mutex_unlock(&dmxdev->mutex);
+
+ return 0;
+}
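+
+/*
+ * Sketch of the matching userspace call ('fd' and 'len' are illustrative;
+ * 'len' must equal the filter buffer size rounded up to a whole page, and
+ * the mapping must be read-only since VM_WRITE is rejected above):
+ *
+ *	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
+ */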
+
static int dvb_demux_release(struct inode *inode, struct file *file)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
struct dmxdev *dmxdev = dmxdevfilter->dev;
-
int ret;
ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);
@@ -1120,6 +4614,8 @@ static int dvb_demux_release(struct inode *inode, struct file *file)
mutex_lock(&dmxdev->mutex);
dmxdev->dvbdev->users--;
if(dmxdev->dvbdev->users==1 && dmxdev->exit==1) {
+ fops_put(file->f_op);
+ file->f_op = NULL;
mutex_unlock(&dmxdev->mutex);
wake_up(&dmxdev->dvbdev->wait_queue);
} else
@@ -1136,6 +4632,10 @@ static const struct file_operations dvb_demux_fops = {
.release = dvb_demux_release,
.poll = dvb_demux_poll,
.llseek = default_llseek,
+ .mmap = dvb_demux_mmap,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dvb_demux_compat_ioctl,
+#endif
};
static const struct dvb_device dvbdev_demux = {
@@ -1161,11 +4661,44 @@ static int dvb_dvr_do_ioctl(struct file *file,
switch (cmd) {
case DMX_SET_BUFFER_SIZE:
- ret = dvb_dvr_set_buffer_size(dmxdev, arg);
+ ret = dvb_dvr_set_buffer_size(dmxdev, file->f_flags, arg);
+ break;
+
+ case DMX_SET_BUFFER_MODE:
+ ret = dvb_dvr_set_buffer_mode(dmxdev, file->f_flags,
+ *(enum dmx_buffer_mode *)parg);
+ break;
+
+ case DMX_SET_BUFFER:
+ ret = dvb_dvr_set_buffer(dmxdev, file->f_flags, parg);
+ break;
+
+ case DMX_GET_BUFFER_STATUS:
+ ret = dvb_dvr_get_buffer_status(dmxdev, file->f_flags, parg);
+ break;
+
+ case DMX_RELEASE_DATA:
+ ret = dvb_dvr_release_data(dmxdev, file->f_flags, arg);
+ break;
+
+ case DMX_FEED_DATA:
+ ret = dvb_dvr_feed_data(dmxdev, file->f_flags, arg);
+ break;
+
+ case DMX_GET_EVENT:
+ ret = dvb_dvr_get_event(dmxdev, file->f_flags, parg);
+ break;
+
+ case DMX_PUSH_OOB_COMMAND:
+ ret = dvb_dvr_push_oob_cmd(dmxdev, file->f_flags, parg);
+ break;
+
+ case DMX_FLUSH_BUFFER:
+ ret = dvb_dvr_flush_buffer(dmxdev, file->f_flags);
break;
default:
- ret = -EINVAL;
+ ret = -ENOIOCTLCMD;
break;
}
mutex_unlock(&dmxdev->mutex);
@@ -1173,10 +4706,18 @@ static int dvb_dvr_do_ioctl(struct file *file,
}
static long dvb_dvr_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
+{
+ return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
+}
+
+#ifdef CONFIG_COMPAT
+static long dvb_dvr_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
}
+#endif
static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
{
@@ -1184,21 +4725,31 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
struct dmxdev *dmxdev = dvbdev->priv;
unsigned int mask = 0;
- dprintk("function : %s\n", __func__);
-
- if (dmxdev->exit)
- return POLLERR;
-
- poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
+ pr_debug("function : %s\n", __func__);
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
- if (dmxdev->dvr_buffer.error)
- mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+ poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
+
+ if (dmxdev->dvr_buffer.error) {
+ mask |= (POLLIN | POLLRDNORM | POLLERR);
+ if (dmxdev->dvr_buffer.error == -EOVERFLOW)
+ mask |= POLLPRI;
+ }
if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
- mask |= (POLLIN | POLLRDNORM | POLLPRI);
- } else
- mask |= (POLLOUT | POLLWRNORM | POLLPRI);
+ mask |= (POLLIN | POLLRDNORM);
+
+ if (dmxdev->dvr_output_events.wakeup_events_counter >=
+ dmxdev->dvr_output_events.event_mask.wakeup_threshold)
+ mask |= POLLPRI;
+ } else {
+ poll_wait(file, &dmxdev->dvr_input_buffer.queue, wait);
+ if (dmxdev->dvr_input_buffer.error)
+ mask |= (POLLOUT | POLLRDNORM | POLLPRI | POLLERR);
+
+ if (dvb_ringbuffer_free(&dmxdev->dvr_input_buffer))
+ mask |= (POLLOUT | POLLRDNORM | POLLPRI);
+ }
return mask;
}
@@ -1207,7 +4758,11 @@ static const struct file_operations dvb_dvr_fops = {
.owner = THIS_MODULE,
.read = dvb_dvr_read,
.write = dvb_dvr_write,
+ .mmap = dvb_dvr_mmap,
.unlocked_ioctl = dvb_dvr_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dvb_dvr_compat_ioctl,
+#endif
.open = dvb_dvr_open,
.release = dvb_dvr_release,
.poll = dvb_dvr_poll,
@@ -1223,9 +4778,94 @@ static const struct dvb_device dvbdev_dvr = {
#endif
.fops = &dvb_dvr_fops
};
+
+
+/**
+ * debugfs service to print active filter information.
+ */
+static int dvb_dmxdev_dbgfs_print(struct seq_file *s, void *p)
+{
+ int i;
+ struct dmxdev *dmxdev = s->private;
+ struct dmxdev_filter *filter;
+ int active_count = 0;
+ struct dmx_buffer_status buffer_status;
+ struct dmx_scrambling_bits scrambling_bits;
+ static const char * const pes_feeds[] = {"DEC", "PES", "DVR", "REC"};
+ int ret;
+
+ if (!dmxdev)
+ return 0;
+
+ for (i = 0; i < dmxdev->filternum; i++) {
+ filter = &dmxdev->filter[i];
+ if (filter->state >= DMXDEV_STATE_GO) {
+ active_count++;
+
+ seq_printf(s, "filter_%02d - ", i);
+
+ if (filter->type == DMXDEV_TYPE_SEC) {
+ seq_puts(s, "type: SEC, ");
+				seq_printf(s, "PID: %04d ",
+ filter->params.sec.pid);
+ scrambling_bits.pid = filter->params.sec.pid;
+ } else {
+ seq_printf(s, "type: %s, ",
+ pes_feeds[filter->params.pes.output]);
+ seq_printf(s, "PID: %04d ",
+ filter->params.pes.pid);
+ scrambling_bits.pid = filter->params.pes.pid;
+ }
+
+ dvb_dmxdev_get_scrambling_bits(filter,
+ &scrambling_bits);
+
+ if (filter->type == DMXDEV_TYPE_PES &&
+ filter->params.pes.output == DMX_OUT_TS_TAP)
+ ret = dvb_dvr_get_buffer_status(dmxdev,
+ O_RDONLY, &buffer_status);
+ else
+ ret = dvb_dmxdev_get_buffer_status(filter,
+ &buffer_status);
+ if (!ret) {
+ seq_printf(s, "size: %08d, ",
+ buffer_status.size);
+ seq_printf(s, "fullness: %08d, ",
+ buffer_status.fullness);
+ seq_printf(s, "error: %d, ",
+ buffer_status.error);
+ }
+
+ seq_printf(s, "scramble: %d, ",
+ scrambling_bits.value);
+ seq_printf(s, "secured: %d\n",
+ filter->sec_mode.is_secured);
+ }
+ }
+
+ if (!active_count)
+ seq_puts(s, "No active filters\n");
+
+ return 0;
+}
+
+static int dvb_dmxdev_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dvb_dmxdev_dbgfs_print, inode->i_private);
+}
+
+static const struct file_operations dbgfs_filters_fops = {
+ .open = dvb_dmxdev_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
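+
+/*
+ * A line of the resulting "filters" debugfs output would look roughly like
+ * the following (one line per filter, wrapped here; values illustrative):
+ *
+ *	filter_00 - type: REC, PID: 0256 size: 00192512,
+ *	fullness: 00000376, error: 0, scramble: 0, secured: 0
+ */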
+
int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
{
int i;
+ struct dmx_caps caps;
if (dmxdev->demux->open(dmxdev->demux) < 0)
return -EUSERS;
@@ -1234,8 +4874,12 @@ int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
if (!dmxdev->filter)
return -ENOMEM;
+ dmxdev->playback_mode = DMX_PB_MODE_PUSH;
+ dmxdev->demux->dvr_input_protected = 0;
+
mutex_init(&dmxdev->mutex);
spin_lock_init(&dmxdev->lock);
+ spin_lock_init(&dmxdev->dvr_in_lock);
for (i = 0; i < dmxdev->filternum; i++) {
dmxdev->filter[i].dev = dmxdev;
dmxdev->filter[i].buffer.data = NULL;
@@ -1249,6 +4893,19 @@ int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
dmxdev, DVB_DEVICE_DVR);
dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);
+ dvb_ringbuffer_init(&dmxdev->dvr_input_buffer, NULL, 8192);
+
+ /* Disable auto buffer flushing if plugin does not allow it */
+ if (dmxdev->demux->get_caps) {
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+ if (!(caps.caps & DMX_CAP_AUTO_BUFFER_FLUSH))
+ overflow_auto_flush = 0;
+ }
+
+ if (dmxdev->demux->debugfs_demux_dir)
+ debugfs_create_file("filters", S_IRUGO,
+ dmxdev->demux->debugfs_demux_dir, dmxdev,
+ &dbgfs_filters_fops);
return 0;
}
diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h
index 48c6cf92ab99..ad007f4fb9ac 100644
--- a/drivers/media/dvb-core/dmxdev.h
+++ b/drivers/media/dvb-core/dmxdev.h
@@ -33,7 +33,7 @@
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-
+#include <linux/kthread.h>
#include <linux/dvb/dmx.h>
#include "dvbdev.h"
@@ -57,10 +57,87 @@ enum dmxdev_state {
struct dmxdev_feed {
u16 pid;
+ struct dmx_indexing_params idx_params;
+ struct dmx_cipher_operations cipher_ops;
struct dmx_ts_feed *ts;
struct list_head next;
};
+struct dmxdev_sec_feed {
+ struct dmx_section_feed *feed;
+ struct dmx_cipher_operations cipher_ops;
+};
+
+struct dmxdev_events_queue {
+ /*
+	 * Indices used to manage the events queue.
+	 * read_index is advanced when the relevant data is read
+	 * from the buffer.
+	 * notified_index is the index from which the next events
+	 * are returned.
+	 * read_index <= notified_index <= write_index
+	 *
+	 * If the user reads data without first getting the respective
+	 * event, the read/notified indices are updated automatically
+	 * to reflect the actual data that exists in the buffer.
+ */
+ u32 read_index;
+ u32 write_index;
+ u32 notified_index;
+
+ /* Bytes read by user without having respective event in the queue */
+ u32 bytes_read_no_event;
+
+ /* internal tracking of PES and recording events */
+ u32 current_event_data_size;
+ u32 current_event_start_offset;
+
+ /* current setting of the events masking */
+ struct dmx_events_mask event_mask;
+
+ /*
+	 * Indicates whether the events used for data-reading from the
+	 * demux filter are enabled or not. These are the events a user
+	 * may wait for before calling read() on the demux filter.
+ */
+ int data_read_event_masked;
+
+ /*
+ * holds the current number of pending events in the
+ * events queue that are considered as a wake-up source
+ */
+ u32 wakeup_events_counter;
+
+ struct dmx_filter_event queue[DMX_EVENT_QUEUE_SIZE];
+};
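+
+/*
+ * Example of the index invariant documented above: with events 0..2
+ * written, event 0 already delivered to the user and its data read,
+ * the queue state would be
+ *
+ *	read_index == 1, notified_index == 1, write_index == 3
+ *
+ * so entries 1..2 are still pending while entry 0 may be overwritten.
+ */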
+
+#define DMX_MIN_INSERTION_REPETITION_TIME 25 /* in msec */
+struct ts_insertion_buffer {
+ /* work scheduled for insertion of this buffer */
+ struct delayed_work dwork;
+
+ struct list_head next;
+
+ /* buffer holding TS packets for insertion */
+ char *buffer;
+
+ /* buffer size */
+ size_t size;
+
+ /* buffer ID from user */
+ u32 identifier;
+
+ /* repetition time for the buffer insertion */
+ u32 repetition_time;
+
+ /* the recording filter to which this buffer belongs */
+ struct dmxdev_filter *dmxdevfilter;
+
+ /* indication whether insertion should be aborted */
+ int abort;
+};
+
struct dmxdev_filter {
union {
struct dmx_section_filter *sec;
@@ -69,7 +146,7 @@ struct dmxdev_filter {
union {
/* list of TS and PES feeds (struct dmxdev_feed) */
struct list_head ts;
- struct dmx_section_feed *sec;
+ struct dmxdev_sec_feed sec;
} feed;
union {
@@ -77,19 +154,37 @@ struct dmxdev_filter {
struct dmx_pes_filter_params pes;
} params;
+ struct dmxdev_events_queue events;
+
enum dmxdev_type type;
enum dmxdev_state state;
struct dmxdev *dev;
struct dvb_ringbuffer buffer;
+ void *priv_buff_handle;
+ enum dmx_buffer_mode buffer_mode;
struct mutex mutex;
+ /* for recording output */
+ enum dmx_tsp_format_t dmx_tsp_format;
+ u32 rec_chunk_size;
+
+ /* list of buffers used for insertion (struct ts_insertion_buffer) */
+ struct list_head insertion_buffers;
+
+ /* End-of-stream indication has been received */
+ int eos_state;
+
/* only for sections */
struct timer_list timer;
int todo;
u8 secheader[3];
-};
+ struct dmx_secure_mode sec_mode;
+
+ /* Decoder buffer(s) related */
+ struct dmx_decoder_buffers decoder_buffers;
+};
struct dmxdev {
struct dvb_device *dvbdev;
@@ -100,18 +195,52 @@ struct dmxdev {
int filternum;
int capabilities;
+#define DMXDEV_CAP_DUPLEX 0x01
+
+ enum dmx_playback_mode_t playback_mode;
+ dmx_source_t source;
unsigned int exit:1;
-#define DMXDEV_CAP_DUPLEX 1
+ unsigned int dvr_in_exit:1;
+ unsigned int dvr_processing_input:1;
+
struct dmx_frontend *dvr_orig_fe;
struct dvb_ringbuffer dvr_buffer;
+ void *dvr_priv_buff_handle;
+ enum dmx_buffer_mode dvr_buffer_mode;
+ struct dmxdev_events_queue dvr_output_events;
+ struct dmxdev_filter *dvr_feed;
+ int dvr_feeds_count;
+
+ struct dvb_ringbuffer dvr_input_buffer;
+ enum dmx_buffer_mode dvr_input_buffer_mode;
+ struct task_struct *dvr_input_thread;
+ /* DVR commands (data feed / OOB command) queue */
+ struct dvb_ringbuffer dvr_cmd_buffer;
+
#define DVR_BUFFER_SIZE (10*188*1024)
struct mutex mutex;
spinlock_t lock;
+ spinlock_t dvr_in_lock;
+};
+
+enum dvr_cmd {
+ DVR_DATA_FEED_CMD,
+ DVR_OOB_CMD
};
+struct dvr_command {
+ enum dvr_cmd type;
+ union {
+ struct dmx_oob_command oobcmd;
+ size_t data_feed_count;
+ } cmd;
+};
+
+#define DVR_CMDS_BUFFER_SIZE (sizeof(struct dvr_command)*500)
+
int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *);
void dvb_dmxdev_release(struct dmxdev *dmxdev);
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 0cc5e935166c..d45bcc55b76a 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -55,11 +55,118 @@ module_param(dvb_demux_feed_err_pkts, int, 0644);
MODULE_PARM_DESC(dvb_demux_feed_err_pkts,
"when set to 0, drop packets with the TEI bit set (1 by default)");
+/* counter advanced for each new dvb-demux device */
+static int dvb_demux_index;
+
+static int dvb_demux_performancecheck;
+module_param(dvb_demux_performancecheck, int, 0644);
+MODULE_PARM_DESC(dvb_demux_performancecheck,
+ "enable transport stream performance check, reported through debugfs");
+
#define dprintk_tscheck(x...) do { \
if (dvb_demux_tscheck && printk_ratelimit()) \
printk(x); \
} while (0)
+static const struct dvb_dmx_video_patterns mpeg2_seq_hdr = {
+ {0x00, 0x00, 0x01, 0xB3},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_MPEG_SEQ_HEADER
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_gop = {
+ {0x00, 0x00, 0x01, 0xB8},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_MPEG_GOP
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_iframe = {
+ {0x00, 0x00, 0x01, 0x00, 0x00, 0x08},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+ 6,
+ DMX_IDX_MPEG_I_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_pframe = {
+ {0x00, 0x00, 0x01, 0x00, 0x00, 0x10},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+ 6,
+ DMX_IDX_MPEG_P_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_bframe = {
+ {0x00, 0x00, 0x01, 0x00, 0x00, 0x18},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+ 6,
+ DMX_IDX_MPEG_B_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns h264_sps = {
+ {0x00, 0x00, 0x01, 0x07},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_SPS
+};
+
+static const struct dvb_dmx_video_patterns h264_pps = {
+ {0x00, 0x00, 0x01, 0x08},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_PPS
+};
+
+static const struct dvb_dmx_video_patterns h264_idr = {
+ {0x00, 0x00, 0x01, 0x05, 0x80},
+ {0xFF, 0xFF, 0xFF, 0x1F, 0x80},
+ 5,
+ DMX_IDX_H264_IDR_START
+};
+
+static const struct dvb_dmx_video_patterns h264_non_idr = {
+ {0x00, 0x00, 0x01, 0x01, 0x80},
+ {0xFF, 0xFF, 0xFF, 0x1F, 0x80},
+ 5,
+ DMX_IDX_H264_NON_IDR_START
+};
+
+static const struct dvb_dmx_video_patterns h264_non_access_unit_del = {
+ {0x00, 0x00, 0x01, 0x09},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_ACCESS_UNIT_DEL
+};
+
+static const struct dvb_dmx_video_patterns h264_non_sei = {
+ {0x00, 0x00, 0x01, 0x06},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_SEI
+};
+
+static const struct dvb_dmx_video_patterns vc1_seq_hdr = {
+ {0x00, 0x00, 0x01, 0x0F},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_VC1_SEQ_HEADER
+};
+
+static const struct dvb_dmx_video_patterns vc1_entry_point = {
+ {0x00, 0x00, 0x01, 0x0E},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_VC1_ENTRY_POINT
+};
+
+static const struct dvb_dmx_video_patterns vc1_frame = {
+ {0x00, 0x00, 0x01, 0x0D},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_VC1_FRAME_START
+};
+
+
/******************************************************************************
* static inlined helper functions
******************************************************************************/
@@ -69,9 +176,9 @@ static inline u16 section_length(const u8 *buf)
return 3 + ((buf[1] & 0x0f) << 8) + buf[2];
}
-static inline u16 ts_pid(const u8 *buf)
+static inline u8 ts_scrambling_ctrl(const u8 *buf)
{
- return ((buf[1] & 0x1f) << 8) + buf[2];
+ return (buf[3] >> 6) & 0x3;
}
static inline u8 payload(const u8 *tsp)
@@ -100,37 +207,360 @@ static void dvb_dmx_memcopy(struct dvb_demux_feed *f, u8 *d, const u8 *s,
memcpy(d, s, len);
}
+static u32 dvb_dmx_calc_time_delta(struct timespec past_time)
+{
+ struct timespec curr_time, delta_time;
+ u64 delta_time_us;
+
+ curr_time = current_kernel_time();
+ delta_time = timespec_sub(curr_time, past_time);
+ delta_time_us = ((s64)delta_time.tv_sec * USEC_PER_SEC) +
+ delta_time.tv_nsec / 1000;
+
+ return (u32)delta_time_us;
+}
+
/******************************************************************************
* Software filter functions
******************************************************************************/
+/*
+ * Check if two patterns are identical, taking mask into consideration.
+ * @pattern1: the first byte pattern to compare.
+ * @pattern2: the second byte pattern to compare.
+ * @mask: the bit mask to use.
+ * @pattern_size: the length of both patterns and the mask, in bytes.
+ *
+ * Return: 1 if patterns match, 0 otherwise.
+ */
+static inline int dvb_dmx_patterns_match(const u8 *pattern1, const u8 *pattern2,
+ const u8 *mask, size_t pattern_size)
+{
+ int i;
+
+ /*
+ * Assumption: it is OK to access pattern1, pattern2 and mask.
+ * This function performs no sanity checks to keep things fast.
+ */
+
+ for (i = 0; i < pattern_size; i++)
+ if ((pattern1[i] & mask[i]) != (pattern2[i] & mask[i]))
+ return 0;
+
+ return 1;
+}
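+
+/*
+ * A byte matches when (pattern1[i] & mask[i]) == (pattern2[i] & mask[i]).
+ * For the h264_sps pattern above, for instance, the 0x1F mask on the
+ * fourth byte ignores the forbidden_zero_bit and nal_ref_idc bits, so
+ * both 0x07 and 0x67 match NAL unit type 7 (SPS).
+ */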
+
+/*
+ * dvb_dmx_video_pattern_search -
+ * search for framing patterns in a given buffer.
+ *
+ * Optimized version: first search for a common substring, e.g. 0x00 0x00 0x01.
+ * If this string is found, go over all the given patterns (all must start
+ * with this string) and search for their ending in the buffer.
+ *
+ * Assumption: the patterns we look for do not spread over more than two
+ * buffers.
+ *
+ * @patterns: the full patterns information to look for.
+ * @patterns_num: the number of patterns to look for.
+ * @buf: the buffer to search.
+ * @buf_size: the size of the buffer to search. We search the entire buffer.
+ * @prefix_size_masks: a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started at the last buffer.
+ * Updated in this function for use in the next lookup.
+ * @results: lookup results (offset, type, used_prefix_size) per found pattern,
+ * up to DVB_DMX_MAX_FOUND_PATTERNS.
+ *
+ * Return:
+ * Number of patterns found (up to DVB_DMX_MAX_FOUND_PATTERNS).
+ * 0 if no pattern was found.
+ * A negative error value on failure.
+ */
+int dvb_dmx_video_pattern_search(
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+ int patterns_num,
+ const u8 *buf,
+ size_t buf_size,
+ struct dvb_dmx_video_prefix_size_masks *prefix_size_masks,
+ struct dvb_dmx_video_patterns_results *results)
+{
+ int i, j;
+ unsigned int current_size;
+ u32 prefix;
+ int found = 0;
+ int start_offset = 0;
+ /* the starting common substring to look for */
+ u8 string[] = {0x00, 0x00, 0x01};
+ /* the mask for the starting string */
+ u8 string_mask[] = {0xFF, 0xFF, 0xFF};
+ /* the size of the starting string (in bytes) */
+ size_t string_size = 3;
+
+ if ((patterns == NULL) || (patterns_num <= 0) || (buf == NULL))
+ return -EINVAL;
+
+ memset(results, 0, sizeof(struct dvb_dmx_video_patterns_results));
+
+ /*
+ * handle prefix - disregard string, simply check all patterns,
+ * looking for a matching suffix at the very beginning of the buffer.
+ */
+ for (j = 0; (j < patterns_num) && !found; j++) {
+ prefix = prefix_size_masks->size_mask[j];
+ current_size = 32;
+ while (prefix) {
+ if (prefix & (0x1 << (current_size - 1))) {
+ /*
+ * check that we don't look further
+ * than buf_size boundary
+ */
+ if ((int)(patterns[j]->size - current_size) >
+ buf_size)
+ break;
+
+ if (dvb_dmx_patterns_match(
+ (patterns[j]->pattern + current_size),
+ buf, (patterns[j]->mask + current_size),
+ (patterns[j]->size - current_size))) {
+
+ /*
+ * pattern found using prefix at the
+ * very beginning of the buffer, so
+ * offset is 0, but we already zeroed
+ * everything in the beginning of the
+ * function. that's why the next line
+ * is commented.
+ */
+ /* results->info[found].offset = 0; */
+ results->info[found].type =
+ patterns[j]->type;
+ results->info[found].used_prefix_size =
+ current_size;
+ found++;
+ /*
+ * save offset to start looking from
+ * in the buffer, to avoid reusing the
+ * data of a pattern we already found.
+ */
+ start_offset = (patterns[j]->size -
+ current_size);
+
+ if (found >= DVB_DMX_MAX_FOUND_PATTERNS)
+ goto next_prefix_lookup;
+ /*
+ * we don't want to search for the same
+ * pattern with several possible prefix
+ * sizes if we have already found it,
+ * so we break from the inner loop.
+ * since we incremented 'found', we
+ * will not search for additional
+ * patterns using a prefix - that would
+ * imply ambiguous patterns where one
+ * pattern can be included in another.
+ * the for loop will exit.
+ */
+ break;
+ }
+ }
+ prefix &= ~(0x1 << (current_size - 1));
+ current_size--;
+ }
+ }
+
+ /*
+ * Search buffer for entire pattern, starting with the string.
+ * Note the external for loop does not execute if buf_size is
+ * smaller than string_size (the cast to int is required, since
+ * size_t is unsigned).
+ */
+ for (i = start_offset; i < (int)(buf_size - string_size + 1); i++) {
+ if (dvb_dmx_patterns_match(string, (buf + i), string_mask,
+ string_size)) {
+ /* now search for patterns: */
+ for (j = 0; j < patterns_num; j++) {
+ /* avoid overflow to next buffer */
+ if ((i + patterns[j]->size) > buf_size)
+ continue;
+
+ if (dvb_dmx_patterns_match(
+ (patterns[j]->pattern + string_size),
+ (buf + i + string_size),
+ (patterns[j]->mask + string_size),
+ (patterns[j]->size - string_size))) {
+
+ results->info[found].offset = i;
+ results->info[found].type =
+ patterns[j]->type;
+ /*
+ * save offset to start next prefix
+ * lookup, to avoid reusing the data
+ * of any pattern we already found.
+ */
+ if ((i + patterns[j]->size) >
+ start_offset)
+ start_offset = (i +
+ patterns[j]->size);
+ /*
+ * did not use a prefix to find this
+ * pattern, but we zeroed everything
+ * in the beginning of the function.
+ * So no need to zero used_prefix_size
+ * for results->info[found]
+ */
+
+ found++;
+ if (found >= DVB_DMX_MAX_FOUND_PATTERNS)
+ goto next_prefix_lookup;
+ /*
+ * theoretically we don't have to break
+ * here, but we don't want to search
+ * for the other matching patterns on
+				 * the very same place in the
+ * buffer. That would mean the
+ * (pattern & mask) combinations are
+ * not unique. So we break from inner
+ * loop and move on to the next place
+ * in the buffer.
+ */
+ break;
+ }
+ }
+ }
+ }
+
+next_prefix_lookup:
+ /* check for possible prefix sizes for the next buffer */
+ for (j = 0; j < patterns_num; j++) {
+ prefix_size_masks->size_mask[j] = 0;
+ for (i = 1; i < patterns[j]->size; i++) {
+ /*
+ * avoid looking outside of the buffer
+ * or reusing previously used data.
+ */
+ if (i > (buf_size - start_offset))
+ break;
+
+ if (dvb_dmx_patterns_match(patterns[j]->pattern,
+ (buf + buf_size - i),
+ patterns[j]->mask, i)) {
+ prefix_size_masks->size_mask[j] |=
+ (1 << (i - 1));
+ }
+ }
+ }
+
+ return found;
+}
+EXPORT_SYMBOL(dvb_dmx_video_pattern_search);
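+
+/*
+ * Minimal caller-side sketch (buffer names are illustrative). The prefix
+ * state must be kept across consecutive buffers of the same stream so
+ * patterns that straddle a buffer boundary are still found:
+ *
+ *	const struct dvb_dmx_video_patterns *pat[] = { &h264_sps, &h264_idr };
+ *	struct dvb_dmx_video_prefix_size_masks prefix = { };
+ *	struct dvb_dmx_video_patterns_results res;
+ *	int n = dvb_dmx_video_pattern_search(pat, 2, buf, len, &prefix, &res);
+ */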
+
+/**
+ * dvb_dmx_notify_section_event() - Notify demux event for all filters of a
+ * specified section feed.
+ *
+ * @feed: dvb_demux_feed object
+ * @event: demux event to notify
+ * @should_lock: specifies whether the function should lock the demux
+ *
+ * Caller is responsible for locking the demux properly, either by doing the
+ * locking itself and setting 'should_lock' to 0, or having the function do it
+ * by setting 'should_lock' to 1.
+ */
+int dvb_dmx_notify_section_event(struct dvb_demux_feed *feed,
+ struct dmx_data_ready *event, int should_lock)
+{
+ struct dvb_demux_filter *f;
+
+ if (feed == NULL || event == NULL || feed->type != DMX_TYPE_SEC)
+ return -EINVAL;
+
+ if (!should_lock && !spin_is_locked(&feed->demux->lock))
+ return -EINVAL;
+
+ if (should_lock)
+ spin_lock(&feed->demux->lock);
+
+ f = feed->filter;
+ while (f && feed->feed.sec.is_filtering) {
+ feed->data_ready_cb.sec(&f->filter, event);
+ f = f->next;
+ }
+
+ if (should_lock)
+ spin_unlock(&feed->demux->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(dvb_dmx_notify_section_event);
+
+static int dvb_dmx_check_pes_end(struct dvb_demux_feed *feed)
+{
+ struct dmx_data_ready data;
+
+ if (!feed->pusi_seen)
+ return 0;
+
+ data.status = DMX_OK_PES_END;
+ data.data_length = 0;
+ data.pes_end.start_gap = 0;
+ data.pes_end.actual_length = feed->peslen;
+ data.pes_end.disc_indicator_set = 0;
+ data.pes_end.pes_length_mismatch = 0;
+ data.pes_end.stc = 0;
+ data.pes_end.tei_counter = feed->pes_tei_counter;
+ data.pes_end.cont_err_counter = feed->pes_cont_err_counter;
+ data.pes_end.ts_packets_num = feed->pes_ts_packets_num;
+
+ return feed->data_ready_cb.ts(&feed->feed.ts, &data);
+}
+
static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
const u8 *buf)
{
int count = payload(buf);
int p;
- //int ccok;
- //u8 cc;
+ int ccok;
+ u8 cc;
+ int ret;
if (count == 0)
return -1;
p = 188 - count;
- /*
cc = buf[3] & 0x0f;
- ccok = ((feed->cc + 1) & 0x0f) == cc;
+ if (feed->first_cc)
+ ccok = 1;
+ else
+ ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+ feed->first_cc = 0;
feed->cc = cc;
- if (!ccok)
- printk("missed packet!\n");
- */
- if (buf[1] & 0x40) // PUSI ?
- feed->peslen = 0xfffa;
+ /* PUSI ? */
+ if (buf[1] & 0x40) {
+ dvb_dmx_check_pes_end(feed);
+ feed->pusi_seen = 1;
+ feed->peslen = 0;
+ feed->pes_tei_counter = 0;
+ feed->pes_cont_err_counter = 0;
+ feed->pes_ts_packets_num = 0;
+ }
+
+ if (feed->pusi_seen == 0)
+ return 0;
+
+ ret = feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts, DMX_OK);
- feed->peslen += count;
+ /* Verify TS packet was copied successfully */
+ if (!ret) {
+ feed->pes_cont_err_counter += !ccok;
+ feed->pes_tei_counter += (buf[1] & 0x80) ? 1 : 0;
+ feed->pes_ts_packets_num++;
+ feed->peslen += count;
+ }
- return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts);
+ return ret;
}
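+
+/*
+ * Continuity-counter accounting above, by example: cc is a 4-bit counter
+ * cycling 0..15, so ((15 + 1) & 0x0f) == 0 keeps ccok set across the
+ * wrap-around, while first_cc suppresses a false discontinuity on the
+ * very first packet of a feed.
+ */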
static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
@@ -152,7 +582,7 @@ static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
return 0;
return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen,
- NULL, 0, &f->filter);
+ NULL, 0, &f->filter, DMX_OK);
}
static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
@@ -169,10 +599,28 @@ static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
return 0;
if (sec->check_crc) {
+ struct timespec pre_crc_time;
+
+ if (dvb_demux_performancecheck)
+ pre_crc_time = current_kernel_time();
+
section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0);
if (section_syntax_indicator &&
- demux->check_crc32(feed, sec->secbuf, sec->seclen))
+ demux->check_crc32(feed, sec->secbuf, sec->seclen)) {
+ if (dvb_demux_performancecheck)
+ demux->total_crc_time +=
+ dvb_dmx_calc_time_delta(pre_crc_time);
+
+ /* Notify on CRC error */
+ feed->cb.sec(NULL, 0, NULL, 0,
+ &f->filter, DMX_CRC_ERROR);
+
return -1;
+ }
+
+ if (dvb_demux_performancecheck)
+ demux->total_crc_time +=
+ dvb_dmx_calc_time_delta(pre_crc_time);
}
do {
@@ -287,7 +735,7 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
return 0;
}
-static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
+static int dvb_dmx_swfilter_section_one_packet(struct dvb_demux_feed *feed,
const u8 *buf)
{
u8 p, count;
@@ -302,7 +750,16 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
p = 188 - count; /* payload start */
cc = buf[3] & 0x0f;
- ccok = ((feed->cc + 1) & 0x0f) == cc;
+ if (feed->first_cc)
+ ccok = 1;
+ else
+ ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+ /* discard TS packets holding sections with TEI bit set */
+ if (buf[1] & 0x80)
+ return -EINVAL;
+
+ feed->first_cc = 0;
feed->cc = cc;
if (buf[3] & 0x20) {
@@ -356,28 +813,656 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
return 0;
}
+/*
+ * dvb_dmx_swfilter_section_packet - wrapper for section filtering of a
+ * single TS packet.
+ *
+ * @feed: dvb demux feed
+ * @buf: buffer containing the TS packet
+ * @should_lock: specifies demux locking semantics: if not set, proper demux
+ * locking is expected to have been done by the caller.
+ *
+ * Return error status
+ */
+int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
+ const u8 *buf, int should_lock)
+{
+ int ret;
+
+ if (!should_lock && !spin_is_locked(&feed->demux->lock)) {
+ pr_err("%s: demux spinlock should have been locked\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (should_lock)
+ spin_lock(&feed->demux->lock);
+
+ ret = dvb_dmx_swfilter_section_one_packet(feed, buf);
+
+ if (should_lock)
+ spin_unlock(&feed->demux->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_section_packet);
+
+static int dvb_demux_idx_event_sort(struct dmx_index_event_info *curr,
+ struct dmx_index_event_info *new)
+{
+ if (curr->match_tsp_num > new->match_tsp_num)
+ return 0;
+
+ if (curr->match_tsp_num < new->match_tsp_num)
+ return 1;
+ /*
+ * In case TSP numbers are equal, sort according to event type giving
+ * priority to PUSI events first, then RAI and finally framing events.
+ */
+ if ((curr->type & DMX_IDX_RAI && new->type & DMX_IDX_PUSI) ||
+ (!(curr->type & DMX_IDX_PUSI) && !(curr->type & DMX_IDX_RAI) &&
+ new->type & (DMX_IDX_PUSI | DMX_IDX_RAI)))
+ return 0;
+
+ return 1;
+}
+
+static int dvb_demux_save_idx_event(struct dvb_demux_feed *feed,
+ struct dmx_index_event_info *idx_event,
+ int traverse_from_tail)
+{
+ struct dmx_index_entry *idx_entry;
+ struct dmx_index_entry *curr_entry;
+ struct list_head *pos;
+
+ /* get entry from free list */
+ if (list_empty(&feed->rec_info->idx_info.free_list)) {
+ pr_err("%s: index free list is empty\n", __func__);
+ return -ENOMEM;
+ }
+
+ idx_entry = list_first_entry(&feed->rec_info->idx_info.free_list,
+ struct dmx_index_entry, next);
+ list_del(&idx_entry->next);
+
+ idx_entry->event = *idx_event;
+
+ pos = &feed->rec_info->idx_info.ready_list;
+ if (traverse_from_tail) {
+ list_for_each_entry_reverse(curr_entry,
+ &feed->rec_info->idx_info.ready_list, next) {
+ if (dvb_demux_idx_event_sort(&curr_entry->event,
+ idx_event)) {
+ pos = &curr_entry->next;
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry(curr_entry,
+ &feed->rec_info->idx_info.ready_list, next) {
+ if (!dvb_demux_idx_event_sort(&curr_entry->event,
+ idx_event)) {
+ pos = &curr_entry->next;
+ break;
+ }
+ }
+ }
+
+ if (traverse_from_tail)
+ list_add(&idx_entry->next, pos);
+ else
+ list_add_tail(&idx_entry->next, pos);
+
+ return 0;
+}
+
+int dvb_demux_push_idx_event(struct dvb_demux_feed *feed,
+ struct dmx_index_event_info *idx_event, int should_lock)
+{
+ int ret;
+
+ if (!should_lock && !spin_is_locked(&feed->demux->lock))
+ return -EINVAL;
+
+ if (should_lock)
+ spin_lock(&feed->demux->lock);
+ ret = dvb_demux_save_idx_event(feed, idx_event, 1);
+ if (should_lock)
+ spin_unlock(&feed->demux->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(dvb_demux_push_idx_event);
+
+static inline void dvb_dmx_notify_indexing(struct dvb_demux_feed *feed)
+{
+ struct dmx_data_ready dmx_data_ready;
+ struct dmx_index_entry *curr_entry;
+ struct list_head *n, *pos;
+
+ dmx_data_ready.status = DMX_OK_IDX;
+
+ list_for_each_safe(pos, n, &feed->rec_info->idx_info.ready_list) {
+ curr_entry = list_entry(pos, struct dmx_index_entry, next);
+
+ if ((feed->rec_info->idx_info.min_pattern_tsp_num == (u64)-1) ||
+ (curr_entry->event.match_tsp_num <=
+ feed->rec_info->idx_info.min_pattern_tsp_num)) {
+ dmx_data_ready.idx_event = curr_entry->event;
+ feed->data_ready_cb.ts(&feed->feed.ts, &dmx_data_ready);
+ list_del(&curr_entry->next);
+ list_add_tail(&curr_entry->next,
+ &feed->rec_info->idx_info.free_list);
+ }
+ }
+}
+
+void dvb_dmx_notify_idx_events(struct dvb_demux_feed *feed, int should_lock)
+{
+ if (!should_lock && !spin_is_locked(&feed->demux->lock))
+ return;
+
+ if (should_lock)
+ spin_lock(&feed->demux->lock);
+ dvb_dmx_notify_indexing(feed);
+ if (should_lock)
+ spin_unlock(&feed->demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_notify_idx_events);
+
+static void dvb_dmx_process_pattern_result(struct dvb_demux_feed *feed,
+ struct dvb_dmx_video_patterns_results *patterns, int pattern,
+ u64 curr_stc, u64 prev_stc,
+ u64 curr_match_tsp, u64 prev_match_tsp,
+ u64 curr_pusi_tsp, u64 prev_pusi_tsp)
+{
+ int mpeg_frame_start;
+ int h264_frame_start;
+ int vc1_frame_start;
+ int seq_start;
+ u64 frame_end_in_seq;
+ struct dmx_index_event_info idx_event;
+
+ idx_event.pid = feed->pid;
+ if (patterns->info[pattern].used_prefix_size) {
+ idx_event.match_tsp_num = prev_match_tsp;
+ idx_event.last_pusi_tsp_num = prev_pusi_tsp;
+ idx_event.stc = prev_stc;
+ } else {
+ idx_event.match_tsp_num = curr_match_tsp;
+ idx_event.last_pusi_tsp_num = curr_pusi_tsp;
+ idx_event.stc = curr_stc;
+ }
+
+ /* notify on frame-end if needed */
+ if (feed->prev_frame_valid) {
+ if (feed->prev_frame_type & DMX_IDX_MPEG_I_FRAME_START) {
+ idx_event.type = DMX_IDX_MPEG_I_FRAME_END;
+ frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_MPEG_P_FRAME_START) {
+ idx_event.type = DMX_IDX_MPEG_P_FRAME_END;
+ frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_MPEG_B_FRAME_START) {
+ idx_event.type = DMX_IDX_MPEG_B_FRAME_END;
+ frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_H264_IDR_START) {
+ idx_event.type = DMX_IDX_H264_IDR_END;
+ frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_H264_NON_IDR_START) {
+ idx_event.type = DMX_IDX_H264_NON_IDR_END;
+ frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+ } else {
+ idx_event.type = DMX_IDX_VC1_FRAME_END;
+ frame_end_in_seq = DMX_IDX_VC1_FIRST_SEQ_FRAME_END;
+ }
+
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+
+ if (feed->first_frame_in_seq_notified &&
+ feed->idx_params.types & frame_end_in_seq) {
+ idx_event.type = frame_end_in_seq;
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ feed->first_frame_in_seq_notified = 0;
+ }
+ }
+
+ seq_start = patterns->info[pattern].type &
+ (DMX_IDX_MPEG_SEQ_HEADER | DMX_IDX_H264_SPS |
+ DMX_IDX_VC1_SEQ_HEADER);
+
+ /* did we find start of sequence/SPS? */
+ if (seq_start) {
+ feed->first_frame_in_seq = 1;
+ feed->first_frame_in_seq_notified = 0;
+ feed->prev_frame_valid = 0;
+ idx_event.type = patterns->info[pattern].type;
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ return;
+ }
+
+ mpeg_frame_start = patterns->info[pattern].type &
+ (DMX_IDX_MPEG_I_FRAME_START |
+ DMX_IDX_MPEG_P_FRAME_START |
+ DMX_IDX_MPEG_B_FRAME_START);
+
+ h264_frame_start = patterns->info[pattern].type &
+ (DMX_IDX_H264_IDR_START | DMX_IDX_H264_NON_IDR_START);
+
+ vc1_frame_start = patterns->info[pattern].type &
+ DMX_IDX_VC1_FRAME_START;
+
+ if (!mpeg_frame_start && !h264_frame_start && !vc1_frame_start) {
+ /* neither sequence nor frame, notify on the entry if needed */
+ idx_event.type = patterns->info[pattern].type;
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ feed->prev_frame_valid = 0;
+ return;
+ }
+
+ /* notify on first frame in sequence/sps if needed */
+ if (feed->first_frame_in_seq) {
+ feed->first_frame_in_seq = 0;
+ feed->first_frame_in_seq_notified = 1;
+ if (mpeg_frame_start)
+ idx_event.type = DMX_IDX_MPEG_FIRST_SEQ_FRAME_START;
+ else if (h264_frame_start)
+ idx_event.type = DMX_IDX_H264_FIRST_SPS_FRAME_START;
+ else
+ idx_event.type = DMX_IDX_VC1_FIRST_SEQ_FRAME_START;
+
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ }
+
+ /* notify on frame start if needed */
+ idx_event.type = patterns->info[pattern].type;
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+
+ feed->prev_frame_valid = 1;
+ feed->prev_frame_type = patterns->info[pattern].type;
+}
+
+void dvb_dmx_process_idx_pattern(struct dvb_demux_feed *feed,
+ struct dvb_dmx_video_patterns_results *patterns, int pattern,
+ u64 curr_stc, u64 prev_stc,
+ u64 curr_match_tsp, u64 prev_match_tsp,
+ u64 curr_pusi_tsp, u64 prev_pusi_tsp)
+{
+ spin_lock(&feed->demux->lock);
+ dvb_dmx_process_pattern_result(feed,
+ patterns, pattern,
+ curr_stc, prev_stc,
+ curr_match_tsp, prev_match_tsp,
+ curr_pusi_tsp, prev_pusi_tsp);
+ spin_unlock(&feed->demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_process_idx_pattern);
+
+static void dvb_dmx_index(struct dvb_demux_feed *feed,
+ const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
+{
+ int i;
+ int p;
+ u64 stc;
+ int found_patterns;
+ int count = payload(buf);
+ u64 min_pattern_tsp_num;
+ struct dvb_demux_feed *tmp_feed;
+ struct dvb_demux *demux = feed->demux;
+ struct dmx_index_event_info idx_event;
+ struct dvb_dmx_video_patterns_results patterns;
+
+ if (feed->demux->convert_ts)
+ feed->demux->convert_ts(feed, timestamp, &stc);
+ else
+ stc = 0;
+
+ idx_event.pid = feed->pid;
+ idx_event.stc = stc;
+ idx_event.match_tsp_num = feed->rec_info->ts_output_count;
+
+ /* PUSI ? */
+ if (buf[1] & 0x40) {
+ feed->curr_pusi_tsp_num = feed->rec_info->ts_output_count;
+ if (feed->idx_params.types & DMX_IDX_PUSI) {
+ idx_event.type = DMX_IDX_PUSI;
+ idx_event.last_pusi_tsp_num =
+ feed->curr_pusi_tsp_num;
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ }
+ }
+
+ /*
+	 * If we have not yet encountered a TS packet with a PUSI indication,
+	 * we cannot report index entries, since each reported entry must
+	 * carry the number of the TS packet with PUSI indication that
+	 * precedes the TS packet the entry points to.
+ */
+ if (feed->curr_pusi_tsp_num == (u64)-1) {
+ dvb_dmx_notify_indexing(feed);
+ return;
+ }
+
+ if ((feed->idx_params.types & DMX_IDX_RAI) && /* index RAI? */
+ (buf[3] & 0x20) && /* adaptation field exists? */
+ (buf[4] > 0) && /* adaptation field len > 0 ? */
+ (buf[5] & 0x40)) { /* RAI is set? */
+ idx_event.type = DMX_IDX_RAI;
+ idx_event.last_pusi_tsp_num =
+ feed->curr_pusi_tsp_num;
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ }
+
+ /*
+ * if no pattern search is required, or the TS packet has no payload,
+ * pattern search is not executed.
+ */
+ if (!feed->pattern_num || !count) {
+ dvb_dmx_notify_indexing(feed);
+ return;
+ }
+
+ p = 188 - count; /* payload start */
+
+ found_patterns =
+ dvb_dmx_video_pattern_search(feed->patterns,
+ feed->pattern_num, &buf[p], count,
+ &feed->prefix_size, &patterns);
+
+ for (i = 0; i < found_patterns; i++)
+ dvb_dmx_process_pattern_result(feed, &patterns, i,
+ stc, feed->prev_stc,
+ feed->rec_info->ts_output_count, feed->prev_tsp_num,
+ feed->curr_pusi_tsp_num, feed->prev_pusi_tsp_num);
+
+ feed->prev_tsp_num = feed->rec_info->ts_output_count;
+ feed->prev_pusi_tsp_num = feed->curr_pusi_tsp_num;
+ feed->prev_stc = stc;
+ feed->last_pattern_tsp_num = feed->rec_info->ts_output_count;
+
+ /*
+ * it is possible to have a TS packet that has a prefix of
+ * a video pattern but the video pattern is not identified yet
+ * until we get the next TS packet of that PID. When we get
+ * the next TS packet of that PID, pattern-search would
+ * detect that we have a new index entry that starts in the
+ * previous TS packet.
+ * In order to notify the user on index entries with match_tsp_num
+ * in ascending order, index events with match_tsp_num up to
+ * the last_pattern_tsp_num are notified now to the user,
+ * the rest can't be notified now as we might hit the above
+	 * scenario and cause the events not to be notified in
+ * ascending order of match_tsp_num.
+ */
+ if (feed->rec_info->idx_info.pattern_search_feeds_num == 1) {
+ /*
+ * optimization for case we have only one PID
+ * with video pattern search, in this case
+ * min_pattern_tsp_num is simply updated to the new
+ * TS packet number of the PID with pattern search.
+ */
+ feed->rec_info->idx_info.min_pattern_tsp_num =
+ feed->last_pattern_tsp_num;
+ dvb_dmx_notify_indexing(feed);
+ return;
+ }
+
+ /*
+ * if we have more than one PID with pattern search,
+ * min_pattern_tsp_num needs to be updated now based on
+ * last_pattern_tsp_num of all PIDs with pattern search.
+ */
+ min_pattern_tsp_num = (u64)-1;
+ i = feed->rec_info->idx_info.pattern_search_feeds_num;
+ list_for_each_entry(tmp_feed, &demux->feed_list, list_head) {
+ if ((tmp_feed->state != DMX_STATE_GO) ||
+ (tmp_feed->type != DMX_TYPE_TS) ||
+ (tmp_feed->feed.ts.buffer.ringbuff !=
+ feed->feed.ts.buffer.ringbuff))
+ continue;
+
+ if ((tmp_feed->last_pattern_tsp_num != (u64)-1) &&
+ ((min_pattern_tsp_num == (u64)-1) ||
+ (tmp_feed->last_pattern_tsp_num <
+ min_pattern_tsp_num)))
+ min_pattern_tsp_num = tmp_feed->last_pattern_tsp_num;
+
+ if (tmp_feed->pattern_num) {
+ i--;
+ if (i == 0)
+ break;
+ }
+ }
+
+ feed->rec_info->idx_info.min_pattern_tsp_num = min_pattern_tsp_num;
+
+ /* notify all index entries up to min_pattern_tsp_num */
+ dvb_dmx_notify_indexing(feed);
+}
+
+static inline void dvb_dmx_swfilter_output_packet(
+ struct dvb_demux_feed *feed,
+ const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
+{
+ /*
+ * if we output 192 packet with timestamp at head of packet,
+ * output the timestamp now before the 188 TS packet
+ */
+ if (feed->tsp_out_format == DMX_TSP_FORMAT_192_HEAD)
+ feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
+ 0, &feed->feed.ts, DMX_OK);
+
+ feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, DMX_OK);
+
+ /*
+ * if we output 192 packet with timestamp at tail of packet,
+ * output the timestamp now after the 188 TS packet
+ */
+ if (feed->tsp_out_format == DMX_TSP_FORMAT_192_TAIL)
+ feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
+ 0, &feed->feed.ts, DMX_OK);
+
+ if (feed->idx_params.enable)
+ dvb_dmx_index(feed, buf, timestamp);
+
+ feed->rec_info->ts_output_count++;
+}
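+
+/*
+ * Resulting output layout per tsp_out_format ("TS" being the 188-byte
+ * packet, "STMP" the TIMESTAMP_LEN-byte timestamp):
+ *
+ *	DMX_TSP_FORMAT_192_HEAD:  | STMP | TS |
+ *	DMX_TSP_FORMAT_192_TAIL:  | TS | STMP |
+ *	any other format:         | TS |
+ */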
+
+static inline void dvb_dmx_configure_decoder_fullness(
+ struct dvb_demux *demux,
+ int initialize)
+{
+ struct dvb_demux_feed *feed;
+ int j;
+
+ for (j = 0; j < demux->feednum; j++) {
+ feed = &demux->feed[j];
+
+ if ((feed->state != DMX_STATE_GO) ||
+ (feed->type != DMX_TYPE_TS) ||
+ !(feed->ts_type & TS_DECODER))
+ continue;
+
+ if (initialize) {
+ if (demux->decoder_fullness_init)
+ demux->decoder_fullness_init(feed);
+ } else {
+ if (demux->decoder_fullness_abort)
+ demux->decoder_fullness_abort(feed);
+ }
+ }
+}
+
+static inline int dvb_dmx_swfilter_buffer_check(
+ struct dvb_demux *demux,
+ u16 pid)
+{
+ int desired_space;
+ int ret;
+ struct dmx_ts_feed *ts;
+ struct dvb_demux_filter *f;
+ struct dvb_demux_feed *feed;
+ int was_locked;
+ int i, j;
+
+ if (likely(spin_is_locked(&demux->lock)))
+ was_locked = 1;
+ else
+ was_locked = 0;
+
+ /*
+ * Check that there's enough free space for data output.
+	 * If there is no space, wait for it (block).
+	 * Since this function is called with the spinlock
+	 * held, the lock must be released first.
+	 * Once we get control back, the lock is re-acquired
+	 * and we check that the filter is still valid.
+ */
+ for (j = 0; j < demux->feednum; j++) {
+ feed = &demux->feed[j];
+
+ if (demux->sw_filter_abort)
+ return -ENODEV;
+
+ if ((feed->state != DMX_STATE_GO) ||
+ ((feed->pid != pid) && (feed->pid != 0x2000)))
+ continue;
+
+ if (feed->secure_mode.is_secured &&
+ !dvb_dmx_is_rec_feed(feed))
+ return 0;
+
+ if (feed->type == DMX_TYPE_TS) {
+ desired_space = 192; /* upper bound */
+ ts = &feed->feed.ts;
+
+ if (feed->ts_type & TS_PACKET) {
+ if (likely(was_locked))
+ spin_unlock(&demux->lock);
+
+ ret = demux->buffer_ctrl.ts(ts,
+ desired_space, 1);
+
+ if (likely(was_locked))
+ spin_lock(&demux->lock);
+
+ if (ret < 0)
+ continue;
+ }
+
+ if (demux->sw_filter_abort)
+ return -ENODEV;
+
+ if (!ts->is_filtering)
+ continue;
+
+ if ((feed->ts_type & TS_DECODER) &&
+ (demux->decoder_fullness_wait)) {
+ if (likely(was_locked))
+ spin_unlock(&demux->lock);
+
+ ret = demux->decoder_fullness_wait(
+ feed,
+ desired_space);
+
+ if (likely(was_locked))
+ spin_lock(&demux->lock);
+
+ if (ret < 0)
+ continue;
+ }
+
+ continue;
+ }
+
+ /* else - section case */
+ desired_space = feed->feed.sec.tsfeedp + 188; /* upper bound */
+ for (i = 0; i < demux->filternum; i++) {
+ if (demux->sw_filter_abort)
+ return -EPERM;
+
+ if (!feed->feed.sec.is_filtering)
+ continue;
+
+ f = &demux->filter[i];
+ if (f->feed != feed)
+ continue;
+
+ if (likely(was_locked))
+ spin_unlock(&demux->lock);
+
+ ret = demux->buffer_ctrl.sec(&f->filter,
+ desired_space, 1);
+
+ if (likely(was_locked))
+ spin_lock(&demux->lock);
+
+ if (ret < 0)
+ break;
+ }
+ }
+
+ return 0;
+}
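
The buffer check above leans on a drop-and-reacquire idiom; condensed, it
looks like the sketch below (blocking_wait stands in for buffer_ctrl.ts/sec
or decoder_fullness_wait):

	if (was_locked)
		spin_unlock(&demux->lock);	/* allow the wait to sleep */
	ret = blocking_wait(feed, desired_space);
	if (was_locked)
		spin_lock(&demux->lock);	/* relock, then revalidate */
	if (demux->sw_filter_abort)		/* state may have changed */
		return -ENODEV;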
+
static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
- const u8 *buf)
+ const u8 *buf, const u8 timestamp[TIMESTAMP_LEN])
{
+ u16 pid = ts_pid(buf);
+ u8 scrambling_bits = ts_scrambling_ctrl(buf);
+ struct dmx_data_ready dmx_data_ready;
+
+ /*
+ * Notify on scrambling status change only when we move
+ * from clear (0) to non-clear and vice versa
+ */
+ if ((scrambling_bits && !feed->scrambling_bits) ||
+ (!scrambling_bits && feed->scrambling_bits)) {
+ dmx_data_ready.status = DMX_OK_SCRAMBLING_STATUS;
+ dmx_data_ready.data_length = 0;
+ dmx_data_ready.scrambling_bits.pid = pid;
+ dmx_data_ready.scrambling_bits.old_value =
+ feed->scrambling_bits;
+ dmx_data_ready.scrambling_bits.new_value = scrambling_bits;
+
+ if (feed->type == DMX_TYPE_SEC)
+ dvb_dmx_notify_section_event(feed, &dmx_data_ready, 0);
+ else if (feed->feed.ts.is_filtering)
+ feed->data_ready_cb.ts(&feed->feed.ts, &dmx_data_ready);
+ }
+
+ feed->scrambling_bits = scrambling_bits;
+
switch (feed->type) {
case DMX_TYPE_TS:
if (!feed->feed.ts.is_filtering)
break;
if (feed->ts_type & TS_PACKET) {
- if (feed->ts_type & TS_PAYLOAD_ONLY)
- dvb_dmx_swfilter_payload(feed, buf);
- else
- feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+ if (feed->ts_type & TS_PAYLOAD_ONLY) {
+ if (!feed->secure_mode.is_secured)
+ dvb_dmx_swfilter_payload(feed, buf);
+ } else {
+ dvb_dmx_swfilter_output_packet(feed,
+ buf, timestamp);
+ }
}
- if (feed->ts_type & TS_DECODER)
+ if ((feed->ts_type & TS_DECODER) &&
+ !feed->secure_mode.is_secured)
if (feed->demux->write_to_decoder)
feed->demux->write_to_decoder(feed, buf, 188);
break;
case DMX_TYPE_SEC:
- if (!feed->feed.sec.is_filtering)
+ if (!feed->feed.sec.is_filtering ||
+ feed->secure_mode.is_secured)
break;
- if (dvb_dmx_swfilter_section_packet(feed, buf) < 0)
+ if (dvb_dmx_swfilter_section_one_packet(feed, buf) < 0)
feed->feed.sec.seclen = feed->feed.sec.secbufp = 0;
break;
@@ -391,7 +1476,8 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
((f)->feed.ts.is_filtering) && \
(((f)->ts_type & (TS_PACKET | TS_DEMUX)) == TS_PACKET))
-static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
+static void dvb_dmx_swfilter_one_packet(struct dvb_demux *demux, const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
{
struct dvb_demux_feed *feed;
u16 pid = ts_pid(buf);
@@ -420,9 +1506,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
(u64)timespec_to_ns(&delta_time);
speed_timedelta = div64_u64(speed_timedelta,
1000000); /* nsec -> usec */
- printk(KERN_INFO "TS speed %llu Kbits/sec \n",
- div64_u64(speed_bytes,
- speed_timedelta));
+ pr_info("TS speed %llu Kbits/sec\n",
+ div64_u64(speed_bytes,
+ speed_timedelta));
}
demux->speed_last_time = cur_time;
@@ -431,11 +1517,12 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
}
if (buf[1] & 0x80) {
- dprintk_tscheck("TEI detected. "
- "PID=0x%x data1=0x%x\n",
- pid, buf[1]);
- /* data in this packet can't be trusted - drop it unless
- * module option dvb_demux_feed_err_pkts is set */
+ dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n", pid,
+ buf[1]);
+ /*
+ * data in this packet can't be trusted - drop it unless
+ * module option dvb_demux_feed_err_pkts is set
+ */
if (!dvb_demux_feed_err_pkts)
return;
} else /* if TEI bit is set, pid may be wrong- skip pkt counter */
@@ -444,10 +1531,12 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
if (pid < MAX_PID) {
if (buf[3] & 0x10)
demux->cnt_storage[pid] =
- (demux->cnt_storage[pid] + 1) & 0xf;
+ (demux->cnt_storage[pid] + 1) &
+ 0xf;
if ((buf[3] & 0xf) != demux->cnt_storage[pid]) {
- dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n",
+ dprintk_tscheck(
+ "TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n",
pid, demux->cnt_storage[pid],
buf[3] & 0xf);
demux->cnt_storage[pid] = buf[3] & 0xf;
@@ -456,48 +1545,76 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
/* end check */
}
+ if (demux->playback_mode == DMX_PB_MODE_PULL)
+ if (dvb_dmx_swfilter_buffer_check(demux, pid) < 0)
+ return;
+
list_for_each_entry(feed, &demux->feed_list, list_head) {
if ((feed->pid != pid) && (feed->pid != 0x2000))
continue;
- /* copy each packet only once to the dvr device, even
- * if a PID is in multiple filters (e.g. video + PCR) */
+ /*
+ * copy each packet only once to the dvr device, even
+ * if a PID is in multiple filters (e.g. video + PCR)
+ */
if ((DVR_FEED(feed)) && (dvr_done++))
continue;
if (feed->pid == pid)
- dvb_dmx_swfilter_packet_type(feed, buf);
- else if (feed->pid == 0x2000)
- feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+ dvb_dmx_swfilter_packet_type(feed, buf, timestamp);
+ else if ((feed->pid == 0x2000) &&
+ (feed->feed.ts.is_filtering))
+ dvb_dmx_swfilter_output_packet(feed, buf, timestamp);
}
}
+void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
+{
+ spin_lock(&demux->lock);
+ dvb_dmx_swfilter_one_packet(demux, buf, timestamp);
+ spin_unlock(&demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_packet);
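
A minimal usage sketch, assuming the caller holds one 192-byte packet with
a leading timestamp (pkt192 is hypothetical):

	u8 pkt192[192];	/* 4-byte timestamp, then a 188-byte TS packet */

	/* ... fill pkt192 from the stream source ... */
	dvb_dmx_swfilter_packet(demux, pkt192 + TIMESTAMP_LEN, pkt192);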
+
void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
size_t count)
{
- unsigned long flags;
+ struct timespec pre_time;
+ u8 timestamp[TIMESTAMP_LEN] = {0};
+
+ if (dvb_demux_performancecheck)
+ pre_time = current_kernel_time();
+
+ spin_lock(&demux->lock);
- spin_lock_irqsave(&demux->lock, flags);
+ demux->sw_filter_abort = 0;
+ dvb_dmx_configure_decoder_fullness(demux, 1);
while (count--) {
if (buf[0] == 0x47)
- dvb_dmx_swfilter_packet(demux, buf);
+ dvb_dmx_swfilter_one_packet(demux, buf, timestamp);
buf += 188;
}
- spin_unlock_irqrestore(&demux->lock, flags);
-}
+ spin_unlock(&demux->lock);
+ if (dvb_demux_performancecheck)
+ demux->total_process_time += dvb_dmx_calc_time_delta(pre_time);
+}
EXPORT_SYMBOL(dvb_dmx_swfilter_packets);
static inline int find_next_packet(const u8 *buf, int pos, size_t count,
- const int pktsize)
+ const int pktsize, const int leadingbytes)
{
int start = pos, lost;
while (pos < count) {
- if (buf[pos] == 0x47 ||
- (pktsize == 204 && buf[pos] == 0xB8))
+ if ((buf[pos] == 0x47 && !leadingbytes) ||
+ (pktsize == 204 && buf[pos] == 0xB8) ||
+ (pktsize == 192 && leadingbytes &&
+ (pos+leadingbytes < count) &&
+ buf[pos+leadingbytes] == 0x47))
break;
pos++;
}
@@ -506,8 +1623,11 @@ static inline int find_next_packet(const u8 *buf, int pos, size_t count,
if (lost) {
/* This garbage is part of a valid packet? */
int backtrack = pos - pktsize;
+
if (backtrack >= 0 && (buf[backtrack] == 0x47 ||
- (pktsize == 204 && buf[backtrack] == 0xB8)))
+ (pktsize == 204 && buf[backtrack] == 0xB8) ||
+ (pktsize == 192 &&
+ buf[backtrack+leadingbytes] == 0x47)))
return backtrack;
}
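
Condensed, the per-position sync test that find_next_packet now applies is
equivalent to the following sketch (not the patch's code):

	static bool is_sync_at(const u8 *buf, int pos, size_t count,
			       int pktsize, int leadingbytes)
	{
		if (pktsize == 192 && leadingbytes)
			return (pos + leadingbytes < count) &&
				(buf[pos + leadingbytes] == 0x47);
		return (buf[pos] == 0x47) ||
			(pktsize == 204 && buf[pos] == 0xB8);
	}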
@@ -516,13 +1636,20 @@ static inline int find_next_packet(const u8 *buf, int pos, size_t count,
/* Filter all pktsize= 188 or 204 sized packets and skip garbage. */
static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf,
- size_t count, const int pktsize)
+ size_t count, const int pktsize, const int leadingbytes)
{
int p = 0, i, j;
const u8 *q;
- unsigned long flags;
+ struct timespec pre_time;
+ u8 timestamp[TIMESTAMP_LEN];
+
+ if (dvb_demux_performancecheck)
+ pre_time = current_kernel_time();
+
+ spin_lock(&demux->lock);
- spin_lock_irqsave(&demux->lock, flags);
+ demux->sw_filter_abort = 0;
+ dvb_dmx_configure_decoder_fullness(demux, 1);
if (demux->tsbufp) { /* tsbuf[0] is now 0x47. */
i = demux->tsbufp;
@@ -533,14 +1660,36 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf,
goto bailout;
}
memcpy(&demux->tsbuf[i], buf, j);
- if (demux->tsbuf[0] == 0x47) /* double check */
- dvb_dmx_swfilter_packet(demux, demux->tsbuf);
+
+ if (pktsize == 192) {
+ if (leadingbytes)
+ memcpy(timestamp, &demux->tsbuf[p],
+ TIMESTAMP_LEN);
+ else
+ memcpy(timestamp, &demux->tsbuf[188],
+ TIMESTAMP_LEN);
+ } else {
+ memset(timestamp, 0, TIMESTAMP_LEN);
+ }
+
+ if (pktsize == 192 &&
+ leadingbytes &&
+ demux->tsbuf[leadingbytes] == 0x47) /* double check */
+ dvb_dmx_swfilter_one_packet(demux,
+ demux->tsbuf + TIMESTAMP_LEN, timestamp);
+ else if (demux->tsbuf[0] == 0x47) /* double check */
+ dvb_dmx_swfilter_one_packet(demux,
+ demux->tsbuf, timestamp);
demux->tsbufp = 0;
p += j;
}
while (1) {
- p = find_next_packet(buf, p, count, pktsize);
+ p = find_next_packet(buf, p, count, pktsize, leadingbytes);
+
+ if (demux->sw_filter_abort)
+ goto bailout;
+
if (p >= count)
break;
if (count - p < pktsize)
@@ -553,7 +1702,19 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf,
demux->tsbuf[0] = 0x47;
q = demux->tsbuf;
}
- dvb_dmx_swfilter_packet(demux, q);
+
+ if (pktsize == 192) {
+ if (leadingbytes) {
+ q = &buf[p+leadingbytes];
+ memcpy(timestamp, &buf[p], TIMESTAMP_LEN);
+ } else {
+ memcpy(timestamp, &buf[p+188], TIMESTAMP_LEN);
+ }
+ } else {
+ memset(timestamp, 0, TIMESTAMP_LEN);
+ }
+
+ dvb_dmx_swfilter_one_packet(demux, q, timestamp);
p += pktsize;
}
@@ -566,33 +1727,65 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf,
}
bailout:
- spin_unlock_irqrestore(&demux->lock, flags);
+ spin_unlock(&demux->lock);
+
+ if (dvb_demux_performancecheck)
+ demux->total_process_time += dvb_dmx_calc_time_delta(pre_time);
}
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
{
- _dvb_dmx_swfilter(demux, buf, count, 188);
+ _dvb_dmx_swfilter(demux, buf, count, 188, 0);
}
EXPORT_SYMBOL(dvb_dmx_swfilter);
void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
{
- _dvb_dmx_swfilter(demux, buf, count, 204);
+ _dvb_dmx_swfilter(demux, buf, count, 204, 0);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_204);
void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
{
- unsigned long flags;
+ spin_lock(&demux->lock);
- spin_lock_irqsave(&demux->lock, flags);
+ demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, DMX_OK);
- demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts);
-
- spin_unlock_irqrestore(&demux->lock, flags);
+ spin_unlock(&demux->lock);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_raw);
+void dvb_dmx_swfilter_format(
+ struct dvb_demux *demux,
+ const u8 *buf,
+ size_t count,
+ enum dmx_tsp_format_t tsp_format)
+{
+ switch (tsp_format) {
+ case DMX_TSP_FORMAT_188:
+ _dvb_dmx_swfilter(demux, buf, count, 188, 0);
+ break;
+
+ case DMX_TSP_FORMAT_192_TAIL:
+ _dvb_dmx_swfilter(demux, buf, count, 192, 0);
+ break;
+
+ case DMX_TSP_FORMAT_192_HEAD:
+ _dvb_dmx_swfilter(demux, buf, count, 192, TIMESTAMP_LEN);
+ break;
+
+ case DMX_TSP_FORMAT_204:
+ _dvb_dmx_swfilter(demux, buf, count, 204, 0);
+ break;
+
+ default:
+ pr_err("%s: invalid TS packet format (format=%d)\n", __func__,
+ tsp_format);
+ break;
+ }
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_format);
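
A usage sketch, assuming a DVR-style source that delivers whole packets
(buf and count are hypothetical; count should be a multiple of the packet
size implied by the format):

	dvb_dmx_swfilter_format(demux, buf, count, DMX_TSP_FORMAT_192_HEAD);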
+
static struct dvb_demux_filter *dvb_dmx_filter_alloc(struct dvb_demux *demux)
{
int i;
@@ -625,6 +1818,268 @@ static struct dvb_demux_feed *dvb_dmx_feed_alloc(struct dvb_demux *demux)
return &demux->feed[i];
}
+const struct dvb_dmx_video_patterns *dvb_dmx_get_pattern(u64 dmx_idx_pattern)
+{
+ switch (dmx_idx_pattern) {
+ case DMX_IDX_MPEG_SEQ_HEADER:
+ return &mpeg2_seq_hdr;
+
+ case DMX_IDX_MPEG_GOP:
+ return &mpeg2_gop;
+
+ case DMX_IDX_MPEG_I_FRAME_START:
+ return &mpeg2_iframe;
+
+ case DMX_IDX_MPEG_P_FRAME_START:
+ return &mpeg2_pframe;
+
+ case DMX_IDX_MPEG_B_FRAME_START:
+ return &mpeg2_bframe;
+
+ case DMX_IDX_H264_SPS:
+ return &h264_sps;
+
+ case DMX_IDX_H264_PPS:
+ return &h264_pps;
+
+ case DMX_IDX_H264_IDR_START:
+ return &h264_idr;
+
+ case DMX_IDX_H264_NON_IDR_START:
+ return &h264_non_idr;
+
+ case DMX_IDX_H264_ACCESS_UNIT_DEL:
+ return &h264_non_access_unit_del;
+
+ case DMX_IDX_H264_SEI:
+ return &h264_non_sei;
+
+ case DMX_IDX_VC1_SEQ_HEADER:
+ return &vc1_seq_hdr;
+
+ case DMX_IDX_VC1_ENTRY_POINT:
+ return &vc1_entry_point;
+
+ case DMX_IDX_VC1_FRAME_START:
+ return &vc1_frame;
+
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(dvb_dmx_get_pattern);
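
A small lookup sketch (the pr_debug line is illustrative only):

	const struct dvb_dmx_video_patterns *p;

	p = dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
	if (p)
		pr_debug("pattern type=0x%llx size=%zu\n",
			 (unsigned long long)p->type, p->size);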
+
+static void dvb_dmx_init_idx_state(struct dvb_demux_feed *feed)
+{
+ feed->prev_tsp_num = (u64)-1;
+ feed->curr_pusi_tsp_num = (u64)-1;
+ feed->prev_pusi_tsp_num = (u64)-1;
+ feed->prev_frame_valid = 0;
+ feed->first_frame_in_seq = 0;
+ feed->first_frame_in_seq_notified = 0;
+ feed->last_pattern_tsp_num = (u64)-1;
+ feed->pattern_num = 0;
+ memset(&feed->prefix_size, 0,
+ sizeof(struct dvb_dmx_video_prefix_size_masks));
+
+ if (feed->idx_params.types &
+ (DMX_IDX_MPEG_SEQ_HEADER |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_MPEG_GOP)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP);
+ feed->pattern_num++;
+ }
+
+ /* MPEG2 I-frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_MPEG_I_FRAME_START | DMX_IDX_MPEG_I_FRAME_END |
+ DMX_IDX_MPEG_P_FRAME_END | DMX_IDX_MPEG_B_FRAME_END |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ /* MPEG2 P-frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_MPEG_P_FRAME_START | DMX_IDX_MPEG_P_FRAME_END |
+ DMX_IDX_MPEG_I_FRAME_END | DMX_IDX_MPEG_B_FRAME_END |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ /* MPEG2 B-frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_MPEG_B_FRAME_START | DMX_IDX_MPEG_B_FRAME_END |
+ DMX_IDX_MPEG_I_FRAME_END | DMX_IDX_MPEG_P_FRAME_END |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_SPS |
+ DMX_IDX_H264_FIRST_SPS_FRAME_START |
+ DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_PPS)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
+ feed->pattern_num++;
+ }
+
+ /* H264 IDR */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_IDR_START | DMX_IDX_H264_IDR_END |
+ DMX_IDX_H264_NON_IDR_END |
+ DMX_IDX_H264_FIRST_SPS_FRAME_START |
+ DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START);
+ feed->pattern_num++;
+ }
+
+ /* H264 non-IDR */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_NON_IDR_START | DMX_IDX_H264_NON_IDR_END |
+ DMX_IDX_H264_IDR_END |
+ DMX_IDX_H264_FIRST_SPS_FRAME_START |
+ DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_ACCESS_UNIT_DEL)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_ACCESS_UNIT_DEL);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_SEI)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_VC1_SEQ_HEADER |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_VC1_ENTRY_POINT)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT);
+ feed->pattern_num++;
+ }
+
+ /* VC1 frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_VC1_FRAME_START | DMX_IDX_VC1_FRAME_END |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ if (feed->pattern_num)
+ feed->rec_info->idx_info.pattern_search_feeds_num++;
+}
+
+static struct dvb_demux_rec_info *dvb_dmx_alloc_rec_info(
+ struct dmx_ts_feed *ts_feed)
+{
+ int i;
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ struct dvb_demux_rec_info *rec_info;
+ struct dvb_demux_feed *tmp_feed;
+
+ /* check if this feed shares a recording buffer with other active feeds */
+ list_for_each_entry(tmp_feed, &demux->feed_list, list_head) {
+ if ((tmp_feed->state == DMX_STATE_GO) &&
+ (tmp_feed->type == DMX_TYPE_TS) &&
+ (tmp_feed != feed) &&
+ (tmp_feed->feed.ts.buffer.ringbuff ==
+ ts_feed->buffer.ringbuff)) {
+ /* indexing information is shared between the feeds */
+ tmp_feed->rec_info->ref_count++;
+ return tmp_feed->rec_info;
+ }
+ }
+
+ /* Need to allocate new indexing info */
+ for (i = 0; i < demux->feednum; i++)
+ if (!demux->rec_info_pool[i].ref_count)
+ break;
+
+ if (i == demux->feednum)
+ return NULL;
+
+ rec_info = &demux->rec_info_pool[i];
+ rec_info->ref_count++;
+ INIT_LIST_HEAD(&rec_info->idx_info.free_list);
+ INIT_LIST_HEAD(&rec_info->idx_info.ready_list);
+
+ for (i = 0; i < DMX_IDX_EVENT_QUEUE_SIZE; i++)
+ list_add(&rec_info->idx_info.events[i].next,
+ &rec_info->idx_info.free_list);
+
+ rec_info->ts_output_count = 0;
+ rec_info->idx_info.min_pattern_tsp_num = (u64)-1;
+ rec_info->idx_info.pattern_search_feeds_num = 0;
+ rec_info->idx_info.indexing_feeds_num = 0;
+
+ return rec_info;
+}
+
+static void dvb_dmx_free_rec_info(struct dmx_ts_feed *ts_feed)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+
+ if (!feed->rec_info || !feed->rec_info->ref_count) {
+ pr_err("%s: invalid idx info state\n", __func__);
+ return;
+ }
+
+ feed->rec_info->ref_count--;
+}
+
static int dvb_demux_feed_find(struct dvb_demux_feed *feed)
{
struct dvb_demux_feed *entry;
@@ -640,7 +2095,7 @@ static void dvb_demux_feed_add(struct dvb_demux_feed *feed)
{
spin_lock_irq(&feed->demux->lock);
if (dvb_demux_feed_find(feed)) {
- printk(KERN_ERR "%s: feed already in list (type=%x state=%x pid=%x)\n",
+ pr_err("%s: feed already in list (type=%x state=%x pid=%x)\n",
__func__, feed->type, feed->state, feed->pid);
goto out;
}
@@ -654,7 +2109,7 @@ static void dvb_demux_feed_del(struct dvb_demux_feed *feed)
{
spin_lock_irq(&feed->demux->lock);
if (!(dvb_demux_feed_find(feed))) {
- printk(KERN_ERR "%s: feed not in list (type=%x state=%x pid=%x)\n",
+ pr_err("%s: feed not in list (type=%x state=%x pid=%x)\n",
__func__, feed->type, feed->state, feed->pid);
goto out;
}
@@ -738,7 +2193,34 @@ static int dmx_ts_feed_start_filtering(struct dmx_ts_feed *ts_feed)
return -ENODEV;
}
- if ((ret = demux->start_feed(feed)) < 0) {
+ feed->first_cc = 1;
+ feed->scrambling_bits = 0;
+
+ if ((feed->ts_type & TS_PACKET) &&
+ !(feed->ts_type & TS_PAYLOAD_ONLY)) {
+ feed->rec_info = dvb_dmx_alloc_rec_info(ts_feed);
+ if (!feed->rec_info) {
+ mutex_unlock(&demux->mutex);
+ return -ENOMEM;
+ }
+ if (feed->idx_params.enable) {
+ dvb_dmx_init_idx_state(feed);
+ feed->rec_info->idx_info.indexing_feeds_num++;
+ if (demux->set_indexing)
+ demux->set_indexing(feed);
+ }
+ } else {
+ feed->pattern_num = 0;
+ feed->rec_info = NULL;
+ }
+
+ ret = demux->start_feed(feed);
+ if (ret < 0) {
+ if ((feed->ts_type & TS_PACKET) &&
+ !(feed->ts_type & TS_PAYLOAD_ONLY)) {
+ dvb_dmx_free_rec_info(ts_feed);
+ feed->rec_info = NULL;
+ }
mutex_unlock(&demux->mutex);
return ret;
}
@@ -776,11 +2258,337 @@ static int dmx_ts_feed_stop_filtering(struct dmx_ts_feed *ts_feed)
ts_feed->is_filtering = 0;
feed->state = DMX_STATE_ALLOCATED;
spin_unlock_irq(&demux->lock);
+
+ if (feed->rec_info) {
+ if (feed->pattern_num)
+ feed->rec_info->idx_info.pattern_search_feeds_num--;
+ if (feed->idx_params.enable)
+ feed->rec_info->idx_info.indexing_feeds_num--;
+ dvb_dmx_free_rec_info(ts_feed);
+ feed->rec_info = NULL;
+ }
+
mutex_unlock(&demux->mutex);
return ret;
}
+static int dmx_ts_feed_decoder_buff_status(struct dmx_ts_feed *ts_feed,
+ struct dmx_buffer_status *dmx_buffer_status)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ int ret;
+
+ mutex_lock(&demux->mutex);
+
+ if (feed->state < DMX_STATE_GO) {
+ mutex_unlock(&demux->mutex);
+ return -EINVAL;
+ }
+
+ if (!demux->decoder_buffer_status) {
+ mutex_unlock(&demux->mutex);
+ return -ENODEV;
+ }
+
+ ret = demux->decoder_buffer_status(feed, dmx_buffer_status);
+
+ mutex_unlock(&demux->mutex);
+
+ return ret;
+}
+
+static int dmx_ts_feed_reuse_decoder_buffer(struct dmx_ts_feed *ts_feed,
+ int cookie)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ int ret;
+
+ mutex_lock(&demux->mutex);
+
+ if (feed->state < DMX_STATE_GO) {
+ mutex_unlock(&demux->mutex);
+ return -EINVAL;
+ }
+
+ if (!demux->reuse_decoder_buffer) {
+ mutex_unlock(&demux->mutex);
+ return -ENODEV;
+ }
+
+ ret = demux->reuse_decoder_buffer(feed, cookie);
+
+ mutex_unlock(&demux->mutex);
+
+ return ret;
+}
+
+static int dmx_ts_feed_data_ready_cb(struct dmx_ts_feed *feed,
+ dmx_ts_data_ready_cb callback)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ dvbdmxfeed->data_ready_cb.ts = callback;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
+static int dmx_ts_set_secure_mode(struct dmx_ts_feed *feed,
+ struct dmx_secure_mode *secure_mode)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ if (mutex_lock_interruptible(&dvbdmx->mutex))
+ return -ERESTARTSYS;
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EBUSY;
+ }
+
+ dvbdmxfeed->secure_mode = *secure_mode;
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
+static int dmx_ts_set_cipher_ops(struct dmx_ts_feed *feed,
+ struct dmx_cipher_operations *cipher_ops)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&dvbdmx->mutex))
+ return -ERESTARTSYS;
+
+ if ((dvbdmxfeed->state == DMX_STATE_GO) &&
+ dvbdmx->set_cipher_op)
+ ret = dvbdmx->set_cipher_op(dvbdmxfeed, cipher_ops);
+
+ if (!ret)
+ dvbdmxfeed->cipher_ops = *cipher_ops;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+}
+
+static int dmx_ts_set_video_codec(
+ struct dmx_ts_feed *ts_feed,
+ enum dmx_video_codec video_codec)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+
+ feed->video_codec = video_codec;
+
+ return 0;
+}
+
+static int dmx_ts_set_idx_params(struct dmx_ts_feed *ts_feed,
+ struct dmx_indexing_params *idx_params)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *dvbdmx = feed->demux;
+ int idx_enabled;
+ int ret = 0;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if ((feed->state == DMX_STATE_GO) &&
+ !feed->rec_info) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ idx_enabled = feed->idx_params.enable;
+ feed->idx_params = *idx_params;
+
+ if (feed->state == DMX_STATE_GO) {
+ spin_lock_irq(&dvbdmx->lock);
+ if (feed->pattern_num)
+ feed->rec_info->idx_info.pattern_search_feeds_num--;
+ if (idx_enabled && !idx_params->enable)
+ feed->rec_info->idx_info.indexing_feeds_num--;
+ if (!idx_enabled && idx_params->enable)
+ feed->rec_info->idx_info.indexing_feeds_num++;
+ dvb_dmx_init_idx_state(feed);
+ spin_unlock_irq(&dvbdmx->lock);
+
+ if (dvbdmx->set_indexing)
+ ret = dvbdmx->set_indexing(feed);
+ }
+
+ mutex_unlock(&dvbdmx->mutex);
+
+ return ret;
+}
+
+static int dvbdmx_ts_feed_oob_cmd(struct dmx_ts_feed *ts_feed,
+ struct dmx_oob_command *cmd)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dmx_data_ready data;
+ struct dvb_demux *dvbdmx = feed->demux;
+ int ret = 0;
+ int secure_non_rec = feed->secure_mode.is_secured &&
+ !dvb_dmx_is_rec_feed(feed);
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (feed->state != DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ /* Decoder & non-recording secure feeds are handled by plug-in */
+ if ((feed->ts_type & TS_DECODER) || secure_non_rec) {
+ if (feed->demux->oob_command)
+ ret = feed->demux->oob_command(feed, cmd);
+ }
+
+ if (!(feed->ts_type & (TS_PAYLOAD_ONLY | TS_PACKET)) ||
+ secure_non_rec) {
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+ }
+
+ data.data_length = 0;
+
+ switch (cmd->type) {
+ case DMX_OOB_CMD_EOS:
+ if (feed->ts_type & TS_PAYLOAD_ONLY)
+ dvb_dmx_check_pes_end(feed);
+
+ data.status = DMX_OK_EOS;
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ break;
+
+ case DMX_OOB_CMD_MARKER:
+ data.status = DMX_OK_MARKER;
+ data.marker.id = cmd->params.marker.id;
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+}
+
+static int dvbdmx_ts_get_scrambling_bits(struct dmx_ts_feed *ts_feed,
+ u8 *value)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+
+ spin_lock(&demux->lock);
+
+ if (!ts_feed->is_filtering) {
+ spin_unlock(&demux->lock);
+ return -EINVAL;
+ }
+
+ *value = feed->scrambling_bits;
+ spin_unlock(&demux->lock);
+
+ return 0;
+}
+
+static int dvbdmx_ts_insertion_insert_buffer(struct dmx_ts_feed *ts_feed,
+ char *data, size_t size)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+
+ spin_lock(&demux->lock);
+ if (!ts_feed->is_filtering) {
+ spin_unlock(&demux->lock);
+ return 0;
+ }
+
+ feed->cb.ts(data, size, NULL, 0, ts_feed, DMX_OK);
+
+ spin_unlock(&demux->lock);
+
+ return 0;
+}
+
+static int dmx_ts_set_tsp_out_format(
+ struct dmx_ts_feed *ts_feed,
+ enum dmx_tsp_format_t tsp_format)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *dvbdmx = feed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (feed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ feed->tsp_out_format = tsp_format;
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
+/**
+ * dvbdmx_ts_reset_pes_state() - Reset the current PES length and PES counters
+ *
+ * @feed: dvb demux feed object
+ */
+void dvbdmx_ts_reset_pes_state(struct dvb_demux_feed *feed)
+{
+ unsigned long flags;
+
+ /*
+ * Reset PES state.
+ * PUSI seen indication is kept so we can get partial PES.
+ */
+ spin_lock_irqsave(&feed->demux->lock, flags);
+
+ feed->peslen = 0;
+ feed->pes_tei_counter = 0;
+ feed->pes_cont_err_counter = 0;
+ feed->pes_ts_packets_num = 0;
+
+ spin_unlock_irqrestore(&feed->demux->lock, flags);
+}
+EXPORT_SYMBOL(dvbdmx_ts_reset_pes_state);
+
+static int dvbdmx_ts_flush_buffer(struct dmx_ts_feed *ts_feed, size_t length)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&demux->mutex))
+ return -ERESTARTSYS;
+
+ dvbdmx_ts_reset_pes_state(feed);
+
+ if ((feed->ts_type & TS_DECODER) && demux->flush_decoder_buffer)
+ /* Call decoder specific flushing if one exists */
+ ret = demux->flush_decoder_buffer(feed, length);
+
+ mutex_unlock(&demux->mutex);
+ return ret;
+}
+
static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
struct dmx_ts_feed **ts_feed,
dmx_ts_cb callback)
@@ -800,8 +2608,21 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
feed->cb.ts = callback;
feed->demux = demux;
feed->pid = 0xffff;
- feed->peslen = 0xfffa;
+ feed->peslen = 0;
+ feed->pes_tei_counter = 0;
+ feed->pes_ts_packets_num = 0;
+ feed->pes_cont_err_counter = 0;
+ feed->secure_mode.is_secured = 0;
feed->buffer = NULL;
+ feed->tsp_out_format = DMX_TSP_FORMAT_188;
+ feed->idx_params.enable = 0;
+
+ /*
+ * Default behaviour: pass along the first PES data even if it is
+ * partial data from a previous PES whose header we did not receive.
+ * Override this to 0 in your start_feed function to handle the
+ * first PES differently.
+ */
+ feed->pusi_seen = 1;
(*ts_feed) = &feed->feed.ts;
(*ts_feed)->parent = dmx;
@@ -810,6 +2631,22 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
(*ts_feed)->start_filtering = dmx_ts_feed_start_filtering;
(*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering;
(*ts_feed)->set = dmx_ts_feed_set;
+ (*ts_feed)->set_video_codec = dmx_ts_set_video_codec;
+ (*ts_feed)->set_idx_params = dmx_ts_set_idx_params;
+ (*ts_feed)->set_tsp_out_format = dmx_ts_set_tsp_out_format;
+ (*ts_feed)->get_decoder_buff_status = dmx_ts_feed_decoder_buff_status;
+ (*ts_feed)->reuse_decoder_buffer = dmx_ts_feed_reuse_decoder_buffer;
+ (*ts_feed)->data_ready_cb = dmx_ts_feed_data_ready_cb;
+ (*ts_feed)->notify_data_read = NULL;
+ (*ts_feed)->set_secure_mode = dmx_ts_set_secure_mode;
+ (*ts_feed)->set_cipher_ops = dmx_ts_set_cipher_ops;
+ (*ts_feed)->oob_command = dvbdmx_ts_feed_oob_cmd;
+ (*ts_feed)->get_scrambling_bits = dvbdmx_ts_get_scrambling_bits;
+ (*ts_feed)->ts_insertion_init = NULL;
+ (*ts_feed)->ts_insertion_terminate = NULL;
+ (*ts_feed)->ts_insertion_insert_buffer =
+ dvbdmx_ts_insertion_insert_buffer;
+ (*ts_feed)->flush_buffer = dvbdmx_ts_flush_buffer;
if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
feed->state = DMX_STATE_FREE;
@@ -845,7 +2682,7 @@ static int dvbdmx_release_ts_feed(struct dmx_demux *dmx,
feed->state = DMX_STATE_FREE;
feed->filter->state = DMX_STATE_FREE;
-
+ ts_feed->priv = NULL;
dvb_demux_feed_del(feed);
feed->pid = 0xffff;
@@ -971,6 +2808,8 @@ static int dmx_section_feed_start_filtering(struct dmx_section_feed *feed)
dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
dvbdmxfeed->feed.sec.secbufp = 0;
dvbdmxfeed->feed.sec.seclen = 0;
+ dvbdmxfeed->first_cc = 1;
+ dvbdmxfeed->scrambling_bits = 0;
if (!dvbdmx->start_feed) {
mutex_unlock(&dvbdmx->mutex);
@@ -1017,6 +2856,66 @@ static int dmx_section_feed_stop_filtering(struct dmx_section_feed *feed)
return ret;
}
+
+static int dmx_section_feed_data_ready_cb(struct dmx_section_feed *feed,
+ dmx_section_data_ready_cb callback)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ dvbdmxfeed->data_ready_cb.sec = callback;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
+static int dmx_section_set_secure_mode(struct dmx_section_feed *feed,
+ struct dmx_secure_mode *secure_mode)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EBUSY;
+ }
+
+ dvbdmxfeed->secure_mode = *secure_mode;
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
+static int dmx_section_set_cipher_ops(struct dmx_section_feed *feed,
+ struct dmx_cipher_operations *cipher_ops)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&dvbdmx->mutex))
+ return -ERESTARTSYS;
+
+ if ((dvbdmxfeed->state == DMX_STATE_GO) &&
+ dvbdmx->set_cipher_op) {
+ ret = dvbdmx->set_cipher_op(dvbdmxfeed, cipher_ops);
+ }
+
+ if (!ret)
+ dvbdmxfeed->cipher_ops = *cipher_ops;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+}
+
static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
struct dmx_section_filter *filter)
{
@@ -1050,12 +2949,82 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
f->next = f->next->next;
}
+ filter->priv = NULL;
dvbdmxfilter->state = DMX_STATE_FREE;
spin_unlock_irq(&dvbdmx->lock);
mutex_unlock(&dvbdmx->mutex);
return 0;
}
+static int dvbdmx_section_feed_oob_cmd(struct dmx_section_feed *section_feed,
+ struct dmx_oob_command *cmd)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)section_feed;
+ struct dvb_demux *dvbdmx = feed->demux;
+ struct dmx_data_ready data;
+ int ret = 0;
+
+ data.data_length = 0;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (feed->state != DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ /* Secure section feeds are handled by the plug-in */
+ if (feed->secure_mode.is_secured) {
+ if (feed->demux->oob_command)
+ ret = feed->demux->oob_command(feed, cmd);
+ else
+ ret = 0;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+ }
+
+ switch (cmd->type) {
+ case DMX_OOB_CMD_EOS:
+ data.status = DMX_OK_EOS;
+ break;
+
+ case DMX_OOB_CMD_MARKER:
+ data.status = DMX_OK_MARKER;
+ data.marker.id = cmd->params.marker.id;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ ret = dvb_dmx_notify_section_event(feed, &data, 1);
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+}
+
+static int dvbdmx_section_get_scrambling_bits(
+ struct dmx_section_feed *section_feed, u8 *value)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)section_feed;
+ struct dvb_demux *demux = feed->demux;
+
+ spin_lock(&demux->lock);
+
+ if (!section_feed->is_filtering) {
+ spin_unlock(&demux->lock);
+ return -EINVAL;
+ }
+
+ *value = feed->scrambling_bits;
+ spin_unlock(&demux->lock);
+
+ return 0;
+}
+
static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
struct dmx_section_feed **feed,
dmx_section_cb callback)
@@ -1075,11 +3044,14 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
dvbdmxfeed->cb.sec = callback;
dvbdmxfeed->demux = dvbdmx;
dvbdmxfeed->pid = 0xffff;
+ dvbdmxfeed->secure_mode.is_secured = 0;
+ dvbdmxfeed->tsp_out_format = DMX_TSP_FORMAT_188;
dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0;
dvbdmxfeed->feed.sec.tsfeedp = 0;
dvbdmxfeed->filter = NULL;
dvbdmxfeed->buffer = NULL;
+ dvbdmxfeed->idx_params.enable = 0;
(*feed) = &dvbdmxfeed->feed.sec;
(*feed)->is_filtering = 0;
@@ -1091,6 +3063,13 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
(*feed)->start_filtering = dmx_section_feed_start_filtering;
(*feed)->stop_filtering = dmx_section_feed_stop_filtering;
(*feed)->release_filter = dmx_section_feed_release_filter;
+ (*feed)->data_ready_cb = dmx_section_feed_data_ready_cb;
+ (*feed)->notify_data_read = NULL;
+ (*feed)->set_secure_mode = dmx_section_set_secure_mode;
+ (*feed)->set_cipher_ops = dmx_section_set_cipher_ops;
+ (*feed)->oob_command = dvbdmx_section_feed_oob_cmd;
+ (*feed)->get_scrambling_bits = dvbdmx_section_get_scrambling_bits;
+ (*feed)->flush_buffer = NULL;
mutex_unlock(&dvbdmx->mutex);
return 0;
@@ -1113,7 +3092,7 @@ static int dvbdmx_release_section_feed(struct dmx_demux *demux,
dvbdmxfeed->buffer = NULL;
#endif
dvbdmxfeed->state = DMX_STATE_FREE;
-
+ feed->priv = NULL;
dvb_demux_feed_del(dvbdmxfeed);
dvbdmxfeed->pid = 0xffff;
@@ -1149,23 +3128,18 @@ static int dvbdmx_close(struct dmx_demux *demux)
return 0;
}
-static int dvbdmx_write(struct dmx_demux *demux, const char __user *buf, size_t count)
+static int dvbdmx_write(struct dmx_demux *demux, const char *buf, size_t count)
{
struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
- void *p;
- if ((!demux->frontend) || (demux->frontend->source != DMX_MEMORY_FE))
+ if (!demux->frontend || !buf || demux->dvr_input_protected ||
+ (demux->frontend->source != DMX_MEMORY_FE))
return -EINVAL;
-
- p = memdup_user(buf, count);
- if (IS_ERR(p))
- return PTR_ERR(p);
- if (mutex_lock_interruptible(&dvbdemux->mutex)) {
- kfree(p);
+ if (mutex_lock_interruptible(&dvbdemux->mutex))
return -ERESTARTSYS;
- }
- dvb_dmx_swfilter(dvbdemux, p, count);
- kfree(p);
+
+ dvb_dmx_swfilter_format(dvbdemux, buf, count, dvbdemux->tsp_format);
+
mutex_unlock(&dvbdemux->mutex);
if (signal_pending(current))
@@ -1173,6 +3147,40 @@ static int dvbdmx_write(struct dmx_demux *demux, const char __user *buf, size_t
return count;
}
+static int dvbdmx_write_cancel(struct dmx_demux *demux)
+{
+ struct dvb_demux *dvbdmx = (struct dvb_demux *)demux;
+
+ spin_lock_irq(&dvbdmx->lock);
+
+ /* cancel any pending wait for decoder's buffers */
+ dvbdmx->sw_filter_abort = 1;
+ dvbdmx->tsbufp = 0;
+ dvb_dmx_configure_decoder_fullness(dvbdmx, 0);
+
+ spin_unlock_irq(&dvbdmx->lock);
+
+ return 0;
+}
+
+static int dvbdmx_set_playback_mode(struct dmx_demux *demux,
+ enum dmx_playback_mode_t mode,
+ dmx_ts_fullness ts_fullness_callback,
+ dmx_section_fullness sec_fullness_callback)
+{
+ struct dvb_demux *dvbdmx = (struct dvb_demux *)demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ dvbdmx->playback_mode = mode;
+ dvbdmx->buffer_ctrl.ts = ts_fullness_callback;
+ dvbdmx->buffer_ctrl.sec = sec_fullness_callback;
+
+ mutex_unlock(&dvbdmx->mutex);
+
+ return 0;
+}
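
A hypothetical caller switching the demux into pull mode (the callback
names are placeholders):

	demux->dmx.set_playback_mode(&demux->dmx, DMX_PB_MODE_PULL,
				     my_ts_fullness_cb, my_sec_fullness_cb);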
+
static int dvbdmx_add_frontend(struct dmx_demux *demux,
struct dmx_frontend *frontend)
{
@@ -1230,7 +3238,7 @@ static int dvbdmx_disconnect_frontend(struct dmx_demux *demux)
struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
mutex_lock(&dvbdemux->mutex);
-
+ dvbdemux->sw_filter_abort = 0;
demux->frontend = NULL;
mutex_unlock(&dvbdemux->mutex);
return 0;
@@ -1244,6 +3252,48 @@ static int dvbdmx_get_pes_pids(struct dmx_demux *demux, u16 * pids)
return 0;
}
+static int dvbdmx_get_tsp_size(struct dmx_demux *demux)
+{
+ int tsp_size;
+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
+
+ mutex_lock(&dvbdemux->mutex);
+ tsp_size = dvbdemux->ts_packet_size;
+ mutex_unlock(&dvbdemux->mutex);
+
+ return tsp_size;
+}
+
+static int dvbdmx_set_tsp_format(
+ struct dmx_demux *demux,
+ enum dmx_tsp_format_t tsp_format)
+{
+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
+
+ if ((tsp_format > DMX_TSP_FORMAT_204) ||
+ (tsp_format < DMX_TSP_FORMAT_188))
+ return -EINVAL;
+
+ mutex_lock(&dvbdemux->mutex);
+
+ dvbdemux->tsp_format = tsp_format;
+ switch (tsp_format) {
+ case DMX_TSP_FORMAT_188:
+ dvbdemux->ts_packet_size = 188;
+ break;
+ case DMX_TSP_FORMAT_192_TAIL:
+ case DMX_TSP_FORMAT_192_HEAD:
+ dvbdemux->ts_packet_size = 192;
+ break;
+ case DMX_TSP_FORMAT_204:
+ dvbdemux->ts_packet_size = 204;
+ break;
+ }
+
+ mutex_unlock(&dvbdemux->mutex);
+ return 0;
+}
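
Usage sketch: select 204-byte input packets, then read the size back
(dvbdemux is hypothetical):

	dvbdemux->dmx.set_tsp_format(&dvbdemux->dmx, DMX_TSP_FORMAT_204);
	pr_debug("tsp size=%d\n",
		 dvbdemux->dmx.get_tsp_size(&dvbdemux->dmx));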
+
int dvb_dmx_init(struct dvb_demux *dvbdemux)
{
int i;
@@ -1262,13 +3312,52 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->filter = NULL;
return -ENOMEM;
}
+
+ dvbdemux->rec_info_pool = vmalloc(dvbdemux->feednum *
+ sizeof(struct dvb_demux_rec_info));
+ if (!dvbdemux->rec_info_pool) {
+ vfree(dvbdemux->feed);
+ vfree(dvbdemux->filter);
+ dvbdemux->feed = NULL;
+ dvbdemux->filter = NULL;
+ return -ENOMEM;
+ }
+
+ dvbdemux->sw_filter_abort = 0;
+ dvbdemux->total_process_time = 0;
+ dvbdemux->total_crc_time = 0;
+ snprintf(dvbdemux->alias,
+ MAX_DVB_DEMUX_NAME_LEN,
+ "demux%d",
+ dvb_demux_index++);
+
+ dvbdemux->dmx.debugfs_demux_dir =
+ debugfs_create_dir(dvbdemux->alias, NULL);
+
+ if (dvbdemux->dmx.debugfs_demux_dir != NULL) {
+ debugfs_create_u32(
+ "total_processing_time",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ dvbdemux->dmx.debugfs_demux_dir,
+ &dvbdemux->total_process_time);
+
+ debugfs_create_u32(
+ "total_crc_time",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ dvbdemux->dmx.debugfs_demux_dir,
+ &dvbdemux->total_crc_time);
+ }
+
for (i = 0; i < dvbdemux->filternum; i++) {
dvbdemux->filter[i].state = DMX_STATE_FREE;
dvbdemux->filter[i].index = i;
}
+
for (i = 0; i < dvbdemux->feednum; i++) {
dvbdemux->feed[i].state = DMX_STATE_FREE;
dvbdemux->feed[i].index = i;
+
+ dvbdemux->rec_info_pool[i].ref_count = 0;
}
dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
@@ -1288,6 +3377,9 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->recording = 0;
dvbdemux->tsbufp = 0;
+ dvbdemux->tsp_format = DMX_TSP_FORMAT_188;
+ dvbdemux->ts_packet_size = 188;
+
if (!dvbdemux->check_crc32)
dvbdemux->check_crc32 = dvb_dmx_crc32;
@@ -1299,10 +3391,14 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dmx->open = dvbdmx_open;
dmx->close = dvbdmx_close;
dmx->write = dvbdmx_write;
+ dmx->write_cancel = dvbdmx_write_cancel;
+ dmx->set_playback_mode = dvbdmx_set_playback_mode;
dmx->allocate_ts_feed = dvbdmx_allocate_ts_feed;
dmx->release_ts_feed = dvbdmx_release_ts_feed;
dmx->allocate_section_feed = dvbdmx_allocate_section_feed;
dmx->release_section_feed = dvbdmx_release_section_feed;
+ dmx->map_buffer = NULL;
+ dmx->unmap_buffer = NULL;
dmx->add_frontend = dvbdmx_add_frontend;
dmx->remove_frontend = dvbdmx_remove_frontend;
@@ -1311,6 +3407,9 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dmx->disconnect_frontend = dvbdmx_disconnect_frontend;
dmx->get_pes_pids = dvbdmx_get_pes_pids;
+ dmx->set_tsp_format = dvbdmx_set_tsp_format;
+ dmx->get_tsp_size = dvbdmx_get_tsp_size;
+
mutex_init(&dvbdemux->mutex);
spin_lock_init(&dvbdemux->lock);
@@ -1321,9 +3420,14 @@ EXPORT_SYMBOL(dvb_dmx_init);
void dvb_dmx_release(struct dvb_demux *dvbdemux)
{
+ if (dvbdemux->dmx.debugfs_demux_dir != NULL)
+ debugfs_remove_recursive(dvbdemux->dmx.debugfs_demux_dir);
+
+ dvb_demux_index--;
vfree(dvbdemux->cnt_storage);
vfree(dvbdemux->filter);
vfree(dvbdemux->feed);
+ vfree(dvbdemux->rec_info_pool);
}
EXPORT_SYMBOL(dvb_dmx_release);
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index ae7fc33c3231..779de7ed078a 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -27,6 +27,7 @@
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/debugfs.h>
#include "demux.h"
@@ -44,6 +45,8 @@
#define MAX_PID 0x1fff
+#define TIMESTAMP_LEN 4
+
#define SPEED_PKTS_INTERVAL 50000
struct dvb_demux_filter {
@@ -64,6 +67,92 @@ struct dvb_demux_filter {
#define DMX_FEED_ENTRY(pos) list_entry(pos, struct dvb_demux_feed, list_head)
+
+struct dmx_index_entry {
+ struct dmx_index_event_info event;
+ struct list_head next;
+};
+
+#define DMX_IDX_EVENT_QUEUE_SIZE DMX_EVENT_QUEUE_SIZE
+
+struct dvb_demux_rec_info {
+ /* Reference counter for number of feeds using this information */
+ int ref_count;
+
+ /* Counter for number of TS packets output to recording buffer */
+ u64 ts_output_count;
+
+ /* Indexing information */
+ struct {
+ /*
+ * Minimum TS packet number encountered in recording filter
+ * among all feeds that search for video patterns
+ */
+ u64 min_pattern_tsp_num;
+
+ /* Number of indexing-enabled feeds */
+ u8 indexing_feeds_num;
+
+ /* Number of feeds with video pattern search request */
+ u8 pattern_search_feeds_num;
+
+ /* Index entries pool */
+ struct dmx_index_entry events[DMX_IDX_EVENT_QUEUE_SIZE];
+
+ /* List of free entries that can be used for new index events */
+ struct list_head free_list;
+
+ /* List holding ready index entries not notified to user yet */
+ struct list_head ready_list;
+ } idx_info;
+};
+
+#define DVB_DMX_MAX_PATTERN_LEN 6
+struct dvb_dmx_video_patterns {
+ /* the byte pattern to look for */
+ u8 pattern[DVB_DMX_MAX_PATTERN_LEN];
+
+ /* the byte mask to use (same length as pattern) */
+ u8 mask[DVB_DMX_MAX_PATTERN_LEN];
+
+ /* the length of the pattern, in bytes */
+ size_t size;
+
+ /* the type of the pattern. One of DMX_IDX_* definitions */
+ u64 type;
+};
+
+#define DVB_DMX_MAX_FOUND_PATTERNS 20
+#define DVB_DMX_MAX_SEARCH_PATTERN_NUM 20
+struct dvb_dmx_video_prefix_size_masks {
+ /*
+ * a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started in the previous TS packet.
+ * Updated by dvb_dmx_video_pattern_search for use in the next lookup.
+ */
+ u32 size_mask[DVB_DMX_MAX_FOUND_PATTERNS];
+};
+
+struct dvb_dmx_video_patterns_results {
+ struct {
+ /*
+ * The offset in the buffer where the pattern was found.
+ * If a pattern is found using a prefix (i.e. started on the
+ * previous buffer), offset is zero.
+ */
+ u32 offset;
+
+ /*
+ * The type of the pattern found.
+ * One of DMX_IDX_* definitions.
+ */
+ u64 type;
+
+ /* The prefix size that was used to find this pattern */
+ u32 used_prefix_size;
+ } info[DVB_DMX_MAX_FOUND_PATTERNS];
+};
+
struct dvb_demux_feed {
union {
struct dmx_ts_feed ts;
@@ -75,6 +164,11 @@ struct dvb_demux_feed {
dmx_section_cb sec;
} cb;
+ union {
+ dmx_ts_data_ready_cb ts;
+ dmx_section_data_ready_cb sec;
+ } data_ready_cb;
+
struct dvb_demux *demux;
void *priv;
int type;
@@ -82,6 +176,9 @@ struct dvb_demux_feed {
u16 pid;
u8 *buffer;
int buffer_size;
+ enum dmx_tsp_format_t tsp_out_format;
+ struct dmx_secure_mode secure_mode;
+ struct dmx_cipher_operations cipher_ops;
struct timespec timeout;
struct dvb_demux_filter *filter;
@@ -90,12 +187,34 @@ struct dvb_demux_feed {
enum dmx_ts_pes pes_type;
int cc;
+ int first_cc;
int pusi_seen; /* prevents feeding of garbage from previous section */
+ u8 scrambling_bits;
+
+ struct dvb_demux_rec_info *rec_info;
+ u64 prev_tsp_num;
+ u64 prev_stc;
+ u64 curr_pusi_tsp_num;
+ u64 prev_pusi_tsp_num;
+ int prev_frame_valid;
+ u64 prev_frame_type;
+ int first_frame_in_seq;
+ int first_frame_in_seq_notified;
+ u64 last_pattern_tsp_num;
+ int pattern_num;
+ const struct dvb_dmx_video_patterns *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
+ struct dvb_dmx_video_prefix_size_masks prefix_size;
u16 peslen;
+ u32 pes_tei_counter;
+ u32 pes_cont_err_counter;
+ u32 pes_ts_packets_num;
struct list_head list_head;
unsigned int index; /* a unique index for each feed (can be used as hardware pid filter index) */
+
+ enum dmx_video_codec video_codec;
+ struct dmx_indexing_params idx_params;
};
struct dvb_demux {
@@ -107,10 +226,27 @@ struct dvb_demux {
int (*stop_feed)(struct dvb_demux_feed *feed);
int (*write_to_decoder)(struct dvb_demux_feed *feed,
const u8 *buf, size_t len);
+ int (*decoder_fullness_init)(struct dvb_demux_feed *feed);
+ int (*decoder_fullness_wait)(struct dvb_demux_feed *feed,
+ size_t required_space);
+ int (*decoder_fullness_abort)(struct dvb_demux_feed *feed);
+ int (*decoder_buffer_status)(struct dvb_demux_feed *feed,
+ struct dmx_buffer_status *dmx_buffer_status);
+ int (*reuse_decoder_buffer)(struct dvb_demux_feed *feed,
+ int cookie);
+ int (*set_cipher_op)(struct dvb_demux_feed *feed,
+ struct dmx_cipher_operations *cipher_ops);
u32 (*check_crc32)(struct dvb_demux_feed *feed,
const u8 *buf, size_t len);
void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst,
const u8 *src, size_t len);
+ int (*oob_command)(struct dvb_demux_feed *feed,
+ struct dmx_oob_command *cmd);
+ void (*convert_ts)(struct dvb_demux_feed *feed,
+ const u8 timestamp[TIMESTAMP_LEN],
+ u64 *timestampIn27Mhz);
+ int (*set_indexing)(struct dvb_demux_feed *feed);
+ int (*flush_decoder_buffer)(struct dvb_demux_feed *feed, size_t length);
int users;
#define MAX_DVB_DEMUX_USERS 10
@@ -136,10 +272,35 @@ struct dvb_demux {
struct timespec speed_last_time; /* for TS speed check */
uint32_t speed_pkts_cnt; /* for TS speed check */
+
+ enum dmx_tsp_format_t tsp_format;
+ size_t ts_packet_size;
+
+ enum dmx_playback_mode_t playback_mode;
+ int sw_filter_abort;
+
+ struct {
+ dmx_ts_fullness ts;
+ dmx_section_fullness sec;
+ } buffer_ctrl;
+
+ struct dvb_demux_rec_info *rec_info_pool;
+
+ /*
+ * The following fields expose dvb demux performance
+ * information via debugfs.
+ */
+#define MAX_DVB_DEMUX_NAME_LEN 10
+ char alias[MAX_DVB_DEMUX_NAME_LEN];
+
+ u32 total_process_time;
+ u32 total_crc_time;
};
int dvb_dmx_init(struct dvb_demux *dvbdemux);
void dvb_dmx_release(struct dvb_demux *dvbdemux);
+int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, const u8 *buf,
+ int should_lock);
void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf,
size_t count);
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
@@ -147,5 +308,116 @@ void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf,
size_t count);
void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
size_t count);
+void dvb_dmx_swfilter_format(
+ struct dvb_demux *demux, const u8 *buf,
+ size_t count,
+ enum dmx_tsp_format_t tsp_format);
+void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN]);
+const struct dvb_dmx_video_patterns *dvb_dmx_get_pattern(u64 dmx_idx_pattern);
+int dvb_dmx_video_pattern_search(
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+ int patterns_num,
+ const u8 *buf, size_t buf_size,
+ struct dvb_dmx_video_prefix_size_masks *prefix_size_masks,
+ struct dvb_dmx_video_patterns_results *results);
+int dvb_demux_push_idx_event(struct dvb_demux_feed *feed,
+ struct dmx_index_event_info *idx_event, int should_lock);
+void dvb_dmx_process_idx_pattern(struct dvb_demux_feed *feed,
+ struct dvb_dmx_video_patterns_results *patterns, int pattern,
+ u64 curr_stc, u64 prev_stc,
+ u64 curr_match_tsp, u64 prev_match_tsp,
+ u64 curr_pusi_tsp, u64 prev_pusi_tsp);
+void dvb_dmx_notify_idx_events(struct dvb_demux_feed *feed, int should_lock);
+int dvb_dmx_notify_section_event(struct dvb_demux_feed *feed,
+ struct dmx_data_ready *event, int should_lock);
+void dvbdmx_ts_reset_pes_state(struct dvb_demux_feed *feed);
+
+/**
+ * dvb_dmx_is_video_feed - Returns whether the PES feed
+ * is a video one.
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is video feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_video_feed(struct dvb_demux_feed *feed)
+{
+ if (feed->type != DMX_TYPE_TS)
+ return 0;
+
+ if (feed->ts_type & (~TS_DECODER))
+ return 0;
+
+ if ((feed->pes_type == DMX_PES_VIDEO0) ||
+ (feed->pes_type == DMX_PES_VIDEO1) ||
+ (feed->pes_type == DMX_PES_VIDEO2) ||
+ (feed->pes_type == DMX_PES_VIDEO3))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * dvb_dmx_is_pcr_feed - Returns whether the PES feed
+ * is a PCR one.
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is PCR feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_pcr_feed(struct dvb_demux_feed *feed)
+{
+ if (feed->type != DMX_TYPE_TS)
+ return 0;
+
+ if (feed->ts_type & (~TS_DECODER))
+ return 0;
+
+ if ((feed->pes_type == DMX_PES_PCR0) ||
+ (feed->pes_type == DMX_PES_PCR1) ||
+ (feed->pes_type == DMX_PES_PCR2) ||
+ (feed->pes_type == DMX_PES_PCR3))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * dvb_dmx_is_sec_feed - Returns whether this is a section feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is a section feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_sec_feed(struct dvb_demux_feed *feed)
+{
+ return (feed->type == DMX_TYPE_SEC);
+}
+
+/**
+ * dvb_dmx_is_rec_feed - Returns whether this is a recording feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is recording feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_rec_feed(struct dvb_demux_feed *feed)
+{
+ if (feed->type != DMX_TYPE_TS)
+ return 0;
+
+ if (feed->ts_type & (TS_DECODER | TS_PAYLOAD_ONLY))
+ return 0;
+
+ return 1;
+}
+
+static inline u16 ts_pid(const u8 *buf)
+{
+ return ((buf[1] & 0x1f) << 8) + buf[2];
+}
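
A quick sanity check of ts_pid() on a null-packet header (sketch):

	static const u8 hdr[4] = { 0x47, 0x1f, 0xff, 0x10 };

	WARN_ON(ts_pid(hdr) != 0x1fff);	/* 13-bit PID from bytes 1-2 */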
+
#endif /* _DVB_DEMUX_H_ */
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index ce4332e80a91..454584a8bf17 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -761,7 +761,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_ts_feed *feed)
+ struct dmx_ts_feed *feed,
+ enum dmx_success success)
{
struct net_device *dev = feed->priv;
@@ -870,7 +871,8 @@ static void dvb_net_sec(struct net_device *dev,
static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_section_filter *filter)
+ struct dmx_section_filter *filter,
+ enum dmx_success success)
{
struct net_device *dev = filter->priv;
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index 1100e98a7b1d..d61be58e22f0 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -37,6 +37,8 @@
#define PKT_READY 0
#define PKT_DISPOSED 1
+#define PKT_PENDING 2
+
void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
@@ -55,7 +57,7 @@ void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
{
- return (rbuf->pread==rbuf->pwrite);
+ return (rbuf->pread == rbuf->pwrite);
}
@@ -167,25 +169,29 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t
}
ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
- const u8 __user *buf, size_t len)
+ const u8 __user *buf, size_t len)
{
- int status;
size_t todo = len;
size_t split;
+ ssize_t oldpwrite = rbuf->pwrite;
- split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0;
+ split = (rbuf->pwrite + len > rbuf->size) ?
+ rbuf->size - rbuf->pwrite :
+ 0;
if (split > 0) {
- status = copy_from_user(rbuf->data+rbuf->pwrite, buf, split);
- if (status)
- return len - todo;
+ if (copy_from_user(rbuf->data + rbuf->pwrite, buf, split))
+ return -EFAULT;
buf += split;
todo -= split;
rbuf->pwrite = 0;
}
- status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
- if (status)
- return len - todo;
+
+ if (copy_from_user(rbuf->data + rbuf->pwrite, buf, todo)) {
+ rbuf->pwrite = oldpwrite;
+ return -EFAULT;
+ }
+
rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
return len;
@@ -205,6 +211,31 @@ ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf, size_t le
return status;
}
+ssize_t dvb_ringbuffer_pkt_start(struct dvb_ringbuffer *rbuf, size_t len)
+{
+ ssize_t oldpwrite = rbuf->pwrite;
+
+ DVB_RINGBUFFER_WRITE_BYTE(rbuf, len >> 8);
+ DVB_RINGBUFFER_WRITE_BYTE(rbuf, len & 0xff);
+ DVB_RINGBUFFER_WRITE_BYTE(rbuf, PKT_PENDING);
+
+ return oldpwrite;
+}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_start);
+
+int dvb_ringbuffer_pkt_close(struct dvb_ringbuffer *rbuf, ssize_t idx)
+{
+ idx = (idx + 2) % rbuf->size;
+
+ if (rbuf->data[idx] != PKT_PENDING)
+ return -EINVAL;
+
+ rbuf->data[idx] = PKT_READY;
+
+ return 0;
+}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_close);
+
ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
int offset, u8 __user *buf, size_t len)
{
@@ -212,6 +243,9 @@ ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
size_t split;
size_t pktlen;
+ if (DVB_RINGBUFFER_PEEK(rbuf, (idx+2)) != PKT_READY)
+ return -EINVAL;
+
pktlen = rbuf->data[idx] << 8;
pktlen |= rbuf->data[(idx + 1) % rbuf->size];
if (offset > pktlen) return -EINVAL;
@@ -232,6 +266,7 @@ ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
return len;
}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_read_user);
ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
int offset, u8* buf, size_t len)
@@ -240,6 +275,9 @@ ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
size_t split;
size_t pktlen;
+ if (rbuf->data[(idx + 2) % rbuf->size] != PKT_READY)
+ return -EINVAL;
+
pktlen = rbuf->data[idx] << 8;
pktlen |= rbuf->data[(idx + 1) % rbuf->size];
if (offset > pktlen) return -EINVAL;
@@ -257,6 +295,7 @@ ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
memcpy(buf, rbuf->data+idx, todo);
return len;
}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_read);
void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx)
{
@@ -276,6 +315,7 @@ void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx)
}
}
}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_dispose);
ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen)
{
@@ -291,7 +331,10 @@ ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t*
idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
}
- consumed = (idx - rbuf->pread) % rbuf->size;
+ if (idx >= rbuf->pread)
+ consumed = idx - rbuf->pread;
+ else
+ consumed = rbuf->size - (rbuf->pread - idx);
while((dvb_ringbuffer_avail(rbuf) - consumed) > DVB_RINGBUFFER_PKTHDRSIZE) {
@@ -304,6 +347,9 @@ ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t*
return idx;
}
+ if (curpktstatus == PKT_PENDING)
+ return -EFAULT;
+
consumed += curpktlen + DVB_RINGBUFFER_PKTHDRSIZE;
idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
}
@@ -311,8 +357,7 @@ ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t*
// no packets available
return -1;
}
-
-
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_next);
EXPORT_SYMBOL(dvb_ringbuffer_init);
EXPORT_SYMBOL(dvb_ringbuffer_empty);
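A note on the reworked `consumed` computation in dvb_ringbuffer_pkt_next
above: the old `(idx - rbuf->pread) % rbuf->size` relies on unsigned
wraparound and is only correct when the buffer size is a power of two; the
explicit branch works for any size. A minimal user-space sketch (values
are hypothetical, not driver code) shows the difference:

	#include <stdio.h>

	int main(void)
	{
		size_t size = 1000, pread = 900, idx = 100; /* writer wrapped */
		size_t naive, fixed;

		naive = (idx - pread) % size; /* (2^64 - 800) % 1000 = 816, wrong */
		if (idx >= pread)
			fixed = idx - pread;
		else
			fixed = size - (pread - idx); /* 200, correct */

		printf("naive=%zu fixed=%zu\n", naive, fixed);
		return 0;
	}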
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h
index 3ebc2d34b4a2..2fe589e5d7ea 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb-core/dvb_ringbuffer.h
@@ -108,6 +108,9 @@ extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf);
/* advance read ptr by @num: bytes */
#define DVB_RINGBUFFER_SKIP(rbuf,num) \
(rbuf)->pread=((rbuf)->pread+(num))%(rbuf)->size
+/* advance write ptr by @num: bytes */
+#define DVB_RINGBUFFER_PUSH(rbuf, num) \
+ ((rbuf)->pwrite = (((rbuf)->pwrite+(num))%(rbuf)->size))
/*
* read @len: bytes from ring buffer into @buf:
@@ -200,4 +203,31 @@ extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx);
extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen);
+/**
+ * Start a new packet that will be written directly by the user to the packet
+ * buffer.
+ * The function writes only the packet header into the ring buffer; the
+ * packet stays in the pending state (invisible to the reader) until it is
+ * closed with dvb_ringbuffer_pkt_close. You must write the packet data
+ * using dvb_ringbuffer_write and then call dvb_ringbuffer_pkt_close.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @len: Size of the packet's data
+ * returns Index of the packet's header that was started.
+ */
+extern ssize_t dvb_ringbuffer_pkt_start(struct dvb_ringbuffer *rbuf,
+ size_t len);
+
+/**
+ * Close a packet that was started using dvb_ringbuffer_pkt_start.
+ * The packet will be marked as ready to be read.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Packet index that was returned by dvb_ringbuffer_pkt_start
+ * returns error status, -EINVAL if the provided index is invalid
+ */
+extern int dvb_ringbuffer_pkt_close(struct dvb_ringbuffer *rbuf, ssize_t idx);
+
+
#endif /* _DVB_RINGBUFFER_H_ */
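Taken together, the two new calls define a simple producer protocol:
reserve a header, write the payload, then publish. A minimal kernel-side
sketch (rb, data and len assumed in scope; error handling elided, and the
caller is assumed to have checked via dvb_ringbuffer_free() that the
header plus payload fit):

	ssize_t idx;

	idx = dvb_ringbuffer_pkt_start(rb, len); /* header, PKT_PENDING */
	dvb_ringbuffer_write(rb, data, len);     /* payload after header */
	dvb_ringbuffer_pkt_close(rb, idx);       /* PKT_PENDING -> PKT_READY */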
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
index 16060773ac96..e2523e06ab76 100644
--- a/drivers/media/platform/msm/Kconfig
+++ b/drivers/media/platform/msm/Kconfig
@@ -41,3 +41,5 @@ endif # MSMB_CAMERA
source "drivers/media/platform/msm/vidc/Kconfig"
source "drivers/media/platform/msm/sde/Kconfig"
+source "drivers/media/platform/msm/dvb/Kconfig"
+source "drivers/media/platform/msm/broadcast/Kconfig"
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
index ff0369bdeca5..a3f802d3ce59 100644
--- a/drivers/media/platform/msm/Makefile
+++ b/drivers/media/platform/msm/Makefile
@@ -5,3 +5,5 @@
obj-$(CONFIG_MSMB_CAMERA) += camera_v2/
obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/
obj-y += sde/
+obj-y += broadcast/
+obj-$(CONFIG_DVB_MPQ) += dvb/
diff --git a/drivers/media/platform/msm/broadcast/Kconfig b/drivers/media/platform/msm/broadcast/Kconfig
new file mode 100644
index 000000000000..cdd1b2091179
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/Kconfig
@@ -0,0 +1,14 @@
+#
+# MSM Broadcast subsystem drivers
+#
+
+config TSPP
+ depends on ARCH_QCOM
+ tristate "TSPP (Transport Stream Packet Processor) Support"
+ ---help---
+ Transport Stream Packet Processor v1 is used to offload the
+ processing of MPEG transport streams from the main processor.
+ It is used to process incoming transport streams from TSIF
+ to support use-cases such as transport stream live play
+ and recording.
+ This can also be compiled as a loadable module.
diff --git a/drivers/media/platform/msm/broadcast/Makefile b/drivers/media/platform/msm/broadcast/Makefile
new file mode 100644
index 000000000000..3735bdc212ad
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for MSM Broadcast subsystem drivers.
+#
+obj-$(CONFIG_TSPP) += tspp.o
diff --git a/drivers/media/platform/msm/broadcast/tspp.c b/drivers/media/platform/msm/broadcast/tspp.c
new file mode 100644
index 000000000000..275b8b90af05
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/tspp.c
@@ -0,0 +1,3094 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h> /* Just for modules */
+#include <linux/kernel.h> /* Only for KERN_INFO */
+#include <linux/err.h> /* Error macros */
+#include <linux/list.h> /* Linked list */
+#include <linux/cdev.h>
+#include <linux/init.h> /* Needed for the macros */
+#include <linux/io.h> /* IO macros */
+#include <linux/device.h> /* Device drivers need this */
+#include <linux/sched.h> /* Externally defined globals */
+#include <linux/pm_runtime.h> /* Runtime power management */
+#include <linux/fs.h>
+#include <linux/uaccess.h> /* copy_to_user */
+#include <linux/slab.h> /* kfree, kzalloc */
+#include <linux/ioport.h> /* XXX_ mem_region */
+#include <linux/dma-mapping.h> /* dma_XXX */
+#include <linux/dmapool.h> /* DMA pools */
+#include <linux/delay.h> /* msleep */
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/poll.h> /* poll() file op */
+#include <linux/wait.h> /* wait() macros, sleeping */
+#include <linux/bitops.h> /* BIT() macro */
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/msm-sps.h> /* BAM stuff */
+#include <linux/wakelock.h> /* Locking functions */
+#include <linux/timer.h> /* Timer services */
+#include <linux/jiffies.h> /* Jiffies counter */
+#include <linux/qcom_tspp.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/string.h>
+#include <linux/msm-bus.h>
+#include <linux/interrupt.h> /* tasklet */
+
+/*
+ * General defines
+ */
+#define TSPP_TSIF_INSTANCES 2
+#define TSPP_GPIOS_PER_TSIF 4
+#define TSPP_FILTER_TABLES 3
+#define TSPP_MAX_DEVICES 1
+#define TSPP_NUM_CHANNELS 16
+#define TSPP_NUM_PRIORITIES 16
+#define TSPP_NUM_KEYS 8
+#define INVALID_CHANNEL 0xFFFFFFFF
+#define TSPP_BAM_DEFAULT_IPC_LOGLVL 2
+/*
+ * BAM descriptor FIFO size (in number of descriptors).
+ * Set to the maximum allowed by SPS, which is 8K-1 descriptors.
+ */
+#define TSPP_SPS_DESCRIPTOR_COUNT (8 * 1024 - 1)
+#define TSPP_PACKET_LENGTH 188
+#define TSPP_MIN_BUFFER_SIZE (TSPP_PACKET_LENGTH)
+
+/* Max descriptor buffer size allowed by SPS */
+#define TSPP_MAX_BUFFER_SIZE (32 * 1024 - 1)
+
+/*
+ * Returns whether to use DMA pool for TSPP output buffers.
+ * For buffers smaller than page size, using DMA pool
+ * provides better memory utilization as dma_alloc_coherent
+ * allocates minimum of page size.
+ */
+#define TSPP_USE_DMA_POOL(buff_size) ((buff_size) < PAGE_SIZE)
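
For instance, on a system with 4 KB pages, buffers at the default
TSPP_MIN_BUFFER_SIZE (188 bytes) would come from the DMA pool, while a
buffer near TSPP_MAX_BUFFER_SIZE (32 KB - 1) would fall through to
dma_alloc_coherent().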
+
+/*
+ * Max allowed TSPP buffers/descriptors.
+ * If SPS desc FIFO holds X descriptors, we can queue up to X-1 descriptors.
+ */
+#define TSPP_NUM_BUFFERS (TSPP_SPS_DESCRIPTOR_COUNT - 1)
+#define TSPP_TSIF_DEFAULT_TIME_LIMIT 60
+#define SPS_DESCRIPTOR_SIZE 8
+#define MIN_ACCEPTABLE_BUFFER_COUNT 2
+#define TSPP_DEBUG(msg...)
+
+/*
+ * TSIF register offsets
+ */
+#define TSIF_STS_CTL_OFF (0x0)
+#define TSIF_TIME_LIMIT_OFF (0x4)
+#define TSIF_CLK_REF_OFF (0x8)
+#define TSIF_LPBK_FLAGS_OFF (0xc)
+#define TSIF_LPBK_DATA_OFF (0x10)
+#define TSIF_TEST_CTL_OFF (0x14)
+#define TSIF_TEST_MODE_OFF (0x18)
+#define TSIF_TEST_RESET_OFF (0x1c)
+#define TSIF_TEST_EXPORT_OFF (0x20)
+#define TSIF_TEST_CURRENT_OFF (0x24)
+
+#define TSIF_DATA_PORT_OFF (0x100)
+
+/* bits for TSIF_STS_CTL register */
+#define TSIF_STS_CTL_EN_IRQ BIT(28)
+#define TSIF_STS_CTL_PACK_AVAIL BIT(27)
+#define TSIF_STS_CTL_1ST_PACKET BIT(26)
+#define TSIF_STS_CTL_OVERFLOW BIT(25)
+#define TSIF_STS_CTL_LOST_SYNC BIT(24)
+#define TSIF_STS_CTL_TIMEOUT BIT(23)
+#define TSIF_STS_CTL_INV_SYNC BIT(21)
+#define TSIF_STS_CTL_INV_NULL BIT(20)
+#define TSIF_STS_CTL_INV_ERROR BIT(19)
+#define TSIF_STS_CTL_INV_ENABLE BIT(18)
+#define TSIF_STS_CTL_INV_DATA BIT(17)
+#define TSIF_STS_CTL_INV_CLOCK BIT(16)
+#define TSIF_STS_CTL_SPARE BIT(15)
+#define TSIF_STS_CTL_EN_NULL BIT(11)
+#define TSIF_STS_CTL_EN_ERROR BIT(10)
+#define TSIF_STS_CTL_LAST_BIT BIT(9)
+#define TSIF_STS_CTL_EN_TIME_LIM BIT(8)
+#define TSIF_STS_CTL_EN_TCR BIT(7)
+#define TSIF_STS_CTL_TEST_MODE BIT(6)
+#define TSIF_STS_CTL_MODE_2 BIT(5)
+#define TSIF_STS_CTL_EN_DM BIT(4)
+#define TSIF_STS_CTL_STOP BIT(3)
+#define TSIF_STS_CTL_START BIT(0)
+
+/*
+ * TSPP register offsets
+ */
+#define TSPP_RST 0x00
+#define TSPP_CLK_CONTROL 0x04
+#define TSPP_CONFIG 0x08
+#define TSPP_CONTROL 0x0C
+#define TSPP_PS_DISABLE 0x10
+#define TSPP_MSG_IRQ_STATUS 0x14
+#define TSPP_MSG_IRQ_MASK 0x18
+#define TSPP_IRQ_STATUS 0x1C
+#define TSPP_IRQ_MASK 0x20
+#define TSPP_IRQ_CLEAR 0x24
+#define TSPP_PIPE_ERROR_STATUS(_n) (0x28 + (_n << 2))
+#define TSPP_STATUS 0x68
+#define TSPP_CURR_TSP_HEADER 0x6C
+#define TSPP_CURR_PID_FILTER 0x70
+#define TSPP_SYSTEM_KEY(_n) (0x74 + (_n << 2))
+#define TSPP_CBC_INIT_VAL(_n) (0x94 + (_n << 2))
+#define TSPP_DATA_KEY_RESET 0x9C
+#define TSPP_KEY_VALID 0xA0
+#define TSPP_KEY_ERROR 0xA4
+#define TSPP_TEST_CTRL 0xA8
+#define TSPP_VERSION 0xAC
+#define TSPP_GENERICS 0xB0
+#define TSPP_NOP 0xB4
+
+/*
+ * Register bit definitions
+ */
+/* TSPP_RST */
+#define TSPP_RST_RESET BIT(0)
+
+/* TSPP_CLK_CONTROL */
+#define TSPP_CLK_CONTROL_FORCE_CRYPTO BIT(9)
+#define TSPP_CLK_CONTROL_FORCE_PES_PL BIT(8)
+#define TSPP_CLK_CONTROL_FORCE_PES_AF BIT(7)
+#define TSPP_CLK_CONTROL_FORCE_RAW_CTRL BIT(6)
+#define TSPP_CLK_CONTROL_FORCE_PERF_CNT BIT(5)
+#define TSPP_CLK_CONTROL_FORCE_CTX_SEARCH BIT(4)
+#define TSPP_CLK_CONTROL_FORCE_TSP_PROC BIT(3)
+#define TSPP_CLK_CONTROL_FORCE_CONS_AHB2MEM BIT(2)
+#define TSPP_CLK_CONTROL_FORCE_TS_AHB2MEM BIT(1)
+#define TSPP_CLK_CONTROL_SET_CLKON BIT(0)
+
+/* TSPP_CONFIG */
+#define TSPP_CONFIG_SET_PACKET_LENGTH(_a, _b) (_a = (_a & ~(0xF << 8)) | \
+((_b & 0xF) << 8))
+#define TSPP_CONFIG_GET_PACKET_LENGTH(_a) ((_a >> 8) & 0xF)
+#define TSPP_CONFIG_DUP_WITH_DISC_EN BIT(7)
+#define TSPP_CONFIG_PES_SYNC_ERROR_MASK BIT(6)
+#define TSPP_CONFIG_PS_LEN_ERR_MASK BIT(5)
+#define TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK BIT(4)
+#define TSPP_CONFIG_PS_CONT_ERR_MASK BIT(3)
+#define TSPP_CONFIG_PS_DUP_TSP_MASK BIT(2)
+#define TSPP_CONFIG_TSP_ERR_IND_MASK BIT(1)
+#define TSPP_CONFIG_TSP_SYNC_ERR_MASK BIT(0)
+
+/* TSPP_CONTROL */
+#define TSPP_CONTROL_PID_FILTER_LOCK BIT(5)
+#define TSPP_CONTROL_FORCE_KEY_CALC BIT(4)
+#define TSPP_CONTROL_TSP_CONS_SRC_DIS BIT(3)
+#define TSPP_CONTROL_TSP_TSIF1_SRC_DIS BIT(2)
+#define TSPP_CONTROL_TSP_TSIF0_SRC_DIS BIT(1)
+#define TSPP_CONTROL_PERF_COUNT_INIT BIT(0)
+
+/* TSPP_MSG_IRQ_STATUS + TSPP_MSG_IRQ_MASK */
+#define TSPP_MSG_TSPP_IRQ BIT(2)
+#define TSPP_MSG_TSIF_1_IRQ BIT(1)
+#define TSPP_MSG_TSIF_0_IRQ BIT(0)
+
+/* TSPP_IRQ_STATUS + TSPP_IRQ_MASK + TSPP_IRQ_CLEAR */
+#define TSPP_IRQ_STATUS_TSP_RD_CMPL BIT(19)
+#define TSPP_IRQ_STATUS_KEY_ERROR BIT(18)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED_BAD BIT(17)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED BIT(16)
+#define TSPP_IRQ_STATUS_PS_BROKEN(_n) BIT((_n))
+
+/* TSPP_PIPE_ERROR_STATUS */
+#define TSPP_PIPE_PES_SYNC_ERROR BIT(3)
+#define TSPP_PIPE_PS_LENGTH_ERROR BIT(2)
+#define TSPP_PIPE_PS_CONTINUITY_ERROR BIT(1)
+#define TSPP_PIP_PS_LOST_START BIT(0)
+
+/* TSPP_STATUS */
+#define TSPP_STATUS_TSP_PKT_AVAIL BIT(10)
+#define TSPP_STATUS_TSIF1_DM_REQ BIT(6)
+#define TSPP_STATUS_TSIF0_DM_REQ BIT(2)
+#define TSPP_CURR_FILTER_TABLE BIT(0)
+
+/* TSPP_GENERICS */
+#define TSPP_GENERICS_CRYPTO_GEN BIT(12)
+#define TSPP_GENERICS_MAX_CONS_PIPES BIT(7)
+#define TSPP_GENERICS_MAX_PIPES BIT(2)
+#define TSPP_GENERICS_TSIF_1_GEN BIT(1)
+#define TSPP_GENERICS_TSIF_0_GEN BIT(0)
+
+/*
+ * TSPP memory regions
+ */
+#define TSPP_PID_FILTER_TABLE0 0x800
+#define TSPP_PID_FILTER_TABLE1 0x880
+#define TSPP_PID_FILTER_TABLE2 0x900
+#define TSPP_GLOBAL_PERFORMANCE 0x980 /* see tspp_global_performance */
+#define TSPP_PIPE_CONTEXT 0x990 /* see tspp_pipe_context */
+#define TSPP_PIPE_PERFORMANCE 0x998 /* see tspp_pipe_performance */
+#define TSPP_TSP_BUFF_WORD(_n) (0xC10 + (_n << 2))
+#define TSPP_DATA_KEY 0xCD0
+
+struct debugfs_entry {
+ const char *name;
+ mode_t mode;
+ int offset;
+};
+
+static const struct debugfs_entry debugfs_tsif_regs[] = {
+ {"sts_ctl", S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
+ {"time_limit", S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
+ {"clk_ref", S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
+ {"lpbk_flags", S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
+ {"lpbk_data", S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
+ {"test_ctl", S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
+ {"test_mode", S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
+ {"test_reset", S_IWUSR, TSIF_TEST_RESET_OFF},
+ {"test_export", S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
+ {"test_current", S_IRUGO, TSIF_TEST_CURRENT_OFF},
+ {"data_port", S_IRUSR, TSIF_DATA_PORT_OFF},
+};
+
+static const struct debugfs_entry debugfs_tspp_regs[] = {
+ {"rst", S_IRUGO | S_IWUSR, TSPP_RST},
+ {"clk_control", S_IRUGO | S_IWUSR, TSPP_CLK_CONTROL},
+ {"config", S_IRUGO | S_IWUSR, TSPP_CONFIG},
+ {"control", S_IRUGO | S_IWUSR, TSPP_CONTROL},
+ {"ps_disable", S_IRUGO | S_IWUSR, TSPP_PS_DISABLE},
+ {"msg_irq_status", S_IRUGO | S_IWUSR, TSPP_MSG_IRQ_STATUS},
+ {"msg_irq_mask", S_IRUGO | S_IWUSR, TSPP_MSG_IRQ_MASK},
+ {"irq_status", S_IRUGO | S_IWUSR, TSPP_IRQ_STATUS},
+ {"irq_mask", S_IRUGO | S_IWUSR, TSPP_IRQ_MASK},
+ {"irq_clear", S_IRUGO | S_IWUSR, TSPP_IRQ_CLEAR},
+ /* {"pipe_error_status",S_IRUGO | S_IWUSR, TSPP_PIPE_ERROR_STATUS}, */
+ {"status", S_IRUGO | S_IWUSR, TSPP_STATUS},
+ {"curr_tsp_header", S_IRUGO | S_IWUSR, TSPP_CURR_TSP_HEADER},
+ {"curr_pid_filter", S_IRUGO | S_IWUSR, TSPP_CURR_PID_FILTER},
+ /* {"system_key", S_IRUGO | S_IWUSR, TSPP_SYSTEM_KEY}, */
+ /* {"cbc_init_val", S_IRUGO | S_IWUSR, TSPP_CBC_INIT_VAL}, */
+ {"data_key_reset", S_IRUGO | S_IWUSR, TSPP_DATA_KEY_RESET},
+ {"key_valid", S_IRUGO | S_IWUSR, TSPP_KEY_VALID},
+ {"key_error", S_IRUGO | S_IWUSR, TSPP_KEY_ERROR},
+ {"test_ctrl", S_IRUGO | S_IWUSR, TSPP_TEST_CTRL},
+ {"version", S_IRUGO | S_IWUSR, TSPP_VERSION},
+ {"generics", S_IRUGO | S_IWUSR, TSPP_GENERICS},
+ {"pid_filter_table0", S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE0},
+ {"pid_filter_table1", S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE1},
+ {"pid_filter_table2", S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE2},
+ {"tsp_total_num", S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE},
+ {"tsp_ignored_num", S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE + 4},
+ {"tsp_err_ind_num", S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE + 8},
+ {"tsp_sync_err_num", S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE + 16},
+ {"pipe_context", S_IRUGO | S_IWUSR, TSPP_PIPE_CONTEXT},
+ {"pipe_performance", S_IRUGO | S_IWUSR, TSPP_PIPE_PERFORMANCE},
+ {"data_key", S_IRUGO | S_IWUSR, TSPP_DATA_KEY}
+};
+
+struct tspp_pid_filter {
+ u32 filter; /* see FILTER_ macros */
+ u32 config; /* see FILTER_ macros */
+};
+
+/* tsp_info */
+#define FILTER_HEADER_ERROR_MASK BIT(7)
+#define FILTER_TRANS_END_DISABLE BIT(6)
+#define FILTER_DEC_ON_ERROR_EN BIT(5)
+#define FILTER_DECRYPT BIT(4)
+#define FILTER_HAS_ENCRYPTION(_p) (_p->config & FILTER_DECRYPT)
+#define FILTER_GET_PIPE_NUMBER0(_p) (_p->config & 0xF)
+#define FILTER_SET_PIPE_NUMBER0(_p, _b) (_p->config = \
+ (_p->config & ~0xF) | (_b & 0xF))
+#define FILTER_GET_PIPE_PROCESS0(_p) ((_p->filter >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS0(_p, _b) (_p->filter = \
+ (_p->filter & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_PIPE_PID(_p) ((_p->filter >> 13) & 0x1FFF)
+#define FILTER_SET_PIPE_PID(_p, _b) (_p->filter = \
+ (_p->filter & ~(0x1FFF<<13)) | ((_b & 0x1FFF) << 13))
+#define FILTER_GET_PID_MASK(_p) (_p->filter & 0x1FFF)
+#define FILTER_SET_PID_MASK(_p, _b) (_p->filter = \
+ (_p->filter & ~0x1FFF) | (_b & 0x1FFF))
+#define FILTER_GET_PIPE_PROCESS1(_p) ((_p->config >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS1(_p, _b) (_p->config = \
+ (_p->config & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_KEY_NUMBER(_p) ((_p->config >> 8) & 0x7)
+#define FILTER_SET_KEY_NUMBER(_p, _b) (_p->config = \
+ (_p->config & ~(0x7<<8)) | ((_b & 0x7) << 8))
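
These FILTER_ accessors pack a 13-bit PID and mask into the filter word,
and the output pipe, processing modes and key index into the remaining
fields of the two words. A minimal sketch of composing an entry, with
illustrative values only (tspp_add_filter() below does this for real):

	struct tspp_pid_filter p = { .filter = 0, .config = 0 };

	FILTER_SET_PIPE_PID((&p), 0x0100);   /* filter bits 13..25 */
	FILTER_SET_PID_MASK((&p), 0x1FFF);   /* filter bits 0..12, exact match */
	FILTER_SET_PIPE_NUMBER0((&p), 3);    /* config bits 0..3, output pipe */
	FILTER_SET_PIPE_PROCESS0((&p), TSPP_MODE_RAW); /* filter bits 30..31 */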
+
+struct tspp_global_performance_regs {
+ u32 tsp_total;
+ u32 tsp_ignored;
+ u32 tsp_error;
+ u32 tsp_sync;
+};
+
+struct tspp_pipe_context_regs {
+ u16 pes_bytes_left;
+ u16 count;
+ u32 tsif_suffix;
+} __packed;
+#define CONTEXT_GET_STATE(_a) (_a & 0x3)
+#define CONTEXT_UNSPEC_LENGTH BIT(11)
+#define CONTEXT_GET_CONT_COUNT(_a) ((_a >> 12) & 0xF)
+
+#define MSEC_TO_JIFFIES(msec) ((msec) * HZ / 1000)
+
+struct tspp_pipe_performance_regs {
+ u32 tsp_total;
+ u32 ps_duplicate_tsp;
+ u32 tsp_no_payload;
+ u32 tsp_broken_ps;
+ u32 ps_total_num;
+ u32 ps_continuity_error;
+ u32 ps_length_error;
+ u32 pes_sync_error;
+};
+
+struct tspp_tsif_device {
+ void __iomem *base;
+ u32 time_limit;
+ u32 ref_count;
+ enum tspp_tsif_mode mode;
+ int clock_inverse;
+ int data_inverse;
+ int sync_inverse;
+ int enable_inverse;
+ u32 tsif_irq;
+
+ /* debugfs */
+ struct dentry *dent_tsif;
+ struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
+ u32 stat_rx;
+ u32 stat_overflow;
+ u32 stat_lost_sync;
+ u32 stat_timeout;
+};
+
+enum tspp_buf_state {
+ TSPP_BUF_STATE_EMPTY, /* buffer has been allocated, but not waiting */
+ TSPP_BUF_STATE_WAITING, /* buffer is waiting to be filled */
+ TSPP_BUF_STATE_DATA, /* buffer is not empty and can be read */
+ TSPP_BUF_STATE_LOCKED /* buffer is being read by a client */
+};
+
+struct tspp_mem_buffer {
+ struct tspp_mem_buffer *next;
+ struct sps_mem_buffer sps;
+ struct tspp_data_descriptor desc; /* buffer descriptor for kernel api */
+ enum tspp_buf_state state;
+ size_t filled; /* how much data this buffer is holding */
+ int read_index; /* where to start reading data from */
+};
+
+/* this represents each char device 'channel' */
+struct tspp_channel {
+ struct tspp_device *pdev; /* can use container_of instead? */
+ struct sps_pipe *pipe;
+ struct sps_connect config;
+ struct sps_register_event event;
+ struct tspp_mem_buffer *data; /* list of buffers */
+ struct tspp_mem_buffer *read; /* first buffer ready to be read */
+ struct tspp_mem_buffer *waiting; /* first outstanding transfer */
+ struct tspp_mem_buffer *locked; /* buffer currently being read */
+ wait_queue_head_t in_queue; /* set when data is received */
+ u32 id; /* channel id (0-15) */
+ int used; /* is this channel in use? */
+ int key; /* which encryption key index is used */
+ u32 buffer_size; /* size of the sps transfer buffers */
+ u32 max_buffers; /* how many buffers should be allocated */
+ u32 buffer_count; /* how many buffers are actually allocated */
+ u32 filter_count; /* how many filters have been added to this channel */
+ u32 int_freq; /* generate interrupts every x descriptors */
+ enum tspp_source src;
+ enum tspp_mode mode;
+ tspp_notifier *notifier; /* used only with kernel api */
+ void *notify_data; /* data to be passed with the notifier */
+ u32 expiration_period_ms; /* notification on partially filled buffers */
+ struct timer_list expiration_timer;
+ struct dma_pool *dma_pool;
+ tspp_memfree *memfree; /* user defined memory free function */
+ void *user_info; /* user cookie passed to memory alloc/free function */
+};
+
+struct tspp_pid_filter_table {
+ struct tspp_pid_filter filter[TSPP_NUM_PRIORITIES];
+};
+
+struct tspp_key_entry {
+ u32 even_lsb;
+ u32 even_msb;
+ u32 odd_lsb;
+ u32 odd_msb;
+};
+
+struct tspp_key_table {
+ struct tspp_key_entry entry[TSPP_NUM_KEYS];
+};
+
+struct tspp_pinctrl {
+ struct pinctrl *pinctrl;
+
+ struct pinctrl_state *disabled;
+ struct pinctrl_state *tsif0_mode1;
+ struct pinctrl_state *tsif0_mode2;
+ struct pinctrl_state *tsif1_mode1;
+ struct pinctrl_state *tsif1_mode2;
+ struct pinctrl_state *dual_mode1;
+ struct pinctrl_state *dual_mode2;
+
+ bool tsif0_active;
+ bool tsif1_active;
+};
+
+/* this represents the actual hardware device */
+struct tspp_device {
+ struct list_head devlist; /* list of all devices */
+ struct platform_device *pdev;
+ void __iomem *base;
+ uint32_t tsif_bus_client;
+ unsigned int tspp_irq;
+ unsigned int bam_irq;
+ unsigned long bam_handle;
+ struct sps_bam_props bam_props;
+ struct wakeup_source ws;
+ spinlock_t spinlock;
+ struct tasklet_struct tlet;
+ struct tspp_tsif_device tsif[TSPP_TSIF_INSTANCES];
+ /* clocks */
+ struct clk *tsif_pclk;
+ struct clk *tsif_ref_clk;
+ /* regulators */
+ struct regulator *tsif_vreg;
+ /* data */
+ struct tspp_pid_filter_table *filters[TSPP_FILTER_TABLES];
+ struct tspp_channel channels[TSPP_NUM_CHANNELS];
+ struct tspp_key_table *tspp_key_table;
+ struct tspp_global_performance_regs *tspp_global_performance;
+ struct tspp_pipe_context_regs *tspp_pipe_context;
+ struct tspp_pipe_performance_regs *tspp_pipe_performance;
+ bool req_irqs;
+ /* pinctrl */
+ struct mutex mutex;
+ struct tspp_pinctrl pinctrl;
+
+ struct dentry *dent;
+ struct dentry *debugfs_regs[ARRAY_SIZE(debugfs_tspp_regs)];
+};
+
+static int tspp_key_entry;
+static u32 channel_id; /* next channel id number to assign */
+
+static LIST_HEAD(tspp_devices);
+
+/*** IRQ ***/
+static irqreturn_t tspp_isr(int irq, void *dev)
+{
+ struct tspp_device *device = dev;
+ u32 status, mask;
+ u32 data;
+
+ status = readl_relaxed(device->base + TSPP_IRQ_STATUS);
+ mask = readl_relaxed(device->base + TSPP_IRQ_MASK);
+ status &= mask;
+
+ if (!status) {
+ dev_warn(&device->pdev->dev, "Spurious interrupt");
+ return IRQ_NONE;
+ }
+
+ /* if (status & TSPP_IRQ_STATUS_TSP_RD_CMPL) */
+
+ if (status & TSPP_IRQ_STATUS_KEY_ERROR) {
+ /* read the key error info */
+ data = readl_relaxed(device->base + TSPP_KEY_ERROR);
+ dev_info(&device->pdev->dev, "key error 0x%x", data);
+ }
+ if (status & TSPP_IRQ_STATUS_KEY_SWITCHED_BAD) {
+ data = readl_relaxed(device->base + TSPP_KEY_VALID);
+ dev_info(&device->pdev->dev, "key invalidated: 0x%x", data);
+ }
+ if (status & TSPP_IRQ_STATUS_KEY_SWITCHED)
+ dev_info(&device->pdev->dev, "key switched");
+
+ if (status & 0xffff)
+ dev_info(&device->pdev->dev, "broken pipe %i", status & 0xffff);
+
+ writel_relaxed(status, device->base + TSPP_IRQ_CLEAR);
+
+ /*
+ * Before returning IRQ_HANDLED to the generic interrupt handling
+ * framework, we need to make sure that all operations, including the
+ * clearing of interrupt status registers in the hardware, have been
+ * performed. Thus a barrier after clearing the interrupt status
+ * register is required to guarantee that it has really been cleared
+ * by the time we return from this handler.
+ */
+ wmb();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tsif_isr(int irq, void *dev)
+{
+ struct tspp_tsif_device *tsif_device = dev;
+ u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+
+ if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
+ TSIF_STS_CTL_OVERFLOW |
+ TSIF_STS_CTL_LOST_SYNC |
+ TSIF_STS_CTL_TIMEOUT)))
+ return IRQ_NONE;
+
+ if (sts_ctl & TSIF_STS_CTL_OVERFLOW)
+ tsif_device->stat_overflow++;
+
+ if (sts_ctl & TSIF_STS_CTL_LOST_SYNC)
+ tsif_device->stat_lost_sync++;
+
+ if (sts_ctl & TSIF_STS_CTL_TIMEOUT)
+ tsif_device->stat_timeout++;
+
+ iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+
+ /*
+ * Before returning IRQ_HANDLED to the generic interrupt handling
+ * framework, we need to make sure that all operations, including the
+ * clearing of interrupt status registers in the hardware, have been
+ * performed. Thus a barrier after clearing the interrupt status
+ * register is required to guarantee that it has really been cleared
+ * by the time we return from this handler.
+ */
+ wmb();
+ return IRQ_HANDLED;
+}
+
+/*** callbacks ***/
+static void tspp_sps_complete_cb(struct sps_event_notify *notify)
+{
+ struct tspp_device *pdev;
+
+ if (!notify || !notify->user)
+ return;
+
+ pdev = notify->user;
+ tasklet_schedule(&pdev->tlet);
+}
+
+static void tspp_expiration_timer(unsigned long data)
+{
+ struct tspp_device *pdev = (struct tspp_device *)data;
+
+ if (pdev)
+ tasklet_schedule(&pdev->tlet);
+}
+
+/*** tasklet ***/
+static void tspp_sps_complete_tlet(unsigned long data)
+{
+ int i;
+ int complete;
+ unsigned long flags;
+ struct sps_iovec iovec;
+ struct tspp_channel *channel;
+ struct tspp_device *device = (struct tspp_device *)data;
+
+ spin_lock_irqsave(&device->spinlock, flags);
+
+ for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+ complete = 0;
+ channel = &device->channels[i];
+
+ if (!channel->used || !channel->waiting)
+ continue;
+
+ /* stop the expiration timer */
+ if (channel->expiration_period_ms)
+ del_timer(&channel->expiration_timer);
+
+ /* get completions */
+ while (channel->waiting->state == TSPP_BUF_STATE_WAITING) {
+ if (sps_get_iovec(channel->pipe, &iovec) != 0) {
+ pr_err("tspp: Error in iovec on channel %i",
+ channel->id);
+ break;
+ }
+ if (iovec.size == 0)
+ break;
+
+ if (DESC_FULL_ADDR(iovec.flags, iovec.addr)
+ != channel->waiting->sps.phys_base)
+ pr_err("tspp: buffer mismatch %pa",
+ &channel->waiting->sps.phys_base);
+
+ complete = 1;
+ channel->waiting->state = TSPP_BUF_STATE_DATA;
+ channel->waiting->filled = iovec.size;
+ channel->waiting->read_index = 0;
+
+ if (channel->src == TSPP_SOURCE_TSIF0)
+ device->tsif[0].stat_rx++;
+ else if (channel->src == TSPP_SOURCE_TSIF1)
+ device->tsif[1].stat_rx++;
+
+ /* update the pointers */
+ channel->waiting = channel->waiting->next;
+ }
+
+ /* wake any waiting processes */
+ if (complete) {
+ wake_up_interruptible(&channel->in_queue);
+
+ /* call notifiers */
+ if (channel->notifier)
+ channel->notifier(channel->id,
+ channel->notify_data);
+ }
+
+ /* restart expiration timer */
+ if (channel->expiration_period_ms)
+ mod_timer(&channel->expiration_timer,
+ jiffies +
+ MSEC_TO_JIFFIES(
+ channel->expiration_period_ms));
+ }
+
+ spin_unlock_irqrestore(&device->spinlock, flags);
+}
+
+static int tspp_config_gpios(struct tspp_device *device,
+ enum tspp_source source,
+ int enable)
+{
+ int ret;
+ struct pinctrl_state *s;
+ struct tspp_pinctrl *p = &device->pinctrl;
+ bool mode2;
+
+ /*
+ * TSIF devices are handled separately; however, changes to the pinctrl
+ * state must be protected against racing with each other.
+ */
+ if (mutex_lock_interruptible(&device->mutex))
+ return -ERESTARTSYS;
+
+ switch (source) {
+ case TSPP_SOURCE_TSIF0:
+ mode2 = device->tsif[0].mode == TSPP_TSIF_MODE_2;
+ if (enable == p->tsif1_active) {
+ if (enable)
+ /* Both tsif enabled */
+ s = mode2 ? p->dual_mode2 : p->dual_mode1;
+ else
+ /* Both tsif disabled */
+ s = p->disabled;
+ } else if (enable) {
+ /* Only tsif0 is enabled */
+ s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1;
+ } else {
+ /* Only tsif1 is enabled */
+ s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1;
+ }
+
+ ret = pinctrl_select_state(p->pinctrl, s);
+ if (!ret)
+ p->tsif0_active = enable;
+ break;
+ case TSPP_SOURCE_TSIF1:
+ mode2 = device->tsif[1].mode == TSPP_TSIF_MODE_2;
+ if (enable == p->tsif0_active) {
+ if (enable)
+ /* Both tsif enabled */
+ s = mode2 ? p->dual_mode2 : p->dual_mode1;
+ else
+ /* Both tsif disabled */
+ s = p->disabled;
+ } else if (enable) {
+ /* Only tsif1 is enabled */
+ s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1;
+ } else {
+ /* Only tsif0 is enabled */
+ s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1;
+ }
+
+ ret = pinctrl_select_state(p->pinctrl, s);
+ if (!ret)
+ p->tsif1_active = enable;
+ break;
+ default:
+ pr_err("%s: invalid source %d\n", __func__, source);
+ mutex_unlock(&device->mutex);
+ return -EINVAL;
+ }
+
+ if (ret)
+ pr_err("%s: failed to change pinctrl state, ret=%d\n",
+ __func__, ret);
+
+ mutex_unlock(&device->mutex);
+ return ret;
+}
+
+static int tspp_get_pinctrl(struct tspp_device *device)
+{
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *state;
+
+ pinctrl = devm_pinctrl_get(&device->pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ pr_err("%s: Unable to get pinctrl handle\n", __func__);
+ return -EINVAL;
+ }
+ device->pinctrl.pinctrl = pinctrl;
+
+ state = pinctrl_lookup_state(pinctrl, "disabled");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "disabled");
+ return -EINVAL;
+ }
+ device->pinctrl.disabled = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif0-mode1");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif0-mode1");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif0_mode1 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif0-mode2");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif0-mode2");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif0_mode2 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif1-mode1");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif1-mode1");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif1_mode1 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif1-mode2");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif1-mode2");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif1_mode2 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode1");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "dual-tsif-mode1");
+ return -EINVAL;
+ }
+ device->pinctrl.dual_mode1 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode2");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "dual-tsif-mode2");
+ return -EINVAL;
+ }
+ device->pinctrl.dual_mode2 = state;
+
+ device->pinctrl.tsif0_active = false;
+ device->pinctrl.tsif1_active = false;
+
+ return 0;
+}
+
+
+/*** Clock functions ***/
+static int tspp_clock_start(struct tspp_device *device)
+{
+ int rc;
+
+ if (device == NULL) {
+ pr_err("tspp: Can't start clocks, invalid device\n");
+ return -EINVAL;
+ }
+
+ if (device->tsif_bus_client) {
+ rc = msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 1);
+ if (rc) {
+ pr_err("tspp: Can't enable bus\n");
+ return -EBUSY;
+ }
+ }
+
+ if (device->tsif_vreg) {
+ rc = regulator_set_voltage(device->tsif_vreg,
+ RPM_REGULATOR_CORNER_SUPER_TURBO,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ if (rc) {
+ pr_err("Unable to set CX voltage.\n");
+ if (device->tsif_bus_client)
+ msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ return rc;
+ }
+ }
+
+ if (device->tsif_pclk && clk_prepare_enable(device->tsif_pclk) != 0) {
+ pr_err("tspp: Can't start pclk");
+
+ if (device->tsif_vreg) {
+ regulator_set_voltage(device->tsif_vreg,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ }
+
+ if (device->tsif_bus_client)
+ msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ return -EBUSY;
+ }
+
+ if (device->tsif_ref_clk &&
+ clk_prepare_enable(device->tsif_ref_clk) != 0) {
+ pr_err("tspp: Can't start ref clk");
+ clk_disable_unprepare(device->tsif_pclk);
+ if (device->tsif_vreg) {
+ regulator_set_voltage(device->tsif_vreg,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ }
+
+ if (device->tsif_bus_client)
+ msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void tspp_clock_stop(struct tspp_device *device)
+{
+ int rc;
+
+ if (device == NULL) {
+ pr_err("tspp: Can't stop clocks, invalid device\n");
+ return;
+ }
+
+ if (device->tsif_pclk)
+ clk_disable_unprepare(device->tsif_pclk);
+
+ if (device->tsif_ref_clk)
+ clk_disable_unprepare(device->tsif_ref_clk);
+
+ if (device->tsif_vreg) {
+ rc = regulator_set_voltage(device->tsif_vreg,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ if (rc)
+ pr_err("Unable to set CX voltage.\n");
+ }
+
+ if (device->tsif_bus_client) {
+ rc = msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ if (rc)
+ pr_err("tspp: Can't disable bus\n");
+ }
+}
+
+/*** TSIF functions ***/
+static int tspp_start_tsif(struct tspp_tsif_device *tsif_device)
+{
+ int start_hardware = 0;
+ u32 ctl;
+
+ if (tsif_device->ref_count == 0) {
+ start_hardware = 1;
+ } else if (tsif_device->ref_count > 0) {
+ ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
+ if ((ctl & TSIF_STS_CTL_START) != 1) {
+ /* this hardware should already be running */
+ pr_warn("tspp: tsif hw not started but ref count > 0");
+ start_hardware = 1;
+ }
+ }
+
+ if (start_hardware) {
+ ctl = TSIF_STS_CTL_EN_IRQ |
+ TSIF_STS_CTL_EN_DM |
+ TSIF_STS_CTL_PACK_AVAIL |
+ TSIF_STS_CTL_OVERFLOW |
+ TSIF_STS_CTL_LOST_SYNC;
+
+ if (tsif_device->clock_inverse)
+ ctl |= TSIF_STS_CTL_INV_CLOCK;
+
+ if (tsif_device->data_inverse)
+ ctl |= TSIF_STS_CTL_INV_DATA;
+
+ if (tsif_device->sync_inverse)
+ ctl |= TSIF_STS_CTL_INV_SYNC;
+
+ if (tsif_device->enable_inverse)
+ ctl |= TSIF_STS_CTL_INV_ENABLE;
+
+ switch (tsif_device->mode) {
+ case TSPP_TSIF_MODE_LOOPBACK:
+ ctl |= TSIF_STS_CTL_EN_NULL |
+ TSIF_STS_CTL_EN_ERROR |
+ TSIF_STS_CTL_TEST_MODE;
+ break;
+ case TSPP_TSIF_MODE_1:
+ ctl |= TSIF_STS_CTL_EN_TIME_LIM |
+ TSIF_STS_CTL_EN_TCR;
+ break;
+ case TSPP_TSIF_MODE_2:
+ ctl |= TSIF_STS_CTL_EN_TIME_LIM |
+ TSIF_STS_CTL_EN_TCR |
+ TSIF_STS_CTL_MODE_2;
+ break;
+ default:
+ pr_warn("tspp: unknown tsif mode 0x%x",
+ tsif_device->mode);
+ }
+ writel_relaxed(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+ writel_relaxed(tsif_device->time_limit,
+ tsif_device->base + TSIF_TIME_LIMIT_OFF);
+ /* assure register configuration is done before starting TSIF */
+ wmb();
+ writel_relaxed(ctl | TSIF_STS_CTL_START,
+ tsif_device->base + TSIF_STS_CTL_OFF);
+ /* assure TSIF start configuration */
+ wmb();
+ }
+
+ ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
+ if (!(ctl & TSIF_STS_CTL_START))
+ return -EBUSY;
+
+ tsif_device->ref_count++;
+ return 0;
+}
+
+static void tspp_stop_tsif(struct tspp_tsif_device *tsif_device)
+{
+ if (tsif_device->ref_count == 0)
+ return;
+
+ tsif_device->ref_count--;
+
+ if (tsif_device->ref_count == 0) {
+ writel_relaxed(TSIF_STS_CTL_STOP,
+ tsif_device->base + TSIF_STS_CTL_OFF);
+ /* assure TSIF stop configuration */
+ wmb();
+ }
+}
+
+/*** local TSPP functions ***/
+static int tspp_channels_in_use(struct tspp_device *pdev)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+ count += (pdev->channels[i].used ? 1 : 0);
+
+ return count;
+}
+
+static struct tspp_device *tspp_find_by_id(int id)
+{
+ struct tspp_device *dev;
+
+ list_for_each_entry(dev, &tspp_devices, devlist) {
+ if (dev->pdev->id == id)
+ return dev;
+ }
+ return NULL;
+}
+
+static int tspp_get_key_entry(void)
+{
+ int i;
+
+ for (i = 0; i < TSPP_NUM_KEYS; i++) {
+ if (!(tspp_key_entry & (1 << i))) {
+ tspp_key_entry |= (1 << i);
+ return i;
+ }
+ }
+	return -1; /* no free key entry; callers check for -1 */
+}
+
+static void tspp_free_key_entry(int entry)
+{
+	if (entry < 0 || entry >= TSPP_NUM_KEYS) {
+ pr_err("tspp_free_key_entry: index out of bounds");
+ return;
+ }
+
+ tspp_key_entry &= ~(1 << entry);
+}
+
+static int tspp_alloc_buffer(u32 channel_id, struct tspp_data_descriptor *desc,
+ u32 size, struct dma_pool *dma_pool, tspp_allocator *alloc, void *user)
+{
+ if (size < TSPP_MIN_BUFFER_SIZE ||
+ size > TSPP_MAX_BUFFER_SIZE) {
+ pr_err("tspp: bad buffer size %i", size);
+ return -ENOMEM;
+ }
+
+ if (alloc) {
+ TSPP_DEBUG("tspp using alloc function");
+ desc->virt_base = alloc(channel_id, size,
+ &desc->phys_base, user);
+ } else {
+ if (!dma_pool)
+ desc->virt_base = dma_alloc_coherent(NULL, size,
+ &desc->phys_base, GFP_KERNEL);
+ else
+ desc->virt_base = dma_pool_alloc(dma_pool, GFP_KERNEL,
+ &desc->phys_base);
+
+ if (desc->virt_base == 0) {
+ pr_err("tspp: dma buffer allocation failed %i\n", size);
+ return -ENOMEM;
+ }
+ }
+
+ desc->size = size;
+ return 0;
+}
+
+static int tspp_queue_buffer(struct tspp_channel *channel,
+ struct tspp_mem_buffer *buffer)
+{
+ int rc;
+ u32 flags = 0;
+
+ /* make sure the interrupt frequency is valid */
+ if (channel->int_freq < 1)
+ channel->int_freq = 1;
+
+ /* generate interrupt according to requested frequency */
+ if (buffer->desc.id % channel->int_freq == channel->int_freq-1)
+ flags = SPS_IOVEC_FLAG_INT;
+
+ /* start the transfer */
+ rc = sps_transfer_one(channel->pipe,
+ buffer->sps.phys_base,
+ buffer->sps.size,
+ flags ? channel->pdev : NULL,
+ flags);
+ if (rc < 0)
+ return rc;
+
+ buffer->state = TSPP_BUF_STATE_WAITING;
+
+ return 0;
+}
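
To illustrate the interrupt-frequency rule above: with int_freq set to 4,
buffers whose desc.id is 3, 7, 11, ... are submitted with
SPS_IOVEC_FLAG_INT, so the BAM raises roughly one completion interrupt
per four buffers.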
+
+static int tspp_global_reset(struct tspp_device *pdev)
+{
+ u32 i, val;
+
+ /* stop all TSIFs */
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+ pdev->tsif[i].ref_count = 1; /* allows stopping hw */
+ tspp_stop_tsif(&pdev->tsif[i]); /* will reset ref_count to 0 */
+ pdev->tsif[i].time_limit = TSPP_TSIF_DEFAULT_TIME_LIMIT;
+ pdev->tsif[i].clock_inverse = 0;
+ pdev->tsif[i].data_inverse = 0;
+ pdev->tsif[i].sync_inverse = 0;
+ pdev->tsif[i].enable_inverse = 0;
+ }
+ writel_relaxed(TSPP_RST_RESET, pdev->base + TSPP_RST);
+ /* assure state is reset before continuing with configuration */
+ wmb();
+
+ /* TSPP tables */
+ for (i = 0; i < TSPP_FILTER_TABLES; i++)
+ memset_io(pdev->filters[i],
+ 0, sizeof(struct tspp_pid_filter_table));
+
+ /* disable all filters */
+ val = (2 << TSPP_NUM_CHANNELS) - 1;
+ writel_relaxed(val, pdev->base + TSPP_PS_DISABLE);
+
+ /* TSPP registers */
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val | TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+ pdev->base + TSPP_CONTROL);
+ /* assure tspp performance count clock is set to 0 */
+ wmb();
+ memset_io(pdev->tspp_global_performance, 0,
+ sizeof(struct tspp_global_performance_regs));
+ memset_io(pdev->tspp_pipe_context, 0,
+ sizeof(struct tspp_pipe_context_regs));
+ memset_io(pdev->tspp_pipe_performance, 0,
+ sizeof(struct tspp_pipe_performance_regs));
+ /* assure tspp pipe context registers are set to 0 */
+ wmb();
+ writel_relaxed(val & ~TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+ pdev->base + TSPP_CONTROL);
+ /* assure tspp performance count clock is reset */
+ wmb();
+
+ val = readl_relaxed(pdev->base + TSPP_CONFIG);
+ val &= ~(TSPP_CONFIG_PS_LEN_ERR_MASK |
+ TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK |
+ TSPP_CONFIG_PS_CONT_ERR_MASK);
+ TSPP_CONFIG_SET_PACKET_LENGTH(val, TSPP_PACKET_LENGTH);
+ writel_relaxed(val, pdev->base + TSPP_CONFIG);
+ writel_relaxed(0x0007ffff, pdev->base + TSPP_IRQ_MASK);
+ writel_relaxed(0x000fffff, pdev->base + TSPP_IRQ_CLEAR);
+ writel_relaxed(0, pdev->base + TSPP_RST);
+ /* assure tspp reset clear */
+ wmb();
+
+ tspp_key_entry = 0;
+
+ return 0;
+}
+
+static void tspp_channel_init(struct tspp_channel *channel,
+ struct tspp_device *pdev)
+{
+ channel->pdev = pdev;
+ channel->data = NULL;
+ channel->read = NULL;
+ channel->waiting = NULL;
+ channel->locked = NULL;
+ channel->id = channel_id++;
+ channel->used = 0;
+ channel->buffer_size = TSPP_MIN_BUFFER_SIZE;
+ channel->max_buffers = TSPP_NUM_BUFFERS;
+ channel->buffer_count = 0;
+ channel->filter_count = 0;
+ channel->int_freq = 1;
+ channel->src = TSPP_SOURCE_NONE;
+ channel->mode = TSPP_MODE_DISABLED;
+ channel->notifier = NULL;
+ channel->notify_data = NULL;
+ channel->expiration_period_ms = 0;
+ channel->memfree = NULL;
+ channel->user_info = NULL;
+ init_waitqueue_head(&channel->in_queue);
+}
+
+static void tspp_set_tsif_mode(struct tspp_channel *channel,
+ enum tspp_tsif_mode mode)
+{
+ int index;
+
+ switch (channel->src) {
+ case TSPP_SOURCE_TSIF0:
+ index = 0;
+ break;
+ case TSPP_SOURCE_TSIF1:
+ index = 1;
+ break;
+ default:
+ pr_warn("tspp: can't set mode for non-tsif source %d",
+ channel->src);
+ return;
+ }
+ channel->pdev->tsif[index].mode = mode;
+}
+
+static void tspp_set_signal_inversion(struct tspp_channel *channel,
+ int clock_inverse, int data_inverse,
+ int sync_inverse, int enable_inverse)
+{
+ int index;
+
+ switch (channel->src) {
+ case TSPP_SOURCE_TSIF0:
+ index = 0;
+ break;
+ case TSPP_SOURCE_TSIF1:
+ index = 1;
+ break;
+ default:
+ return;
+ }
+ channel->pdev->tsif[index].clock_inverse = clock_inverse;
+ channel->pdev->tsif[index].data_inverse = data_inverse;
+ channel->pdev->tsif[index].sync_inverse = sync_inverse;
+ channel->pdev->tsif[index].enable_inverse = enable_inverse;
+}
+
+static int tspp_is_buffer_size_aligned(u32 size, enum tspp_mode mode)
+{
+ u32 alignment;
+
+ switch (mode) {
+ case TSPP_MODE_RAW:
+ /* must be a multiple of 192 */
+ alignment = (TSPP_PACKET_LENGTH + 4);
+ if (size % alignment)
+ return 0;
+ return 1;
+
+ case TSPP_MODE_RAW_NO_SUFFIX:
+ /* must be a multiple of 188 */
+ alignment = TSPP_PACKET_LENGTH;
+ if (size % alignment)
+ return 0;
+ return 1;
+
+ case TSPP_MODE_DISABLED:
+ case TSPP_MODE_PES:
+ default:
+ /* no alignment requirement */
+ return 1;
+ }
+
+}
+
+static u32 tspp_align_buffer_size_by_mode(u32 size, enum tspp_mode mode)
+{
+ u32 new_size;
+ u32 alignment;
+
+ switch (mode) {
+ case TSPP_MODE_RAW:
+ /* must be a multiple of 192 */
+ alignment = (TSPP_PACKET_LENGTH + 4);
+ break;
+
+ case TSPP_MODE_RAW_NO_SUFFIX:
+ /* must be a multiple of 188 */
+ alignment = TSPP_PACKET_LENGTH;
+ break;
+
+ case TSPP_MODE_DISABLED:
+ case TSPP_MODE_PES:
+ default:
+ /* no alignment requirement - give the user what he asks for */
+ alignment = 1;
+ break;
+ }
+ /* align up */
+ new_size = (((size + alignment - 1) / alignment) * alignment);
+ return new_size;
+}
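
As a worked example of the align-up arithmetic: in TSPP_MODE_RAW
(192-byte alignment), a requested size of 4096 becomes
((4096 + 191) / 192) * 192 = 22 * 192 = 4224 bytes.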
+
+static void tspp_destroy_buffers(u32 channel_id, struct tspp_channel *channel)
+{
+ int i;
+ struct tspp_mem_buffer *pbuf, *temp;
+
+ pbuf = channel->data;
+ for (i = 0; i < channel->buffer_count; i++) {
+ if (pbuf->desc.phys_base) {
+ if (channel->memfree) {
+ channel->memfree(channel_id,
+ pbuf->desc.size,
+ pbuf->desc.virt_base,
+ pbuf->desc.phys_base,
+ channel->user_info);
+ } else {
+ if (!channel->dma_pool)
+ dma_free_coherent(
+ &channel->pdev->pdev->dev,
+ pbuf->desc.size,
+ pbuf->desc.virt_base,
+ pbuf->desc.phys_base);
+ else
+ dma_pool_free(channel->dma_pool,
+ pbuf->desc.virt_base,
+ pbuf->desc.phys_base);
+ }
+ pbuf->desc.phys_base = 0;
+ }
+ pbuf->desc.virt_base = 0;
+ pbuf->state = TSPP_BUF_STATE_EMPTY;
+ temp = pbuf;
+ pbuf = pbuf->next;
+ kfree(temp);
+ }
+}
+
+static int msm_tspp_req_irqs(struct tspp_device *device)
+{
+ int rc;
+ int i;
+ int j;
+
+ rc = request_irq(device->tspp_irq, tspp_isr, IRQF_SHARED,
+ dev_name(&device->pdev->dev), device);
+ if (rc) {
+ dev_err(&device->pdev->dev,
+ "failed to request TSPP IRQ %d : %d",
+ device->tspp_irq, rc);
+ return rc;
+ }
+
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+ rc = request_irq(device->tsif[i].tsif_irq,
+ tsif_isr, IRQF_SHARED, dev_name(&device->pdev->dev),
+ &device->tsif[i]);
+ if (rc) {
+ dev_err(&device->pdev->dev,
+ "failed to request TSIF%d IRQ: %d",
+ i, rc);
+ goto failed;
+ }
+ }
+ device->req_irqs = true;
+ return 0;
+
+failed:
+ free_irq(device->tspp_irq, device);
+ for (j = 0; j < i; j++)
+		free_irq(device->tsif[j].tsif_irq, &device->tsif[j]);
+
+ return rc;
+}
+
+static inline void msm_tspp_free_irqs(struct tspp_device *device)
+{
+ int i;
+
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+ if (device->tsif[i].tsif_irq)
+ free_irq(device->tsif[i].tsif_irq, &device->tsif[i]);
+ }
+
+ if (device->tspp_irq)
+ free_irq(device->tspp_irq, device);
+ device->req_irqs = false;
+}
+
+/*** TSPP API functions ***/
+
+/**
+ * tspp_open_stream - open a TSPP stream for use.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @source: stream source parameters.
+ *
+ * Return error status
+ *
+ */
+int tspp_open_stream(u32 dev, u32 channel_id,
+ struct tspp_select_source *source)
+{
+ u32 val;
+ int rc;
+ struct tspp_device *pdev;
+ struct tspp_channel *channel;
+ bool req_irqs = false;
+
+ TSPP_DEBUG("tspp_open_stream %i %i %i %i",
+ dev, channel_id, source->source, source->mode);
+
+ if (dev >= TSPP_MAX_DEVICES) {
+ pr_err("tspp: device id out of range");
+ return -ENODEV;
+ }
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_str: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+ channel->src = source->source;
+ tspp_set_tsif_mode(channel, source->mode);
+ tspp_set_signal_inversion(channel, source->clk_inverse,
+ source->data_inverse, source->sync_inverse,
+ source->enable_inverse);
+
+ /* Request IRQ resources on first open */
+ if (!pdev->req_irqs && (source->source == TSPP_SOURCE_TSIF0 ||
+ source->source == TSPP_SOURCE_TSIF1)) {
+ rc = msm_tspp_req_irqs(pdev);
+ if (rc) {
+ pr_err("tspp: error requesting irqs\n");
+ return rc;
+ }
+ req_irqs = true;
+ }
+
+ switch (source->source) {
+ case TSPP_SOURCE_TSIF0:
+ if (tspp_config_gpios(pdev, channel->src, 1) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error enabling tsif0 GPIOs\n");
+ goto free_irq;
+ }
+ /* make sure TSIF0 is running & enabled */
+ if (tspp_start_tsif(&pdev->tsif[0]) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error starting tsif0");
+ goto free_irq;
+ }
+ if (pdev->tsif[0].ref_count == 1) {
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+ pdev->base + TSPP_CONTROL);
+ /* Assure BAM TS PKT packet processing is enabled */
+ wmb();
+ }
+ break;
+ case TSPP_SOURCE_TSIF1:
+ if (tspp_config_gpios(pdev, channel->src, 1) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error enabling tsif1 GPIOs\n");
+ goto free_irq;
+ }
+ /* make sure TSIF1 is running & enabled */
+ if (tspp_start_tsif(&pdev->tsif[1]) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error starting tsif1");
+ goto free_irq;
+ }
+ if (pdev->tsif[1].ref_count == 1) {
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+ pdev->base + TSPP_CONTROL);
+ /* Assure BAM TS PKT packet processing is enabled */
+ wmb();
+ }
+ break;
+ case TSPP_SOURCE_MEM:
+ break;
+ default:
+ pr_err("tspp: channel %i invalid source %i",
+ channel->id, source->source);
+ return -EBUSY;
+ }
+
+ return 0;
+
+free_irq:
+ /* Free irqs only if were requested during opening of this stream */
+ if (req_irqs)
+ msm_tspp_free_irqs(pdev);
+ return rc;
+}
+EXPORT_SYMBOL(tspp_open_stream);
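
For orientation, a minimal caller-side sketch of the stream API (field
values are illustrative only; a real client pairs this with
tspp_open_channel() and tspp_add_filter(), shown further below):

	struct tspp_select_source src = {
		.source = TSPP_SOURCE_TSIF0,
		.mode = TSPP_TSIF_MODE_2,
		.clk_inverse = 0,
		.data_inverse = 0,
		.sync_inverse = 0,
		.enable_inverse = 0,
	};
	int rc;

	rc = tspp_open_stream(0, 0, &src); /* device 0, channel 0 */
	if (rc)
		return rc;
	/* ... consume stream data ... */
	tspp_close_stream(0, 0);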
+
+/**
+ * tspp_close_stream - close a TSPP stream.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_close_stream(u32 dev, u32 channel_id)
+{
+ u32 val;
+ u32 prev_ref_count = 0;
+ struct tspp_device *pdev;
+ struct tspp_channel *channel;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_cs: can't find device %i", dev);
+ return -EBUSY;
+ }
+ channel = &pdev->channels[channel_id];
+
+ switch (channel->src) {
+ case TSPP_SOURCE_TSIF0:
+ prev_ref_count = pdev->tsif[0].ref_count;
+ tspp_stop_tsif(&pdev->tsif[0]);
+ if (tspp_config_gpios(pdev, channel->src, 0) != 0)
+ pr_err("tspp: error disabling tsif0 GPIOs\n");
+
+ if (prev_ref_count == 1) {
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val | TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+ pdev->base + TSPP_CONTROL);
+ /* Assure BAM TS PKT packet processing is disabled */
+ wmb();
+ }
+ break;
+ case TSPP_SOURCE_TSIF1:
+ prev_ref_count = pdev->tsif[1].ref_count;
+ tspp_stop_tsif(&pdev->tsif[1]);
+ if (tspp_config_gpios(pdev, channel->src, 0) != 0)
+ pr_err("tspp: error disabling tsif0 GPIOs\n");
+
+ if (prev_ref_count == 1) {
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val | TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+ pdev->base + TSPP_CONTROL);
+ /* Assure BAM TS PKT packet processing is disabled */
+ wmb();
+ }
+ break;
+ case TSPP_SOURCE_MEM:
+ break;
+ case TSPP_SOURCE_NONE:
+ break;
+ }
+
+ channel->src = TSPP_SOURCE_NONE;
+
+ /* Free requested interrupts to save power */
+ if ((pdev->tsif[0].ref_count + pdev->tsif[1].ref_count) == 0 &&
+ prev_ref_count)
+ msm_tspp_free_irqs(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_close_stream);
+
+static int tspp_init_sps_device(struct tspp_device *dev)
+{
+ int ret;
+
+ ret = sps_register_bam_device(&dev->bam_props, &dev->bam_handle);
+ if (ret) {
+ pr_err("tspp: failed to register bam device, err-%d\n", ret);
+ return ret;
+ }
+
+ ret = sps_device_reset(dev->bam_handle);
+ if (ret) {
+ sps_deregister_bam_device(dev->bam_handle);
+ pr_err("tspp: error resetting bam device, err=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * tspp_open_channel - open a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_open_channel(u32 dev, u32 channel_id)
+{
+ int rc = 0;
+ struct sps_connect *config;
+ struct sps_register_event *event;
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_oc: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+
+ if (channel->used) {
+ pr_err("tspp channel already in use");
+ return -EBUSY;
+ }
+
+ config = &channel->config;
+ event = &channel->event;
+
+ /* start the clocks if needed */
+ if (tspp_channels_in_use(pdev) == 0) {
+ rc = tspp_clock_start(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->bam_handle == SPS_DEV_HANDLE_INVALID) {
+ rc = tspp_init_sps_device(pdev);
+ if (rc) {
+ pr_err("tspp: failed to init sps device, err=%d\n",
+ rc);
+ tspp_clock_stop(pdev);
+ return rc;
+ }
+ }
+
+ __pm_stay_awake(&pdev->ws);
+ }
+
+ /* mark it as used */
+ channel->used = 1;
+
+ /* start the bam */
+ channel->pipe = sps_alloc_endpoint();
+ if (channel->pipe == 0) {
+ pr_err("tspp: error allocating endpoint");
+ rc = -ENOMEM;
+ goto err_sps_alloc;
+ }
+
+ /* get default configuration */
+ sps_get_config(channel->pipe, config);
+
+ config->source = pdev->bam_handle;
+ config->destination = SPS_DEV_HANDLE_MEM;
+ config->mode = SPS_MODE_SRC;
+ config->options =
+ SPS_O_AUTO_ENABLE | /* connection is auto-enabled */
+ SPS_O_STREAMING | /* streaming mode */
+ SPS_O_DESC_DONE | /* interrupt on end of descriptor */
+ SPS_O_ACK_TRANSFERS | /* must use sps_get_iovec() */
+ SPS_O_HYBRID; /* Read actual descriptors in sps_get_iovec() */
+ config->src_pipe_index = channel->id;
+ config->desc.size =
+ TSPP_SPS_DESCRIPTOR_COUNT * SPS_DESCRIPTOR_SIZE;
+ config->desc.base = dma_alloc_coherent(&pdev->pdev->dev,
+ config->desc.size,
+ &config->desc.phys_base,
+ GFP_KERNEL);
+ if (config->desc.base == 0) {
+ pr_err("tspp: error allocating sps descriptors");
+ rc = -ENOMEM;
+ goto err_desc_alloc;
+ }
+
+ memset(config->desc.base, 0, config->desc.size);
+
+ rc = sps_connect(channel->pipe, config);
+ if (rc) {
+ pr_err("tspp: error connecting bam");
+ goto err_connect;
+ }
+
+ event->mode = SPS_TRIGGER_CALLBACK;
+ event->options = SPS_O_DESC_DONE;
+ event->callback = tspp_sps_complete_cb;
+ event->xfer_done = NULL;
+ event->user = pdev;
+
+ rc = sps_register_event(channel->pipe, event);
+ if (rc) {
+ pr_err("tspp: error registering event");
+ goto err_event;
+ }
+
+ init_timer(&channel->expiration_timer);
+ channel->expiration_timer.function = tspp_expiration_timer;
+ channel->expiration_timer.data = (unsigned long)pdev;
+ channel->expiration_timer.expires = 0xffffffffL;
+
+ rc = pm_runtime_get(&pdev->pdev->dev);
+ if (rc < 0) {
+ dev_err(&pdev->pdev->dev,
+ "Runtime PM: Unable to wake up tspp device, rc = %d",
+ rc);
+ }
+ return 0;
+
+err_event:
+ sps_disconnect(channel->pipe);
+err_connect:
+ dma_free_coherent(&pdev->pdev->dev, config->desc.size,
+ config->desc.base, config->desc.phys_base);
+err_desc_alloc:
+ sps_free_endpoint(channel->pipe);
+err_sps_alloc:
+ channel->used = 0;
+ return rc;
+}
+EXPORT_SYMBOL(tspp_open_channel);
+
+/**
+ * tspp_close_channel - close a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_close_channel(u32 dev, u32 channel_id)
+{
+ int i;
+ int id;
+ int table_idx;
+ u32 val;
+ unsigned long flags;
+
+ struct sps_connect *config;
+ struct tspp_device *pdev;
+ struct tspp_channel *channel;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_close: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+
+ /* if the channel is not used, we are done */
+ if (!channel->used)
+ return 0;
+
+ /*
+ * Need to protect access to used and waiting fields, as they are
+ * used by the tasklet which is invoked from interrupt context
+ */
+ spin_lock_irqsave(&pdev->spinlock, flags);
+ channel->used = 0;
+ channel->waiting = NULL;
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+ if (channel->expiration_period_ms)
+ del_timer(&channel->expiration_timer);
+
+ channel->notifier = NULL;
+ channel->notify_data = NULL;
+ channel->expiration_period_ms = 0;
+
+ config = &channel->config;
+ pdev = channel->pdev;
+
+ /* disable pipe (channel) */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val | (1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is set */
+ wmb();
+
+ /* unregister all filters for this channel */
+ for (table_idx = 0; table_idx < TSPP_FILTER_TABLES; table_idx++) {
+ for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+ struct tspp_pid_filter *filter =
+ &pdev->filters[table_idx]->filter[i];
+ id = FILTER_GET_PIPE_NUMBER0(filter);
+ if (id == channel->id) {
+ if (FILTER_HAS_ENCRYPTION(filter))
+ tspp_free_key_entry(
+ FILTER_GET_KEY_NUMBER(filter));
+ filter->config = 0;
+ filter->filter = 0;
+ }
+ }
+ }
+ channel->filter_count = 0;
+
+ /* disconnect the bam */
+ if (sps_disconnect(channel->pipe) != 0)
+ pr_warn("tspp: Error freeing sps endpoint (%i)", channel->id);
+
+ /* destroy the buffers */
+ dma_free_coherent(&pdev->pdev->dev, config->desc.size,
+ config->desc.base, config->desc.phys_base);
+
+ sps_free_endpoint(channel->pipe);
+
+ tspp_destroy_buffers(channel_id, channel);
+
+ dma_pool_destroy(channel->dma_pool);
+ channel->dma_pool = NULL;
+
+ channel->src = TSPP_SOURCE_NONE;
+ channel->mode = TSPP_MODE_DISABLED;
+ channel->memfree = NULL;
+ channel->user_info = NULL;
+ channel->buffer_count = 0;
+ channel->data = NULL;
+ channel->read = NULL;
+ channel->locked = NULL;
+
+ if (tspp_channels_in_use(pdev) == 0) {
+ sps_deregister_bam_device(pdev->bam_handle);
+ pdev->bam_handle = SPS_DEV_HANDLE_INVALID;
+
+ __pm_relax(&pdev->ws);
+ tspp_clock_stop(pdev);
+ }
+
+ pm_runtime_put(&pdev->pdev->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_close_channel);
+
+/**
+ * tspp_get_ref_clk_counter - return the TSIF clock reference (TCR) counter.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @source: The TSIF source from which the counter should be read
+ * @tcr_counter: the value of TCR counter
+ *
+ * Return error status
+ *
+ * TCR increments at a rate equal to 27 MHz/256 = 105.47 kHz.
+ * If the source is neither TSIF0 nor TSIF1, 0 is returned.
+ */
+int tspp_get_ref_clk_counter(u32 dev, enum tspp_source source, u32 *tcr_counter)
+{
+ struct tspp_device *pdev;
+ struct tspp_tsif_device *tsif_device;
+
+ if (!tcr_counter)
+ return -EINVAL;
+
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_get_ref_clk_counter: can't find device %i\n", dev);
+ return -ENODEV;
+ }
+
+ switch (source) {
+ case TSPP_SOURCE_TSIF0:
+ tsif_device = &pdev->tsif[0];
+ break;
+
+ case TSPP_SOURCE_TSIF1:
+ tsif_device = &pdev->tsif[1];
+ break;
+
+ default:
+ tsif_device = NULL;
+ break;
+ }
+
+ if (tsif_device && tsif_device->ref_count)
+ *tcr_counter = ioread32(tsif_device->base + TSIF_CLK_REF_OFF);
+ else
+ *tcr_counter = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_get_ref_clk_counter);
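A usage sketch (not part of the patch): since the TCR ticks at 27 MHz/256, the delta between two reads converts to milliseconds as delta * 256 * 1000 / 27000000. The snippet assumes device 0 streaming from TSIF0 and elides error checking; u32 subtraction handles a single counter wraparound correctly.

	#include <linux/math64.h>

	u32 t0, t1;
	u64 elapsed_ms;

	tspp_get_ref_clk_counter(0, TSPP_SOURCE_TSIF0, &t0);
	/* ... let the stream run for a while ... */
	tspp_get_ref_clk_counter(0, TSPP_SOURCE_TSIF0, &t1);

	/* TCR rate is 27 MHz / 256, so ticks * 256 * 1000 / 27000000 gives ms */
	elapsed_ms = div_u64((u64)(t1 - t0) * 256 * 1000, 27000000);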
+
+/**
+ * tspp_add_filter - add a TSPP filter to a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return error status
+ *
+ */
+int tspp_add_filter(u32 dev, u32 channel_id,
+ struct tspp_filter *filter)
+{
+ int i, rc;
+ int other_channel;
+ int entry;
+ u32 val, pid, enabled;
+ struct tspp_device *pdev;
+ struct tspp_pid_filter p;
+ struct tspp_channel *channel;
+
+ TSPP_DEBUG("tspp: add filter");
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_add: can't find device %i", dev);
+ return -ENODEV;
+ }
+
+ channel = &pdev->channels[channel_id];
+
+ if (filter->source > TSPP_SOURCE_MEM) {
+ pr_err("tspp invalid source");
+ return -ENOSR;
+ }
+
+ if (filter->priority >= TSPP_NUM_PRIORITIES) {
+ pr_err("tspp invalid filter priority");
+ return -ENOSR;
+ }
+
+ channel->mode = filter->mode;
+ /*
+ * if buffers are already allocated, verify they fulfil
+ * the alignment requirements.
+ */
+ if ((channel->buffer_count > 0) &&
+ (!tspp_is_buffer_size_aligned(channel->buffer_size, channel->mode)))
+ pr_warn("tspp: buffers allocated with incorrect alignment\n");
+
+ if (filter->mode == TSPP_MODE_PES) {
+ for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+ struct tspp_pid_filter *tspp_filter =
+ &pdev->filters[channel->src]->filter[i];
+ pid = FILTER_GET_PIPE_PID((tspp_filter));
+ enabled = FILTER_GET_PIPE_PROCESS0(tspp_filter);
+ if (enabled && (pid == filter->pid)) {
+ other_channel =
+ FILTER_GET_PIPE_NUMBER0(tspp_filter);
+ pr_err("tspp: pid 0x%x already in use by channel %i",
+ filter->pid, other_channel);
+ return -EBADSLT;
+ }
+ }
+ }
+
+ /* make sure this priority is not already in use */
+ enabled = FILTER_GET_PIPE_PROCESS0(
+ (&(pdev->filters[channel->src]->filter[filter->priority])));
+ if (enabled) {
+ pr_err("tspp: filter priority %i source %i is already enabled\n",
+ filter->priority, channel->src);
+ return -ENOSR;
+ }
+
+ if (channel->mode == TSPP_MODE_PES) {
+ /*
+ * if we are already processing in PES mode, disable pipe
+ * (channel) and filter to be updated
+ */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val | (1 << channel->id),
+ pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is set */
+ wmb();
+ }
+
+ /* update entry */
+ p.filter = 0;
+ p.config = FILTER_TRANS_END_DISABLE;
+ FILTER_SET_PIPE_PROCESS0((&p), filter->mode);
+ FILTER_SET_PIPE_PID((&p), filter->pid);
+ FILTER_SET_PID_MASK((&p), filter->mask);
+ FILTER_SET_PIPE_NUMBER0((&p), channel->id);
+ FILTER_SET_PIPE_PROCESS1((&p), TSPP_MODE_DISABLED);
+ if (filter->decrypt) {
+ entry = tspp_get_key_entry();
+ if (entry == -1) {
+ pr_err("tspp: no more keys available!");
+ } else {
+ p.config |= FILTER_DECRYPT;
+ FILTER_SET_KEY_NUMBER((&p), entry);
+ }
+ }
+
+ pdev->filters[channel->src]->
+ filter[filter->priority].config = p.config;
+ pdev->filters[channel->src]->
+ filter[filter->priority].filter = p.filter;
+
+ /*
+ * allocate buffers if needed (i.e. if the user has not already called
+ * tspp_allocate_buffers() explicitly).
+ */
+ if (channel->buffer_count == 0) {
+ channel->buffer_size =
+ tspp_align_buffer_size_by_mode(channel->buffer_size,
+ channel->mode);
+ rc = tspp_allocate_buffers(dev, channel->id,
+ channel->max_buffers,
+ channel->buffer_size,
+ channel->int_freq, NULL, NULL, NULL);
+ if (rc != 0) {
+ pr_err("tspp: tspp_allocate_buffers failed\n");
+ return rc;
+ }
+ }
+
+ /* reenable pipe */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val & ~(1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is reset */
+ wmb();
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+ channel->filter_count++;
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_add_filter);
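A minimal caller sketch using only the tspp_filter fields this function reads (priority, pid, mask, mode, source, decrypt). The PID and channel numbers are illustrative, and the channel is assumed to have been opened already:

	struct tspp_filter flt = {
		.priority = 0,
		.pid = 0x100,		/* hypothetical PID to capture */
		.mask = 0x1fff,		/* match all 13 PID bits */
		.mode = TSPP_MODE_PES,
		.source = TSPP_SOURCE_TSIF0,
		.decrypt = 0,
	};
	int rc;

	rc = tspp_add_filter(0, 0, &flt);
	if (rc)
		pr_err("demux: tspp_add_filter failed, rc=%d\n", rc);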
+
+/**
+ * tspp_remove_filter - remove a TSPP filter from a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return error status
+ *
+ */
+int tspp_remove_filter(u32 dev, u32 channel_id,
+ struct tspp_filter *filter)
+{
+ int entry;
+ u32 val;
+ struct tspp_device *pdev;
+ int src;
+ struct tspp_pid_filter *tspp_filter;
+ struct tspp_channel *channel;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ if (!filter) {
+ pr_err("tspp: NULL filter pointer");
+ return -EINVAL;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_remove: can't find device %i", dev);
+ return -ENODEV;
+ }
+ if (filter->priority >= TSPP_NUM_PRIORITIES) {
+ pr_err("tspp invalid filter priority");
+ return -ENOSR;
+ }
+ channel = &pdev->channels[channel_id];
+
+ src = channel->src;
+ tspp_filter = &(pdev->filters[src]->filter[filter->priority]);
+
+ /* disable pipe (channel) */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val | (1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is set */
+ wmb();
+
+ /* update data keys */
+ if (tspp_filter->config & FILTER_DECRYPT) {
+ entry = FILTER_GET_KEY_NUMBER(tspp_filter);
+ tspp_free_key_entry(entry);
+ }
+
+ /* update pid table */
+ tspp_filter->config = 0;
+ tspp_filter->filter = 0;
+
+ channel->filter_count--;
+
+ /* reenable pipe */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val & ~(1 << channel->id),
+ pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is reset */
+ wmb();
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_remove_filter);
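The corresponding teardown is a sketch as well; note that removal keys off filter->priority and the channel's current source only, so the same struct passed to tspp_add_filter() can be reused as-is:

	rc = tspp_remove_filter(0, 0, &flt);
	if (rc)
		pr_err("demux: tspp_remove_filter failed, rc=%d\n", rc);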
+
+/**
+ * tspp_set_key - set TSPP key in key table.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @key: TSPP key parameters
+ *
+ * Return error status
+ *
+ */
+int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key)
+{
+ int i;
+ int id;
+ int key_index = 0;
+ int data;
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_set: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+
+ /* read the key index used by this channel */
+ for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+ struct tspp_pid_filter *tspp_filter =
+ &(pdev->filters[channel->src]->filter[i]);
+ id = FILTER_GET_PIPE_NUMBER0(tspp_filter);
+ if (id == channel->id) {
+ if (FILTER_HAS_ENCRYPTION(tspp_filter)) {
+ key_index = FILTER_GET_KEY_NUMBER(tspp_filter);
+ break;
+ }
+ }
+ }
+ if (i == TSPP_NUM_PRIORITIES) {
+ pr_err("tspp: no encryption on this channel");
+ return -ENOKEY;
+ }
+
+ if (key->parity == TSPP_KEY_PARITY_EVEN) {
+ pdev->tspp_key_table->entry[key_index].even_lsb = key->lsb;
+ pdev->tspp_key_table->entry[key_index].even_msb = key->msb;
+ } else {
+ pdev->tspp_key_table->entry[key_index].odd_lsb = key->lsb;
+ pdev->tspp_key_table->entry[key_index].odd_msb = key->msb;
+ }
+ data = readl_relaxed(channel->pdev->base + TSPP_KEY_VALID);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_set_key);
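A sketch of programming one key half. It assumes a decrypting filter is already installed on the channel (otherwise the function returns -ENOKEY, as above), and that TSPP_KEY_PARITY_ODD is the counterpart enumerator handled by the else branch; the key material is obviously fake:

	struct tspp_key key = {
		.parity = TSPP_KEY_PARITY_EVEN,
		.lsb = 0x01234567,	/* hypothetical key word (LSB half) */
		.msb = 0x89abcdef,	/* hypothetical key word (MSB half) */
	};

	rc = tspp_set_key(0, 0, &key);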
+
+/**
+ * tspp_register_notification - register TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @notify: notification function
+ * @userdata: user data to pass to notification function
+ * @timer_ms: timer period (in ms) for notification of partially filled
+ * buffers; 0 disables the timer
+ *
+ * Return error status
+ *
+ */
+int tspp_register_notification(u32 dev, u32 channel_id,
+ tspp_notifier *notify, void *userdata, u32 timer_ms)
+{
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_reg: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+ channel->notifier = notify;
+ channel->notify_data = userdata;
+ channel->expiration_period_ms = timer_ms;
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_register_notification);
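A registration sketch. The tspp_notifier signature is not visible in this hunk, so the callback prototype below is an assumption; since the notifier fires from the tasklet, the callback should only flag work and defer the actual buffer draining:

	/* assumed prototype: void (*)(int channel_id, void *user) */
	static void demux_notify(int channel_id, void *user)
	{
		struct demux_ctx *ctx = user;		/* hypothetical private context */

		schedule_work(&ctx->drain_work);	/* drain via tspp_get_buffer() */
	}

	/* notify every 100 ms, even for partially filled buffers */
	rc = tspp_register_notification(0, 0, demux_notify, ctx, 100);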
+
+/**
+ * tspp_unregister_notification - unregister TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_unregister_notification(u32 dev, u32 channel_id)
+{
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_unreg: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+ channel->notifier = NULL;
+ channel->notify_data = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(tspp_unregister_notification);
+
+/**
+ * tspp_get_buffer - get TSPP data buffer.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return a pointer to the next filled data descriptor, or NULL if none
+ * is available
+ *
+ */
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id)
+{
+ struct tspp_mem_buffer *buffer;
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+ unsigned long flags;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return NULL;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_get: can't find device %i", dev);
+ return NULL;
+ }
+
+ spin_lock_irqsave(&pdev->spinlock, flags);
+
+ channel = &pdev->channels[channel_id];
+
+ if (!channel->read) {
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+ pr_warn("tspp: no buffer to get on channel %i!",
+ channel->id);
+ return NULL;
+ }
+
+ buffer = channel->read;
+ /* see if we have any buffers ready to read */
+ if (buffer->state != TSPP_BUF_STATE_DATA) {
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+ return NULL;
+ }
+
+ /* mark the buffer as busy */
+ buffer->state = TSPP_BUF_STATE_LOCKED;
+
+ /* increment the pointer along the list */
+ channel->read = channel->read->next;
+
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+ return &buffer->desc;
+}
+EXPORT_SYMBOL(tspp_get_buffer);
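A typical drain loop pairs tspp_get_buffer() with tspp_release_buffer(); each descriptor stays in the LOCKED state until released. Only descriptor fields visible in this patch (id, virt_base, size) are used, and consume() is a hypothetical data sink:

	const struct tspp_data_descriptor *d;

	while ((d = tspp_get_buffer(0, 0)) != NULL) {
		consume(d->virt_base, d->size);	/* hypothetical data sink */
		tspp_release_buffer(0, 0, d->id);
	}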
+
+/**
+ * tspp_release_buffer - release TSPP data buffer back to TSPP.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @descriptor_id: buffer descriptor ID
+ *
+ * Return error status
+ *
+ */
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id)
+{
+ int i, found = 0;
+ struct tspp_mem_buffer *buffer;
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+ unsigned long flags;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp: can't find device %i", dev);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&pdev->spinlock, flags);
+
+ channel = &pdev->channels[channel_id];
+
+ if (descriptor_id >= channel->buffer_count)
+ pr_warn("tspp: desc id looks weird 0x%08x", descriptor_id);
+
+ /* find the correct descriptor */
+ buffer = channel->locked;
+ for (i = 0; i < channel->buffer_count; i++) {
+ if (buffer->desc.id == descriptor_id) {
+ found = 1;
+ break;
+ }
+ buffer = buffer->next;
+ }
+ channel->locked = channel->locked->next;
+
+ if (!found) {
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+ pr_err("tspp: cant find desc %i", descriptor_id);
+ return -EINVAL;
+ }
+
+ /* make sure the buffer is in the expected state */
+ if (buffer->state != TSPP_BUF_STATE_LOCKED) {
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+ pr_err("tspp: buffer %i not locked", descriptor_id);
+ return -EINVAL;
+ }
+ /* unlock the buffer and requeue it */
+ buffer->state = TSPP_BUF_STATE_WAITING;
+
+ if (tspp_queue_buffer(channel, buffer))
+ pr_warn("tspp: can't requeue buffer");
+
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_release_buffer);
+
+/**
+ * tspp_allocate_buffers - allocate TSPP data buffers.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @count: number of buffers to allocate
+ * @size: size of each buffer to allocate
+ * @int_freq: interrupt frequency
+ * @alloc: user defined memory allocator function. Pass NULL for default.
+ * @memfree: user defined memory free function. Pass NULL for default.
+ * @user: user data to pass to the memory allocator/free function
+ *
+ * Return error status
+ *
+ * The user can optionally call this function explicitly to allocate the TSPP
+ * data buffers. Alternatively, if the user did not call this function, it
+ * is called implicitly by tspp_add_filter().
+ */
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count, u32 size,
+ u32 int_freq, tspp_allocator *alloc,
+ tspp_memfree *memfree, void *user)
+{
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+ struct tspp_mem_buffer *last = NULL;
+
+ TSPP_DEBUG("tspp_allocate_buffers");
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("%s: channel id out of range", __func__);
+ return -ECHRNG;
+ }
+
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("%s: can't find device %i", __func__, dev);
+ return -ENODEV;
+ }
+
+ if (count < MIN_ACCEPTABLE_BUFFER_COUNT) {
+ pr_err("%s: tspp requires a minimum of %i buffers\n",
+ __func__, MIN_ACCEPTABLE_BUFFER_COUNT);
+ return -EINVAL;
+ }
+
+ if (count > TSPP_NUM_BUFFERS) {
+ pr_err("%s: tspp requires a maximum of %i buffers\n",
+ __func__, TSPP_NUM_BUFFERS);
+ return -EINVAL;
+ }
+
+ channel = &pdev->channels[channel_id];
+
+ /*
+ * Allow buffer allocation only if there was no previous buffer
+ * allocation for this channel.
+ */
+ if (channel->buffer_count > 0) {
+ pr_err("%s: buffers already allocated for channel %u",
+ __func__, channel_id);
+ return -EINVAL;
+ }
+
+ channel->max_buffers = count;
+
+ /* set up interrupt frequency */
+ if (int_freq > channel->max_buffers) {
+ int_freq = channel->max_buffers;
+ pr_warn("%s: setting interrupt frequency to %u\n",
+ __func__, int_freq);
+ }
+ channel->int_freq = int_freq;
+ /*
+ * it is the responsibility of the caller of tspp_allocate_buffers(),
+ * whether it's the user or the driver, to make sure the size parameter
+ * is compatible with the channel mode.
+ */
+ channel->buffer_size = size;
+
+ /* save user defined memory free function for later use */
+ channel->memfree = memfree;
+ channel->user_info = user;
+
+ /*
+ * For small buffers, create a DMA pool so that memory
+ * is not wasted through dma_alloc_coherent.
+ */
+ if (TSPP_USE_DMA_POOL(channel->buffer_size)) {
+ channel->dma_pool = dma_pool_create("tspp",
+ &pdev->pdev->dev, channel->buffer_size, 0, 0);
+ if (!channel->dma_pool) {
+ pr_err("%s: Can't allocate memory pool\n", __func__);
+ return -ENOMEM;
+ }
+ } else {
+ channel->dma_pool = NULL;
+ }
+
+ for (channel->buffer_count = 0;
+ channel->buffer_count < channel->max_buffers;
+ channel->buffer_count++) {
+
+ /* allocate the descriptor */
+ struct tspp_mem_buffer *desc = (struct tspp_mem_buffer *)
+ kmalloc(sizeof(struct tspp_mem_buffer), GFP_KERNEL);
+ if (!desc) {
+ pr_warn("%s: Can't allocate desc %i",
+ __func__, channel->buffer_count);
+ break;
+ }
+
+ desc->desc.id = channel->buffer_count;
+ /* allocate the buffer */
+ if (tspp_alloc_buffer(channel_id, &desc->desc,
+ channel->buffer_size, channel->dma_pool,
+ alloc, user) != 0) {
+ kfree(desc);
+ pr_warn("%s: Can't allocate buffer %i",
+ __func__, channel->buffer_count);
+ break;
+ }
+
+ /* add the descriptor to the circular list */
+ desc->filled = 0;
+ desc->read_index = 0;
+ if (!channel->data)
+ channel->data = desc;
+ else if (last != NULL)
+ last->next = desc;
+ last = desc;
+ desc->next = channel->data;
+
+ /* prepare the sps descriptor */
+ desc->sps.phys_base = desc->desc.phys_base;
+ desc->sps.base = desc->desc.virt_base;
+ desc->sps.size = desc->desc.size;
+
+ /* start the transfer */
+ if (tspp_queue_buffer(channel, desc))
+ pr_err("%s: can't queue buffer %i",
+ __func__, desc->desc.id);
+ }
+
+ if (channel->buffer_count < channel->max_buffers) {
+ /*
+ * we failed to allocate the requested number of buffers.
+ * we don't allow a partial success, so need to clean up here.
+ */
+ tspp_destroy_buffers(channel_id, channel);
+ channel->buffer_count = 0;
+
+ dma_pool_destroy(channel->dma_pool);
+ channel->dma_pool = NULL;
+ return -ENOMEM;
+ }
+
+ channel->waiting = channel->data;
+ channel->read = channel->data;
+ channel->locked = channel->data;
+
+ /* Now that buffers are scheduled to HW, kick data expiration timer */
+ if (channel->expiration_period_ms)
+ mod_timer(&channel->expiration_timer,
+ jiffies +
+ MSEC_TO_JIFFIES(
+ channel->expiration_period_ms));
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_allocate_buffers);
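A sketch using the default allocator (NULL alloc/memfree, per the kernel-doc above). The count must fall within [MIN_ACCEPTABLE_BUFFER_COUNT, TSPP_NUM_BUFFERS], and the caller is responsible for mode-compatible sizing; 188 * 16 below is merely an illustrative multiple of the TS packet size:

	/* 16 buffers of 16 TS packets each, interrupt every 4 buffers */
	rc = tspp_allocate_buffers(0, 0, 16, 188 * 16, 4, NULL, NULL, NULL);
	if (rc)
		pr_err("demux: tspp_allocate_buffers failed, rc=%d\n", rc);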
+
+/*** debugfs ***/
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+ int rc;
+ int clock_started = 0;
+ struct tspp_device *pdev;
+
+ pdev = tspp_find_by_id(0);
+ if (!pdev) {
+ pr_err("%s: can't find device 0\n", __func__);
+ return 0;
+ }
+
+ if (tspp_channels_in_use(pdev) == 0) {
+ rc = tspp_clock_start(pdev);
+ if (rc) {
+ pr_err("%s: tspp_clock_start failed %d\n",
+ __func__, rc);
+ return 0;
+ }
+ clock_started = 1;
+ }
+
+ writel_relaxed(val, data);
+ /* Assure register write */
+ wmb();
+
+ if (clock_started)
+ tspp_clock_stop(pdev);
+ return 0;
+}
+
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+ int rc;
+ int clock_started = 0;
+ struct tspp_device *pdev;
+
+ pdev = tspp_find_by_id(0);
+ if (!pdev) {
+ pr_err("%s: can't find device 0\n", __func__);
+ *val = 0;
+ return 0;
+ }
+
+ if (tspp_channels_in_use(pdev) == 0) {
+ rc = tspp_clock_start(pdev);
+ if (rc) {
+ pr_err("%s: tspp_clock_start failed %d\n",
+ __func__, rc);
+ *val = 0;
+ return 0;
+ }
+ clock_started = 1;
+ }
+
+ *val = readl_relaxed(data);
+
+ if (clock_started)
+ tspp_clock_stop(pdev);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+ debugfs_iomem_x32_set, "0x%08llx");
+
+static void tsif_debugfs_init(struct tspp_tsif_device *tsif_device,
+ int instance)
+{
+ char name[10];
+
+ snprintf(name, 10, "tsif%i", instance);
+ tsif_device->dent_tsif = debugfs_create_dir(
+ name, NULL);
+ if (tsif_device->dent_tsif) {
+ int i;
+ void __iomem *base = tsif_device->base;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
+ tsif_device->debugfs_tsif_regs[i] =
+ debugfs_create_file(
+ debugfs_tsif_regs[i].name,
+ debugfs_tsif_regs[i].mode,
+ tsif_device->dent_tsif,
+ base + debugfs_tsif_regs[i].offset,
+ &fops_iomem_x32);
+ }
+
+ debugfs_create_u32(
+ "stat_rx_chunks",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_rx);
+
+ debugfs_create_u32(
+ "stat_overflow",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_overflow);
+
+ debugfs_create_u32(
+ "stat_lost_sync",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_lost_sync);
+
+ debugfs_create_u32(
+ "stat_timeout",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_timeout);
+ }
+}
+
+static void tsif_debugfs_exit(struct tspp_tsif_device *tsif_device)
+{
+ int i;
+
+ debugfs_remove_recursive(tsif_device->dent_tsif);
+ tsif_device->dent_tsif = NULL;
+ for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
+ tsif_device->debugfs_tsif_regs[i] = NULL;
+}
+
+static void tspp_debugfs_init(struct tspp_device *device, int instance)
+{
+ char name[10];
+
+ snprintf(name, 10, "tspp%i", instance);
+ device->dent = debugfs_create_dir(
+ name, NULL);
+ if (device->dent) {
+ int i;
+ void __iomem *base = device->base;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+ device->debugfs_regs[i] =
+ debugfs_create_file(
+ debugfs_tspp_regs[i].name,
+ debugfs_tspp_regs[i].mode,
+ device->dent,
+ base + debugfs_tspp_regs[i].offset,
+ &fops_iomem_x32);
+ }
+}
+
+static void tspp_debugfs_exit(struct tspp_device *device)
+{
+ int i;
+
+ debugfs_remove_recursive(device->dent);
+ for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+ device->debugfs_regs[i] = NULL;
+}
+
+static int msm_tspp_map_irqs(struct platform_device *pdev,
+ struct tspp_device *device)
+{
+ int rc;
+
+ /* get IRQ numbers from platform information */
+
+ /* map TSPP IRQ */
+ rc = platform_get_irq_byname(pdev, "TSIF_TSPP_IRQ");
+ if (rc > 0) {
+ device->tspp_irq = rc;
+ } else {
+ dev_err(&pdev->dev, "failed to get TSPP IRQ");
+ return -EINVAL;
+ }
+
+ /* map TSIF IRQs */
+ rc = platform_get_irq_byname(pdev, "TSIF0_IRQ");
+ if (rc > 0) {
+ device->tsif[0].tsif_irq = rc;
+ } else {
+ dev_err(&pdev->dev, "failed to get TSIF0 IRQ");
+ return -EINVAL;
+ }
+
+ rc = platform_get_irq_byname(pdev, "TSIF1_IRQ");
+ if (rc > 0) {
+ device->tsif[1].tsif_irq = rc;
+ } else {
+ dev_err(&pdev->dev, "failed to get TSIF1 IRQ");
+ return -EINVAL;
+ }
+
+ /* map BAM IRQ */
+ rc = platform_get_irq_byname(pdev, "TSIF_BAM_IRQ");
+ if (rc > 0) {
+ device->bam_irq = rc;
+ } else {
+ dev_err(&pdev->dev, "failed to get TSPP BAM IRQ");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int msm_tspp_probe(struct platform_device *pdev)
+{
+ int rc = -ENODEV;
+ u32 version;
+ u32 i;
+ struct tspp_device *device;
+ struct resource *mem_tsif0;
+ struct resource *mem_tsif1;
+ struct resource *mem_tspp;
+ struct resource *mem_bam;
+ struct msm_bus_scale_pdata *tspp_bus_pdata = NULL;
+ unsigned long rate;
+
+ if (pdev->dev.of_node) {
+ /* ID is always 0 since there is only 1 instance of TSPP */
+ pdev->id = 0;
+ tspp_bus_pdata = msm_bus_cl_get_pdata(pdev);
+ } else {
+ /* must have device tree data */
+ pr_err("tspp: Device tree data not available\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* OK, we will use this device */
+ device = kzalloc(sizeof(struct tspp_device), GFP_KERNEL);
+ if (!device) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* set up references */
+ device->pdev = pdev;
+ platform_set_drvdata(pdev, device);
+
+ /* setup pin control */
+ rc = tspp_get_pinctrl(device);
+ if (rc) {
+ pr_err("tspp: failed to get pin control data, rc=%d\n", rc);
+ goto err_pinctrl;
+ }
+
+ /* register bus client */
+ if (tspp_bus_pdata) {
+ device->tsif_bus_client =
+ msm_bus_scale_register_client(tspp_bus_pdata);
+ if (!device->tsif_bus_client)
+ pr_err("tspp: Unable to register bus client\n");
+ } else {
+ device->tsif_bus_client = 0;
+ }
+
+ /* map regulators */
+ device->tsif_vreg = devm_regulator_get_optional(&pdev->dev, "vdd_cx");
+ if (IS_ERR(device->tsif_vreg)) {
+ rc = PTR_ERR(device->tsif_vreg);
+ device->tsif_vreg = NULL;
+ if (rc == -ENODEV) {
+ pr_notice("%s: vdd_cx regulator will not be used\n",
+ __func__);
+ } else {
+ dev_err(&pdev->dev,
+ "failed to get CX regulator, err=%d\n", rc);
+ goto err_regulator;
+ }
+ } else {
+ /* Set an initial voltage and enable the regulator */
+ rc = regulator_set_voltage(device->tsif_vreg,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to set CX voltage.\n");
+ goto err_regulator;
+ }
+
+ rc = regulator_enable(device->tsif_vreg);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to enable CX regulator.\n");
+ goto err_regulator;
+ }
+ }
+
+ /* map clocks */
+ device->tsif_pclk = clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(device->tsif_pclk)) {
+ rc = PTR_ERR(device->tsif_pclk);
+ device->tsif_pclk = NULL;
+ goto err_pclock;
+ }
+
+ device->tsif_ref_clk = clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(device->tsif_ref_clk)) {
+ rc = PTR_ERR(device->tsif_ref_clk);
+ device->tsif_ref_clk = NULL;
+ goto err_refclock;
+ }
+ rate = clk_round_rate(device->tsif_ref_clk, 1);
+ rc = clk_set_rate(device->tsif_ref_clk, rate);
+ if (rc)
+ goto err_res_tsif0;
+
+ /* map I/O memory */
+ mem_tsif0 = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "MSM_TSIF0_PHYS");
+ if (!mem_tsif0) {
+ pr_err("tspp: Missing tsif0 MEM resource\n");
+ rc = -ENXIO;
+ goto err_res_tsif0;
+ }
+ device->tsif[0].base = ioremap(mem_tsif0->start,
+ resource_size(mem_tsif0));
+ if (!device->tsif[0].base) {
+ pr_err("tspp: ioremap failed\n");
+ goto err_map_tsif0;
+ }
+
+ mem_tsif1 = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "MSM_TSIF1_PHYS");
+ if (!mem_tsif1) {
+ dev_err(&pdev->dev, "Missing tsif1 MEM resource\n");
+ rc = -ENXIO;
+ goto err_res_tsif1;
+ }
+ device->tsif[1].base = ioremap(mem_tsif1->start,
+ resource_size(mem_tsif1));
+ if (!device->tsif[1].base) {
+ dev_err(&pdev->dev, "ioremap failed");
+ goto err_map_tsif1;
+ }
+
+ mem_tspp = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "MSM_TSPP_PHYS");
+ if (!mem_tspp) {
+ dev_err(&pdev->dev, "Missing MEM resource");
+ rc = -ENXIO;
+ goto err_res_dev;
+ }
+ device->base = ioremap(mem_tspp->start, resource_size(mem_tspp));
+ if (!device->base) {
+ dev_err(&pdev->dev, "ioremap failed");
+ goto err_map_dev;
+ }
+
+ mem_bam = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "MSM_TSPP_BAM_PHYS");
+ if (!mem_bam) {
+ pr_err("tspp: Missing bam MEM resource");
+ rc = -ENXIO;
+ goto err_res_bam;
+ }
+ memset(&device->bam_props, 0, sizeof(device->bam_props));
+ device->bam_props.phys_addr = mem_bam->start;
+ device->bam_props.virt_addr = ioremap(mem_bam->start,
+ resource_size(mem_bam));
+ if (!device->bam_props.virt_addr) {
+ dev_err(&pdev->dev, "ioremap failed");
+ goto err_map_bam;
+ }
+
+ if (msm_tspp_map_irqs(pdev, device))
+ goto err_irq;
+ device->req_irqs = false;
+
+ /* power management */
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ tspp_debugfs_init(device, 0);
+
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+ tsif_debugfs_init(&device->tsif[i], i);
+
+ wakeup_source_init(&device->ws, dev_name(&pdev->dev));
+
+ /* set up pointers to ram-based 'registers' */
+ device->filters[0] = device->base + TSPP_PID_FILTER_TABLE0;
+ device->filters[1] = device->base + TSPP_PID_FILTER_TABLE1;
+ device->filters[2] = device->base + TSPP_PID_FILTER_TABLE2;
+ device->tspp_key_table = device->base + TSPP_DATA_KEY;
+ device->tspp_global_performance =
+ device->base + TSPP_GLOBAL_PERFORMANCE;
+ device->tspp_pipe_context =
+ device->base + TSPP_PIPE_CONTEXT;
+ device->tspp_pipe_performance =
+ device->base + TSPP_PIPE_PERFORMANCE;
+
+ device->bam_props.summing_threshold = 0x10;
+ device->bam_props.irq = device->bam_irq;
+ device->bam_props.manage = SPS_BAM_MGR_LOCAL;
+ /* add SPS BAM log level */
+ device->bam_props.ipc_loglevel = TSPP_BAM_DEFAULT_IPC_LOGLVL;
+
+ if (tspp_clock_start(device) != 0) {
+ dev_err(&pdev->dev, "Can't start clocks");
+ goto err_clock;
+ }
+
+ device->bam_handle = SPS_DEV_HANDLE_INVALID;
+
+ spin_lock_init(&device->spinlock);
+ mutex_init(&device->mutex);
+ tasklet_init(&device->tlet, tspp_sps_complete_tlet,
+ (unsigned long)device);
+
+ /* initialize everything to a known state */
+ tspp_global_reset(device);
+
+ version = readl_relaxed(device->base + TSPP_VERSION);
+ /*
+ * The TSPP version is in bits [7:0]; alternatively, on newer
+ * hardware, the TSPP major version is in bits [31:28].
+ */
+ if ((version != 0x1) && (((version >> 28) & 0xF) != 0x1))
+ pr_warn("tspp: unrecognized hw version=%i", version);
+
+ /* initialize the channels */
+ for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+ tspp_channel_init(&(device->channels[i]), device);
+
+ /* stop the clocks for power savings */
+ tspp_clock_stop(device);
+
+ /* everything is ok, so add the device to the list */
+ list_add_tail(&(device->devlist), &tspp_devices);
+ return 0;
+
+err_clock:
+ tspp_debugfs_exit(device);
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+ tsif_debugfs_exit(&device->tsif[i]);
+err_irq:
+ iounmap(device->bam_props.virt_addr);
+err_map_bam:
+err_res_bam:
+ iounmap(device->base);
+err_map_dev:
+err_res_dev:
+ iounmap(device->tsif[1].base);
+err_map_tsif1:
+err_res_tsif1:
+ iounmap(device->tsif[0].base);
+err_map_tsif0:
+err_res_tsif0:
+ if (device->tsif_ref_clk)
+ clk_put(device->tsif_ref_clk);
+err_refclock:
+ if (device->tsif_pclk)
+ clk_put(device->tsif_pclk);
+err_pclock:
+ if (device->tsif_vreg)
+ regulator_disable(device->tsif_vreg);
+err_regulator:
+ if (device->tsif_bus_client)
+ msm_bus_scale_unregister_client(device->tsif_bus_client);
+err_pinctrl:
+ kfree(device);
+
+out:
+ return rc;
+}
+
+static int msm_tspp_remove(struct platform_device *pdev)
+{
+ struct tspp_channel *channel;
+ u32 i;
+
+ struct tspp_device *device = platform_get_drvdata(pdev);
+
+ /* free the buffers, and delete the channels */
+ for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+ channel = &device->channels[i];
+ tspp_close_channel(device->pdev->id, i);
+ }
+
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+ tsif_debugfs_exit(&device->tsif[i]);
+
+ mutex_destroy(&device->mutex);
+
+ if (device->tsif_bus_client)
+ msm_bus_scale_unregister_client(device->tsif_bus_client);
+
+ wakeup_source_trash(&device->ws);
+ if (device->req_irqs)
+ msm_tspp_free_irqs(device);
+
+ iounmap(device->bam_props.virt_addr);
+ iounmap(device->base);
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+ iounmap(device->tsif[i].base);
+
+ if (device->tsif_ref_clk)
+ clk_put(device->tsif_ref_clk);
+
+ if (device->tsif_pclk)
+ clk_put(device->tsif_pclk);
+
+ if (device->tsif_vreg)
+ regulator_disable(device->tsif_vreg);
+
+ pm_runtime_disable(&pdev->dev);
+
+ kfree(device);
+
+ return 0;
+}
+
+/*** power management ***/
+
+static int tspp_runtime_suspend(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: suspending...");
+ return 0;
+}
+
+static int tspp_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: resuming...");
+ return 0;
+}
+
+static const struct dev_pm_ops tspp_dev_pm_ops = {
+ .runtime_suspend = tspp_runtime_suspend,
+ .runtime_resume = tspp_runtime_resume,
+};
+
+static const struct of_device_id msm_match_table[] = {
+ {.compatible = "qcom,msm_tspp"},
+ {}
+};
+
+static struct platform_driver msm_tspp_driver = {
+ .probe = msm_tspp_probe,
+ .remove = msm_tspp_remove,
+ .driver = {
+ .name = "msm_tspp",
+ .pm = &tspp_dev_pm_ops,
+ .of_match_table = msm_match_table,
+ },
+};
+
+
+static int __init mod_init(void)
+{
+ int rc;
+
+ /* register the driver, and check hardware */
+ rc = platform_driver_register(&msm_tspp_driver);
+ if (rc)
+ pr_err("tspp: platform_driver_register failed: %d", rc);
+
+ return rc;
+}
+
+static void __exit mod_exit(void)
+{
+ /* delete low level driver */
+ platform_driver_unregister(&msm_tspp_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSPP platform device");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
index 03a61407aef8..feede3a14e07 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
@@ -1427,7 +1427,6 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
struct device *dev)
{
int rc = 0;
- int disable_htw = 1;
if (!cb || !dev) {
pr_err("Error: invalid input params\n");
@@ -1465,21 +1464,7 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
goto end;
}
- /*
- * Set the domain attributes
- * disable L2 redirect since it decreases
- * performance
- */
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- pr_err("Error: couldn't disable coherent HTW\n");
- rc = -ENODEV;
- goto err_set_attr;
- }
return 0;
-err_set_attr:
- arm_iommu_release_mapping(cb->mapping);
end:
return rc;
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index b434161f5599..9a469abc56ca 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -248,13 +248,31 @@ static enum cam_ahb_clk_vote msm_isp47_get_cam_clk_vote(
return 0;
}
-static int msm_isp47_ahb_clk_cfg(struct vfe_device *vfe_dev,
+int msm_isp47_ahb_clk_cfg(struct vfe_device *vfe_dev,
struct msm_isp_ahb_clk_cfg *ahb_cfg)
{
int rc = 0;
enum cam_ahb_clk_vote vote;
+ enum cam_ahb_clk_vote src_clk_vote;
+ struct msm_isp_clk_rates clk_rates;
- vote = msm_isp47_get_cam_clk_vote(ahb_cfg->vote);
+ if (ahb_cfg)
+ vote = msm_isp47_get_cam_clk_vote(ahb_cfg->vote);
+ else
+ vote = CAM_AHB_SVS_VOTE;
+
+ vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(vfe_dev,
+ &clk_rates);
+ if (vfe_dev->msm_isp_vfe_clk_rate <= clk_rates.svs_rate)
+ src_clk_vote = CAM_AHB_SVS_VOTE;
+ else if (vfe_dev->msm_isp_vfe_clk_rate <= clk_rates.nominal_rate)
+ src_clk_vote = CAM_AHB_NOMINAL_VOTE;
+ else
+ src_clk_vote = CAM_AHB_TURBO_VOTE;
+
+ /* vote for higher of the user requested or src clock matched vote */
+ if (vote < src_clk_vote)
+ vote = src_clk_vote;
if (vote && vfe_dev->ahb_vote != vote) {
rc = cam_config_ahb_clk(NULL, 0,
@@ -320,6 +338,7 @@ enable_regulators_failed:
void msm_vfe47_release_hardware(struct vfe_device *vfe_dev)
{
enum cam_ahb_clk_client id;
+ unsigned long rate = 0;
/* when closing node, disable all irq */
vfe_dev->irq0_mask = 0;
@@ -345,6 +364,8 @@ void msm_vfe47_release_hardware(struct vfe_device *vfe_dev)
vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
+ vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev, &rate);
+
vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(
vfe_dev, 0);
vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
@@ -378,8 +399,8 @@ void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev)
/* BUS_CFG */
msm_camera_io_w(0x00000101, vfe_dev->vfe_base + 0x84);
/* IRQ_MASK/CLEAR */
- msm_vfe47_config_irq(vfe_dev, 0x810000E0, 0xFFFFFF7E,
- MSM_ISP_IRQ_ENABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 0x810000E0, 0xFFFFFF7E, MSM_ISP_IRQ_ENABLE);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
@@ -387,8 +408,8 @@ void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev)
void msm_vfe47_clear_status_reg(struct vfe_device *vfe_dev)
{
- msm_vfe47_config_irq(vfe_dev, 0x80000000, 0x0,
- MSM_ISP_IRQ_SET);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 0x80000000, 0x0, MSM_ISP_IRQ_SET);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
@@ -536,7 +557,8 @@ void msm_vfe47_read_irq_status(struct vfe_device *vfe_dev,
vfe_dev->error_info.camif_status =
msm_camera_io_r(vfe_dev->vfe_base + 0x4A4);
/* mask off camif error after first occurrance */
- msm_vfe47_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, 0,
+ (1 << 0), MSM_ISP_IRQ_DISABLE);
}
if (*irq_status1 & (1 << 7))
@@ -775,7 +797,8 @@ void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
stream_composite_mask << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
- msm_vfe47_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << (comp_mask_index + 25), 0,
MSM_ISP_IRQ_ENABLE);
}
@@ -790,7 +813,8 @@ void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
comp_mask &= ~(0x7F << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
- msm_vfe47_config_irq(vfe_dev, (1 << (comp_mask_index + 25)), 0,
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ (1 << (comp_mask_index + 25)), 0,
MSM_ISP_IRQ_DISABLE);
}
@@ -799,7 +823,8 @@ void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
{
int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
- msm_vfe47_config_irq(vfe_dev, 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
MSM_ISP_IRQ_ENABLE);
}
@@ -808,7 +833,8 @@ void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
{
int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
- msm_vfe47_config_irq(vfe_dev, (1 << (stream_info->wm[vfe_idx][0] + 8)),
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ (1 << (stream_info->wm[vfe_idx][0] + 8)),
0, MSM_ISP_IRQ_DISABLE);
}
@@ -1060,7 +1086,8 @@ void msm_vfe47_cfg_fetch_engine(struct vfe_device *vfe_dev,
temp |= (1 << 1);
msm_camera_io_w(temp, vfe_dev->vfe_base + 0x84);
- msm_vfe47_config_irq(vfe_dev, (1 << 24), 0,
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ (1 << 24), 0,
MSM_ISP_IRQ_ENABLE);
temp = fe_cfg->fetch_height - 1;
@@ -1390,7 +1417,8 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x64);
msm_camera_io_w(0x81, vfe_dev->vfe_base + 0x68);
msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
- msm_vfe47_config_irq(vfe_dev, 0x15, 0x81,
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 0x15, 0x81,
MSM_ISP_IRQ_ENABLE);
if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
@@ -1422,7 +1450,7 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
update_state = DISABLE_CAMIF;
/* turn off camif violation and error irqs */
- msm_vfe47_config_irq(vfe_dev, 0, 0x81,
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, 0, 0x81,
MSM_ISP_IRQ_DISABLE);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x464);
/* disable danger signal */
@@ -1447,7 +1475,8 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
msm_camera_io_w(0, vfe_dev->vfe_base + 0x64);
msm_camera_io_w(1 << 0, vfe_dev->vfe_base + 0x68);
msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58);
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask,
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ vfe_dev->irq0_mask,
vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
}
@@ -1728,7 +1757,8 @@ int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w(val, vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
/* Keep only halt and reset mask */
- msm_vfe47_config_irq(vfe_dev, (1 << 31), (1 << 8),
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ (1 << 31), (1 << 8),
MSM_ISP_IRQ_SET);
/*Clear IRQ Status0, only leave reset irq mask*/
@@ -1777,7 +1807,8 @@ int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
void msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif)
{
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ vfe_dev->irq0_mask, vfe_dev->irq1_mask,
MSM_ISP_IRQ_SET);
msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
@@ -1884,7 +1915,8 @@ void msm_vfe47_stats_cfg_comp_mask(
comp_mask_reg |= stats_mask << (request_comp_index * 16);
atomic_set(stats_comp_mask, stats_mask |
atomic_read(stats_comp_mask));
- msm_vfe47_config_irq(vfe_dev, 1 << (29 + request_comp_index),
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << (29 + request_comp_index),
0, MSM_ISP_IRQ_ENABLE);
} else {
if (!(atomic_read(stats_comp_mask) & stats_mask))
@@ -1893,7 +1925,8 @@ void msm_vfe47_stats_cfg_comp_mask(
atomic_set(stats_comp_mask,
~stats_mask & atomic_read(stats_comp_mask));
comp_mask_reg &= ~(stats_mask << (request_comp_index * 16));
- msm_vfe47_config_irq(vfe_dev, 1 << (29 + request_comp_index),
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << (29 + request_comp_index),
0, MSM_ISP_IRQ_DISABLE);
}
@@ -1916,32 +1949,41 @@ void msm_vfe47_stats_cfg_wm_irq_mask(
switch (STATS_IDX(stream_info->stream_handle[vfe_idx])) {
case STATS_COMP_IDX_AEC_BG:
- msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_ENABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 15, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_HDR_BE:
- msm_vfe47_config_irq(vfe_dev, 1 << 16, 0, MSM_ISP_IRQ_ENABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 16, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_BG:
- msm_vfe47_config_irq(vfe_dev, 1 << 17, 0, MSM_ISP_IRQ_ENABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 17, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_BF:
- msm_vfe47_config_irq(vfe_dev, 1 << 18, 1 << 26,
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 18, 1 << 26,
MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_HDR_BHIST:
- msm_vfe47_config_irq(vfe_dev, 1 << 19, 0, MSM_ISP_IRQ_ENABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 19, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_RS:
- msm_vfe47_config_irq(vfe_dev, 1 << 20, 0, MSM_ISP_IRQ_ENABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 20, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_CS:
- msm_vfe47_config_irq(vfe_dev, 1 << 21, 0, MSM_ISP_IRQ_ENABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 21, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_IHIST:
- msm_vfe47_config_irq(vfe_dev, 1 << 22, 0, MSM_ISP_IRQ_ENABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 22, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_BHIST:
- msm_vfe47_config_irq(vfe_dev, 1 << 23, 0, MSM_ISP_IRQ_ENABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 23, 0, MSM_ISP_IRQ_ENABLE);
break;
default:
pr_err("%s: Invalid stats idx %d\n", __func__,
@@ -1958,32 +2000,41 @@ void msm_vfe47_stats_clear_wm_irq_mask(
switch (STATS_IDX(stream_info->stream_handle[vfe_idx])) {
case STATS_COMP_IDX_AEC_BG:
- msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_DISABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 15, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_HDR_BE:
- msm_vfe47_config_irq(vfe_dev, 1 << 16, 0, MSM_ISP_IRQ_DISABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 16, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_BG:
- msm_vfe47_config_irq(vfe_dev, 1 << 17, 0, MSM_ISP_IRQ_DISABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 17, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_BF:
- msm_vfe47_config_irq(vfe_dev, 1 << 18, 1 << 26,
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 18, 1 << 26,
MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_HDR_BHIST:
- msm_vfe47_config_irq(vfe_dev, 1 << 19, 0, MSM_ISP_IRQ_DISABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 19, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_RS:
- msm_vfe47_config_irq(vfe_dev, 1 << 20, 0, MSM_ISP_IRQ_DISABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 20, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_CS:
- msm_vfe47_config_irq(vfe_dev, 1 << 21, 0, MSM_ISP_IRQ_DISABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 21, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_IHIST:
- msm_vfe47_config_irq(vfe_dev, 1 << 22, 0, MSM_ISP_IRQ_DISABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 22, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_BHIST:
- msm_vfe47_config_irq(vfe_dev, 1 << 23, 0, MSM_ISP_IRQ_DISABLE);
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ 1 << 23, 0, MSM_ISP_IRQ_DISABLE);
break;
default:
pr_err("%s: Invalid stats idx %d\n", __func__,
@@ -2336,6 +2387,9 @@ int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
return rc;
*rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
vfe_dev->msm_isp_vfe_clk_rate = *rate;
+
+ if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
+ vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg(vfe_dev, NULL);
return 0;
}
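For clarity, the arbitration the new msm_isp47_ahb_clk_cfg() logic performs can be factored as the sketch below; it relies on the cam_ahb_clk_vote enumerators ordering SVS < NOMINAL < TURBO, which the patch's `vote < src_clk_vote` comparison already assumes:

	static enum cam_ahb_clk_vote pick_ahb_vote(enum cam_ahb_clk_vote user_vote,
		unsigned long vfe_clk_rate, const struct msm_isp_clk_rates *rates)
	{
		enum cam_ahb_clk_vote src_vote;

		/* map the current VFE source clock rate onto a corner */
		if (vfe_clk_rate <= rates->svs_rate)
			src_vote = CAM_AHB_SVS_VOTE;
		else if (vfe_clk_rate <= rates->nominal_rate)
			src_vote = CAM_AHB_NOMINAL_VOTE;
		else
			src_vote = CAM_AHB_TURBO_VOTE;

		/* never vote below what the source clock requires */
		return user_vote > src_vote ? user_vote : src_vote;
	}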
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
index 8581373b3b71..3955196d1deb 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
@@ -193,4 +193,6 @@ int msm_vfe47_update_bandwidth(
void msm_vfe47_config_irq(struct vfe_device *vfe_dev,
uint32_t irq0_mask, uint32_t irq1_mask,
enum msm_isp_irq_operation oper);
+int msm_isp47_ahb_clk_cfg(struct vfe_device *vfe_dev,
+ struct msm_isp_ahb_clk_cfg *ahb_cfg);
#endif /* __MSM_ISP47_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
index a792404c243c..49520bb44ad8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
@@ -246,6 +246,8 @@ struct msm_vfe_hardware_info vfe48_hw_info = {
.num_iommu_secure_ctx = 0,
.vfe_clk_idx = VFE48_SRC_CLK_DTSI_IDX,
.runtime_axi_update = 1,
+ .min_ib = 100000000,
+ .min_ab = 100000000,
.vfe_ops = {
.irq_ops = {
.read_irq_status = msm_vfe47_read_irq_status,
@@ -306,6 +308,7 @@ struct msm_vfe_hardware_info vfe48_hw_info = {
.process_error_status = msm_vfe47_process_error_status,
.is_module_cfg_lock_needed =
msm_vfe47_is_module_cfg_lock_needed,
+ .ahb_clk_cfg = msm_isp47_ahb_clk_cfg,
},
.stats_ops = {
.get_stats_idx = msm_vfe47_get_stats_idx,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 39a0845a886f..7488f371545b 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -1023,31 +1023,32 @@ static void msm_isp_calculate_bandwidth(
struct msm_vfe_axi_stream *stream_info)
{
int bpp = 0;
+ struct vfe_device *vfe_dev;
struct msm_vfe_axi_shared_data *axi_data;
int i;
- if (stream_info->stream_src < RDI_INTF_0) {
- for (i = 0; i < stream_info->num_isp; i++) {
- axi_data = &stream_info->vfe_dev[i]->axi_data;
+ for (i = 0; i < stream_info->num_isp; i++) {
+ vfe_dev = stream_info->vfe_dev[i];
+ axi_data = &vfe_dev->axi_data;
+ if (stream_info->stream_src < RDI_INTF_0) {
stream_info->bandwidth[i] =
- (axi_data->src_info[VFE_PIX_0].pixel_clock /
+ (vfe_dev->msm_isp_vfe_clk_rate /
axi_data->src_info[VFE_PIX_0].width) *
stream_info->max_width[i];
stream_info->bandwidth[i] =
(unsigned long)stream_info->bandwidth[i] *
stream_info->format_factor / ISP_Q2;
- }
- } else {
- int rdi = SRC_TO_INTF(stream_info->stream_src);
- bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
- if (rdi < VFE_SRC_MAX) {
- for (i = 0; i < stream_info->num_isp; i++) {
- axi_data = &stream_info->vfe_dev[i]->axi_data;
+ } else {
+ int rdi = SRC_TO_INTF(stream_info->stream_src);
+
+ bpp = msm_isp_get_bit_per_pixel(
+ stream_info->output_format);
+ if (rdi < VFE_SRC_MAX) {
stream_info->bandwidth[i] =
- (axi_data->src_info[rdi].pixel_clock / 8) * bpp;
+ (vfe_dev->msm_isp_vfe_clk_rate / 8) * bpp;
+ } else {
+ pr_err("%s: Invalid rdi interface\n", __func__);
}
- } else {
- pr_err("%s: Invalid rdi interface\n", __func__);
}
}
}
@@ -1103,7 +1104,6 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
uint32_t io_format = 0;
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
struct msm_vfe_axi_stream *stream_info;
- unsigned long flags;
if (stream_cfg_cmd->stream_src >= VFE_AXI_SRC_MAX) {
pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
@@ -1113,12 +1113,9 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
stream_info = msm_isp_get_stream_common_data(vfe_dev,
stream_cfg_cmd->stream_src);
- spin_lock_irqsave(&stream_info->lock, flags);
-
rc = msm_isp_axi_create_stream(vfe_dev,
&vfe_dev->axi_data, stream_cfg_cmd, stream_info);
if (rc) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: create stream failed\n", __func__);
return rc;
}
@@ -1127,7 +1124,6 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
vfe_dev, stream_info, stream_cfg_cmd);
if (rc) {
msm_isp_axi_destroy_stream(vfe_dev, stream_info);
- spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: Request validation failed\n", __func__);
return rc;
}
@@ -1235,7 +1231,6 @@ done:
msm_isp_axi_free_wm(vfe_dev, stream_info);
msm_isp_axi_destroy_stream(vfe_dev, stream_info);
}
- spin_unlock_irqrestore(&stream_info->lock, flags);
return rc;
}
@@ -1246,7 +1241,6 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
int vfe_idx;
- unsigned long flags;
if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >=
VFE_AXI_SRC_MAX) {
@@ -1256,13 +1250,10 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
stream_info = msm_isp_get_stream_common_data(vfe_dev,
HANDLE_TO_IDX(stream_release_cmd->stream_handle));
- spin_lock_irqsave(&stream_info->lock, flags);
-
vfe_idx = msm_isp_get_vfe_idx_for_stream_user(vfe_dev, stream_info);
if (vfe_idx == -ENOTTY ||
stream_release_cmd->stream_handle !=
stream_info->stream_handle[vfe_idx]) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: Invalid stream %p handle %x/%x vfe_idx %d vfe_dev %d num_isp %d\n",
__func__, stream_info,
stream_release_cmd->stream_handle,
@@ -1276,9 +1267,7 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
stream_cfg.cmd = STOP_STREAM;
stream_cfg.num_streams = 1;
stream_cfg.stream_handle[0] = stream_release_cmd->stream_handle;
- spin_unlock_irqrestore(&stream_info->lock, flags);
msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg);
- spin_lock_irqsave(&stream_info->lock, flags);
}
for (i = 0; i < stream_info->num_planes; i++) {
@@ -1296,7 +1285,6 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
msm_isp_axi_free_wm(vfe_dev, stream_info);
msm_isp_axi_destroy_stream(vfe_dev, stream_info);
- spin_unlock_irqrestore(&stream_info->lock, flags);
return rc;
}
@@ -1411,6 +1399,7 @@ static void __msm_isp_axi_stream_update(
switch (stream_info->state) {
case UPDATING:
stream_info->state = ACTIVE;
+ complete_all(&stream_info->active_comp);
break;
case STOP_PENDING:
msm_isp_axi_stream_enable_cfg(stream_info);
@@ -2268,13 +2257,14 @@ int msm_isp_axi_halt(struct vfe_device *vfe_dev,
int msm_isp_axi_reset(struct vfe_device *vfe_dev,
struct msm_vfe_axi_reset_cmd *reset_cmd)
{
- int rc = 0, i, k;
+ int rc = 0, i, k, j;
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
uint32_t bufq_handle = 0, bufq_id = 0;
struct msm_isp_timestamp timestamp;
unsigned long flags;
struct vfe_device *update_vfes[MAX_VFE] = {0, 0};
+ int vfe_idx;
if (!reset_cmd) {
pr_err("%s: NULL pointer reset cmd %pK\n", __func__, reset_cmd);
@@ -2345,6 +2335,20 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev,
ISP_EVENT_BUF_FATAL_ERROR);
return rc;
}
+ if (stream_info->num_planes > 1) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_comp_mask(vfe_dev, stream_info);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+ }
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(
+ vfe_dev, stream_info);
+ for (j = 0; j < stream_info->num_planes; j++)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ enable_wm(
+ vfe_dev->vfe_base,
+ stream_info->wm[vfe_idx][j], 1);
axi_data->src_info[SRC_TO_INTF(stream_info->
stream_src)].frame_id =
@@ -2731,6 +2735,7 @@ static void __msm_isp_stop_axi_streams(struct msm_vfe_axi_stream **streams,
&timestamp);
msm_isp_cfg_stream_scratch(stream_info, VFE_PING_FLAG);
msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG);
+ stream_info->undelivered_request_cnt = 0;
for (k = 0; k < stream_info->num_isp; k++) {
vfe_dev = stream_info->vfe_dev[k];
if (stream_info->num_planes > 1)
@@ -2879,6 +2884,8 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
msm_isp_get_timestamp(&timestamp);
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (stream_cfg_cmd->stream_handle[i] == 0)
+ continue;
stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
@@ -3018,6 +3025,8 @@ static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev_ioctl,
return -EINVAL;
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (stream_cfg_cmd->stream_handle[i] == 0)
+ continue;
stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
@@ -3044,12 +3053,37 @@ int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0, ret;
struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
+ uint32_t stream_idx[MAX_NUM_STREAM];
int i;
+ int vfe_idx;
+ struct msm_vfe_axi_stream *stream_info;
+
+ memset(stream_idx, 0, sizeof(stream_idx));
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
VFE_AXI_SRC_MAX)
return -EINVAL;
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
+ vfe_idx = msm_isp_get_vfe_idx_for_stream_user(vfe_dev,
+ stream_info);
+ if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] !=
+ stream_cfg_cmd->stream_handle[i]) {
+ pr_err("%s: Invalid stream handle %x vfe_idx %d expected %x\n",
+ __func__, stream_cfg_cmd->stream_handle[i],
+ vfe_idx,
+ (vfe_idx != -ENOTTY) ?
+ stream_info->stream_handle[vfe_idx] : 0);
+ return -EINVAL;
+ }
+ /* check for duplicate stream handle */
+ if (stream_idx[stream_info->stream_src] ==
+ stream_cfg_cmd->stream_handle[i])
+ stream_cfg_cmd->stream_handle[i] = 0;
+ else
+ stream_idx[stream_info->stream_src] =
+ stream_cfg_cmd->stream_handle[i];
}
if (stream_cfg_cmd->cmd == START_STREAM) {
msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 1);
@@ -3387,8 +3421,10 @@ static void msm_isp_remove_buf_queue(struct vfe_device *vfe_dev,
if (stream_info->bufq_handle[bufq_id]) {
stream_info->bufq_handle[bufq_id] = 0;
- if (stream_info->state == ACTIVE)
+ if (stream_info->state == ACTIVE) {
+ init_completion(&stream_info->active_comp);
stream_info->state = UPDATING;
+ }
}
spin_unlock_irqrestore(&stream_info->lock, flags);
if (stream_info->state == UPDATING)
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index f851e8c9289e..e226f7e40a07 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -460,7 +460,6 @@ int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
struct msm_vfe_stats_stream *stream_info = NULL;
uint32_t stats_idx;
- unsigned long flags;
stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
get_stats_idx(stream_req_cmd->stats_type);
@@ -472,11 +471,8 @@ int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, stats_idx);
- spin_lock_irqsave(&stream_info->lock, flags);
-
rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd, stream_info);
if (rc < 0) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: create stream failed\n", __func__);
return rc;
}
@@ -491,7 +487,6 @@ int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
msm_isp_stats_cfg_stream_scratch(stream_info,
VFE_PONG_FLAG);
}
- spin_unlock_irqrestore(&stream_info->lock, flags);
return rc;
}
@@ -505,7 +500,6 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
int vfe_idx;
int i;
int k;
- unsigned long flags;
if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
pr_err("%s Invalid stats index %d", __func__, stats_idx);
@@ -513,12 +507,10 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
}
stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, stats_idx);
- spin_lock_irqsave(&stream_info->lock, flags);
vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(
vfe_dev, stream_info);
if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] !=
stream_release_cmd->stream_handle) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: Invalid stream handle %x, expected %x\n",
__func__, stream_release_cmd->stream_handle,
vfe_idx != -ENOTTY ?
@@ -526,7 +518,6 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
return -EINVAL;
}
if (stream_info->state == STATS_AVAILABLE) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: stream already release\n", __func__);
return rc;
}
@@ -537,9 +528,7 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
stream_cfg_cmd.num_streams = 1;
stream_cfg_cmd.stream_handle[0] =
stream_release_cmd->stream_handle;
- spin_unlock_irqrestore(&stream_info->lock, flags);
msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
- spin_lock_irqsave(&stream_info->lock, flags);
}
for (i = vfe_idx, k = vfe_idx + 1; k < stream_info->num_isp; k++, i++) {
@@ -556,7 +545,6 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
if (stream_info->num_isp == 0)
stream_info->state = STATS_AVAILABLE;
- spin_unlock_irqrestore(&stream_info->lock, flags);
return 0;
}
@@ -904,6 +892,10 @@ int msm_isp_stats_reset(struct vfe_device *vfe_dev)
ISP_EVENT_BUF_FATAL_ERROR);
return rc;
}
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_irq_mask(
+ vfe_dev, stream_info);
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, BIT(i), 1);
}
}
@@ -960,7 +952,9 @@ static int msm_isp_check_stream_cfg_cmd(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info;
uint32_t idx;
int vfe_idx;
+ uint32_t stats_idx[MSM_ISP_STATS_MAX];
+ memset(stats_idx, 0, sizeof(stats_idx));
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
@@ -980,6 +974,11 @@ static int msm_isp_check_stream_cfg_cmd(struct vfe_device *vfe_dev,
stream_info->stream_handle[vfe_idx]);
return -EINVAL;
}
+ /* remove duplicate handles */
+ if (stats_idx[idx] == stream_cfg_cmd->stream_handle[i])
+ stream_cfg_cmd->stream_handle[i] = 0;
+ else
+ stats_idx[idx] = stream_cfg_cmd->stream_handle[i];
}
return 0;
}
@@ -1083,6 +1082,8 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
num_stats_comp_mask =
vfe_dev_ioctl->hw_info->stats_hw_info->num_stats_comp_mask;
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (stream_cfg_cmd->stream_handle[i] == 0)
+ continue;
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
stream_info = msm_isp_get_stats_stream_common_data(
vfe_dev_ioctl, idx);
@@ -1169,7 +1170,8 @@ static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
-
+ if (stream_cfg_cmd->stream_handle[i] == 0)
+ continue;
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
stream_info = msm_isp_get_stats_stream_common_data(
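
Both the AXI and stats paths above zero out repeated handles so the subsequent start/stop loops skip them. A self-contained sketch of that filter, with an illustrative source-index encoding:

    #include <stdint.h>

    #define NUM_SRC 8
    #define SRC_OF(h) ((h) & 0x7)   /* illustrative: low bits encode the source */

    static void drop_duplicate_handles(uint32_t *handles, int n)
    {
            uint32_t seen[NUM_SRC] = {0};
            int i, src;

            for (i = 0; i < n; i++) {
                    src = SRC_OF(handles[i]);
                    if (seen[src] == handles[i])
                            handles[i] = 0; /* later loops continue on 0 */
                    else
                            seen[src] = handles[i];
            }
    }
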
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index a4eb80f31984..71c907f2b381 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -20,6 +20,7 @@
#include "msm_isp_stats_util.h"
#include "msm_camera_io_util.h"
#include "cam_smmu_api.h"
+#include "msm_isp48.h"
#define MAX_ISP_V4l2_EVENTS 100
static DEFINE_MUTEX(bandwidth_mgr_mutex);
@@ -482,11 +483,18 @@ int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
}
pixel_clock = input_cfg->input_pix_clk;
- rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
- &pixel_clock);
- if (rc < 0) {
- pr_err("%s: clock set rate failed\n", __func__);
- return rc;
+ /*
+ * Only raise the clock rate; do not lower a higher
+ * rate that another input still needs
+ */
+ if (pixel_clock > vfe_dev->msm_isp_vfe_clk_rate) {
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(
+ vfe_dev,
+ &pixel_clock);
+ if (rc < 0) {
+ pr_err("%s: clock set rate failed\n", __func__);
+ return rc;
+ }
}
return rc;
}
@@ -1739,6 +1747,9 @@ static void msm_isp_process_overflow_irq(
if (overflow_mask) {
struct msm_isp_event_data error_event;
struct msm_vfe_axi_halt_cmd halt_cmd;
+ uint32_t val = 0;
+ int i;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
if (vfe_dev->reset_pending == 1) {
pr_err("%s:%d failed: overflow %x during reset\n",
@@ -1747,6 +1758,16 @@ static void msm_isp_process_overflow_irq(
*irq_status1 &= ~overflow_mask;
return;
}
+ if (msm_vfe_is_vfe48(vfe_dev))
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0xC94);
+ pr_err("%s: vfe %d overflow mask %x, bus_error %x\n",
+ __func__, vfe_dev->pdev->id, overflow_mask, val);
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (!axi_data->free_wm[i])
+ continue;
+ pr_err("%s: wm %d assigned to stream handle %x\n",
+ __func__, i, axi_data->free_wm[i]);
+ }
halt_cmd.overflow_detected = 1;
halt_cmd.stop_camif = 1;
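
The msm_isp_cfg_input hunk above raises the VFE clock only when the new input needs more than the cached rate, so one input can no longer undercut another. The policy in isolation, as a sketch where cached_hz stands in for vfe_dev->msm_isp_vfe_clk_rate:

    /* highest rate granted so far; stands in for msm_isp_vfe_clk_rate */
    static long cached_hz;

    static int request_pixel_clk(long requested_hz, int (*set_rate)(long *))
    {
            int rc;

            if (requested_hz <= cached_hz)
                    return 0;       /* keep the higher rate another input needs */

            rc = set_rate(&requested_hz);
            if (rc == 0)
                    cached_hz = requested_hz;       /* remember the granted rate */
            return rc;
    }
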
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 3ac4c3af3208..258e08c1b34f 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -114,6 +114,13 @@ static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev,
bool status);
static int msm_cpp_buffer_private_ops(struct cpp_device *cpp_dev,
uint32_t buff_mgr_ops, uint32_t id, void *arg);
+static void msm_cpp_set_micro_irq_mask(struct cpp_device *cpp_dev,
+ uint8_t enable, uint32_t irq_mask);
+static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev,
+ int queue_len);
+static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info);
+static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *frame_info);
#if CONFIG_MSM_CPP_DBG
#define CPP_DBG(fmt, args...) pr_err(fmt, ##args)
@@ -636,6 +643,127 @@ static int32_t msm_cpp_poll_rx_empty(void __iomem *cpp_base)
return rc;
}
+static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *frame_info)
+{
+ int32_t s_base, p_base;
+ uint32_t rd_off, wr0_off, wr1_off, wr2_off, wr3_off;
+ uint32_t wr0_mdata_off, wr1_mdata_off, wr2_mdata_off, wr3_mdata_off;
+ uint32_t rd_ref_off, wr_ref_off;
+ uint32_t s_size, p_size;
+ uint8_t tnr_enabled, ubwc_enabled, cds_en;
+ int32_t i = 0;
+ uint32_t *cpp_frame_msg;
+
+ cpp_frame_msg = frame_info->cpp_cmd_msg;
+
+ /* Update stripe/plane size and base offsets */
+ s_base = cpp_dev->payload_params.stripe_base;
+ s_size = cpp_dev->payload_params.stripe_size;
+ p_base = cpp_dev->payload_params.plane_base;
+ p_size = cpp_dev->payload_params.plane_size;
+
+ /* Fetch engine Offset */
+ rd_off = cpp_dev->payload_params.rd_pntr_off;
+ /* Write engine offsets */
+ wr0_off = cpp_dev->payload_params.wr_0_pntr_off;
+ wr1_off = wr0_off + 1;
+ wr2_off = wr1_off + 1;
+ wr3_off = wr2_off + 1;
+ /* Reference engine offsets */
+ rd_ref_off = cpp_dev->payload_params.rd_ref_pntr_off;
+ wr_ref_off = cpp_dev->payload_params.wr_ref_pntr_off;
+ /* Meta data offsets */
+ wr0_mdata_off =
+ cpp_dev->payload_params.wr_0_meta_data_wr_pntr_off;
+ wr1_mdata_off = (wr0_mdata_off + 1);
+ wr2_mdata_off = (wr1_mdata_off + 1);
+ wr3_mdata_off = (wr2_mdata_off + 1);
+
+ tnr_enabled = ((frame_info->feature_mask & TNR_MASK) >> 2);
+ ubwc_enabled = ((frame_info->feature_mask & UBWC_MASK) >> 5);
+ cds_en = ((frame_info->feature_mask & CDS_MASK) >> 6);
+
+ for (i = 0; i < frame_info->num_strips; i++) {
+ pr_err("stripe %d: in %x, out1 %x out2 %x, out3 %x, out4 %x\n",
+ i, cpp_frame_msg[s_base + rd_off + i * s_size],
+ cpp_frame_msg[s_base + wr0_off + i * s_size],
+ cpp_frame_msg[s_base + wr1_off + i * s_size],
+ cpp_frame_msg[s_base + wr2_off + i * s_size],
+ cpp_frame_msg[s_base + wr3_off + i * s_size]);
+
+ if (tnr_enabled) {
+ pr_err("stripe %d: read_ref %x, write_ref %x\n", i,
+ cpp_frame_msg[s_base + rd_ref_off + i * s_size],
+ cpp_frame_msg[s_base + wr_ref_off + i * s_size]
+ );
+ }
+
+ if (cds_en) {
+ pr_err("stripe %d:, dsdn_off %x\n", i,
+ cpp_frame_msg[s_base + rd_ref_off + i * s_size]
+ );
+ }
+
+ if (ubwc_enabled) {
+ pr_err("stripe %d: metadata %x, %x, %x, %x\n", i,
+ cpp_frame_msg[s_base + wr0_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr1_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr2_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr3_mdata_off +
+ i * s_size]
+ );
+ }
+
+ }
+ return 0;
+}
+
+static void msm_cpp_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+{
+ struct cpp_device *cpp_dev = NULL;
+ struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
+ int32_t i = 0, queue_len = 0;
+ struct msm_device_queue *queue = NULL;
+
+ if (token) {
+ cpp_dev = token;
+ disable_irq(cpp_dev->irq->start);
+ if (atomic_read(&cpp_timer.used)) {
+ atomic_set(&cpp_timer.used, 0);
+ del_timer_sync(&cpp_timer.cpp_timer);
+ }
+ mutex_lock(&cpp_dev->mutex);
+ tasklet_kill(&cpp_dev->cpp_tasklet);
+ cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ queue = &cpp_timer.data.cpp_dev->processing_q;
+ queue_len = queue->len;
+ if (!queue_len) {
+ pr_err("%s:%d: Invalid queuelen\n", __func__, __LINE__);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ return;
+ }
+ for (i = 0; i < queue_len; i++) {
+ if (cpp_timer.data.processed_frame[i]) {
+ processed_frame[i] =
+ cpp_timer.data.processed_frame[i];
+ pr_err("Fault on identity=0x%x, frame_id=%03d\n",
+ processed_frame[i]->identity,
+ processed_frame[i]->frame_id);
+ msm_cpp_dump_addr(cpp_dev, processed_frame[i]);
+ msm_cpp_dump_frame_cmd(processed_frame[i]);
+ }
+ }
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ }
+}
static int cpp_init_mem(struct cpp_device *cpp_dev)
{
@@ -652,6 +780,9 @@ static int cpp_init_mem(struct cpp_device *cpp_dev)
return -ENODEV;
cpp_dev->iommu_hdl = iommu_hdl;
+ cam_smmu_reg_client_page_fault_handler(
+ cpp_dev->iommu_hdl,
+ msm_cpp_iommu_fault_handler, cpp_dev);
return 0;
}
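
cpp_init_mem() now registers a page-fault callback through the cam_smmu wrapper so faults dump the in-flight frames before recovery. The stock-kernel analogue of cam_smmu_reg_client_page_fault_handler() is iommu_set_fault_handler(); a hedged sketch, with struct my_ctx as a stand-in for the driver's device state:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/iommu.h>

    struct my_ctx { int dummy; /* whatever state the handler must reach */ };

    static int my_iommu_fault(struct iommu_domain *domain, struct device *dev,
                              unsigned long iova, int flags, void *token)
    {
            struct my_ctx *ctx = token;

            dev_err(dev, "ctx %p: SMMU fault at iova 0x%lx, flags 0x%x\n",
                    ctx, iova, flags);
            /* dump in-flight work here, as msm_cpp_iommu_fault_handler does */
            return -ENOSYS; /* not handled: core falls back to its default report */
    }

    /* at init time, once the domain exists:
     * iommu_set_fault_handler(domain, my_iommu_fault, ctx);
     */
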
diff --git a/drivers/media/platform/msm/camera_v2/sensor/Makefile b/drivers/media/platform/msm/camera_v2/sensor/Makefile
index 539ba24e109b..872dc59d218e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/Makefile
+++ b/drivers/media/platform/msm/camera_v2/sensor/Makefile
@@ -4,5 +4,5 @@ ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_vb2
ccflags-y += -Idrivers/media/platform/msm/camera_v2/camera
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
-obj-$(CONFIG_MSMB_CAMERA) += cci/ io/ csiphy/ csid/ actuator/ eeprom/ ois/ flash/
+obj-$(CONFIG_MSMB_CAMERA) += cci/ io/ csiphy/ csid/ actuator/ eeprom/ ois/ flash/ ir_led/ ir_cut/
obj-$(CONFIG_MSM_CAMERA_SENSOR) += msm_sensor_init.o msm_sensor_driver.o msm_sensor.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index 0b3e4e1fcf04..bf3973888573 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -101,11 +101,6 @@ static void msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl,
i2c_tbl = a_ctrl->i2c_reg_tbl;
for (i = 0; i < size; i++) {
- /* check that the index into i2c_tbl cannot grow larger that
- the allocated size of i2c_tbl */
- if ((a_ctrl->total_steps + 1) < (a_ctrl->i2c_tbl_index))
- break;
-
if (write_arr[i].reg_write_type == MSM_ACTUATOR_WRITE_DAC) {
value = (next_lens_position <<
write_arr[i].data_shift) |
@@ -119,6 +114,11 @@ static void msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl,
i2c_byte2 = value & 0xFF;
CDBG("byte1:0x%x, byte2:0x%x\n",
i2c_byte1, i2c_byte2);
+ if (a_ctrl->i2c_tbl_index >
+ a_ctrl->total_steps) {
+ pr_err("failed:i2c table index out of bound\n");
+ break;
+ }
i2c_tbl[a_ctrl->i2c_tbl_index].
reg_addr = i2c_byte1;
i2c_tbl[a_ctrl->i2c_tbl_index].
@@ -139,6 +139,10 @@ static void msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl,
i2c_byte2 = (hw_dword & write_arr[i].hw_mask) >>
write_arr[i].hw_shift;
}
+ if (a_ctrl->i2c_tbl_index > a_ctrl->total_steps) {
+ pr_err("failed: i2c table index out of bound\n");
+ break;
+ }
CDBG("i2c_byte1:0x%x, i2c_byte2:0x%x\n", i2c_byte1, i2c_byte2);
i2c_tbl[a_ctrl->i2c_tbl_index].reg_addr = i2c_byte1;
i2c_tbl[a_ctrl->i2c_tbl_index].reg_data = i2c_byte2;
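
The actuator fix above moves the bounds check to sit immediately before each table write, so the index can never run past the allocation (the table apparently holds total_steps + 1 entries, hence the driver's strict `>` comparison). The shape of the guard in a self-contained form:

    #include <stdio.h>

    struct reg_entry { unsigned short addr, data; };

    static int table_append(struct reg_entry *tbl, int *idx, int capacity,
                            unsigned short addr, unsigned short data)
    {
            if (*idx >= capacity) { /* check right before the write */
                    fprintf(stderr, "i2c table index out of bounds\n");
                    return -1;
            }
            tbl[*idx].addr = addr;
            tbl[*idx].data = data;
            (*idx)++;
            return 0;
    }
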
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index 6a1b385c3d8b..e4cee1fa4ffc 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -1052,6 +1052,25 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
CDBG("%s:%d called\n", __func__, __LINE__);
+ rc = msm_camera_config_vreg(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_vreg,
+ csiphy_dev->regulator_count, NULL, 0,
+ &csiphy_dev->csiphy_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d csiphy config_vreg failed\n",
+ __func__, __LINE__);
+ goto csiphy_resource_fail;
+ }
+ rc = msm_camera_enable_vreg(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_vreg,
+ csiphy_dev->regulator_count, NULL, 0,
+ &csiphy_dev->csiphy_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d csiphy enable_vreg failed\n",
+ __func__, __LINE__);
+ goto top_vreg_enable_failed;
+ }
+
rc = msm_camera_clk_enable(&csiphy_dev->pdev->dev,
csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
csiphy_dev->num_clk, true);
@@ -1088,6 +1107,11 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
return 0;
+top_vreg_enable_failed:
+ msm_camera_config_vreg(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_vreg,
+ csiphy_dev->regulator_count, NULL, 0,
+ &csiphy_dev->csiphy_reg_ptr[0], 0);
csiphy_resource_fail:
if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
CAM_AHB_SUSPEND_VOTE) < 0)
@@ -1128,6 +1152,24 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
pr_err("%s: failed to vote for AHB\n", __func__);
return rc;
}
+ rc = msm_camera_config_vreg(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_vreg,
+ csiphy_dev->regulator_count, NULL, 0,
+ &csiphy_dev->csiphy_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d csiphy config_vreg failed\n",
+ __func__, __LINE__);
+ goto csiphy_resource_fail;
+ }
+ rc = msm_camera_enable_vreg(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_vreg,
+ csiphy_dev->regulator_count, NULL, 0,
+ &csiphy_dev->csiphy_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d csiphy enable_vreg failed\n",
+ __func__, __LINE__);
+ goto top_vreg_enable_failed;
+ }
rc = msm_camera_clk_enable(&csiphy_dev->pdev->dev,
csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
@@ -1139,7 +1181,7 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
csiphy_dev->ref_count--;
goto csiphy_resource_fail;
}
- CDBG("%s:%d called\n", __func__, __LINE__);
+ CDBG("%s:%d clk enable success\n", __func__, __LINE__);
if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW)
msm_csiphy_3ph_reset(csiphy_dev);
@@ -1161,7 +1203,11 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
csiphy_dev->hw_version);
csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
return 0;
-
+top_vreg_enable_failed:
+ msm_camera_config_vreg(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_vreg,
+ csiphy_dev->regulator_count, NULL, 0,
+ &csiphy_dev->csiphy_reg_ptr[0], 0);
csiphy_resource_fail:
if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
CAM_AHB_SUSPEND_VOTE) < 0)
@@ -1275,6 +1321,14 @@ static int msm_csiphy_release(struct csiphy_device *csiphy_dev, void *arg)
csiphy_dev->csiphy_3p_clk, 2, false);
}
+ msm_camera_enable_vreg(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_vreg,
+ csiphy_dev->regulator_count, NULL, 0,
+ &csiphy_dev->csiphy_reg_ptr[0], 0);
+ msm_camera_config_vreg(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_vreg, csiphy_dev->regulator_count,
+ NULL, 0, &csiphy_dev->csiphy_reg_ptr[0], 0);
+
csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
@@ -1386,6 +1440,13 @@ static int msm_csiphy_release(struct csiphy_device *csiphy_dev, void *arg)
csiphy_dev->csiphy_3p_clk, 2, false);
}
+ msm_camera_enable_vreg(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_vreg, csiphy_dev->regulator_count,
+ NULL, 0, &csiphy_dev->csiphy_reg_ptr[0], 0);
+ msm_camera_config_vreg(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_vreg, csiphy_dev->regulator_count,
+ NULL, 0, &csiphy_dev->csiphy_reg_ptr[0], 0);
+
csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
@@ -1711,6 +1772,14 @@ static int csiphy_probe(struct platform_device *pdev)
goto csiphy_no_resource;
}
+ rc = msm_camera_get_dt_vreg_data(pdev->dev.of_node,
+ &(new_csiphy_dev->csiphy_vreg),
+ &(new_csiphy_dev->regulator_count));
+ if (rc < 0) {
+ pr_err("%s: get vreg data from dtsi fail\n", __func__);
+ rc = -EFAULT;
+ goto csiphy_no_resource;
+ }
/* ToDo: Enable 3phase clock for dynamic clock enable/disable */
rc = msm_csiphy_get_clk_info(new_csiphy_dev, pdev);
if (rc < 0) {
@@ -1781,7 +1850,7 @@ static int msm_csiphy_exit(struct platform_device *pdev)
&csiphy_dev->csiphy_all_clk,
csiphy_dev->num_all_clk);
- msm_camera_put_reg_base(pdev, csiphy_dev->base, "csid", true);
+ msm_camera_put_reg_base(pdev, csiphy_dev->base, "csiphy", true);
if (csiphy_dev->hw_dts_version >= CSIPHY_VERSION_V30) {
msm_camera_put_reg_base(pdev, csiphy_dev->clk_mux_base,
"csiphy_clk_mux", true);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index aba88da1157e..70462dcd3b12 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -20,6 +20,7 @@
#include <media/msm_cam_sensor.h>
#include "msm_sd.h"
#include "msm_camera_io_util.h"
+#include "msm_camera_dt_util.h"
#include "cam_soc_api.h"
#define MAX_CSIPHY 3
@@ -183,6 +184,9 @@ struct csiphy_device {
uint8_t num_irq_registers;
uint32_t csiphy_sof_debug;
uint32_t csiphy_sof_debug_count;
+ struct camera_vreg_t *csiphy_vreg;
+ struct regulator *csiphy_reg_ptr[MAX_REGULATOR];
+ int32_t regulator_count;
};
#define VIDIOC_MSM_CSIPHY_RELEASE \
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
index 8f911d362477..a4ee5041bfff 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
@@ -276,6 +276,12 @@ int32_t msm_camera_cci_i2c_write_seq_table(
client_addr_type = client->addr_type;
client->addr_type = write_setting->addr_type;
+ if (reg_setting->reg_data_size > I2C_SEQ_REG_DATA_MAX) {
+ pr_err("%s: number of bytes %u exceeding the max supported %d\n",
+ __func__, reg_setting->reg_data_size, I2C_SEQ_REG_DATA_MAX);
+ return rc;
+ }
+
for (i = 0; i < write_setting->size; i++) {
rc = msm_camera_cci_i2c_write_seq(client, reg_setting->reg_addr,
reg_setting->reg_data, reg_setting->reg_data_size);
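
This CCI guard, mirrored in the QUP variant further below, rejects a reg_data_size above I2C_SEQ_REG_DATA_MAX before the write loop touches anything. The guard in isolation, as plain C with an illustrative cap:

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    #define SEQ_DATA_MAX 1024       /* illustrative cap */

    /* dst is assumed to hold at least SEQ_DATA_MAX bytes */
    static int write_seq(uint8_t *dst, const uint8_t *src, uint32_t len)
    {
            if (len > SEQ_DATA_MAX) /* validate before any copy */
                    return -EINVAL;
            memcpy(dst, src, len);
            return 0;
    }
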
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
index 6b867bfb5c4a..3f079fe2c173 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
@@ -270,6 +270,38 @@ int msm_sensor_get_sub_module_index(struct device_node *of_node,
src_node = NULL;
}
+ src_node = of_parse_phandle(of_node, "qcom,ir-led-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,ir led cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_IR_LED] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ src_node = of_parse_phandle(of_node, "qcom,ir-cut-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,ir cut cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_IR_CUT] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
rc = of_property_read_u32(of_node, "qcom,strobe-flash-sd-index", &val);
if (rc != -EINVAL) {
CDBG("%s qcom,strobe-flash-sd-index %d, rc %d\n", __func__,
@@ -748,6 +780,54 @@ int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
rc = -ENOMEM;
return rc;
}
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-ir-p", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-ir-p failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-ir-p invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+
+ gconf->gpio_num_info->gpio_num[IR_CUT_FILTER_GPIO_P] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[IR_CUT_FILTER_GPIO_P] = 1;
+
+ CDBG("%s qcom,gpio-ir-p %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[IR_CUT_FILTER_GPIO_P]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-ir-m", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-ir-m failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-ir-m invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+
+ gconf->gpio_num_info->gpio_num[IR_CUT_FILTER_GPIO_M] =
+ gpio_array[val];
+
+ gconf->gpio_num_info->valid[IR_CUT_FILTER_GPIO_M] = 1;
+
+ CDBG("%s qcom,gpio-ir-m %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[IR_CUT_FILTER_GPIO_M]);
+ } else {
+ rc = 0;
+ }
+
rc = of_property_read_u32(of_node, "qcom,gpio-vana", &val);
if (rc != -EINVAL) {
if (rc < 0) {
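
The qcom,ir-led-src and qcom,ir-cut-src blocks above repeat the same phandle-then-cell-index walk; the pattern factors naturally into a helper that also keeps the of_node_put() on every path. A hedged sketch using the real OF APIs:

    #include <linux/of.h>

    static int read_src_cell_index(struct device_node *node, const char *prop,
                                   u32 *out)
    {
            struct device_node *src;
            int rc;

            src = of_parse_phandle(node, prop, 0);
            if (!src)
                    return -ENOENT; /* optional source is simply absent */

            rc = of_property_read_u32(src, "cell-index", out);
            of_node_put(src);       /* drop the reference on success and failure */
            return rc;
    }

    /* e.g.: read_src_cell_index(of_node, "qcom,ir-led-src", &val); */
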
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
index 3b101798edac..7a0fb97061d5 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
@@ -290,6 +290,12 @@ int32_t msm_camera_qup_i2c_write_seq_table(struct msm_camera_i2c_client *client,
client_addr_type = client->addr_type;
client->addr_type = write_setting->addr_type;
+ if (reg_setting->reg_data_size > I2C_SEQ_REG_DATA_MAX) {
+ pr_err("%s: number of bytes %u exceeding the max supported %d\n",
+ __func__, reg_setting->reg_data_size, I2C_SEQ_REG_DATA_MAX);
+ return rc;
+ }
+
for (i = 0; i < write_setting->size; i++) {
rc = msm_camera_qup_i2c_write_seq(client, reg_setting->reg_addr,
reg_setting->reg_data, reg_setting->reg_data_size);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ir_cut/Makefile b/drivers/media/platform/msm/camera_v2/sensor/ir_cut/Makefile
new file mode 100644
index 000000000000..8950c1c83763
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/ir_cut/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSMB_CAMERA) += msm_ir_cut.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ir_cut/msm_ir_cut.c b/drivers/media/platform/msm/camera_v2/sensor/ir_cut/msm_ir_cut.c
new file mode 100644
index 000000000000..a0e35ba99a25
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/ir_cut/msm_ir_cut.c
@@ -0,0 +1,665 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include "msm_ir_cut.h"
+#include "msm_camera_dt_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+DEFINE_MSM_MUTEX(msm_ir_cut_mutex);
+
+static struct v4l2_file_operations msm_ir_cut_v4l2_subdev_fops;
+
+static const struct of_device_id msm_ir_cut_dt_match[] = {
+ {.compatible = "qcom,ir-cut", .data = NULL},
+ {}
+};
+
+static struct msm_ir_cut_table msm_gpio_ir_cut_table;
+
+static struct msm_ir_cut_table *ir_cut_table[] = {
+ &msm_gpio_ir_cut_table,
+};
+
+static int32_t msm_ir_cut_get_subdev_id(
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl, void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ CDBG("Enter\n");
+ if (!subdev_id) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ if (ir_cut_ctrl->ir_cut_device_type != MSM_CAMERA_PLATFORM_DEVICE) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+
+ *subdev_id = ir_cut_ctrl->pdev->id;
+
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_ir_cut_init(
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
+ struct msm_ir_cut_cfg_data_t *ir_cut_data)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter");
+
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_on(ir_cut_ctrl, ir_cut_data);
+
+ CDBG("Exit");
+ return rc;
+}
+
+static int32_t msm_ir_cut_release(
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl)
+{
+ int32_t rc = 0;
+
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_RELEASE) {
+ pr_err("%s:%d Invalid ir_cut state = %d",
+ __func__, __LINE__, ir_cut_ctrl->ir_cut_state);
+ return 0;
+ }
+
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_on(ir_cut_ctrl, NULL);
+ if (rc < 0) {
+ pr_err("%s:%d camera_ir_cut_on failed rc = %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ ir_cut_ctrl->ir_cut_state = MSM_CAMERA_IR_CUT_RELEASE;
+ return 0;
+}
+
+static int32_t msm_ir_cut_off(struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
+ struct msm_ir_cut_cfg_data_t *ir_cut_data)
+{
+ int rc = 0;
+
+ CDBG("Enter cut off\n");
+
+ if (ir_cut_ctrl->gconf) {
+ rc = msm_camera_request_gpio_table(
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl_size, 1);
+
+ if (rc < 0) {
+ pr_err("ERR:%s:Failed in selecting state: %d\n",
+ __func__, rc);
+
+ return rc;
+ }
+ } else {
+ pr_err("%s: No IR CUT GPIOs\n", __func__);
+ return 0;
+ }
+
+ if (ir_cut_ctrl->cam_pinctrl_status) {
+ rc = pinctrl_select_state(
+ ir_cut_ctrl->pinctrl_info.pinctrl,
+ ir_cut_ctrl->pinctrl_info.gpio_state_active);
+
+ if (rc < 0)
+ pr_err("ERR:%s:%d cannot set pin to active state: %d",
+ __func__, __LINE__, rc);
+ }
+
+ CDBG("ERR:%s:gpio_conf->gpio_num_info->gpio_num[0] = %d",
+ __func__,
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_P]);
+
+ CDBG("ERR:%s:gpio_conf->gpio_num_info->gpio_num[1] = %d",
+ __func__,
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_M]);
+
+ gpio_set_value_cansleep(
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_P],
+ 0);
+
+ gpio_set_value_cansleep(
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_M],
+ 1);
+
+ if (ir_cut_ctrl->gconf) {
+ rc = msm_camera_request_gpio_table(
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl_size, 0);
+
+ if (rc < 0) {
+ pr_err("ERR:%s:Failed in selecting state: %d\n",
+ __func__, rc);
+
+ return rc;
+ }
+ } else {
+ pr_err("%s: No IR CUT GPIOs\n", __func__);
+ return 0;
+ }
+
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_ir_cut_on(
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
+ struct msm_ir_cut_cfg_data_t *ir_cut_data)
+{
+ int rc = 0;
+
+ CDBG("Enter ir cut on\n");
+
+ if (ir_cut_ctrl->gconf) {
+ rc = msm_camera_request_gpio_table(
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl_size, 1);
+
+ if (rc < 0) {
+ pr_err("ERR:%s:Failed in selecting state: %d\n",
+ __func__, rc);
+
+ return rc;
+ }
+ } else {
+ pr_err("%s: No IR CUT GPIOs\n", __func__);
+ return 0;
+ }
+
+ if (ir_cut_ctrl->cam_pinctrl_status) {
+ rc = pinctrl_select_state(
+ ir_cut_ctrl->pinctrl_info.pinctrl,
+ ir_cut_ctrl->pinctrl_info.gpio_state_active);
+
+ if (rc < 0)
+ pr_err("ERR:%s:%d cannot set pin to active state: %d",
+ __func__, __LINE__, rc);
+ }
+
+ CDBG("ERR:%s: gpio_conf->gpio_num_info->gpio_num[0] = %d",
+ __func__,
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_P]);
+
+ CDBG("ERR:%s: gpio_conf->gpio_num_info->gpio_num[1] = %d",
+ __func__,
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_M]);
+
+ gpio_set_value_cansleep(
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_P],
+ 1);
+
+ gpio_set_value_cansleep(
+ ir_cut_ctrl->gconf->
+ gpio_num_info->gpio_num[IR_CUT_FILTER_GPIO_M],
+ 1);
+
+ if (ir_cut_ctrl->gconf) {
+ rc = msm_camera_request_gpio_table(
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl_size, 0);
+
+ if (rc < 0) {
+ pr_err("ERR:%s:Failed in selecting state: %d\n",
+ __func__, rc);
+
+ return rc;
+ }
+ } else {
+ pr_err("%s: No IR CUT GPIOs\n", __func__);
+ return 0;
+ }
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_ir_cut_handle_init(
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
+ struct msm_ir_cut_cfg_data_t *ir_cut_data)
+{
+ uint32_t i = 0;
+ int32_t rc = -EFAULT;
+ enum msm_ir_cut_driver_type ir_cut_driver_type =
+ ir_cut_ctrl->ir_cut_driver_type;
+
+ CDBG("Enter");
+
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_INIT) {
+ pr_err("%s:%d Invalid ir_cut state = %d",
+ __func__, __LINE__, ir_cut_ctrl->ir_cut_state);
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ir_cut_table); i++) {
+ if (ir_cut_driver_type == ir_cut_table[i]->ir_cut_driver_type) {
+ ir_cut_ctrl->func_tbl = &ir_cut_table[i]->func_tbl;
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc < 0) {
+ pr_err("%s:%d failed invalid ir_cut_driver_type %d\n",
+ __func__, __LINE__, ir_cut_driver_type);
+ return -EINVAL;
+ }
+
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_init(
+ ir_cut_ctrl, ir_cut_data);
+ if (rc < 0) {
+ pr_err("%s:%d camera_ir_cut_init failed rc = %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ ir_cut_ctrl->ir_cut_state = MSM_CAMERA_IR_CUT_INIT;
+
+ CDBG("Exit");
+ return 0;
+}
+
+static int32_t msm_ir_cut_config(struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
+ void __user *argp)
+{
+ int32_t rc = -EINVAL;
+ struct msm_ir_cut_cfg_data_t *ir_cut_data =
+ (struct msm_ir_cut_cfg_data_t *) argp;
+
+ mutex_lock(ir_cut_ctrl->ir_cut_mutex);
+
+ CDBG("Enter %s type %d\n", __func__, ir_cut_data->cfg_type);
+
+ switch (ir_cut_data->cfg_type) {
+ case CFG_IR_CUT_INIT:
+ rc = msm_ir_cut_handle_init(ir_cut_ctrl, ir_cut_data);
+ break;
+ case CFG_IR_CUT_RELEASE:
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_INIT)
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_release(
+ ir_cut_ctrl);
+ break;
+ case CFG_IR_CUT_OFF:
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_INIT)
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_off(
+ ir_cut_ctrl, ir_cut_data);
+ break;
+ case CFG_IR_CUT_ON:
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_INIT)
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_on(
+ ir_cut_ctrl, ir_cut_data);
+ break;
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+ mutex_unlock(ir_cut_ctrl->ir_cut_mutex);
+
+ CDBG("Exit %s type %d\n", __func__, ir_cut_data->cfg_type);
+
+ return rc;
+}
+
+static long msm_ir_cut_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_ir_cut_ctrl_t *fctrl = NULL;
+ void __user *argp = (void __user *)arg;
+
+ CDBG("Enter\n");
+
+ if (!sd) {
+ pr_err("sd NULL\n");
+ return -EINVAL;
+ }
+ fctrl = v4l2_get_subdevdata(sd);
+ if (!fctrl) {
+ pr_err("fctrl NULL\n");
+ return -EINVAL;
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_ir_cut_get_subdev_id(fctrl, argp);
+ case VIDIOC_MSM_IR_CUT_CFG:
+ return msm_ir_cut_config(fctrl, argp);
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_SHUTDOWN:
+ if (!fctrl->func_tbl) {
+ pr_err("fctrl->func_tbl NULL\n");
+ return -EINVAL;
+ } else {
+ return fctrl->func_tbl->camera_ir_cut_release(fctrl);
+ }
+ default:
+ pr_err_ratelimited("invalid cmd %d\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+ CDBG("Exit\n");
+}
+
+static struct v4l2_subdev_core_ops msm_ir_cut_subdev_core_ops = {
+ .ioctl = msm_ir_cut_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_ir_cut_subdev_ops = {
+ .core = &msm_ir_cut_subdev_core_ops,
+};
+static int msm_ir_cut_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh) {
+
+ int rc = 0;
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl = v4l2_get_subdevdata(sd);
+
+ CDBG("Enter\n");
+
+ if (!ir_cut_ctrl) {
+ pr_err("%s: failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_INIT)
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_release(
+ ir_cut_ctrl);
+
+ CDBG("Exit\n");
+
+ return rc;
+};
+
+static const struct v4l2_subdev_internal_ops msm_ir_cut_internal_ops = {
+ .close = msm_ir_cut_close,
+};
+
+static int32_t msm_ir_cut_get_gpio_dt_data(struct device_node *of_node,
+ struct msm_ir_cut_ctrl_t *fctrl)
+{
+ int32_t rc = 0, i = 0;
+ uint16_t *gpio_array = NULL;
+ int16_t gpio_array_size = 0;
+ struct msm_camera_gpio_conf *gconf = NULL;
+
+ gpio_array_size = of_gpio_count(of_node);
+ CDBG("%s gpio count %d\n", __func__, gpio_array_size);
+
+ if (gpio_array_size > 0) {
+ fctrl->power_info.gpio_conf =
+ kzalloc(sizeof(struct msm_camera_gpio_conf),
+ GFP_KERNEL);
+ if (!fctrl->power_info.gpio_conf) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ gconf = fctrl->power_info.gpio_conf;
+
+ gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!gpio_array)
+ return -ENOMEM;
+ for (i = 0; i < gpio_array_size; i++) {
+ gpio_array[i] = of_get_gpio(of_node, i);
+ if (((int16_t)gpio_array[i]) < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -EINVAL;
+ goto free_gpio_array;
+ }
+ CDBG("%s gpio_array[%d] = %d\n", __func__, i,
+ gpio_array[i]);
+ }
+
+ rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf,
+ gpio_array, gpio_array_size);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_gpio_array;
+ }
+ kfree(gpio_array);
+
+ if (fctrl->ir_cut_driver_type == IR_CUT_DRIVER_DEFAULT)
+ fctrl->ir_cut_driver_type = IR_CUT_DRIVER_GPIO;
+ CDBG("%s:%d fctrl->ir_cut_driver_type = %d", __func__, __LINE__,
+ fctrl->ir_cut_driver_type);
+ }
+
+ return rc;
+
+free_gpio_array:
+ kfree(gpio_array);
+ return rc;
+}
+
+static int32_t msm_ir_cut_get_dt_data(struct device_node *of_node,
+ struct msm_ir_cut_ctrl_t *fctrl)
+{
+ int32_t rc = 0;
+
+ CDBG("called\n");
+
+ if (!of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ /* Read the sub device */
+ rc = of_property_read_u32(of_node, "cell-index", &fctrl->pdev->id);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+
+ fctrl->ir_cut_driver_type = IR_CUT_DRIVER_DEFAULT;
+
+ /* Read the gpio information from device tree */
+ rc = msm_ir_cut_get_gpio_dt_data(of_node, fctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_ir_cut_get_gpio_dt_data failed rc %d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_ir_cut_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ int32_t rc = 0;
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct msm_ir_cut_cfg_data_t32 *u32 =
+ (struct msm_ir_cut_cfg_data_t32 *)arg;
+ struct msm_ir_cut_cfg_data_t ir_cut_data;
+
+ CDBG("Enter");
+ ir_cut_data.cfg_type = u32->cfg_type;
+
+ switch (cmd) {
+ case VIDIOC_MSM_IR_CUT_CFG32:
+ cmd = VIDIOC_MSM_IR_CUT_CFG;
+ break;
+ default:
+ return msm_ir_cut_subdev_ioctl(sd, cmd, arg);
+ }
+
+ rc = msm_ir_cut_subdev_ioctl(sd, cmd, &ir_cut_data);
+
+ CDBG("Exit");
+ return rc;
+}
+
+static long msm_ir_cut_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_ir_cut_subdev_do_ioctl);
+}
+#endif
+
+static int32_t msm_ir_cut_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0, i = 0;
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl = NULL;
+
+ CDBG("Enter");
+ if (!pdev->dev.of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ ir_cut_ctrl = kzalloc(sizeof(struct msm_ir_cut_ctrl_t), GFP_KERNEL);
+ if (!ir_cut_ctrl)
+ return -ENOMEM;
+
+ memset(ir_cut_ctrl, 0, sizeof(struct msm_ir_cut_ctrl_t));
+
+ ir_cut_ctrl->pdev = pdev;
+
+ rc = msm_ir_cut_get_dt_data(pdev->dev.of_node, ir_cut_ctrl);
+
+ if (rc < 0) {
+ pr_err("%s:%d msm_ir_cut_get_dt_data failed\n",
+ __func__, __LINE__);
+ kfree(ir_cut_ctrl);
+ return -EINVAL;
+ }
+
+ rc = msm_sensor_driver_get_gpio_data(&(ir_cut_ctrl->gconf),
+ (&pdev->dev)->of_node);
+
+ if ((rc < 0) || (ir_cut_ctrl->gconf == NULL)) {
+ pr_err("%s: No IR CUT GPIOs\n", __func__);
+
+ kfree(ir_cut_ctrl);
+ return -EINVAL;
+ }
+
+ CDBG("%s: gpio_request_table_size = %d\n",
+ __func__,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl_size);
+
+ for (i = 0;
+ i < ir_cut_ctrl->gconf->cam_gpio_req_tbl_size; i++) {
+ CDBG("%s: gpio = %d\n", __func__,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl[i].gpio);
+ CDBG("%s: gpio-flags = %lu\n", __func__,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl[i].flags);
+ CDBG("%s: gconf->gpio_num_info->gpio_num[%d] = %d\n",
+ __func__, i,
+ ir_cut_ctrl->gconf->gpio_num_info->gpio_num[i]);
+ }
+
+ ir_cut_ctrl->cam_pinctrl_status = 1;
+
+ rc = msm_camera_pinctrl_init(
+ &(ir_cut_ctrl->pinctrl_info), &(pdev->dev));
+
+ if (rc < 0) {
+ pr_err("ERR:%s: Error in reading IR CUT pinctrl\n",
+ __func__);
+ ir_cut_ctrl->cam_pinctrl_status = 0;
+ }
+
+ ir_cut_ctrl->ir_cut_state = MSM_CAMERA_IR_CUT_RELEASE;
+ ir_cut_ctrl->power_info.dev = &ir_cut_ctrl->pdev->dev;
+ ir_cut_ctrl->ir_cut_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ ir_cut_ctrl->ir_cut_mutex = &msm_ir_cut_mutex;
+
+ /* Initialize sub device */
+ v4l2_subdev_init(&ir_cut_ctrl->msm_sd.sd, &msm_ir_cut_subdev_ops);
+ v4l2_set_subdevdata(&ir_cut_ctrl->msm_sd.sd, ir_cut_ctrl);
+
+ ir_cut_ctrl->msm_sd.sd.internal_ops = &msm_ir_cut_internal_ops;
+ ir_cut_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(ir_cut_ctrl->msm_sd.sd.name,
+ ARRAY_SIZE(ir_cut_ctrl->msm_sd.sd.name),
+ "msm_camera_ir_cut");
+ media_entity_init(&ir_cut_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ ir_cut_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ir_cut_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_IR_CUT;
+ ir_cut_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x1;
+ msm_sd_register(&ir_cut_ctrl->msm_sd);
+
+ CDBG("%s:%d ir_cut sd name = %s", __func__, __LINE__,
+ ir_cut_ctrl->msm_sd.sd.entity.name);
+ msm_ir_cut_v4l2_subdev_fops = v4l2_subdev_fops;
+#ifdef CONFIG_COMPAT
+ msm_ir_cut_v4l2_subdev_fops.compat_ioctl32 =
+ msm_ir_cut_subdev_fops_ioctl;
+#endif
+ ir_cut_ctrl->msm_sd.sd.devnode->fops = &msm_ir_cut_v4l2_subdev_fops;
+
+ CDBG("probe success\n");
+ return rc;
+}
+
+MODULE_DEVICE_TABLE(of, msm_ir_cut_dt_match);
+
+static struct platform_driver msm_ir_cut_platform_driver = {
+ .probe = msm_ir_cut_platform_probe,
+ .driver = {
+ .name = "qcom,ir-cut",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ir_cut_dt_match,
+ },
+};
+
+static int __init msm_ir_cut_init_module(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_ir_cut_platform_driver);
+ if (!rc)
+ return rc;
+
+ pr_err("platform probe for ir_cut failed");
+
+ return rc;
+}
+
+static void __exit msm_ir_cut_exit_module(void)
+{
+ platform_driver_unregister(&msm_ir_cut_platform_driver);
+}
+
+static struct msm_ir_cut_table msm_gpio_ir_cut_table = {
+ .ir_cut_driver_type = IR_CUT_DRIVER_GPIO,
+ .func_tbl = {
+ .camera_ir_cut_init = msm_ir_cut_init,
+ .camera_ir_cut_release = msm_ir_cut_release,
+ .camera_ir_cut_off = msm_ir_cut_off,
+ .camera_ir_cut_on = msm_ir_cut_on,
+ },
+};
+
+module_init(msm_ir_cut_init_module);
+module_exit(msm_ir_cut_exit_module);
+MODULE_DESCRIPTION("MSM IR CUT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ir_cut/msm_ir_cut.h b/drivers/media/platform/msm/camera_v2/sensor/ir_cut/msm_ir_cut.h
new file mode 100644
index 000000000000..23fd23952523
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/ir_cut/msm_ir_cut.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MSM_IR_CUT_H
+#define MSM_IR_CUT_H
+
+#include <soc/qcom/camera2.h>
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+#include "msm_sd.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+enum msm_camera_ir_cut_state_t {
+ MSM_CAMERA_IR_CUT_INIT,
+ MSM_CAMERA_IR_CUT_RELEASE,
+};
+
+enum msm_ir_cut_driver_type {
+ IR_CUT_DRIVER_GPIO,
+ IR_CUT_DRIVER_DEFAULT,
+};
+
+struct msm_ir_cut_ctrl_t;
+
+struct msm_ir_cut_func_t {
+ int32_t (*camera_ir_cut_init)(struct msm_ir_cut_ctrl_t *,
+ struct msm_ir_cut_cfg_data_t *);
+ int32_t (*camera_ir_cut_release)(struct msm_ir_cut_ctrl_t *);
+ int32_t (*camera_ir_cut_off)(struct msm_ir_cut_ctrl_t *,
+ struct msm_ir_cut_cfg_data_t *);
+ int32_t (*camera_ir_cut_on)(struct msm_ir_cut_ctrl_t *,
+ struct msm_ir_cut_cfg_data_t *);
+};
+
+struct msm_ir_cut_table {
+ enum msm_ir_cut_driver_type ir_cut_driver_type;
+ struct msm_ir_cut_func_t func_tbl;
+};
+
+struct msm_ir_cut_ctrl_t {
+ struct msm_sd_subdev msm_sd;
+ struct platform_device *pdev;
+ struct msm_ir_cut_func_t *func_tbl;
+ struct msm_camera_power_ctrl_t power_info;
+
+ enum msm_camera_device_type_t ir_cut_device_type;
+ struct mutex *ir_cut_mutex;
+
+ /* ir_cut driver type */
+ enum msm_ir_cut_driver_type ir_cut_driver_type;
+
+ /* ir_cut state */
+ enum msm_camera_ir_cut_state_t ir_cut_state;
+
+ struct msm_camera_gpio_conf *gconf;
+ struct msm_pinctrl_info pinctrl_info;
+ uint8_t cam_pinctrl_status;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ir_led/Makefile b/drivers/media/platform/msm/camera_v2/sensor/ir_led/Makefile
new file mode 100644
index 000000000000..6e99ecc2c78d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/ir_led/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSMB_CAMERA) += msm_ir_led.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ir_led/msm_ir_led.c b/drivers/media/platform/msm/camera_v2/sensor/ir_led/msm_ir_led.c
new file mode 100644
index 000000000000..9af7abc5cd04
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/ir_led/msm_ir_led.c
@@ -0,0 +1,462 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include "msm_ir_led.h"
+#include "msm_camera_dt_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+DEFINE_MSM_MUTEX(msm_ir_led_mutex);
+
+static struct v4l2_file_operations msm_ir_led_v4l2_subdev_fops;
+
+static const struct of_device_id msm_ir_led_dt_match[] = {
+ {.compatible = "qcom,ir-led", .data = NULL},
+ {}
+};
+
+static struct msm_ir_led_table msm_default_ir_led_table;
+
+static struct msm_ir_led_table *ir_led_table[] = {
+ &msm_default_ir_led_table,
+};
+
+static int32_t msm_ir_led_get_subdev_id(
+ struct msm_ir_led_ctrl_t *ir_led_ctrl, void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ CDBG("Enter\n");
+ if (!subdev_id) {
+ pr_err("subdevice ID is not valid\n");
+ return -EINVAL;
+ }
+ if (ir_led_ctrl->ir_led_device_type != MSM_CAMERA_PLATFORM_DEVICE) {
+ pr_err("device type is not matching\n");
+ return -EINVAL;
+ }
+
+ *subdev_id = ir_led_ctrl->pdev->id;
+
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_ir_led_init(
+ struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ struct msm_ir_led_cfg_data_t *ir_led_data)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_off(ir_led_ctrl, ir_led_data);
+
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_ir_led_release(
+ struct msm_ir_led_ctrl_t *ir_led_ctrl)
+{
+ int32_t rc = 0;
+
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_RELEASE) {
+ pr_err("Invalid ir_led state = %d\n",
+ ir_led_ctrl->ir_led_state);
+ return 0;
+ }
+
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_off(ir_led_ctrl, NULL);
+ if (rc < 0) {
+ pr_err("camera_ir_led_off failed (%d)\n", rc);
+ return rc;
+ }
+ ir_led_ctrl->ir_led_state = MSM_CAMERA_IR_LED_RELEASE;
+ return 0;
+}
+
+static int32_t msm_ir_led_off(struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ struct msm_ir_led_cfg_data_t *ir_led_data)
+{
+ CDBG("Enter\n");
+
+ if (ir_led_ctrl->pwm_dev)
+ pwm_disable(ir_led_ctrl->pwm_dev);
+ else
+ pr_err("pwm device is null\n");
+
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_ir_led_on(
+ struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ struct msm_ir_led_cfg_data_t *ir_led_data)
+{
+ int rc;
+
+ CDBG("pwm duty on(ns) %d, pwm period(ns) %d\n",
+ ir_led_data->pwm_duty_on_ns, ir_led_data->pwm_period_ns);
+
+ if (ir_led_ctrl->pwm_dev) {
+ rc = pwm_config(ir_led_ctrl->pwm_dev,
+ ir_led_data->pwm_duty_on_ns,
+ ir_led_data->pwm_period_ns);
+ if (rc) {
+ pr_err("PWM config failed (%d)\n", rc);
+ return rc;
+ }
+
+ rc = pwm_enable(ir_led_ctrl->pwm_dev);
+ if (rc) {
+ pr_err("PWM enable failed(%d)\n", rc);
+ return rc;
+ }
+ } else
+ pr_err("pwm device is null\n");
+
+ return 0;
+}
+
+static int32_t msm_ir_led_handle_init(
+ struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ struct msm_ir_led_cfg_data_t *ir_led_data)
+{
+ uint32_t i = 0;
+ int32_t rc = -EFAULT;
+ enum msm_ir_led_driver_type ir_led_driver_type =
+ ir_led_ctrl->ir_led_driver_type;
+
+ CDBG("Enter\n");
+
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_INIT) {
+ pr_err("Invalid ir_led state = %d\n",
+ ir_led_ctrl->ir_led_state);
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ir_led_table); i++) {
+ if (ir_led_driver_type == ir_led_table[i]->ir_led_driver_type) {
+ ir_led_ctrl->func_tbl = &ir_led_table[i]->func_tbl;
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc < 0) {
+ pr_err("failed invalid ir_led_driver_type %d\n",
+ ir_led_driver_type);
+ return -EINVAL;
+ }
+
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_init(
+ ir_led_ctrl, ir_led_data);
+ if (rc < 0) {
+ pr_err("camera_ir_led_init failed (%d)\n", rc);
+ return rc;
+ }
+
+ ir_led_ctrl->ir_led_state = MSM_CAMERA_IR_LED_INIT;
+
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_ir_led_config(struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ void __user *argp)
+{
+ int32_t rc = -EINVAL;
+ struct msm_ir_led_cfg_data_t *ir_led_data =
+ (struct msm_ir_led_cfg_data_t *) argp;
+
+ mutex_lock(ir_led_ctrl->ir_led_mutex);
+
+ CDBG("type %d\n", ir_led_data->cfg_type);
+
+ switch (ir_led_data->cfg_type) {
+ case CFG_IR_LED_INIT:
+ rc = msm_ir_led_handle_init(ir_led_ctrl, ir_led_data);
+ break;
+ case CFG_IR_LED_RELEASE:
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_INIT)
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_release(
+ ir_led_ctrl);
+ break;
+ case CFG_IR_LED_OFF:
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_INIT)
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_off(
+ ir_led_ctrl, ir_led_data);
+ break;
+ case CFG_IR_LED_ON:
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_INIT)
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_on(
+ ir_led_ctrl, ir_led_data);
+ break;
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+ mutex_unlock(ir_led_ctrl->ir_led_mutex);
+
+ CDBG("Exit: type %d\n", ir_led_data->cfg_type);
+
+ return rc;
+}
+
+static long msm_ir_led_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_ir_led_ctrl_t *fctrl = NULL;
+ void __user *argp = (void __user *)arg;
+
+ CDBG("Enter\n");
+
+ if (!sd) {
+ pr_err(" v4l2 ir led subdevice is NULL\n");
+ return -EINVAL;
+ }
+ fctrl = v4l2_get_subdevdata(sd);
+ if (!fctrl) {
+ pr_err("fctrl NULL\n");
+ return -EINVAL;
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_ir_led_get_subdev_id(fctrl, argp);
+ case VIDIOC_MSM_IR_LED_CFG:
+ return msm_ir_led_config(fctrl, argp);
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_SHUTDOWN:
+ if (!fctrl->func_tbl) {
+ pr_err("No call back funcions\n");
+ return -EINVAL;
+ } else {
+ return fctrl->func_tbl->camera_ir_led_release(fctrl);
+ }
+ default:
+ pr_err_ratelimited("invalid cmd %d\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+ CDBG("Exit\n");
+}
+
+static struct v4l2_subdev_core_ops msm_ir_led_subdev_core_ops = {
+ .ioctl = msm_ir_led_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_ir_led_subdev_ops = {
+ .core = &msm_ir_led_subdev_core_ops,
+};
+
+static int msm_ir_led_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh) {
+
+ int rc = 0;
+ struct msm_ir_led_ctrl_t *ir_led_ctrl = v4l2_get_subdevdata(sd);
+
+ if (!ir_led_ctrl) {
+ pr_err("v4l2 subdevice data read failed\n");
+ return -EINVAL;
+ }
+
+ CDBG("Enter\n");
+
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_INIT)
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_release(
+ ir_led_ctrl);
+
+ CDBG("Exit (%d)\n", rc);
+
+ return rc;
+}
+
+static const struct v4l2_subdev_internal_ops msm_ir_led_internal_ops = {
+ .close = msm_ir_led_close,
+};
+
+static int32_t msm_ir_led_get_dt_data(struct device_node *of_node,
+ struct msm_ir_led_ctrl_t *fctrl)
+{
+ int32_t rc = 0;
+
+ CDBG("called\n");
+
+ /* Read the sub device */
+ rc = of_property_read_u32(of_node, "cell-index", &fctrl->pdev->id);
+ if (rc < 0) {
+ pr_err("reading cell-index for ir-led node is failed(rc) %d\n",
+ rc);
+ return rc;
+ }
+
+ fctrl->ir_led_driver_type = IR_LED_DRIVER_DEFAULT;
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_ir_led_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ int32_t rc = 0;
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct msm_ir_led_cfg_data_t32 *u32 =
+ (struct msm_ir_led_cfg_data_t32 *)arg;
+ struct msm_ir_led_cfg_data_t ir_led_data;
+
+ CDBG("Enter\n");
+ ir_led_data.cfg_type = u32->cfg_type;
+ ir_led_data.pwm_duty_on_ns = u32->pwm_duty_on_ns;
+ ir_led_data.pwm_period_ns = u32->pwm_period_ns;
+
+ switch (cmd) {
+ case VIDIOC_MSM_IR_LED_CFG32:
+ cmd = VIDIOC_MSM_IR_LED_CFG;
+ break;
+ default:
+ return msm_ir_led_subdev_ioctl(sd, cmd, arg);
+ }
+
+ rc = msm_ir_led_subdev_ioctl(sd, cmd, &ir_led_data);
+
+ CDBG("Exit\n");
+ return rc;
+}
+
+static long msm_ir_led_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_ir_led_subdev_do_ioctl);
+}
+#endif
+
+static int32_t msm_ir_led_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_ir_led_ctrl_t *ir_led_ctrl = NULL;
+
+ CDBG("Enter\n");
+ if (!pdev->dev.of_node) {
+ pr_err("IR LED device node is not present in device tree\n");
+ return -EINVAL;
+ }
+
+ ir_led_ctrl = devm_kzalloc(&pdev->dev, sizeof(struct msm_ir_led_ctrl_t),
+ GFP_KERNEL);
+ if (!ir_led_ctrl)
+ return -ENOMEM;
+
+ ir_led_ctrl->pdev = pdev;
+
+ /* Reading PWM device node */
+ ir_led_ctrl->pwm_dev = of_pwm_get(pdev->dev.of_node, NULL);
+
+ if (IS_ERR(ir_led_ctrl->pwm_dev)) {
+ rc = PTR_ERR(ir_led_ctrl->pwm_dev);
+ pr_err("Cannot get PWM device (%d)\n", rc);
+ ir_led_ctrl->pwm_dev = NULL;
+ }
+
+ rc = msm_ir_led_get_dt_data(pdev->dev.of_node, ir_led_ctrl);
+ if (rc < 0) {
+ pr_err("msm_ir_led_get_dt_data failed\n");
+ devm_kfree(&pdev->dev, ir_led_ctrl);
+ return -EINVAL;
+ }
+
+ ir_led_ctrl->ir_led_state = MSM_CAMERA_IR_LED_RELEASE;
+ ir_led_ctrl->power_info.dev = &ir_led_ctrl->pdev->dev;
+ ir_led_ctrl->ir_led_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ ir_led_ctrl->ir_led_mutex = &msm_ir_led_mutex;
+
+ /* Initialize sub device */
+ v4l2_subdev_init(&ir_led_ctrl->msm_sd.sd, &msm_ir_led_subdev_ops);
+ v4l2_set_subdevdata(&ir_led_ctrl->msm_sd.sd, ir_led_ctrl);
+
+ ir_led_ctrl->msm_sd.sd.internal_ops = &msm_ir_led_internal_ops;
+ ir_led_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(ir_led_ctrl->msm_sd.sd.name,
+ ARRAY_SIZE(ir_led_ctrl->msm_sd.sd.name),
+ "msm_camera_ir_led");
+ media_entity_init(&ir_led_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ ir_led_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ir_led_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_IR_LED;
+ ir_led_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x1;
+ msm_sd_register(&ir_led_ctrl->msm_sd);
+
+ CDBG("ir_led sd name = %s\n",
+ ir_led_ctrl->msm_sd.sd.entity.name);
+ msm_ir_led_v4l2_subdev_fops = v4l2_subdev_fops;
+#ifdef CONFIG_COMPAT
+ msm_ir_led_v4l2_subdev_fops.compat_ioctl32 =
+ msm_ir_led_subdev_fops_ioctl;
+#endif
+ ir_led_ctrl->msm_sd.sd.devnode->fops = &msm_ir_led_v4l2_subdev_fops;
+
+ CDBG("probe success\n");
+ return rc;
+}
+
+MODULE_DEVICE_TABLE(of, msm_ir_led_dt_match);
+
+static struct platform_driver msm_ir_led_platform_driver = {
+ .probe = msm_ir_led_platform_probe,
+ .driver = {
+ .name = "qcom,ir-led",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ir_led_dt_match,
+ },
+};
+
+static int __init msm_ir_led_init_module(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_ir_led_platform_driver);
+ if (!rc)
+ return rc;
+
+ pr_err("ir-led driver register failed (%d)\n", rc);
+
+ return rc;
+}
+
+static void __exit msm_ir_led_exit_module(void)
+{
+ platform_driver_unregister(&msm_ir_led_platform_driver);
+}
+
+static struct msm_ir_led_table msm_default_ir_led_table = {
+ .ir_led_driver_type = IR_LED_DRIVER_DEFAULT,
+ .func_tbl = {
+ .camera_ir_led_init = msm_ir_led_init,
+ .camera_ir_led_release = msm_ir_led_release,
+ .camera_ir_led_off = msm_ir_led_off,
+ .camera_ir_led_on = msm_ir_led_on,
+ },
+};
+
+module_init(msm_ir_led_init_module);
+module_exit(msm_ir_led_exit_module);
+MODULE_DESCRIPTION("MSM IR LED");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ir_led/msm_ir_led.h b/drivers/media/platform/msm/camera_v2/sensor/ir_led/msm_ir_led.h
new file mode 100644
index 000000000000..a0923ffc81da
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/ir_led/msm_ir_led.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MSM_IR_LED_H
+#define MSM_IR_LED_H
+
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/msm_cam_sensor.h>
+#include <soc/qcom/camera2.h>
+#include "msm_sd.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+enum msm_camera_ir_led_state_t {
+ MSM_CAMERA_IR_LED_INIT,
+ MSM_CAMERA_IR_LED_RELEASE,
+};
+
+enum msm_ir_led_driver_type {
+ IR_LED_DRIVER_GPIO,
+ IR_LED_DRIVER_DEFAULT,
+};
+
+struct msm_ir_led_ctrl_t;
+
+struct msm_ir_led_func_t {
+ int32_t (*camera_ir_led_init)(struct msm_ir_led_ctrl_t *,
+ struct msm_ir_led_cfg_data_t *);
+ int32_t (*camera_ir_led_release)(struct msm_ir_led_ctrl_t *);
+ int32_t (*camera_ir_led_off)(struct msm_ir_led_ctrl_t *,
+ struct msm_ir_led_cfg_data_t *);
+ int32_t (*camera_ir_led_on)(struct msm_ir_led_ctrl_t *,
+ struct msm_ir_led_cfg_data_t *);
+};
+
+struct msm_ir_led_table {
+ enum msm_ir_led_driver_type ir_led_driver_type;
+ struct msm_ir_led_func_t func_tbl;
+};
+
+struct msm_ir_led_ctrl_t {
+ struct msm_sd_subdev msm_sd;
+ struct platform_device *pdev;
+ struct pwm_device *pwm_dev;
+ struct msm_ir_led_func_t *func_tbl;
+ struct msm_camera_power_ctrl_t power_info;
+
+ enum msm_camera_device_type_t ir_led_device_type;
+ struct mutex *ir_led_mutex;
+
+ /* ir_led driver type */
+ enum msm_ir_led_driver_type ir_led_driver_type;
+
+ /* ir_led state */
+ enum msm_camera_ir_led_state_t ir_led_state;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 43aadffa2983..86e7837cc02a 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -1167,6 +1167,7 @@ static int32_t msm_sensor_driver_parse(struct msm_sensor_ctrl_t *s_ctrl)
if (!s_ctrl->msm_sensor_mutex) {
pr_err("failed: no memory msm_sensor_mutex %pK",
s_ctrl->msm_sensor_mutex);
+ rc = -ENOMEM;
goto FREE_SENSOR_I2C_CLIENT;
}
diff --git a/drivers/media/platform/msm/dvb/Kconfig b/drivers/media/platform/msm/dvb/Kconfig
new file mode 100644
index 000000000000..e205c8172075
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/Kconfig
@@ -0,0 +1,10 @@
+config DVB_MPQ
+ tristate "Qualcomm Technologies Inc Multimedia Processor DVB Adapter"
+ depends on ARCH_QCOM && DVB_CORE
+ default n
+
+ help
+ Support for the Qualcomm Technologies Inc. MPQ-based DVB adapter.
+ Say Y or M if you own such a device and want to use it.
+
+source "drivers/media/platform/msm/dvb/demux/Kconfig"
diff --git a/drivers/media/platform/msm/dvb/Makefile b/drivers/media/platform/msm/dvb/Makefile
new file mode 100644
index 000000000000..862ebca24db9
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_DVB_MPQ) += adapter/
+obj-$(CONFIG_DVB_MPQ_DEMUX) += demux/
diff --git a/drivers/media/platform/msm/dvb/adapter/Makefile b/drivers/media/platform/msm/dvb/adapter/Makefile
new file mode 100644
index 000000000000..f7da6b5b2f06
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/platform/msm/dvb/include/
+
+obj-$(CONFIG_DVB_MPQ) += mpq-adapter.o
+
+mpq-adapter-y := mpq_adapter.o mpq_stream_buffer.o
diff --git a/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c b/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c
new file mode 100644
index 000000000000..a3d2533485b5
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c
@@ -0,0 +1,211 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "mpq_adapter.h"
+#include "mpq_dvb_debug.h"
+
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+/* data-structure holding MPQ adapter information */
+static struct
+{
+ /* MPQ adapter registered to dvb-core */
+ struct dvb_adapter adapter;
+
+ /* mutex protecting the data-structure */
+ struct mutex mutex;
+
+ /* List of stream interfaces registered to the MPQ adapter */
+ struct {
+ /* pointer to the stream buffer used for data tunneling */
+ struct mpq_streambuffer *stream_buffer;
+
+ /* callback triggered when the stream interface is registered */
+ mpq_adapter_stream_if_callback callback;
+
+ /* parameter passed to the callback function */
+ void *user_param;
+ } interfaces[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+} mpq_info;
+
+
+/**
+ * Initialize MPQ DVB adapter module.
+ *
+ * Return error status
+ */
+static int __init mpq_adapter_init(void)
+{
+ int i;
+ int result;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ mutex_init(&mpq_info.mutex);
+
+ /* reset stream interfaces list */
+ for (i = 0; i < MPQ_ADAPTER_MAX_NUM_OF_INTERFACES; i++) {
+ mpq_info.interfaces[i].stream_buffer = NULL;
+ mpq_info.interfaces[i].callback = NULL;
+ }
+
+ /* register a new dvb-adapter with dvb-core */
+ result = dvb_register_adapter(&mpq_info.adapter,
+ "Qualcomm DVB adapter",
+ THIS_MODULE,
+ NULL,
+ adapter_nr);
+
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: dvb_register_adapter failed, errno %d\n",
+ __func__,
+ result);
+ }
+
+ return result;
+}
+
+
+/**
+ * Cleanup MPQ DVB adapter module.
+ */
+static void __exit mpq_adapter_exit(void)
+{
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ /* unregister the adapter from dvb-core */
+ dvb_unregister_adapter(&mpq_info.adapter);
+ mutex_destroy(&mpq_info.mutex);
+}
+
+struct dvb_adapter *mpq_adapter_get(void)
+{
+ return &mpq_info.adapter;
+}
+EXPORT_SYMBOL(mpq_adapter_get);
+
+
+int mpq_adapter_register_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer *stream_buffer)
+{
+ int ret;
+
+ if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) {
+ ret = -EINVAL;
+ goto register_failed;
+ }
+
+ if (mutex_lock_interruptible(&mpq_info.mutex)) {
+ ret = -ERESTARTSYS;
+ goto register_failed;
+ }
+
+ if (mpq_info.interfaces[interface_id].stream_buffer != NULL) {
+ /* already registered interface */
+ ret = -EINVAL;
+ goto register_failed_unlock_mutex;
+ }
+
+ mpq_info.interfaces[interface_id].stream_buffer = stream_buffer;
+ mutex_unlock(&mpq_info.mutex);
+
+ /*
+ * If callback is installed, trigger it to notify that
+ * stream interface was registered.
+ */
+ if (mpq_info.interfaces[interface_id].callback != NULL) {
+ mpq_info.interfaces[interface_id].callback(
+ interface_id,
+ mpq_info.interfaces[interface_id].user_param);
+ }
+
+ return 0;
+
+register_failed_unlock_mutex:
+ mutex_unlock(&mpq_info.mutex);
+register_failed:
+ return ret;
+}
+EXPORT_SYMBOL(mpq_adapter_register_stream_if);
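+
+/*
+ * Illustrative producer-side registration (a sketch only; buffer setup
+ * and error handling are assumed to be done by the caller):
+ *
+ *   enum mpq_adapter_stream_if if_id = ...;  // chosen interface id
+ *   struct mpq_streambuffer *sb = ...;       // initialized elsewhere
+ *
+ *   if (!mpq_adapter_register_stream_if(if_id, sb)) {
+ *           ... stream data through sb ...
+ *           mpq_adapter_unregister_stream_if(if_id);
+ *   }
+ */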
+
+
+int mpq_adapter_unregister_stream_if(
+ enum mpq_adapter_stream_if interface_id)
+{
+ if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&mpq_info.mutex))
+ return -ERESTARTSYS;
+
+ /* clear the registered interface */
+ mpq_info.interfaces[interface_id].stream_buffer = NULL;
+
+ mutex_unlock(&mpq_info.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_unregister_stream_if);
+
+
+int mpq_adapter_get_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer **stream_buffer)
+{
+ if ((interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) ||
+ (stream_buffer == NULL))
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&mpq_info.mutex))
+ return -ERESTARTSYS;
+
+ *stream_buffer = mpq_info.interfaces[interface_id].stream_buffer;
+
+ mutex_unlock(&mpq_info.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_get_stream_if);
+
+
+int mpq_adapter_notify_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ mpq_adapter_stream_if_callback callback,
+ void *user_param)
+{
+ if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&mpq_info.mutex))
+ return -ERESTARTSYS;
+
+ mpq_info.interfaces[interface_id].callback = callback;
+ mpq_info.interfaces[interface_id].user_param = user_param;
+
+ mutex_unlock(&mpq_info.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_notify_stream_if);
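+
+/*
+ * Illustrative consumer-side usage (a sketch): poll for the interface,
+ * or install a callback (invoked with the interface id and user_param)
+ * to be notified once the producer registers it. my_cb and my_ctx are
+ * hypothetical caller-provided names.
+ *
+ *   struct mpq_streambuffer *sb;
+ *
+ *   mpq_adapter_get_stream_if(if_id, &sb);
+ *   if (sb == NULL)
+ *           mpq_adapter_notify_stream_if(if_id, my_cb, my_ctx);
+ */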
+
+
+module_init(mpq_adapter_init);
+module_exit(mpq_adapter_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. MPQ adapter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c b/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c
new file mode 100644
index 000000000000..97533081766a
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c
@@ -0,0 +1,827 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_stream_buffer.h"
+
+
+int mpq_streambuffer_init(
+ struct mpq_streambuffer *sbuff,
+ enum mpq_streambuffer_mode mode,
+ struct mpq_streambuffer_buffer_desc *data_buffers,
+ u32 data_buff_num,
+ void *packet_buff,
+ size_t packet_buff_size)
+{
+ if ((sbuff == NULL) || (data_buffers == NULL) ||
+ (packet_buff == NULL) || (data_buff_num == 0))
+ return -EINVAL;
+
+ if (data_buff_num > 1) {
+ if (mode != MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+ return -EINVAL;
+ /* Linear buffer group */
+ dvb_ringbuffer_init(
+ &sbuff->raw_data,
+ data_buffers,
+ data_buff_num *
+ sizeof(struct mpq_streambuffer_buffer_desc));
+ } else {
+ if (mode != MPQ_STREAMBUFFER_BUFFER_MODE_RING)
+ return -EINVAL;
+ /* Single ring-buffer */
+ dvb_ringbuffer_init(&sbuff->raw_data,
+ data_buffers[0].base, data_buffers[0].size);
+ }
+ sbuff->mode = mode;
+ sbuff->buffers = data_buffers;
+ sbuff->pending_buffers_count = 0;
+ sbuff->buffers_num = data_buff_num;
+ sbuff->cb = NULL;
+ dvb_ringbuffer_init(&sbuff->packet_data, packet_buff, packet_buff_size);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_init);
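+
+/*
+ * Illustrative initialization (a sketch; buffer allocation is the
+ * caller's responsibility and is assumed here):
+ *
+ *   struct mpq_streambuffer sb;
+ *   struct mpq_streambuffer_buffer_desc desc = { .base = ..., .size = ... };
+ *   u8 pkt_buf[1024];
+ *
+ *   // a single descriptor must use ring mode:
+ *   mpq_streambuffer_init(&sb, MPQ_STREAMBUFFER_BUFFER_MODE_RING,
+ *                         &desc, 1, pkt_buf, sizeof(pkt_buf));
+ *
+ *   // an array of descriptors must use linear mode:
+ *   mpq_streambuffer_init(&sb, MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR,
+ *                         descs, num_descs, pkt_buf, sizeof(pkt_buf));
+ */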
+
+void mpq_streambuffer_terminate(struct mpq_streambuffer *sbuff)
+{
+ spin_lock(&sbuff->packet_data.lock);
+ spin_lock(&sbuff->raw_data.lock);
+ sbuff->packet_data.error = -ENODEV;
+ sbuff->raw_data.error = -ENODEV;
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+
+ wake_up_all(&sbuff->raw_data.queue);
+ wake_up_all(&sbuff->packet_data.queue);
+}
+EXPORT_SYMBOL(mpq_streambuffer_terminate);
+
+ssize_t mpq_streambuffer_pkt_next(
+ struct mpq_streambuffer *sbuff,
+ ssize_t idx, size_t *pktlen)
+{
+ ssize_t packet_idx;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* buffer was released, return no packet available */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ packet_idx = dvb_ringbuffer_pkt_next(&sbuff->packet_data, idx, pktlen);
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return packet_idx;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_next);
+
+
+ssize_t mpq_streambuffer_pkt_read(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data)
+{
+ size_t ret;
+ size_t read_len;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* buffer was released, return no packet available */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* read-out the packet header first */
+ ret = dvb_ringbuffer_pkt_read(
+ &sbuff->packet_data, idx, 0,
+ (u8 *)packet,
+ sizeof(struct mpq_streambuffer_packet_header));
+
+ /* verify length, at least packet header should exist */
+ if (ret != sizeof(struct mpq_streambuffer_packet_header)) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -EINVAL;
+ }
+
+ read_len = ret;
+
+ /* read out private user-data if present */
+ if ((packet->user_data_len) && (user_data != NULL)) {
+ ret = dvb_ringbuffer_pkt_read(
+ &sbuff->packet_data,
+ idx,
+ sizeof(struct mpq_streambuffer_packet_header),
+ user_data,
+ packet->user_data_len);
+
+ if (ret < 0) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return ret;
+ }
+
+ read_len += ret;
+ }
+
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return read_len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_read);
+
+
+int mpq_streambuffer_pkt_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ int dispose_data)
+{
+ int ret;
+ struct mpq_streambuffer_packet_header packet;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* read-out the packet header first */
+ ret = dvb_ringbuffer_pkt_read(&sbuff->packet_data, idx,
+ 0,
+ (u8 *)&packet,
+ sizeof(struct mpq_streambuffer_packet_header));
+
+ spin_unlock(&sbuff->packet_data.lock);
+
+ if (ret != sizeof(struct mpq_streambuffer_packet_header))
+ return -EINVAL;
+
+ if ((sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) ||
+ (dispose_data)) {
+ /* Advance the read pointer in the raw-data buffer first */
+ ret = mpq_streambuffer_data_read_dispose(sbuff,
+ packet.raw_data_len);
+ if (ret != 0)
+ return ret;
+ }
+
+ spin_lock(&sbuff->packet_data.lock);
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if ((sbuff->packet_data.error == -ENODEV) ||
+ (sbuff->raw_data.error == -ENODEV)) {
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* Move read pointer to the next linear buffer for subsequent reads */
+ if ((sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) &&
+ (packet.raw_data_len > 0)) {
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+
+ desc->write_ptr = 0;
+ desc->read_ptr = 0;
+
+ DVB_RINGBUFFER_SKIP(&sbuff->raw_data,
+ sizeof(struct mpq_streambuffer_buffer_desc));
+ sbuff->pending_buffers_count--;
+
+ wake_up_all(&sbuff->raw_data.queue);
+ }
+
+ /* Now clear the packet from the packet header */
+ dvb_ringbuffer_pkt_dispose(&sbuff->packet_data, idx);
+
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_dispose);
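+
+/*
+ * Illustrative consumer flow for ring mode (a sketch; error handling
+ * omitted): walk the packet queue, read each header and its payload,
+ * then dispose of the packet.
+ *
+ *   struct mpq_streambuffer_packet_header pkt;
+ *   size_t len;
+ *   ssize_t idx = mpq_streambuffer_pkt_next(sb, -1, &len);
+ *
+ *   while (idx >= 0) {
+ *           mpq_streambuffer_pkt_read(sb, idx, &pkt, NULL);
+ *           mpq_streambuffer_data_read(sb, buf, pkt.raw_data_len);
+ *           mpq_streambuffer_pkt_dispose(sb, idx, 0);
+ *           idx = mpq_streambuffer_pkt_next(sb, -1, &len);
+ *   }
+ */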
+
+int mpq_streambuffer_pkt_write(
+ struct mpq_streambuffer *sbuff,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data)
+{
+ ssize_t idx;
+ size_t len;
+
+ if ((sbuff == NULL) || (packet == NULL))
+ return -EINVAL;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* Make sure we can go to the next linear buffer */
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR &&
+ sbuff->pending_buffers_count == sbuff->buffers_num &&
+ packet->raw_data_len) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENOSPC;
+ }
+
+ len = sizeof(struct mpq_streambuffer_packet_header) +
+ packet->user_data_len;
+
+ /* Make sure enough space is available for the packet header and user data */
+ if (dvb_ringbuffer_free(&sbuff->packet_data) <
+ (len + DVB_RINGBUFFER_PKTHDRSIZE)) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENOSPC;
+ }
+
+ /* Start writing the packet header */
+ idx = dvb_ringbuffer_pkt_start(&sbuff->packet_data, len);
+
+ /* Write the packet header (non-user data) */
+ dvb_ringbuffer_write(&sbuff->packet_data,
+ (u8 *)packet,
+ sizeof(struct mpq_streambuffer_packet_header));
+
+ /* Write the user's private data */
+ dvb_ringbuffer_write(&sbuff->packet_data,
+ user_data,
+ packet->user_data_len);
+
+ dvb_ringbuffer_pkt_close(&sbuff->packet_data, idx);
+
+ /* Move write pointer to next linear buffer for subsequent writes */
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR &&
+ packet->raw_data_len) {
+ DVB_RINGBUFFER_PUSH(&sbuff->raw_data,
+ sizeof(struct mpq_streambuffer_buffer_desc));
+ sbuff->pending_buffers_count++;
+ }
+
+ spin_unlock(&sbuff->packet_data.lock);
+ wake_up_all(&sbuff->packet_data.queue);
+
+ return idx;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_write);
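+
+/*
+ * Illustrative producer flow (a sketch; error handling omitted): the raw
+ * payload is written first, then a packet header describing it is
+ * committed, which wakes up waiting readers.
+ *
+ *   struct mpq_streambuffer_packet_header pkt = {0};
+ *
+ *   mpq_streambuffer_data_write(sb, payload, payload_len);
+ *   pkt.raw_data_len = payload_len;
+ *   pkt.user_data_len = 0;
+ *   mpq_streambuffer_pkt_write(sb, &pkt, NULL);
+ */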
+
+ssize_t mpq_streambuffer_data_write(
+ struct mpq_streambuffer *sbuff,
+ const u8 *buf, size_t len)
+{
+ int res;
+
+ if ((sbuff == NULL) || (buf == NULL))
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ if (unlikely(dvb_ringbuffer_free(&sbuff->raw_data) < len)) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENOSPC;
+ }
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (sbuff->raw_data.data == NULL) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -EPERM;
+ }
+ res = dvb_ringbuffer_write(&sbuff->raw_data, buf, len);
+ wake_up_all(&sbuff->raw_data.queue);
+ } else {
+ /* Linear buffer group */
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (desc->base == NULL) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -EPERM;
+ }
+
+ if ((sbuff->pending_buffers_count == sbuff->buffers_num) ||
+ ((desc->size - desc->write_ptr) < len)) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: No space available! %d pending buffers out of %d total buffers. write_ptr=%d, size=%d\n",
+ __func__,
+ sbuff->pending_buffers_count,
+ sbuff->buffers_num,
+ desc->write_ptr,
+ desc->size);
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENOSPC;
+ }
+ memcpy(desc->base + desc->write_ptr, buf, len);
+ desc->write_ptr += len;
+ res = len;
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+ return res;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_write);
+
+
+int mpq_streambuffer_data_write_deposit(
+ struct mpq_streambuffer *sbuff,
+ size_t len)
+{
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ if (unlikely(dvb_ringbuffer_free(&sbuff->raw_data) < len)) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENOSPC;
+ }
+
+ DVB_RINGBUFFER_PUSH(&sbuff->raw_data, len);
+ wake_up_all(&sbuff->raw_data.queue);
+ } else {
+ /* Linear buffer group */
+ struct mpq_streambuffer_buffer_desc *desc =
+ (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+ if ((sbuff->pending_buffers_count == sbuff->buffers_num) ||
+ ((desc->size - desc->write_ptr) < len)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: No space available!\n",
+ __func__);
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENOSPC;
+ }
+ desc->write_ptr += len;
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_write_deposit);
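+
+/*
+ * Note: unlike mpq_streambuffer_data_write(), the deposit variant only
+ * advances the write pointer; it is presumably intended for data that was
+ * placed in the buffer by other means (e.g. by HW). Illustrative use:
+ *
+ *   ... HW/DMA fills payload_len bytes at the current write offset ...
+ *   mpq_streambuffer_data_write_deposit(sb, payload_len);
+ */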
+
+
+ssize_t mpq_streambuffer_data_read(
+ struct mpq_streambuffer *sbuff,
+ u8 *buf, size_t len)
+{
+ ssize_t actual_len = 0;
+ u32 offset;
+
+ if ((sbuff == NULL) || (buf == NULL))
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (sbuff->raw_data.data == NULL) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -EPERM;
+ }
+
+ offset = sbuff->raw_data.pread;
+ actual_len = dvb_ringbuffer_avail(&sbuff->raw_data);
+ if (actual_len < len)
+ len = actual_len;
+ if (len)
+ dvb_ringbuffer_read(&sbuff->raw_data, buf, len);
+
+ wake_up_all(&sbuff->raw_data.queue);
+ } else {
+ /* Linear buffer group */
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (desc->base == NULL) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -EPERM;
+ }
+
+ actual_len = (desc->write_ptr - desc->read_ptr);
+ if (actual_len < len)
+ len = actual_len;
+ memcpy(buf, desc->base + desc->read_ptr, len);
+ offset = desc->read_ptr;
+ desc->read_ptr += len;
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ if (sbuff->cb)
+ sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+ return len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read);
+
+
+ssize_t mpq_streambuffer_data_read_user(
+ struct mpq_streambuffer *sbuff,
+ u8 __user *buf, size_t len)
+{
+ ssize_t actual_len = 0;
+ u32 offset;
+
+ if ((sbuff == NULL) || (buf == NULL))
+ return -EINVAL;
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV)
+ return -ENODEV;
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (sbuff->raw_data.data == NULL)
+ return -EPERM;
+
+ offset = sbuff->raw_data.pread;
+ actual_len = dvb_ringbuffer_avail(&sbuff->raw_data);
+ if (actual_len < len)
+ len = actual_len;
+ if (len)
+ dvb_ringbuffer_read_user(&sbuff->raw_data, buf, len);
+ wake_up_all(&sbuff->raw_data.queue);
+ } else {
+ /* Linear buffer group */
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (desc->base == NULL)
+ return -EPERM;
+
+ actual_len = (desc->write_ptr - desc->read_ptr);
+ if (actual_len < len)
+ len = actual_len;
+ if (copy_to_user(buf, desc->base + desc->read_ptr, len))
+ return -EFAULT;
+
+ offset = desc->read_ptr;
+ desc->read_ptr += len;
+ }
+
+ if (sbuff->cb)
+ sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+ return len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read_user);
+
+int mpq_streambuffer_data_read_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t len)
+{
+ u32 offset;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ if (unlikely(dvb_ringbuffer_avail(&sbuff->raw_data) < len)) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -EINVAL;
+ }
+
+ offset = sbuff->raw_data.pread;
+ DVB_RINGBUFFER_SKIP(&sbuff->raw_data, len);
+ wake_up_all(&sbuff->raw_data.queue);
+ } else {
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+ offset = desc->read_ptr;
+
+ if ((desc->read_ptr + len) > desc->size)
+ desc->read_ptr = desc->size;
+ else
+ desc->read_ptr += len;
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ if (sbuff->cb)
+ sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read_dispose);
+
+
+int mpq_streambuffer_get_buffer_handle(
+ struct mpq_streambuffer *sbuff,
+ int read_buffer,
+ int *handle)
+{
+ struct mpq_streambuffer_buffer_desc *desc = NULL;
+
+ if ((sbuff == NULL) || (handle == NULL))
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ *handle = sbuff->buffers[0].handle;
+ } else {
+ if (read_buffer)
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+ else
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+ *handle = desc->handle;
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_get_buffer_handle);
+
+
+int mpq_streambuffer_register_data_dispose(
+ struct mpq_streambuffer *sbuff,
+ mpq_streambuffer_dispose_cb cb_func,
+ void *user_data)
+{
+ if ((sbuff == NULL) || (cb_func == NULL))
+ return -EINVAL;
+
+ sbuff->cb = cb_func;
+ sbuff->cb_user_data = user_data;
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_register_data_dispose);
+
+
+ssize_t mpq_streambuffer_data_free(
+ struct mpq_streambuffer *sbuff)
+{
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return dvb_ringbuffer_free(&sbuff->raw_data);
+ }
+
+ if (sbuff->pending_buffers_count == sbuff->buffers_num) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return 0;
+ }
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ return desc->size - desc->write_ptr;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_free);
+
+
+ssize_t mpq_streambuffer_data_avail(
+ struct mpq_streambuffer *sbuff)
+{
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ ssize_t avail = dvb_ringbuffer_avail(&sbuff->raw_data);
+
+ spin_unlock(&sbuff->raw_data.lock);
+ return avail;
+ }
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ return desc->write_ptr - desc->read_ptr;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_avail);
+
+int mpq_streambuffer_get_data_rw_offset(
+ struct mpq_streambuffer *sbuff,
+ u32 *read_offset,
+ u32 *write_offset)
+{
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ if (read_offset)
+ *read_offset = sbuff->raw_data.pread;
+ if (write_offset)
+ *write_offset = sbuff->raw_data.pwrite;
+ } else {
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ if (read_offset) {
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+ *read_offset = desc->read_ptr;
+ }
+ if (write_offset) {
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+ *write_offset = desc->write_ptr;
+ }
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_get_data_rw_offset);
+
+ssize_t mpq_streambuffer_metadata_free(struct mpq_streambuffer *sbuff)
+{
+ ssize_t free;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ free = dvb_ringbuffer_free(&sbuff->packet_data);
+
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return free;
+}
+EXPORT_SYMBOL(mpq_streambuffer_metadata_free);
+
+int mpq_streambuffer_flush(struct mpq_streambuffer *sbuff)
+{
+ struct mpq_streambuffer_buffer_desc *desc;
+ size_t len;
+ int idx;
+ int ret = 0;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->packet_data.lock);
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* Check if buffer was released */
+ if (sbuff->packet_data.error == -ENODEV ||
+ sbuff->raw_data.error == -ENODEV) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+ while (sbuff->pending_buffers_count) {
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+ desc->write_ptr = 0;
+ desc->read_ptr = 0;
+ DVB_RINGBUFFER_SKIP(&sbuff->raw_data,
+ sizeof(struct mpq_streambuffer_buffer_desc));
+ sbuff->pending_buffers_count--;
+ }
+ else
+ dvb_ringbuffer_flush(&sbuff->raw_data);
+
+ /*
+ * Dispose all packets (simply flushing is not enough since we want
+ * the packets' status to move to disposed).
+ */
+ do {
+ idx = dvb_ringbuffer_pkt_next(&sbuff->packet_data, -1, &len);
+ if (idx >= 0)
+ dvb_ringbuffer_pkt_dispose(&sbuff->packet_data, idx);
+ } while (idx >= 0);
+
+end:
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+ return ret;
+}
+EXPORT_SYMBOL(mpq_streambuffer_flush);
diff --git a/drivers/media/platform/msm/dvb/demux/Kconfig b/drivers/media/platform/msm/dvb/demux/Kconfig
new file mode 100644
index 000000000000..319e2ab2eb96
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/Kconfig
@@ -0,0 +1,46 @@
+config DVB_MPQ_DEMUX
+ tristate "DVB Demux Device"
+ depends on DVB_MPQ && ION && ION_MSM
+ default n
+
+ help
+ Support for the Qualcomm Technologies Inc. MPQ-based DVB demux device.
+ Say Y or M if you own such a device and want to use it.
+ The demux device is used for stream playback either
+ from the TSIF interface or from the DVR interface.
+
+config DVB_MPQ_NUM_DMX_DEVICES
+ int "Number of demux devices"
+ depends on DVB_MPQ_DEMUX
+ default 4
+ range 1 255
+
+ help
+ Configure the number of demux devices.
+ Choose according to the maximum number of concurrent stream
+ playbacks required by your use-cases.
+
+choice
+ prompt "Demux Hardware Plugin"
+ depends on DVB_MPQ_DEMUX
+ default DVB_MPQ_TSPP1
+ help
+ Enable support for a specific demux HW plugin depending on the available HW.
+ Depending on the enabled HW, the demux may take advantage of HW capabilities
+ to perform some tasks in HW instead of SW.
+
+ config DVB_MPQ_TSPP1
+ bool "TSPPv1 plugin"
+ depends on TSPP
+ help
+ Use this option if your HW has
+ Transport Stream Packet Processor (TSPP) version 1 support.
+ The demux may take advantage of HW capabilities to perform
+ some tasks in HW instead of SW.
+
+ config DVB_MPQ_SW
+ bool "Software plugin"
+ help
+ Use this option if your HW does not have any
+ TSPP hardware support. All demux tasks will be
+ performed in SW.
+endchoice
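+
+# Illustrative configuration (an example, not a requirement): a target with
+# TSPP v1 hardware might use:
+#   CONFIG_DVB_MPQ=m
+#   CONFIG_DVB_MPQ_DEMUX=m
+#   CONFIG_DVB_MPQ_NUM_DMX_DEVICES=4
+#   CONFIG_DVB_MPQ_TSPP1=y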
diff --git a/drivers/media/platform/msm/dvb/demux/Makefile b/drivers/media/platform/msm/dvb/demux/Makefile
new file mode 100644
index 000000000000..c08fa85a8d5d
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/Makefile
@@ -0,0 +1,14 @@
+
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/platform/msm/dvb/include/
+ccflags-y += -Idrivers/misc/
+
+obj-$(CONFIG_DVB_MPQ_DEMUX) += mpq-dmx-hw-plugin.o
+
+mpq-dmx-hw-plugin-y := mpq_dmx_plugin_common.o
+
+mpq-dmx-hw-plugin-$(CONFIG_QSEECOM) += mpq_sdmx.o
+
+mpq-dmx-hw-plugin-$(CONFIG_DVB_MPQ_TSPP1) += mpq_dmx_plugin_tspp_v1.o
+
+mpq-dmx-hw-plugin-$(CONFIG_DVB_MPQ_SW) += mpq_dmx_plugin_sw.o
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
new file mode 100644
index 000000000000..d1e3c090c972
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -0,0 +1,5195 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/file.h>
+#include <linux/scatterlist.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+#include "mpq_sdmx.h"
+
+#define SDMX_MAJOR_VERSION_MATCH (8)
+
+/* Length of mandatory fields that must exist in header of video PES */
+#define PES_MANDATORY_FIELDS_LEN 9
+
+/* Index of first byte in TS packet holding STC */
+#define STC_LOCATION_IDX 188
+
+#define MAX_PES_LENGTH (SZ_64K)
+
+#define MAX_TS_PACKETS_FOR_SDMX_PROCESS (500)
+
+/*
+ * The PES header length field is 8 bits, so the PES header data following
+ * this field can be up to 255 bytes; 256 is used below as a safe upper
+ * bound. The preceding fields of the PES header total 9 bytes
+ * (including the PES header length field).
+ */
+#define MAX_PES_HEADER_LENGTH (256 + PES_MANDATORY_FIELDS_LEN)
+
+/* TS packet with adaptation field only can take up the entire TSP */
+#define MAX_TSP_ADAPTATION_LENGTH (184)
+
+#define MAX_SDMX_METADATA_LENGTH \
+ (TS_PACKET_HEADER_LENGTH + \
+ MAX_TSP_ADAPTATION_LENGTH + \
+ MAX_PES_HEADER_LENGTH)
+
+#define SDMX_METADATA_BUFFER_SIZE (64*1024)
+#define SDMX_SECTION_BUFFER_SIZE (64*1024)
+#define SDMX_PCR_BUFFER_SIZE (64*1024)
+
+/* Number of demux devices, defaults to the kernel configuration value */
+static int mpq_demux_device_num = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+module_param(mpq_demux_device_num, int, S_IRUGO);
+
+/* ION heap IDs used for allocating video output buffer */
+static int video_secure_ion_heap = ION_CP_MM_HEAP_ID;
+module_param(video_secure_ion_heap, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(video_secure_ion_heap, "ION heap for secure video buffer allocation");
+
+static int video_nonsecure_ion_heap = ION_IOMMU_HEAP_ID;
+module_param(video_nonsecure_ion_heap, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(video_nonsecure_ion_heap, "ION heap for non-secure video buffer allocation");
+
+/* Value of TS packet scramble bits field for even key */
+static int mpq_sdmx_scramble_even = 0x2;
+module_param(mpq_sdmx_scramble_even, int, S_IRUGO | S_IWUSR);
+
+/* Value of TS packet scramble bits field for odd key */
+static int mpq_sdmx_scramble_odd = 0x3;
+module_param(mpq_sdmx_scramble_odd, int, S_IRUGO | S_IWUSR);
+
+/*
+ * Default action (discard or pass) taken when scramble bit is not one of the
+ * pass-through / odd / even values.
+ * When set packets will be discarded, otherwise passed through.
+ */
+static int mpq_sdmx_scramble_default_discard = 1;
+module_param(mpq_sdmx_scramble_default_discard, int, S_IRUGO | S_IWUSR);
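+
+/*
+ * Worked example with the defaults above: a TS packet whose
+ * transport_scrambling_control bits are 0x2 is treated as scrambled with
+ * the even key and 0x3 with the odd key; the standard clear value 0x0 is
+ * passed through, and any other value (e.g. 0x1) is discarded as long as
+ * mpq_sdmx_scramble_default_discard is set.
+ */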
+
+/* Max number of TS packets allowed as input for a single sdmx process */
+static int mpq_sdmx_proc_limit = MAX_TS_PACKETS_FOR_SDMX_PROCESS;
+module_param(mpq_sdmx_proc_limit, int, S_IRUGO | S_IWUSR);
+
+/* Debug flag for secure demux process */
+static int mpq_sdmx_debug;
+module_param(mpq_sdmx_debug, int, S_IRUGO | S_IWUSR);
+
+/*
+ * Indicates whether the demux should search for frame boundaries
+ * and notify on video packets on frame-basis or whether to provide
+ * only video PES packet payloads as-is.
+ */
+static int video_framing = 1;
+module_param(video_framing, int, S_IRUGO | S_IWUSR);
+
+/* TSIF operation mode: 1 = TSIF_MODE_1, 2 = TSIF_MODE_2, 3 = TSIF_LOOPBACK */
+static int tsif_mode = 2;
+module_param(tsif_mode, int, S_IRUGO | S_IWUSR);
+
+/* Invert the TSIF clock signal */
+static int clock_inv;
+module_param(clock_inv, int, S_IRUGO | S_IWUSR);
+
+/* Global data-structure for managing demux devices */
+static struct
+{
+ /* ION demux client used for memory allocation */
+ struct ion_client *ion_client;
+
+ /* demux devices array */
+ struct mpq_demux *devices;
+
+ /* Stream buffer objects used for tunneling to decoders */
+ struct mpq_streambuffer
+ decoder_buffers[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+
+ /* Indicates whether secure demux TZ application is available */
+ int secure_demux_app_loaded;
+} mpq_dmx_info;
+
+
+int mpq_dmx_get_param_scramble_odd(void)
+{
+ return mpq_sdmx_scramble_odd;
+}
+
+int mpq_dmx_get_param_scramble_even(void)
+{
+ return mpq_sdmx_scramble_even;
+}
+
+int mpq_dmx_get_param_scramble_default_discard(void)
+{
+ return mpq_sdmx_scramble_default_discard;
+}
+
+int mpq_dmx_get_param_tsif_mode(void)
+{
+ return tsif_mode;
+}
+
+int mpq_dmx_get_param_clock_inv(void)
+{
+ return clock_inv;
+}
+
+/* Check that PES header is valid and that it is a video PES */
+static int mpq_dmx_is_valid_video_pes(struct pes_packet_header *pes_header)
+{
+ /* start-code valid? */
+ if ((pes_header->packet_start_code_prefix_1 != 0) ||
+ (pes_header->packet_start_code_prefix_2 != 0) ||
+ (pes_header->packet_start_code_prefix_3 != 1))
+ return -EINVAL;
+
+ /* stream_id is video? */
+ if ((pes_header->stream_id & 0xF0) != 0xE0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Check if a framing pattern is a video frame pattern or a header pattern */
+static inline int mpq_dmx_is_video_frame(
+ enum dmx_video_codec codec,
+ u64 pattern_type)
+{
+ switch (codec) {
+ case DMX_VIDEO_CODEC_MPEG2:
+ if ((pattern_type == DMX_IDX_MPEG_I_FRAME_START) ||
+ (pattern_type == DMX_IDX_MPEG_P_FRAME_START) ||
+ (pattern_type == DMX_IDX_MPEG_B_FRAME_START))
+ return 1;
+ return 0;
+
+ case DMX_VIDEO_CODEC_H264:
+ if ((pattern_type == DMX_IDX_H264_IDR_START) ||
+ (pattern_type == DMX_IDX_H264_NON_IDR_START))
+ return 1;
+ return 0;
+
+ case DMX_VIDEO_CODEC_VC1:
+ if (pattern_type == DMX_IDX_VC1_FRAME_START)
+ return 1;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * mpq_dmx_get_pattern_params - Returns the required video
+ * patterns for framing operation based on video codec.
+ *
+ * @video_codec: the video codec.
+ * @patterns: a pointer to the pattern parameters, updated by this function.
+ * @patterns_num: number of patterns, updated by this function.
+ */
+static inline int mpq_dmx_get_pattern_params(
+ enum dmx_video_codec video_codec,
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+ int *patterns_num)
+{
+ switch (video_codec) {
+ case DMX_VIDEO_CODEC_MPEG2:
+ patterns[0] = dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER);
+ patterns[1] = dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP);
+ patterns[2] = dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START);
+ patterns[3] = dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START);
+ patterns[4] = dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START);
+ *patterns_num = 5;
+ break;
+
+ case DMX_VIDEO_CODEC_H264:
+ patterns[0] = dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
+ patterns[1] = dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
+ patterns[2] = dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START);
+ patterns[3] = dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START);
+ patterns[4] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+ *patterns_num = 5;
+ break;
+
+ case DMX_VIDEO_CODEC_VC1:
+ patterns[0] = dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER);
+ patterns[1] = dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT);
+ patterns[2] = dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START);
+ *patterns_num = 3;
+ break;
+
+ default:
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ *patterns_num = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
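+
+/*
+ * Illustrative usage (a sketch; the caller owns the arrays):
+ *
+ *   const struct dvb_dmx_video_patterns
+ *           *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
+ *   int num;
+ *
+ *   if (!mpq_dmx_get_pattern_params(DMX_VIDEO_CODEC_H264, patterns, &num))
+ *           ... search for patterns[0]..patterns[num - 1] ...
+ */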
+
+/*
+ * mpq_dmx_update_decoder_stat -
+ * Update decoder output statistics in debug-fs.
+ *
+ * @mpq_feed: decoder feed object
+ */
+void mpq_dmx_update_decoder_stat(struct mpq_feed *mpq_feed)
+{
+ struct timespec curr_time;
+ u64 delta_time_ms;
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+ enum mpq_adapter_stream_if idx;
+
+ if (!dvb_dmx_is_video_feed(mpq_feed->dvb_demux_feed) ||
+ (mpq_feed->video_info.stream_interface >
+ MPQ_ADAPTER_VIDEO3_STREAM_IF))
+ return;
+
+ idx = mpq_feed->video_info.stream_interface;
+
+ curr_time = current_kernel_time();
+ if (unlikely(!mpq_demux->decoder_stat[idx].out_count)) {
+ mpq_demux->decoder_stat[idx].out_last_time = curr_time;
+ mpq_demux->decoder_stat[idx].out_count++;
+ return;
+ }
+
+ /* calculate time-delta between frames */
+ delta_time_ms = mpq_dmx_calc_time_delta(&curr_time,
+ &mpq_demux->decoder_stat[idx].out_last_time);
+
+ mpq_demux->decoder_stat[idx].out_interval_sum += (u32)delta_time_ms;
+
+ mpq_demux->decoder_stat[idx].out_interval_average =
+ mpq_demux->decoder_stat[idx].out_interval_sum /
+ mpq_demux->decoder_stat[idx].out_count;
+
+ if (delta_time_ms > mpq_demux->decoder_stat[idx].out_interval_max)
+ mpq_demux->decoder_stat[idx].out_interval_max = delta_time_ms;
+
+ mpq_demux->decoder_stat[idx].out_last_time = curr_time;
+ mpq_demux->decoder_stat[idx].out_count++;
+}
+
+/*
+ * mpq_dmx_update_sdmx_stat -
+ * Update SDMX statistics in debug-fs.
+ *
+ * @mpq_demux: mpq_demux object
+ * @bytes_processed: number of bytes processed by sdmx
+ * @process_start_time: time before sdmx process was triggered
+ * @process_end_time: time after sdmx process finished
+ */
+static inline void mpq_dmx_update_sdmx_stat(struct mpq_demux *mpq_demux,
+ u32 bytes_processed, struct timespec *process_start_time,
+ struct timespec *process_end_time)
+{
+ u32 packets_num;
+ u64 process_time;
+
+ mpq_demux->sdmx_process_count++;
+ packets_num = bytes_processed / mpq_demux->demux.ts_packet_size;
+ mpq_demux->sdmx_process_packets_sum += packets_num;
+ mpq_demux->sdmx_process_packets_average =
+ mpq_demux->sdmx_process_packets_sum /
+ mpq_demux->sdmx_process_count;
+
+ process_time =
+ mpq_dmx_calc_time_delta(process_end_time, process_start_time);
+
+ mpq_demux->sdmx_process_time_sum += process_time;
+ mpq_demux->sdmx_process_time_average =
+ mpq_demux->sdmx_process_time_sum /
+ mpq_demux->sdmx_process_count;
+
+ if ((mpq_demux->sdmx_process_count == 1) ||
+ (packets_num < mpq_demux->sdmx_process_packets_min))
+ mpq_demux->sdmx_process_packets_min = packets_num;
+
+ if ((mpq_demux->sdmx_process_count == 1) ||
+ (process_time > mpq_demux->sdmx_process_time_max))
+ mpq_demux->sdmx_process_time_max = process_time;
+}
+
+static int mpq_sdmx_log_level_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t mpq_sdmx_log_level_read(struct file *fp,
+ char __user *user_buffer, size_t count, loff_t *position)
+{
+ char user_str[16];
+ struct mpq_demux *mpq_demux = fp->private_data;
+ int ret;
+
+ ret = scnprintf(user_str, 16, "%d", mpq_demux->sdmx_log_level);
+ ret = simple_read_from_buffer(user_buffer, count, position,
+ user_str, ret+1);
+
+ return ret;
+}
+
+static ssize_t mpq_sdmx_log_level_write(struct file *fp,
+ const char __user *user_buffer, size_t count, loff_t *position)
+{
+ char user_str[16];
+ int ret;
+ int ret_count;
+ int level;
+ struct mpq_demux *mpq_demux = fp->private_data;
+
+ if (count >= 16)
+ return -EINVAL;
+
+ ret_count = simple_write_to_buffer(user_str, 16, position, user_buffer,
+ count);
+ if (ret_count < 0)
+ return ret_count;
+
+ ret = kstrtoint(user_str, 0, &level);
+ if (ret)
+ return ret;
+
+ if (level < SDMX_LOG_NO_PRINT || level > SDMX_LOG_VERBOSE)
+ return -EINVAL;
+
+ mutex_lock(&mpq_demux->mutex);
+ mpq_demux->sdmx_log_level = level;
+ if (mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) {
+ ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle,
+ mpq_demux->sdmx_log_level);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Could not set sdmx log level. ret = %d\n",
+ __func__, ret);
+ mutex_unlock(&mpq_demux->mutex);
+ return -EINVAL;
+ }
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+ return ret_count;
+}
+
+static const struct file_operations sdmx_debug_fops = {
+ .open = mpq_sdmx_log_level_open,
+ .read = mpq_sdmx_log_level_read,
+ .write = mpq_sdmx_log_level_write,
+ .owner = THIS_MODULE,
+};
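+
+/*
+ * The sdmx_log_level debugfs file registered with these fops accepts a
+ * numeric level, e.g. (the exact path is an assumption and depends on
+ * where dvb-demux creates its debugfs directory):
+ *
+ *   echo 2 > /sys/kernel/debug/<dvb-demux dir>/sdmx_log_level
+ */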
+
+/* Extend dvb-demux debugfs with common plug-in entries */
+void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux)
+{
+ int i;
+ char file_name[50];
+ struct dentry *debugfs_decoder_dir;
+
+ /*
+ * Extend dvb-demux debugfs with HW statistics.
+ * Note that destruction of debugfs directory is done
+ * when dvb-demux is terminated.
+ */
+ mpq_demux->hw_notification_count = 0;
+ mpq_demux->hw_notification_interval = 0;
+ mpq_demux->hw_notification_size = 0;
+ mpq_demux->hw_notification_min_size = 0xFFFFFFFF;
+
+ if (mpq_demux->demux.dmx.debugfs_demux_dir == NULL)
+ return;
+
+ debugfs_create_u32(
+ "hw_notification_interval",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->hw_notification_interval);
+
+ debugfs_create_u32(
+ "hw_notification_min_interval",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->hw_notification_min_interval);
+
+ debugfs_create_u32(
+ "hw_notification_count",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->hw_notification_count);
+
+ debugfs_create_u32(
+ "hw_notification_size",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->hw_notification_size);
+
+ debugfs_create_u32(
+ "hw_notification_min_size",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->hw_notification_min_size);
+
+ debugfs_decoder_dir = debugfs_create_dir("decoder",
+ mpq_demux->demux.dmx.debugfs_demux_dir);
+
+ for (i = 0;
+ debugfs_decoder_dir &&
+ (i < MPQ_ADAPTER_MAX_NUM_OF_INTERFACES);
+ i++) {
+ snprintf(file_name, 50, "decoder%d_drop_count", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].drop_count);
+
+ snprintf(file_name, 50, "decoder%d_out_count", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].out_count);
+
+ snprintf(file_name, 50, "decoder%d_out_interval_sum", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].out_interval_sum);
+
+ snprintf(file_name, 50, "decoder%d_out_interval_average", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].out_interval_average);
+
+ snprintf(file_name, 50, "decoder%d_out_interval_max", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].out_interval_max);
+
+ snprintf(file_name, 50, "decoder%d_ts_errors", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].ts_errors);
+
+ snprintf(file_name, 50, "decoder%d_cc_errors", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].cc_errors);
+ }
+
+ debugfs_create_u32(
+ "sdmx_process_count",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_count);
+
+ debugfs_create_u32(
+ "sdmx_process_time_sum",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_time_sum);
+
+ debugfs_create_u32(
+ "sdmx_process_time_average",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_time_average);
+
+ debugfs_create_u32(
+ "sdmx_process_time_max",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_time_max);
+
+ debugfs_create_u32(
+ "sdmx_process_packets_sum",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_packets_sum);
+
+ debugfs_create_u32(
+ "sdmx_process_packets_average",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_packets_average);
+
+ debugfs_create_u32(
+ "sdmx_process_packets_min",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_packets_min);
+
+ debugfs_create_file("sdmx_log_level",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ mpq_demux,
+ &sdmx_debug_fops);
+}
+
+/* Update dvb-demux debugfs with HW notification statistics */
+void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux)
+{
+ struct timespec curr_time;
+ u64 delta_time_ms;
+
+ curr_time = current_kernel_time();
+ if (likely(mpq_demux->hw_notification_count)) {
+ /* calculate time-delta between notifications */
+ delta_time_ms = mpq_dmx_calc_time_delta(&curr_time,
+ &mpq_demux->last_notification_time);
+
+ mpq_demux->hw_notification_interval = delta_time_ms;
+
+ if ((mpq_demux->hw_notification_count == 1) ||
+ (mpq_demux->hw_notification_interval &&
+ mpq_demux->hw_notification_interval <
+ mpq_demux->hw_notification_min_interval))
+ mpq_demux->hw_notification_min_interval =
+ mpq_demux->hw_notification_interval;
+ }
+
+ mpq_demux->hw_notification_count++;
+ mpq_demux->last_notification_time = curr_time;
+}
+
+static void mpq_sdmx_check_app_loaded(void)
+{
+ int session;
+ u32 version;
+ int ret;
+
+ ret = sdmx_open_session(&session);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Could not initialize session with SDMX. ret = %d\n",
+ __func__, ret);
+ mpq_dmx_info.secure_demux_app_loaded = 0;
+ return;
+ }
+
+ /* Check proper sdmx major version */
+ ret = sdmx_get_version(session, &version);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Could not get sdmx version. ret = %d\n",
+ __func__, ret);
+ } else {
+ if ((version >> 8) != SDMX_MAJOR_VERSION_MATCH)
+ MPQ_DVB_ERR_PRINT(
+ "%s: sdmx major version does not match. expected=%d, actual=%d\n",
+ __func__, SDMX_MAJOR_VERSION_MATCH,
+ (version >> 8));
+ else
+ MPQ_DVB_DBG_PRINT(
+ "%s: sdmx major version is ok = %d\n",
+ __func__, SDMX_MAJOR_VERSION_MATCH);
+ }
+
+ mpq_dmx_info.secure_demux_app_loaded = 1;
+ sdmx_close_session(session);
+}
+
+int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func)
+{
+ int i;
+ int j;
+ int result;
+ struct mpq_demux *mpq_demux;
+ struct dvb_adapter *mpq_adapter;
+ struct mpq_feed *feed;
+
+ MPQ_DVB_DBG_PRINT("%s executed, device num %d\n",
+ __func__,
+ mpq_demux_device_num);
+
+ mpq_adapter = mpq_adapter_get();
+
+ if (mpq_adapter == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_adapter is not valid\n",
+ __func__);
+ result = -EPERM;
+ goto init_failed;
+ }
+
+ if (mpq_demux_device_num == 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_demux_device_num set to 0\n",
+ __func__);
+
+ result = -EPERM;
+ goto init_failed;
+ }
+
+ mpq_dmx_info.devices = NULL;
+ mpq_dmx_info.ion_client = NULL;
+
+ mpq_dmx_info.secure_demux_app_loaded = 0;
+
+ /* Allocate memory for all MPQ devices */
+ mpq_dmx_info.devices =
+ vzalloc(mpq_demux_device_num*sizeof(struct mpq_demux));
+
+ if (!mpq_dmx_info.devices) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: failed to allocate devices memory\n",
+ __func__);
+
+ result = -ENOMEM;
+ goto init_failed;
+ }
+
+ /*
+ * Create a new ION client used by demux to allocate memory
+ * for decoder's buffers.
+ */
+ mpq_dmx_info.ion_client =
+ msm_ion_client_create("demux_client");
+ if (IS_ERR_OR_NULL(mpq_dmx_info.ion_client)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: msm_ion_client_create\n",
+ __func__);
+
+ result = PTR_ERR(mpq_dmx_info.ion_client);
+ if (!result)
+ result = -ENOMEM;
+ mpq_dmx_info.ion_client = NULL;
+ goto init_failed_free_demux_devices;
+ }
+
+ /* Initialize and register all demux devices to the system */
+ for (i = 0; i < mpq_demux_device_num; i++) {
+ mpq_demux = mpq_dmx_info.devices+i;
+ mpq_demux->idx = i;
+
+ /* initialize demux source to memory by default */
+ mpq_demux->source = DMX_SOURCE_DVR0 + i;
+
+ /*
+ * Give the plugin a pointer to the ION client so
+ * that it can allocate memory from ION if required
+ */
+ mpq_demux->ion_client = mpq_dmx_info.ion_client;
+
+ mutex_init(&mpq_demux->mutex);
+
+ mpq_demux->num_secure_feeds = 0;
+ mpq_demux->num_active_feeds = 0;
+ mpq_demux->sdmx_filter_count = 0;
+ mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+ mpq_demux->sdmx_eos = 0;
+ mpq_demux->sdmx_log_level = SDMX_LOG_NO_PRINT;
+
+ if (mpq_demux->demux.feednum > MPQ_MAX_DMX_FILES) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: err - actual feednum (%d) larger than max, enlarge MPQ_MAX_DMX_FILES!\n",
+ __func__,
+ mpq_demux->demux.feednum);
+ result = -EINVAL;
+ goto init_failed_free_demux_devices;
+ }
+
+ /* Initialize private feed info */
+ for (j = 0; j < MPQ_MAX_DMX_FILES; j++) {
+ feed = &mpq_demux->feeds[j];
+ memset(feed, 0, sizeof(*feed));
+ feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE;
+ feed->mpq_demux = mpq_demux;
+ feed->session_id = 0;
+ }
+
+ /*
+ * dmx_init_func performs the plugin-specific HW
+ * initialization and is implemented by the specific plugin
+ */
+ result = dmx_init_func(mpq_adapter, mpq_demux);
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: dmx_init_func (errno=%d)\n",
+ __func__,
+ result);
+
+ goto init_failed_free_demux_devices;
+ }
+
+ mpq_demux->is_initialized = 1;
+
+ /*
+ * dvb-demux is now initialized,
+ * update back-pointers of private feeds
+ */
+ for (j = 0; j < MPQ_MAX_DMX_FILES; j++) {
+ feed = &mpq_demux->feeds[j];
+ feed->dvb_demux_feed = &mpq_demux->demux.feed[j];
+ mpq_demux->demux.feed[j].priv = feed;
+ }
+
+ /*
+ * Add capability of receiving input from memory.
+ * Every demux in our system may be connected to memory input,
+ * or any live input.
+ */
+ mpq_demux->fe_memory.source = DMX_MEMORY_FE;
+ result =
+ mpq_demux->demux.dmx.add_frontend(
+ &mpq_demux->demux.dmx,
+ &mpq_demux->fe_memory);
+
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: add_frontend (mem) failed (errno=%d)\n",
+ __func__,
+ result);
+
+ goto init_failed_free_demux_devices;
+ }
+ }
+
+ return 0;
+
+init_failed_free_demux_devices:
+ mpq_dmx_plugin_exit();
+init_failed:
+ return result;
+}
+
+void mpq_dmx_plugin_exit(void)
+{
+ int i;
+ struct mpq_demux *mpq_demux;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ if (mpq_dmx_info.ion_client != NULL) {
+ ion_client_destroy(mpq_dmx_info.ion_client);
+ mpq_dmx_info.ion_client = NULL;
+ }
+
+ if (mpq_dmx_info.devices != NULL) {
+ for (i = 0; i < mpq_demux_device_num; i++) {
+ mpq_demux = mpq_dmx_info.devices + i;
+
+ if (!mpq_demux->is_initialized)
+ continue;
+
+ if (mpq_demux->mpq_dmx_plugin_release)
+ mpq_demux->mpq_dmx_plugin_release(mpq_demux);
+
+ mpq_demux->demux.dmx.remove_frontend(
+ &mpq_demux->demux.dmx,
+ &mpq_demux->fe_memory);
+
+ if (mpq_dmx_info.secure_demux_app_loaded)
+ mpq_sdmx_close_session(mpq_demux);
+ mutex_destroy(&mpq_demux->mutex);
+ dvb_dmxdev_release(&mpq_demux->dmxdev);
+ dvb_dmx_release(&mpq_demux->demux);
+ }
+
+ vfree(mpq_dmx_info.devices);
+ mpq_dmx_info.devices = NULL;
+ }
+}
+
+int mpq_dmx_set_source(
+ struct dmx_demux *demux,
+ const dmx_source_t *src)
+{
+ int i;
+ int dvr_index;
+ int dmx_index;
+ struct dvb_demux *dvb_demux = demux->priv;
+ struct mpq_demux *mpq_demux;
+
+ if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL)) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ mpq_demux = dvb_demux->priv;
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * For dvr sources,
+ * verify that this source is connected to the respective demux
+ */
+ dmx_index = mpq_demux - mpq_dmx_info.devices;
+
+ if (*src >= DMX_SOURCE_DVR0) {
+ dvr_index = *src - DMX_SOURCE_DVR0;
+
+ if (dvr_index != dmx_index) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: can't connect demux%d to dvr%d\n",
+ __func__,
+ dmx_index,
+ dvr_index);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * For front-end sources,
+ * verify that this source is not already set to a different demux
+ */
+ for (i = 0; i < mpq_demux_device_num; i++) {
+ if ((&mpq_dmx_info.devices[i] != mpq_demux) &&
+ (mpq_dmx_info.devices[i].source == *src)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: demux%d source can't be set,\n"
+ "demux%d occupies this source already\n",
+ __func__,
+ dmx_index,
+ i);
+ return -EBUSY;
+ }
+ }
+
+ mpq_demux->source = *src;
+ return 0;
+}
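+
+/*
+ * For example, demux0 accepts DMX_SOURCE_DVR0 (its own DVR) or any
+ * front-end source not already taken by another demux, while a request
+ * to connect it to DMX_SOURCE_DVR1 is rejected with -EINVAL.
+ */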
+
+/**
+ * Takes an ION allocated buffer's file descriptor and handles the details of
+ * mapping it into kernel memory and obtaining an ION handle for it.
+ * Internal helper function.
+ *
+ * @client: ION client
+ * @handle: ION file descriptor to map
+ * @priv_handle: returned ION handle. Must be freed when no longer needed
+ * @kernel_mem: returned kernel mapped pointer
+ *
+ * Note: mapping might not be possible in secured heaps/buffers, and so NULL
+ * might be returned in kernel_mem
+ *
+ * Return error status
+ */
+static int mpq_map_buffer_to_kernel(
+ struct ion_client *client,
+ int handle,
+ struct ion_handle **priv_handle,
+ void **kernel_mem)
+{
+ struct ion_handle *ion_handle;
+ unsigned long ionflag = 0;
+ int ret;
+
+ if (client == NULL || priv_handle == NULL || kernel_mem == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ ion_handle = ion_import_dma_buf(client, handle);
+ if (IS_ERR_OR_NULL(ion_handle)) {
+ ret = PTR_ERR(ion_handle);
+ MPQ_DVB_ERR_PRINT("%s: ion_import_dma_buf failed %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+
+ goto map_buffer_failed;
+ }
+
+ ret = ion_handle_get_flags(client, ion_handle, &ionflag);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n",
+ __func__, ret);
+ goto map_buffer_failed_free_buff;
+ }
+
+ if (ionflag & ION_FLAG_SECURE) {
+ MPQ_DVB_DBG_PRINT("%s: secured buffer\n", __func__);
+ *kernel_mem = NULL;
+ } else {
+ unsigned long tmp;
+ *kernel_mem = ion_map_kernel(client, ion_handle);
+ if (IS_ERR_OR_NULL(*kernel_mem)) {
+ ret = PTR_ERR(*kernel_mem);
+ MPQ_DVB_ERR_PRINT("%s: ion_map_kernel failed, ret=%d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto map_buffer_failed_free_buff;
+ }
+ ion_handle_get_size(client, ion_handle, &tmp);
+ MPQ_DVB_DBG_PRINT(
+ "%s: mapped to address 0x%p, size=%lu\n",
+ __func__, *kernel_mem, tmp);
+ }
+
+ *priv_handle = ion_handle;
+ return 0;
+
+map_buffer_failed_free_buff:
+ ion_free(client, ion_handle);
+map_buffer_failed:
+ return ret;
+}
+
+int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer,
+ void **priv_handle, void **kernel_mem)
+{
+ struct dvb_demux *dvb_demux = demux->priv;
+ struct mpq_demux *mpq_demux;
+
+ if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) ||
+ (priv_handle == NULL) || (kernel_mem == NULL)) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ mpq_demux = dvb_demux->priv;
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ return mpq_map_buffer_to_kernel(
+ mpq_demux->ion_client,
+ dmx_buffer->handle,
+ (struct ion_handle **)priv_handle, kernel_mem);
+}
+
+int mpq_dmx_unmap_buffer(struct dmx_demux *demux,
+ void *priv_handle)
+{
+ struct dvb_demux *dvb_demux = demux->priv;
+ struct ion_handle *ion_handle = priv_handle;
+ struct mpq_demux *mpq_demux;
+ unsigned long ionflag = 0;
+ int ret;
+
+ if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) ||
+ (priv_handle == NULL)) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ mpq_demux = dvb_demux->priv;
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = ion_handle_get_flags(mpq_demux->ion_client, ion_handle, &ionflag);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ if (!(ionflag & ION_FLAG_SECURE))
+ ion_unmap_kernel(mpq_demux->ion_client, ion_handle);
+
+ ion_free(mpq_demux->ion_client, ion_handle);
+
+ return 0;
+}
+
+int mpq_dmx_reuse_decoder_buffer(struct dvb_demux_feed *feed, int cookie)
+{
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, cookie);
+
+ if (cookie < 0) {
+ MPQ_DVB_ERR_PRINT("%s: invalid cookie parameter\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dvb_dmx_is_video_feed(feed)) {
+ struct mpq_video_feed_info *feed_data;
+ struct mpq_feed *mpq_feed;
+ struct mpq_streambuffer *stream_buffer;
+ int ret;
+
+ mutex_lock(&mpq_demux->mutex);
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+ if (stream_buffer == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid feed, feed_data->video_buffer is NULL\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ mutex_unlock(&mpq_demux->mutex);
+ return -EINVAL;
+ }
+
+ ret = mpq_streambuffer_pkt_dispose(stream_buffer, cookie, 1);
+ spin_unlock(&feed_data->video_buffer_lock);
+ mutex_unlock(&mpq_demux->mutex);
+
+ return ret;
+ }
+
+ /* else */
+ MPQ_DVB_ERR_PRINT("%s: Invalid feed type %d\n",
+ __func__, feed->pes_type);
+
+ return -EINVAL;
+}
+
+/**
+ * Handles the details of internal decoder buffer allocation via ION.
+ * Internal helper function.
+ * @mpq_demux: mpq demux instance, provides the ION client and allocation flags
+ * @feed_data: decoder feed object
+ * @dec_buffs: buffer information
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_internal_buffers(
+ struct mpq_demux *mpq_demux,
+ struct mpq_video_feed_info *feed_data,
+ struct dmx_decoder_buffers *dec_buffs)
+{
+ struct ion_handle *temp_handle = NULL;
+ void *payload_buffer = NULL;
+ int actual_buffer_size = 0;
+ int ret = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: Internal decoder buffer allocation\n", __func__);
+
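+	/* Round the requested size up to a 4KB multiple to match the page-aligned ION allocation below */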
+ actual_buffer_size = dec_buffs->buffers_size;
+ actual_buffer_size += (SZ_4K - 1);
+ actual_buffer_size &= ~(SZ_4K - 1);
+
+ temp_handle = ion_alloc(mpq_demux->ion_client,
+ actual_buffer_size, SZ_4K,
+ ION_HEAP(video_secure_ion_heap) |
+ ION_HEAP(video_nonsecure_ion_heap),
+ mpq_demux->decoder_alloc_flags);
+
+ if (IS_ERR_OR_NULL(temp_handle)) {
+ ret = PTR_ERR(temp_handle);
+ MPQ_DVB_ERR_PRINT("%s: FAILED to allocate payload buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ payload_buffer = ion_map_kernel(mpq_demux->ion_client, temp_handle);
+
+ if (IS_ERR_OR_NULL(payload_buffer)) {
+ ret = PTR_ERR(payload_buffer);
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to map payload buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto init_failed_free_payload_buffer;
+ }
+ feed_data->buffer_desc.decoder_buffers_num = 1;
+ feed_data->buffer_desc.ion_handle[0] = temp_handle;
+ feed_data->buffer_desc.desc[0].base = payload_buffer;
+ feed_data->buffer_desc.desc[0].size = actual_buffer_size;
+ feed_data->buffer_desc.desc[0].read_ptr = 0;
+ feed_data->buffer_desc.desc[0].write_ptr = 0;
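+	/*
+	 * Export the allocated buffer as a dma-buf file descriptor so it can
+	 * be shared outside the kernel (e.g. with the decoder).
+	 */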
+ feed_data->buffer_desc.desc[0].handle =
+ ion_share_dma_buf_fd(mpq_demux->ion_client, temp_handle);
+ if (IS_ERR_VALUE(feed_data->buffer_desc.desc[0].handle)) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to share payload buffer %d\n",
+			__func__, feed_data->buffer_desc.desc[0].handle);
+ ret = -ENOMEM;
+ goto init_failed_unmap_payload_buffer;
+ }
+
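+	/* Hold a file reference on the shared fd so it stays valid for the feed's lifetime */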
+ feed_data->buffer_desc.shared_file = fget(
+ feed_data->buffer_desc.desc[0].handle);
+
+ return 0;
+
+init_failed_unmap_payload_buffer:
+ ion_unmap_kernel(mpq_demux->ion_client, temp_handle);
+ feed_data->buffer_desc.desc[0].base = NULL;
+init_failed_free_payload_buffer:
+ ion_free(mpq_demux->ion_client, temp_handle);
+ feed_data->buffer_desc.ion_handle[0] = NULL;
+ feed_data->buffer_desc.desc[0].size = 0;
+ feed_data->buffer_desc.decoder_buffers_num = 0;
+ feed_data->buffer_desc.shared_file = NULL;
+end:
+ return ret;
+}
+
+/**
+ * Handles the details of external decoder buffers allocated by the user.
+ * Each buffer is mapped into kernel memory, an ION handle is obtained for it,
+ * and the decoder feed object is updated with the related information.
+ * Internal helper function.
+ * @feed_data: decoder feed object
+ * @dec_buffs: buffer information
+ * @client: ION client
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_external_buffers(
+ struct mpq_video_feed_info *feed_data,
+ struct dmx_decoder_buffers *dec_buffs,
+ struct ion_client *client)
+{
+ struct ion_handle *temp_handle = NULL;
+ void *payload_buffer = NULL;
+ int actual_buffer_size = 0;
+ int ret = 0;
+ int i;
+
+ /*
+ * Payload buffer was allocated externally (through ION).
+	 * Map the ION handles to kernel memory
+ */
+ MPQ_DVB_DBG_PRINT("%s: External decoder buffer allocation\n", __func__);
+
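+	/* Ring mode uses a single ring buffer; linear mode uses buffers_num separate buffers */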
+ actual_buffer_size = dec_buffs->buffers_size;
+ if (!dec_buffs->is_linear) {
+ MPQ_DVB_DBG_PRINT("%s: Ex. Ring-buffer\n", __func__);
+ feed_data->buffer_desc.decoder_buffers_num = 1;
+ } else {
+ MPQ_DVB_DBG_PRINT("%s: Ex. Linear\n", __func__);
+ feed_data->buffer_desc.decoder_buffers_num =
+ dec_buffs->buffers_num;
+ }
+
+ for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+ ret = mpq_map_buffer_to_kernel(
+ client,
+ dec_buffs->handles[i],
+ &temp_handle,
+ &payload_buffer);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed mapping buffer %d\n",
+ __func__, i);
+ goto init_failed;
+ }
+ feed_data->buffer_desc.ion_handle[i] = temp_handle;
+ feed_data->buffer_desc.desc[i].base = payload_buffer;
+ feed_data->buffer_desc.desc[i].handle =
+ dec_buffs->handles[i];
+ feed_data->buffer_desc.desc[i].size =
+ dec_buffs->buffers_size;
+ feed_data->buffer_desc.desc[i].read_ptr = 0;
+ feed_data->buffer_desc.desc[i].write_ptr = 0;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Buffer #%d: base=0x%p, handle=%d, size=%d\n",
+ __func__, i,
+ feed_data->buffer_desc.desc[i].base,
+ feed_data->buffer_desc.desc[i].handle,
+ feed_data->buffer_desc.desc[i].size);
+ }
+
+ return 0;
+
+init_failed:
+ for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+ if (feed_data->buffer_desc.ion_handle[i]) {
+ if (feed_data->buffer_desc.desc[i].base) {
+ ion_unmap_kernel(client,
+ feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.desc[i].base = NULL;
+ }
+ ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.ion_handle[i] = NULL;
+ feed_data->buffer_desc.desc[i].size = 0;
+ }
+ }
+ return ret;
+}
+
+/**
+ * Handles the details of initializing the mpq_streambuffer object according
+ * to the user decoder buffer configuration: External/Internal buffers and
+ * ring/linear buffering mode.
+ * Internal helper function.
+ * @feed: dvb demux feed object, contains the buffers configuration
+ * @feed_data: decoder feed object
+ * @stream_buffer: stream buffer object to initialize
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_streambuffer(
+ struct mpq_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct mpq_streambuffer *stream_buffer)
+{
+ int ret;
+ void *packet_buffer = NULL;
+ struct mpq_demux *mpq_demux = feed->mpq_demux;
+ struct ion_client *client = mpq_demux->ion_client;
+ struct dmx_decoder_buffers *dec_buffs = NULL;
+ enum mpq_streambuffer_mode mode;
+
+ dec_buffs = feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+ /* Allocate packet buffer holding the meta-data */
+ packet_buffer = vmalloc(VIDEO_META_DATA_BUFFER_SIZE);
+
+ if (packet_buffer == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to allocate packets buffer\n",
+ __func__);
+
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ MPQ_DVB_DBG_PRINT("%s: dec_buffs: num=%d, size=%d, linear=%d\n",
+ __func__,
+ dec_buffs->buffers_num,
+ dec_buffs->buffers_size,
+ dec_buffs->is_linear);
+
+ if (dec_buffs->buffers_num == 0)
+ ret = mpq_dmx_init_internal_buffers(
+ mpq_demux, feed_data, dec_buffs);
+ else
+ ret = mpq_dmx_init_external_buffers(
+ feed_data, dec_buffs, client);
+
+ if (ret != 0)
+ goto init_failed_free_packet_buffer;
+
+ mode = dec_buffs->is_linear ? MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR :
+ MPQ_STREAMBUFFER_BUFFER_MODE_RING;
+ ret = mpq_streambuffer_init(
+ feed_data->video_buffer,
+ mode,
+ feed_data->buffer_desc.desc,
+ feed_data->buffer_desc.decoder_buffers_num,
+ packet_buffer,
+ VIDEO_META_DATA_BUFFER_SIZE);
+
+ if (ret != 0)
+ goto init_failed_free_packet_buffer;
+
+ goto end;
+
+init_failed_free_packet_buffer:
+ vfree(packet_buffer);
+end:
+ return ret;
+}
+
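+/*
+ * Releases a video feed's stream buffer resources: unregisters the stream
+ * interface, terminates the stream buffer, and unmaps and frees the
+ * associated ION buffers (un-sharing internally allocated ones).
+ */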
+static void mpq_dmx_release_streambuffer(
+ struct mpq_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct mpq_streambuffer *video_buffer,
+ struct ion_client *client)
+{
+ int buf_num = 0;
+ int i;
+ struct dmx_decoder_buffers *dec_buffs =
+ feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+ mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+
+ mpq_streambuffer_terminate(video_buffer);
+
+ vfree(video_buffer->packet_data.data);
+
+ buf_num = feed_data->buffer_desc.decoder_buffers_num;
+
+ for (i = 0; i < buf_num; i++) {
+ if (feed_data->buffer_desc.ion_handle[i]) {
+ if (feed_data->buffer_desc.desc[i].base) {
+ ion_unmap_kernel(client,
+ feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.desc[i].base = NULL;
+ }
+
+			/*
+			 * Un-share the buffer if the kernel is the one
+			 * that shared it.
+			 */
+ if (!dec_buffs->buffers_num &&
+ feed_data->buffer_desc.shared_file) {
+ fput(feed_data->buffer_desc.shared_file);
+ feed_data->buffer_desc.shared_file = NULL;
+ }
+
+ ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.ion_handle[i] = NULL;
+ feed_data->buffer_desc.desc[i].size = 0;
+ }
+ }
+}
+
+int mpq_dmx_flush_stream_buffer(struct dvb_demux_feed *feed)
+{
+ struct mpq_feed *mpq_feed = feed->priv;
+ struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+ struct mpq_streambuffer *sbuff;
+ int ret = 0;
+
+ if (!dvb_dmx_is_video_feed(feed)) {
+ MPQ_DVB_DBG_PRINT("%s: not a video feed, feed type=%d\n",
+ __func__, feed->pes_type);
+ return 0;
+ }
+
+ spin_lock(&feed_data->video_buffer_lock);
+
+ sbuff = feed_data->video_buffer;
+ if (sbuff == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: feed_data->video_buffer is NULL\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return -ENODEV;
+ }
+
+ feed_data->pending_pattern_len = 0;
+
+ ret = mpq_streambuffer_flush(sbuff);
+ if (ret)
+ MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer_flush failed, ret=%d\n",
+ __func__, ret);
+
+ spin_unlock(&feed_data->video_buffer_lock);
+
+ return ret;
+}
+
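+/*
+ * dmx_ts_feed flush callback: resets the feed's PES state and, for video
+ * feeds, flushes the decoder stream buffer as well.
+ */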
+static int mpq_dmx_flush_buffer(struct dmx_ts_feed *ts_feed, size_t length)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&demux->mutex))
+ return -ERESTARTSYS;
+
+ dvbdmx_ts_reset_pes_state(feed);
+
+ if (dvb_dmx_is_video_feed(feed)) {
+ MPQ_DVB_DBG_PRINT("%s: flushing video buffer\n", __func__);
+
+ ret = mpq_dmx_flush_stream_buffer(feed);
+ }
+
+ mutex_unlock(&demux->mutex);
+ return ret;
+}
+
+/**
+ * mpq_dmx_init_video_feed - Initializes the video feed information
+ * used to pass data directly to the decoder.
+ *
+ * @mpq_feed: The mpq feed object
+ *
+ * Return error code.
+ */
+int mpq_dmx_init_video_feed(struct mpq_feed *mpq_feed)
+{
+ int ret;
+ struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+ struct mpq_streambuffer *stream_buffer;
+
+ /* get and store framing information if required */
+ if (video_framing) {
+ mpq_dmx_get_pattern_params(
+ mpq_feed->dvb_demux_feed->video_codec,
+ feed_data->patterns, &feed_data->patterns_num);
+ if (!feed_data->patterns_num) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to get framing pattern parameters\n",
+ __func__);
+
+ ret = -EINVAL;
+ goto init_failed_free_priv_data;
+ }
+ }
+
+ /* Register the new stream-buffer interface to MPQ adapter */
+ switch (mpq_feed->dvb_demux_feed->pes_type) {
+ case DMX_PES_VIDEO0:
+ feed_data->stream_interface =
+ MPQ_ADAPTER_VIDEO0_STREAM_IF;
+ break;
+
+ case DMX_PES_VIDEO1:
+ feed_data->stream_interface =
+ MPQ_ADAPTER_VIDEO1_STREAM_IF;
+ break;
+
+ case DMX_PES_VIDEO2:
+ feed_data->stream_interface =
+ MPQ_ADAPTER_VIDEO2_STREAM_IF;
+ break;
+
+ case DMX_PES_VIDEO3:
+ feed_data->stream_interface =
+ MPQ_ADAPTER_VIDEO3_STREAM_IF;
+ break;
+
+ default:
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid pes type %d\n",
+ __func__,
+ mpq_feed->dvb_demux_feed->pes_type);
+ ret = -EINVAL;
+ goto init_failed_free_priv_data;
+ }
+
+	/* Make sure the stream interface is not already occupied */
+ stream_buffer = NULL;
+ mpq_adapter_get_stream_if(
+ feed_data->stream_interface,
+ &stream_buffer);
+ if (stream_buffer != NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Video interface %d already occupied!\n",
+ __func__,
+ feed_data->stream_interface);
+ ret = -EBUSY;
+ goto init_failed_free_priv_data;
+ }
+
+ feed_data->video_buffer =
+ &mpq_dmx_info.decoder_buffers[feed_data->stream_interface];
+
+ ret = mpq_dmx_init_streambuffer(
+ mpq_feed, feed_data, feed_data->video_buffer);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_init_streambuffer failed, err = %d\n",
+ __func__, ret);
+ goto init_failed_free_priv_data;
+ }
+
+ ret = mpq_adapter_register_stream_if(
+ feed_data->stream_interface,
+ feed_data->video_buffer);
+
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_adapter_register_stream_if failed, err = %d\n",
+ __func__, ret);
+ goto init_failed_free_stream_buffer;
+ }
+
+ spin_lock_init(&feed_data->video_buffer_lock);
+
+ feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+ feed_data->pes_header_offset = 0;
+ mpq_feed->dvb_demux_feed->pusi_seen = 0;
+ mpq_feed->dvb_demux_feed->peslen = 0;
+ feed_data->fullness_wait_cancel = 0;
+ mpq_streambuffer_get_data_rw_offset(feed_data->video_buffer, NULL,
+ &feed_data->frame_offset);
+ feed_data->last_pattern_offset = 0;
+ feed_data->pending_pattern_len = 0;
+ feed_data->last_framing_match_type = 0;
+ feed_data->found_sequence_header_pattern = 0;
+ memset(&feed_data->prefix_size, 0,
+ sizeof(struct dvb_dmx_video_prefix_size_masks));
+ feed_data->first_prefix_size = 0;
+ feed_data->saved_pts_dts_info.pts_exist = 0;
+ feed_data->saved_pts_dts_info.dts_exist = 0;
+ feed_data->new_pts_dts_info.pts_exist = 0;
+ feed_data->new_pts_dts_info.dts_exist = 0;
+ feed_data->saved_info_used = 1;
+ feed_data->new_info_exists = 0;
+ feed_data->first_pts_dts_copy = 1;
+ feed_data->tei_errs = 0;
+ feed_data->last_continuity = -1;
+ feed_data->continuity_errs = 0;
+ feed_data->ts_packets_num = 0;
+ feed_data->ts_dropped_bytes = 0;
+
+ mpq_demux->decoder_stat[feed_data->stream_interface].drop_count = 0;
+ mpq_demux->decoder_stat[feed_data->stream_interface].out_count = 0;
+ mpq_demux->decoder_stat[feed_data->stream_interface].
+ out_interval_sum = 0;
+ mpq_demux->decoder_stat[feed_data->stream_interface].
+ out_interval_max = 0;
+ mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors = 0;
+ mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors = 0;
+
+ return 0;
+
+init_failed_free_stream_buffer:
+ mpq_dmx_release_streambuffer(mpq_feed, feed_data,
+ feed_data->video_buffer, mpq_demux->ion_client);
+ mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+init_failed_free_priv_data:
+ feed_data->video_buffer = NULL;
+ return ret;
+}
+
+/**
+ * mpq_dmx_terminate_video_feed - terminate video feed information
+ * that was previously initialized in mpq_dmx_init_video_feed
+ *
+ * @mpq_feed: The mpq feed used for the video TS packets
+ *
+ * Return error code.
+ */
+int mpq_dmx_terminate_video_feed(struct mpq_feed *mpq_feed)
+{
+	struct mpq_streambuffer *video_buffer;
+	struct mpq_video_feed_info *feed_data;
+	struct mpq_demux *mpq_demux;
+
+	if (mpq_feed == NULL)
+		return -EINVAL;
+
+	mpq_demux = mpq_feed->mpq_demux;
+	feed_data = &mpq_feed->video_info;
+
+ spin_lock(&feed_data->video_buffer_lock);
+ video_buffer = feed_data->video_buffer;
+ feed_data->video_buffer = NULL;
+ wake_up_all(&video_buffer->raw_data.queue);
+ spin_unlock(&feed_data->video_buffer_lock);
+
+ mpq_dmx_release_streambuffer(mpq_feed, feed_data,
+ video_buffer, mpq_demux->ion_client);
+
+ return 0;
+}
+
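+/*
+ * Returns another active feed sharing the same output ring buffer as the
+ * given feed (i.e. a peer in the same recording), or NULL if none exists.
+ */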
+struct dvb_demux_feed *mpq_dmx_peer_rec_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux_feed *tmp;
+ struct dvb_demux *dvb_demux = feed->demux;
+
+ list_for_each_entry(tmp, &dvb_demux->feed_list, list_head) {
+ if (tmp != feed && tmp->state == DMX_STATE_GO &&
+ tmp->feed.ts.buffer.ringbuff ==
+ feed->feed.ts.buffer.ringbuff) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: main feed pid=%d, secondary feed pid=%d\n",
+ __func__, tmp->pid, feed->pid);
+ return tmp;
+ }
+ }
+
+ return NULL;
+}
+
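+/*
+ * Allocates the secure demux output data buffer from the QSEECOM ION heap
+ * and initializes a ring buffer on top of its kernel mapping.
+ */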
+static int mpq_sdmx_alloc_data_buf(struct mpq_feed *mpq_feed, size_t size)
+{
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+ void *buf_base;
+ int ret;
+
+ mpq_feed->sdmx_buf_handle = ion_alloc(mpq_demux->ion_client,
+ size,
+ SZ_4K,
+ ION_HEAP(ION_QSECOM_HEAP_ID),
+ 0);
+ if (IS_ERR_OR_NULL(mpq_feed->sdmx_buf_handle)) {
+ ret = PTR_ERR(mpq_feed->sdmx_buf_handle);
+ mpq_feed->sdmx_buf_handle = NULL;
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to allocate sdmx buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ buf_base = ion_map_kernel(mpq_demux->ion_client,
+ mpq_feed->sdmx_buf_handle);
+ if (IS_ERR_OR_NULL(buf_base)) {
+ ret = PTR_ERR(buf_base);
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to map sdmx buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto failed_free_buf;
+ }
+
+ dvb_ringbuffer_init(&mpq_feed->sdmx_buf, buf_base, size);
+
+ return 0;
+
+failed_free_buf:
+ ion_free(mpq_demux->ion_client, mpq_feed->sdmx_buf_handle);
+ mpq_feed->sdmx_buf_handle = NULL;
+end:
+ return ret;
+}
+
+static int mpq_sdmx_free_data_buf(struct mpq_feed *mpq_feed)
+{
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+
+ if (mpq_feed->sdmx_buf_handle) {
+ ion_unmap_kernel(mpq_demux->ion_client,
+ mpq_feed->sdmx_buf_handle);
+ mpq_feed->sdmx_buf.data = NULL;
+ ion_free(mpq_demux->ion_client,
+ mpq_feed->sdmx_buf_handle);
+ mpq_feed->sdmx_buf_handle = NULL;
+ }
+
+ return 0;
+}
+
+static int mpq_sdmx_init_metadata_buffer(struct mpq_demux *mpq_demux,
+ struct mpq_feed *feed, struct sdmx_buff_descr *metadata_buff_desc)
+{
+ void *metadata_buff_base;
+ ion_phys_addr_t temp;
+ int ret;
+ size_t size;
+
+ feed->metadata_buf_handle = ion_alloc(mpq_demux->ion_client,
+ SDMX_METADATA_BUFFER_SIZE,
+ SZ_4K,
+ ION_HEAP(ION_QSECOM_HEAP_ID),
+ 0);
+ if (IS_ERR_OR_NULL(feed->metadata_buf_handle)) {
+ ret = PTR_ERR(feed->metadata_buf_handle);
+ feed->metadata_buf_handle = NULL;
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to allocate metadata buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ metadata_buff_base = ion_map_kernel(mpq_demux->ion_client,
+ feed->metadata_buf_handle);
+ if (IS_ERR_OR_NULL(metadata_buff_base)) {
+ ret = PTR_ERR(metadata_buff_base);
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to map metadata buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto failed_free_metadata_buf;
+ }
+
+ ret = ion_phys(mpq_demux->ion_client,
+ feed->metadata_buf_handle,
+ &temp,
+ &size);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to get physical address %d\n",
+ __func__, ret);
+ goto failed_unmap_metadata_buf;
+ }
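+	/*
+	 * The descriptor carries the buffer's physical address and size,
+	 * presumably for the secure demux firmware which accesses the
+	 * buffer directly.
+	 */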
+ metadata_buff_desc->size = size;
+ metadata_buff_desc->base_addr = (u64)temp;
+
+ dvb_ringbuffer_init(&feed->metadata_buf, metadata_buff_base,
+ SDMX_METADATA_BUFFER_SIZE);
+
+ return 0;
+
+failed_unmap_metadata_buf:
+ ion_unmap_kernel(mpq_demux->ion_client, feed->metadata_buf_handle);
+failed_free_metadata_buf:
+ ion_free(mpq_demux->ion_client, feed->metadata_buf_handle);
+ feed->metadata_buf_handle = NULL;
+end:
+ return ret;
+}
+
+static int mpq_sdmx_terminate_metadata_buffer(struct mpq_feed *mpq_feed)
+{
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+
+ if (mpq_feed->metadata_buf_handle) {
+ ion_unmap_kernel(mpq_demux->ion_client,
+ mpq_feed->metadata_buf_handle);
+ mpq_feed->metadata_buf.data = NULL;
+ ion_free(mpq_demux->ion_client,
+ mpq_feed->metadata_buf_handle);
+ mpq_feed->metadata_buf_handle = NULL;
+ }
+
+ return 0;
+}
+
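+/*
+ * Tears down a feed: detaches it from the secure demux (removing either the
+ * raw pid or the whole filter), terminates video feed resources, and frees
+ * the sdmx data and metadata buffers.
+ */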
+int mpq_dmx_terminate_feed(struct dvb_demux_feed *feed)
+{
+ int ret = 0;
+ struct mpq_demux *mpq_demux;
+ struct mpq_feed *mpq_feed;
+ struct mpq_feed *main_rec_feed = NULL;
+ struct dvb_demux_feed *tmp;
+
+ if (feed == NULL)
+ return -EINVAL;
+
+ mpq_demux = feed->demux->priv;
+
+ mutex_lock(&mpq_demux->mutex);
+ mpq_feed = feed->priv;
+
+ if (mpq_feed->sdmx_filter_handle != SDMX_INVALID_FILTER_HANDLE) {
+ if (mpq_feed->filter_type == SDMX_RAW_FILTER) {
+ tmp = mpq_dmx_peer_rec_feed(feed);
+ if (tmp)
+ main_rec_feed = tmp->priv;
+ }
+
+ if (main_rec_feed) {
+ /* This feed is part of a recording filter */
+ MPQ_DVB_DBG_PRINT(
+ "%s: Removing raw pid %d from filter %d\n",
+ __func__, feed->pid,
+ mpq_feed->sdmx_filter_handle);
+ ret = sdmx_remove_raw_pid(
+ mpq_demux->sdmx_session_handle,
+ mpq_feed->sdmx_filter_handle, feed->pid);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: SDMX_remove_raw_pid failed. ret = %d\n",
+ __func__, ret);
+
+			/*
+			 * If the feed being removed was the primary feed,
+			 * another feed should now become the primary one.
+			 */
+ if (!mpq_feed->secondary_feed)
+ main_rec_feed->secondary_feed = 0;
+ } else {
+ MPQ_DVB_DBG_PRINT("%s: Removing filter %d, pid %d\n",
+ __func__, mpq_feed->sdmx_filter_handle,
+ feed->pid);
+ ret = sdmx_remove_filter(mpq_demux->sdmx_session_handle,
+ mpq_feed->sdmx_filter_handle);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: SDMX_remove_filter failed. ret = %d\n",
+ __func__, ret);
+ }
+
+ mpq_demux->sdmx_filter_count--;
+ mpq_feed->sdmx_filter_handle =
+ SDMX_INVALID_FILTER_HANDLE;
+ }
+
+ mpq_sdmx_close_session(mpq_demux);
+ mpq_demux->num_secure_feeds--;
+ }
+
+ if (dvb_dmx_is_video_feed(feed)) {
+ ret = mpq_dmx_terminate_video_feed(mpq_feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_terminate_video_feed failed. ret = %d\n",
+ __func__, ret);
+ }
+
+ if (mpq_feed->sdmx_buf_handle) {
+ wake_up_all(&mpq_feed->sdmx_buf.queue);
+ mpq_sdmx_free_data_buf(mpq_feed);
+ }
+
+ mpq_sdmx_terminate_metadata_buffer(mpq_feed);
+ mpq_demux->num_active_feeds--;
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ return ret;
+}
+
+int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed)
+{
+ if (dvb_dmx_is_video_feed(feed)) {
+ struct mpq_feed *mpq_feed;
+ struct mpq_video_feed_info *feed_data;
+
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+ feed_data->fullness_wait_cancel = 0;
+
+ return 0;
+ }
+
+ /* else */
+ MPQ_DVB_DBG_PRINT(
+ "%s: Invalid feed type %d\n",
+ __func__,
+ feed->pes_type);
+
+ return -EINVAL;
+}
+
+/**
+ * Returns whether the free space in the decoder's output
+ * buffer is larger than a specific number of bytes.
+ *
+ * @sbuff: MPQ stream buffer used for decoder data.
+ * @required_space: number of required free bytes in the buffer
+ *
+ * Return 1 if required free bytes are available, 0 otherwise.
+ */
+static inline int mpq_dmx_check_decoder_fullness(
+ struct mpq_streambuffer *sbuff,
+ size_t required_space)
+{
+ ssize_t free = mpq_streambuffer_data_free(sbuff);
+ ssize_t free_meta = mpq_streambuffer_metadata_free(sbuff);
+
+ /* Verify meta-data buffer can contain at least 1 packet */
+ if (free_meta < VIDEO_META_DATA_PACKET_SIZE)
+ return 0;
+
+	/*
+	 * For linear buffers, verify there is enough space for this TSP
+	 * and that an additional buffer is free, as framing might require
+	 * one more buffer to be available.
+	 */
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+ return (free >= required_space &&
+ sbuff->pending_buffers_count < sbuff->buffers_num-1);
+ else
+ /* Ring buffer mode */
+ return (free >= required_space);
+}
+
+/**
+ * Checks whether decoder's output buffer has free space
+ * for specific number of bytes, if not, the function waits
+ * until the amount of free-space is available.
+ *
+ * @feed: decoder's feed object
+ * @required_space: number of required free bytes in the buffer
+ * @lock_feed: indicates whether the demux mutex should be taken before
+ * accessing the feed information. If the caller already holds the mutex
+ * this should be set to 0, otherwise to 1.
+ *
+ * Return 0 if required space is available and error code
+ * in case waiting on buffer fullness was aborted.
+ */
+static int mpq_dmx_decoder_fullness_check(
+ struct dvb_demux_feed *feed,
+ size_t required_space,
+ int lock_feed)
+{
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct mpq_streambuffer *sbuff = NULL;
+ struct mpq_video_feed_info *feed_data;
+ struct mpq_feed *mpq_feed;
+ int ret = 0;
+
+ if (!dvb_dmx_is_video_feed(feed)) {
+ MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n",
+ __func__,
+ feed->pes_type);
+ return -EINVAL;
+ }
+
+ if (lock_feed) {
+ mutex_lock(&mpq_demux->mutex);
+ } else if (!mutex_is_locked(&mpq_demux->mutex)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Mutex should have been locked\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+
+ sbuff = feed_data->video_buffer;
+ if (sbuff == NULL) {
+ if (lock_feed)
+ mutex_unlock(&mpq_demux->mutex);
+ MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer object is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((feed_data->video_buffer != NULL) &&
+ (!feed_data->fullness_wait_cancel) &&
+ (!mpq_dmx_check_decoder_fullness(sbuff, required_space))) {
+ DEFINE_WAIT(__wait);
+
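+		/*
+		 * Classic wait-queue loop: the demux mutex is dropped while
+		 * sleeping so that writers and cancellation can make progress.
+		 */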
+ for (;;) {
+ prepare_to_wait(&sbuff->raw_data.queue,
+ &__wait,
+ TASK_INTERRUPTIBLE);
+ if (!feed_data->video_buffer ||
+ feed_data->fullness_wait_cancel ||
+ mpq_dmx_check_decoder_fullness(sbuff,
+ required_space))
+ break;
+
+ if (!signal_pending(current)) {
+ mutex_unlock(&mpq_demux->mutex);
+ schedule();
+ mutex_lock(&mpq_demux->mutex);
+ continue;
+ }
+
+ ret = -ERESTARTSYS;
+ break;
+ }
+ finish_wait(&sbuff->raw_data.queue, &__wait);
+ }
+
+ if (ret < 0) {
+ if (lock_feed)
+ mutex_unlock(&mpq_demux->mutex);
+ return ret;
+ }
+
+ if ((feed_data->fullness_wait_cancel) ||
+ (feed_data->video_buffer == NULL)) {
+ if (lock_feed)
+ mutex_unlock(&mpq_demux->mutex);
+ return -EINVAL;
+ }
+
+ if (lock_feed)
+ mutex_unlock(&mpq_demux->mutex);
+ return 0;
+}
+
+int mpq_dmx_decoder_fullness_wait(
+ struct dvb_demux_feed *feed,
+ size_t required_space)
+{
+ return mpq_dmx_decoder_fullness_check(feed, required_space, 1);
+}
+
+int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed)
+{
+ if (dvb_dmx_is_video_feed(feed)) {
+ struct mpq_feed *mpq_feed;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_ringbuffer *video_buff;
+
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+
+ feed_data->fullness_wait_cancel = 1;
+
+ spin_lock(&feed_data->video_buffer_lock);
+ if (feed_data->video_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: video_buffer released\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ video_buff = &feed_data->video_buffer->raw_data;
+ wake_up_all(&video_buff->queue);
+ spin_unlock(&feed_data->video_buffer_lock);
+
+ return 0;
+ }
+
+ /* else */
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid feed type %d\n",
+ __func__,
+ feed->pes_type);
+
+ return -EINVAL;
+}
+
+int mpq_dmx_parse_mandatory_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail)
+{
+ int left_size, copy_len;
+
+ if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) {
+ left_size =
+ PES_MANDATORY_FIELDS_LEN -
+ feed_data->pes_header_offset;
+
+ copy_len = (left_size > *bytes_avail) ?
+ *bytes_avail :
+ left_size;
+
+ memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+ (buf + *ts_payload_offset),
+ copy_len);
+
+ feed_data->pes_header_offset += copy_len;
+
+ if (left_size > *bytes_avail)
+ return -EINVAL;
+
+ /* else - we have beginning of PES header */
+ *bytes_avail -= left_size;
+ *ts_payload_offset += left_size;
+
+ /* Make sure the PES packet is valid */
+ if (mpq_dmx_is_valid_video_pes(pes_header) < 0) {
+ /*
+ * Since the new PES header parsing
+ * failed, reset pusi_seen to drop all
+ * data until next PUSI
+ */
+ feed->pusi_seen = 0;
+ feed_data->pes_header_offset = 0;
+
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid packet\n",
+ __func__);
+
+ return -EINVAL;
+ }
+
+ feed_data->pes_header_left_bytes =
+ pes_header->pes_header_data_length;
+ }
+
+ return 0;
+}
+
+static inline void mpq_dmx_get_pts_dts(struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header)
+{
+ struct dmx_pts_dts_info *info = &(feed_data->new_pts_dts_info);
+
+ /* Get PTS/DTS information from PES header */
+
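+	/* The 33-bit PTS/DTS values are split over five PES header fields; reassemble them here */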
+ if ((pes_header->pts_dts_flag == 2) ||
+ (pes_header->pts_dts_flag == 3)) {
+ info->pts_exist = 1;
+
+ info->pts =
+ ((u64)pes_header->pts_1 << 30) |
+ ((u64)pes_header->pts_2 << 22) |
+ ((u64)pes_header->pts_3 << 15) |
+ ((u64)pes_header->pts_4 << 7) |
+ (u64)pes_header->pts_5;
+ } else {
+ info->pts_exist = 0;
+ info->pts = 0;
+ }
+
+ if (pes_header->pts_dts_flag == 3) {
+ info->dts_exist = 1;
+
+ info->dts =
+ ((u64)pes_header->dts_1 << 30) |
+ ((u64)pes_header->dts_2 << 22) |
+ ((u64)pes_header->dts_3 << 15) |
+ ((u64)pes_header->dts_4 << 7) |
+ (u64)pes_header->dts_5;
+ } else {
+ info->dts_exist = 0;
+ info->dts = 0;
+ }
+
+ feed_data->new_info_exists = 1;
+}
+
+int mpq_dmx_parse_remaining_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail)
+{
+ int left_size, copy_len;
+
+ /* Remaining header bytes that need to be processed? */
+ if (!feed_data->pes_header_left_bytes)
+ return 0;
+
+	/* Did we capture the PTS value (if it exists)? */
+ if ((*bytes_avail != 0) &&
+ (feed_data->pes_header_offset <
+ (PES_MANDATORY_FIELDS_LEN+5)) &&
+ ((pes_header->pts_dts_flag == 2) ||
+ (pes_header->pts_dts_flag == 3))) {
+
+ /* 5 more bytes should be there */
+ left_size =
+ PES_MANDATORY_FIELDS_LEN + 5 -
+ feed_data->pes_header_offset;
+
+ copy_len = (left_size > *bytes_avail) ?
+ *bytes_avail :
+ left_size;
+
+ memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+ (buf + *ts_payload_offset),
+ copy_len);
+
+ feed_data->pes_header_offset += copy_len;
+ feed_data->pes_header_left_bytes -= copy_len;
+
+ if (left_size > *bytes_avail)
+ return -EINVAL;
+
+ /* else - we have the PTS */
+ *bytes_avail -= copy_len;
+ *ts_payload_offset += copy_len;
+ }
+
+	/* Did we capture the DTS value (if it exists)? */
+ if ((*bytes_avail != 0) &&
+ (feed_data->pes_header_offset <
+ (PES_MANDATORY_FIELDS_LEN+10)) &&
+ (pes_header->pts_dts_flag == 3)) {
+
+ /* 5 more bytes should be there */
+ left_size =
+ PES_MANDATORY_FIELDS_LEN + 10 -
+ feed_data->pes_header_offset;
+
+ copy_len = (left_size > *bytes_avail) ?
+ *bytes_avail :
+ left_size;
+
+ memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+ (buf + *ts_payload_offset),
+ copy_len);
+
+ feed_data->pes_header_offset += copy_len;
+ feed_data->pes_header_left_bytes -= copy_len;
+
+ if (left_size > *bytes_avail)
+ return -EINVAL;
+
+ /* else - we have the DTS */
+ *bytes_avail -= copy_len;
+ *ts_payload_offset += copy_len;
+ }
+
+ /* Any more header bytes?! */
+ if (feed_data->pes_header_left_bytes >= *bytes_avail) {
+ feed_data->pes_header_left_bytes -= *bytes_avail;
+ return -EINVAL;
+ }
+
+ /* get PTS/DTS information from PES header to be written later */
+ mpq_dmx_get_pts_dts(feed_data, pes_header);
+
+ /* Got PES header, process payload */
+ *bytes_avail -= feed_data->pes_header_left_bytes;
+ *ts_payload_offset += feed_data->pes_header_left_bytes;
+ feed_data->pes_header_left_bytes = 0;
+
+ return 0;
+}
+
+static void mpq_dmx_check_continuity(struct mpq_video_feed_info *feed_data,
+ int current_continuity,
+ int discontinuity_indicator)
+{
+ const int max_continuity = 0x0F; /* 4 bits in the TS packet header */
+
+ /* sanity check */
+ if (unlikely((current_continuity < 0) ||
+ (current_continuity > max_continuity))) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: received invalid continuity counter value %d\n",
+ __func__, current_continuity);
+ return;
+ }
+
+ /* reset last continuity */
+ if ((feed_data->last_continuity == -1) ||
+ (discontinuity_indicator)) {
+ feed_data->last_continuity = current_continuity;
+ return;
+ }
+
+ /* check for continuity errors */
+ if (current_continuity !=
+ ((feed_data->last_continuity + 1) & max_continuity))
+ feed_data->continuity_errs++;
+
+ /* save for next time */
+ feed_data->last_continuity = current_continuity;
+}
+
+static inline void mpq_dmx_prepare_es_event_data(
+ struct mpq_streambuffer_packet_header *packet,
+ struct mpq_adapter_video_meta_data *meta_data,
+ struct mpq_video_feed_info *feed_data,
+ struct mpq_streambuffer *stream_buffer,
+ struct dmx_data_ready *data,
+ int cookie)
+{
+ struct dmx_pts_dts_info *pts_dts;
+
+ if (meta_data->packet_type == DMX_PES_PACKET) {
+ pts_dts = &meta_data->info.pes.pts_dts_info;
+ data->buf.stc = meta_data->info.pes.stc;
+ } else {
+ pts_dts = &meta_data->info.framing.pts_dts_info;
+ data->buf.stc = meta_data->info.framing.stc;
+ }
+
+ data->data_length = 0;
+ data->buf.handle = packet->raw_data_handle;
+ data->buf.cookie = cookie;
+ data->buf.offset = packet->raw_data_offset;
+ data->buf.len = packet->raw_data_len;
+ data->buf.pts_exists = pts_dts->pts_exist;
+ data->buf.pts = pts_dts->pts;
+ data->buf.dts_exists = pts_dts->dts_exist;
+ data->buf.dts = pts_dts->dts;
+ data->buf.tei_counter = feed_data->tei_errs;
+ data->buf.cont_err_counter = feed_data->continuity_errs;
+ data->buf.ts_packets_num = feed_data->ts_packets_num;
+ data->buf.ts_dropped_bytes = feed_data->ts_dropped_bytes;
+ data->status = DMX_OK_DECODER_BUF;
+
+ MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, data->buf.cookie);
+
+ /* reset counters */
+ feed_data->ts_packets_num = 0;
+ feed_data->ts_dropped_bytes = 0;
+ feed_data->tei_errs = 0;
+ feed_data->continuity_errs = 0;
+}
+
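+/*
+ * Fills an sdmx buffer descriptor (physical base address and size) for the
+ * DVR input ring buffer.
+ */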
+static int mpq_sdmx_dvr_buffer_desc(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *buf_desc)
+{
+ struct dvb_ringbuffer *rbuf = (struct dvb_ringbuffer *)
+ mpq_demux->demux.dmx.dvr_input.ringbuff;
+ struct ion_handle *ion_handle =
+ mpq_demux->demux.dmx.dvr_input.priv_handle;
+ ion_phys_addr_t phys_addr;
+ size_t len;
+ int ret;
+
+ ret = ion_phys(mpq_demux->ion_client, ion_handle, &phys_addr, &len);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to obtain physical address of input buffer. ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ buf_desc->base_addr = (u64)phys_addr;
+ buf_desc->size = rbuf->size;
+
+ return 0;
+}
+
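+/* Notifies the feed's client that data was dropped due to buffer overflow */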
+static inline int mpq_dmx_notify_overflow(struct dvb_demux_feed *feed)
+{
+ struct dmx_data_ready data;
+
+ data.data_length = 0;
+ data.status = DMX_OVERRUN_ERROR;
+ return feed->data_ready_cb.ts(&feed->feed.ts, &data);
+}
+
+/**
+ * mpq_dmx_decoder_frame_closure - Helper function to handle closing the
+ * current pending frame upon reaching EOS.
+ *
+ * @mpq_demux - mpq demux instance
+ * @mpq_feed - mpq feed object
+ */
+static void mpq_dmx_decoder_frame_closure(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_adapter_video_meta_data meta_data;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dmx_data_ready data;
+ int cookie;
+
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers).
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return;
+ }
+
+ /* Report last pattern found */
+ if ((feed_data->pending_pattern_len) &&
+ mpq_dmx_is_video_frame(feed->video_codec,
+ feed_data->last_framing_match_type)) {
+ meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.framing.pts_dts_info));
+ mpq_dmx_save_pts_dts(feed_data);
+ packet.user_data_len =
+ sizeof(struct mpq_adapter_video_meta_data);
+ packet.raw_data_len = feed_data->pending_pattern_len;
+ packet.raw_data_offset = feed_data->frame_offset;
+ meta_data.info.framing.pattern_type =
+ feed_data->last_framing_match_type;
+ meta_data.info.framing.stc = feed_data->last_framing_match_stc;
+ meta_data.info.framing.continuity_error_counter =
+ feed_data->continuity_errs;
+ meta_data.info.framing.transport_error_indicator_counter =
+ feed_data->tei_errs;
+ meta_data.info.framing.ts_dropped_bytes =
+ feed_data->ts_dropped_bytes;
+ meta_data.info.framing.ts_packets_num =
+ feed_data->ts_packets_num;
+
+ mpq_streambuffer_get_buffer_handle(stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ /* Writing meta-data that includes the framing information */
+ cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (cookie >= 0) {
+ mpq_dmx_prepare_es_event_data(&packet, &meta_data,
+ feed_data, stream_buffer, &data, cookie);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, cookie);
+ }
+ }
+
+ spin_unlock(&feed_data->video_buffer_lock);
+}
+
+/**
+ * mpq_dmx_decoder_pes_closure - Helper function to handle closing the current
+ * PES upon reaching EOS.
+ *
+ * @mpq_demux - mpq demux instance
+ * @mpq_feed - mpq feed object
+ */
+static void mpq_dmx_decoder_pes_closure(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_adapter_video_meta_data meta_data;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dmx_data_ready data;
+ int cookie;
+
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers).
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return;
+ }
+
+ /*
+ * Close previous PES.
+ * Push new packet to the meta-data buffer.
+ */
+ if ((feed->pusi_seen) && (feed_data->pes_header_left_bytes == 0)) {
+ packet.raw_data_len = feed->peslen;
+ mpq_streambuffer_get_buffer_handle(stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+ packet.raw_data_offset = feed_data->frame_offset;
+ packet.user_data_len =
+ sizeof(struct mpq_adapter_video_meta_data);
+
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.pes.pts_dts_info));
+
+ meta_data.packet_type = DMX_PES_PACKET;
+ meta_data.info.pes.stc = feed_data->prev_stc;
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (cookie >= 0) {
+ /* Save write offset where new PES will begin */
+ mpq_streambuffer_get_data_rw_offset(stream_buffer, NULL,
+ &feed_data->frame_offset);
+ mpq_dmx_prepare_es_event_data(&packet, &meta_data,
+ feed_data, stream_buffer, &data, cookie);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, cookie);
+ }
+ }
+ /* Reset PES info */
+ feed->peslen = 0;
+ feed_data->pes_header_offset = 0;
+ feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+
+ spin_unlock(&feed_data->video_buffer_lock);
+}
+
+static int mpq_dmx_process_video_packet_framing(
+ struct dvb_demux_feed *feed,
+ const u8 *buf,
+ u64 curr_stc)
+{
+ int bytes_avail;
+ u32 ts_payload_offset;
+ struct mpq_video_feed_info *feed_data;
+ const struct ts_packet_header *ts_header;
+ struct mpq_streambuffer *stream_buffer;
+ struct pes_packet_header *pes_header;
+ struct mpq_demux *mpq_demux;
+ struct mpq_feed *mpq_feed;
+
+ struct dvb_dmx_video_patterns_results framing_res;
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_adapter_video_meta_data meta_data;
+ int bytes_written = 0;
+ int bytes_to_write = 0;
+ int found_patterns = 0;
+ int first_pattern = 0;
+ int i;
+ int is_video_frame = 0;
+ int pending_data_len = 0;
+ int ret = 0;
+ int discontinuity_indicator = 0;
+ struct dmx_data_ready data;
+
+ mpq_demux = feed->demux->priv;
+
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers). Mutex on the video-feed cannot be held here
+ * since SW demux holds a spin-lock while calling write_to_decoder
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: video_buffer released\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ ts_header = (const struct ts_packet_header *)buf;
+
+ pes_header = &feed_data->pes_header;
+
+ /* Make sure this TS packet has a payload and not scrambled */
+ if ((ts_header->sync_byte != 0x47) ||
+ (ts_header->adaptation_field_control == 0) ||
+ (ts_header->adaptation_field_control == 2) ||
+ (ts_header->transport_scrambling_control)) {
+ /* continue to next packet */
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+ if (feed->pusi_seen) { /* Did we see PUSI before? */
+			/*
+			 * Double check that we are not in the middle of
+			 * parsing the previous PES header.
+			 */
+ if (feed_data->pes_header_left_bytes != 0)
+ MPQ_DVB_ERR_PRINT(
+ "%s: received PUSI while handling PES header of previous PES\n",
+ __func__);
+
+ feed->peslen = 0;
+ feed_data->pes_header_offset = 0;
+ feed_data->pes_header_left_bytes =
+ PES_MANDATORY_FIELDS_LEN;
+ } else {
+ feed->pusi_seen = 1;
+ }
+ }
+
+ /*
+ * Parse PES data only if PUSI was encountered,
+ * otherwise the data is dropped
+ */
+ if (!feed->pusi_seen) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0; /* drop and wait for next packets */
+ }
+
+ ts_payload_offset = sizeof(struct ts_packet_header);
+
+ /*
+ * Skip adaptation field if exists.
+ * Save discontinuity indicator if exists.
+ */
+ if (ts_header->adaptation_field_control == 3) {
+ const struct ts_adaptation_field *adaptation_field =
+ (const struct ts_adaptation_field *)(buf +
+ ts_payload_offset);
+
+ discontinuity_indicator =
+ adaptation_field->discontinuity_indicator;
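+		/* Skip the adaptation field: its length byte plus the field itself */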
+ ts_payload_offset += buf[ts_payload_offset] + 1;
+ }
+
+ bytes_avail = TS_PACKET_SIZE - ts_payload_offset;
+
+ /* Get the mandatory fields of the video PES header */
+ if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ /*
+ * If we reached here,
+ * then we are now at the PES payload data
+ */
+ if (bytes_avail == 0) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+	/*
+	 * The decoder requires the demux to do framing,
+	 * so search for the patterns now.
+	 */
+ found_patterns = dvb_dmx_video_pattern_search(
+ feed_data->patterns,
+ feed_data->patterns_num,
+ (buf + ts_payload_offset),
+ bytes_avail,
+ &feed_data->prefix_size,
+ &framing_res);
+
+ if (!feed_data->found_sequence_header_pattern) {
+ for (i = 0; i < found_patterns; i++) {
+ if ((framing_res.info[i].type ==
+ DMX_IDX_MPEG_SEQ_HEADER) ||
+ (framing_res.info[i].type ==
+ DMX_IDX_H264_SPS) ||
+ (framing_res.info[i].type ==
+ DMX_IDX_VC1_SEQ_HEADER)) {
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Found Sequence Pattern, buf %p, i = %d, offset = %d, type = %lld\n",
+ __func__, buf, i,
+ framing_res.info[i].offset,
+ framing_res.info[i].type);
+
+ first_pattern = i;
+ feed_data->found_sequence_header_pattern = 1;
+ ts_payload_offset +=
+ framing_res.info[i].offset;
+ bytes_avail -= framing_res.info[i].offset;
+
+ if (framing_res.info[i].used_prefix_size) {
+ feed_data->first_prefix_size =
+ framing_res.info[i].
+ used_prefix_size;
+ }
+ break;
+ }
+ }
+ }
+
+	/*
+	 * If the decoder requires the demux to do framing,
+	 * pass data to the decoder only after a sequence header
+	 * or its equivalent is found. Otherwise the data is dropped.
+	 */
+ if (!feed_data->found_sequence_header_pattern) {
+ feed_data->prev_stc = curr_stc;
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ /* Update error counters based on TS header */
+ feed_data->ts_packets_num++;
+ feed_data->tei_errs += ts_header->transport_error_indicator;
+ mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors +=
+ ts_header->transport_error_indicator;
+ mpq_dmx_check_continuity(feed_data,
+ ts_header->continuity_counter,
+ discontinuity_indicator);
+ mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors +=
+ feed_data->continuity_errs;
+
+ /* Need to back-up the PTS information of the very first frame */
+ if (feed_data->first_pts_dts_copy) {
+ for (i = first_pattern; i < found_patterns; i++) {
+ is_video_frame = mpq_dmx_is_video_frame(
+ feed->video_codec,
+ framing_res.info[i].type);
+
+ if (is_video_frame == 1) {
+ mpq_dmx_save_pts_dts(feed_data);
+ feed_data->first_pts_dts_copy = 0;
+ break;
+ }
+ }
+ }
+
+ /*
+ * write prefix used to find first Sequence pattern, if needed.
+ * feed_data->patterns[0]->pattern always contains the sequence
+ * header pattern.
+ */
+ if (feed_data->first_prefix_size) {
+ ret = mpq_streambuffer_data_write(stream_buffer,
+ feed_data->patterns[0]->pattern,
+ feed_data->first_prefix_size);
+ if (ret < 0) {
+ mpq_demux->decoder_stat
+ [feed_data->stream_interface].drop_count +=
+ feed_data->first_prefix_size;
+ feed_data->ts_dropped_bytes +=
+ feed_data->first_prefix_size;
+ MPQ_DVB_DBG_PRINT("%s: could not write prefix\n",
+ __func__);
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ MPQ_DVB_DBG_PRINT(
+ "%s: Writing pattern prefix of size %d\n",
+ __func__, feed_data->first_prefix_size);
+ /*
+ * update the length of the data we report
+ * to include the size of the prefix that was used.
+ */
+ feed_data->pending_pattern_len +=
+ feed_data->first_prefix_size;
+ }
+ }
+
+ feed->peslen += bytes_avail;
+ pending_data_len += bytes_avail;
+
+ meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
+ packet.user_data_len = sizeof(struct mpq_adapter_video_meta_data);
+
+ /*
+ * Go over all the patterns that were found in this packet.
+ * For each pattern found, write the relevant data to the data
+ * buffer, then write the respective meta-data.
+ * Each pattern can only be reported when the next pattern is found
+ * (in order to know the data length).
+ * There are three possible cases for each pattern:
+ * 1. This is the very first pattern we found in any TS packet in this
+ * feed.
+ * 2. This is the first pattern found in this TS packet, but we've
+ * already found patterns in previous packets.
+ * 3. This is not the first pattern in this packet, i.e., we've
+ * already found patterns in this TS packet.
+ */
+ for (i = first_pattern; i < found_patterns; i++) {
+ if (i == first_pattern) {
+ /*
+ * The way to identify the very first pattern:
+ * 1. It's the first pattern found in this packet.
+ * 2. The pending_pattern_len, which indicates the
+ * data length of the previous pattern that has
+ * not yet been reported, is usually 0. However,
+ * it may be larger than 0 if a prefix was used
+ * to find this pattern (i.e., the pattern was
+ * split over two TS packets). In that case,
+ * pending_pattern_len equals first_prefix_size.
+ * first_prefix_size is set to 0 later in this
+ * function.
+ */
+ if (feed_data->first_prefix_size ==
+ feed_data->pending_pattern_len) {
+ /*
+ * This is the very first pattern, so no
+ * previous pending frame data exists.
+ * Update frame info and skip to the
+ * next frame.
+ */
+ feed_data->last_framing_match_type =
+ framing_res.info[i].type;
+ feed_data->last_pattern_offset =
+ framing_res.info[i].offset;
+ if (framing_res.info[i].used_prefix_size)
+ feed_data->last_framing_match_stc =
+ feed_data->prev_stc;
+ else
+ feed_data->last_framing_match_stc =
+ curr_stc;
+ continue;
+ }
+ /*
+ * This is the first pattern in this
+ * packet and previous frame from
+ * previous packet is pending for report
+ */
+ bytes_to_write = framing_res.info[i].offset;
+ } else {
+ /* Previous pending frame is in the same packet */
+ bytes_to_write =
+ framing_res.info[i].offset -
+ feed_data->last_pattern_offset;
+ }
+
+ ret = mpq_streambuffer_data_write(
+ stream_buffer,
+ (buf + ts_payload_offset + bytes_written),
+ bytes_to_write);
+ if (ret < 0) {
+ mpq_demux->decoder_stat
+ [feed_data->stream_interface].drop_count +=
+ bytes_to_write;
+ feed_data->ts_dropped_bytes += bytes_to_write;
+ MPQ_DVB_DBG_PRINT(
+ "%s: Couldn't write %d bytes to data buffer, ret=%d\n",
+ __func__, bytes_to_write, ret);
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ bytes_written += bytes_to_write;
+ pending_data_len -= bytes_to_write;
+ feed_data->pending_pattern_len += bytes_to_write;
+ }
+
+ is_video_frame = mpq_dmx_is_video_frame(
+ feed->video_codec,
+ feed_data->last_framing_match_type);
+ if (is_video_frame == 1) {
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.framing.pts_dts_info));
+ mpq_dmx_save_pts_dts(feed_data);
+
+ packet.raw_data_len = feed_data->pending_pattern_len -
+ framing_res.info[i].used_prefix_size;
+ packet.raw_data_offset = feed_data->frame_offset;
+ meta_data.info.framing.pattern_type =
+ feed_data->last_framing_match_type;
+ meta_data.info.framing.stc =
+ feed_data->last_framing_match_stc;
+ meta_data.info.framing.continuity_error_counter =
+ feed_data->continuity_errs;
+ meta_data.info.framing.
+ transport_error_indicator_counter =
+ feed_data->tei_errs;
+ meta_data.info.framing.ts_dropped_bytes =
+ feed_data->ts_dropped_bytes;
+ meta_data.info.framing.ts_packets_num =
+ feed_data->ts_packets_num;
+
+ mpq_streambuffer_get_buffer_handle(
+ stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ /*
+ * Write meta-data that includes the framing information
+ */
+ ret = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, ret);
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ mpq_dmx_prepare_es_event_data(
+ &packet, &meta_data, feed_data,
+ stream_buffer, &data, ret);
+
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+
+ if (feed_data->video_buffer->mode ==
+ MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+ feed_data->frame_offset = 0;
+ else
+ mpq_streambuffer_get_data_rw_offset(
+ feed_data->video_buffer,
+ NULL,
+ &feed_data->frame_offset);
+ }
+
+			/*
+			 * In linear buffer mode, writing the packet switches
+			 * over to a new linear buffer for the new frame. In
+			 * that case, the prefix of the new frame, if any,
+			 * must be re-written into the new buffer.
+			 */
+ if ((MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR ==
+ feed_data->video_buffer->mode) &&
+ framing_res.info[i].used_prefix_size) {
+ ret = mpq_streambuffer_data_write(stream_buffer,
+ feed_data->prev_pattern +
+ DVB_DMX_MAX_PATTERN_LEN -
+ framing_res.info[i].used_prefix_size,
+ framing_res.info[i].used_prefix_size);
+
+ if (ret < 0) {
+ feed_data->pending_pattern_len = 0;
+ mpq_demux->decoder_stat
+ [feed_data->stream_interface].
+ drop_count += bytes_avail;
+ feed_data->ts_dropped_bytes +=
+ framing_res.info[i].used_prefix_size;
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ feed_data->pending_pattern_len =
+ framing_res.info[i].used_prefix_size;
+ }
+ } else {
+ s32 offset = (s32)feed_data->frame_offset;
+ u32 buff_size =
+ feed_data->video_buffer->buffers[0].size;
+
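+				/*
+				 * Move the frame offset back by the prefix
+				 * size; in ring mode, wrap around the buffer
+				 * if it goes negative.
+				 */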
+ offset -= framing_res.info[i].used_prefix_size;
+ offset += (offset < 0) ? buff_size : 0;
+ feed_data->pending_pattern_len =
+ framing_res.info[i].used_prefix_size;
+
+ if (MPQ_STREAMBUFFER_BUFFER_MODE_RING ==
+ feed_data->video_buffer->mode) {
+ feed_data->frame_offset = (u32)offset;
+ }
+ }
+ }
+
+ /* save the last match for next time */
+ feed_data->last_framing_match_type =
+ framing_res.info[i].type;
+ feed_data->last_pattern_offset =
+ framing_res.info[i].offset;
+ if (framing_res.info[i].used_prefix_size)
+ feed_data->last_framing_match_stc = feed_data->prev_stc;
+ else
+ feed_data->last_framing_match_stc = curr_stc;
+ }
+
+ feed_data->prev_stc = curr_stc;
+ feed_data->first_prefix_size = 0;
+
+ /*
+ * Save the trailing of the TS packet as we might have a pattern
+ * split that we need to re-use when closing the next
+ * video linear buffer.
+ */
+ if (MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR ==
+ feed_data->video_buffer->mode)
+ memcpy(feed_data->prev_pattern,
+ buf + TS_PACKET_SIZE - DVB_DMX_MAX_PATTERN_LEN,
+ DVB_DMX_MAX_PATTERN_LEN);
+
+ if (pending_data_len) {
+ ret = mpq_streambuffer_data_write(
+ stream_buffer,
+ (buf + ts_payload_offset + bytes_written),
+ pending_data_len);
+
+ if (ret < 0) {
+ mpq_demux->decoder_stat
+ [feed_data->stream_interface].drop_count +=
+ pending_data_len;
+ feed_data->ts_dropped_bytes += pending_data_len;
+ MPQ_DVB_DBG_PRINT(
+ "%s: Couldn't write %d pending bytes to data buffer, ret=%d\n",
+ __func__, pending_data_len, ret);
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ feed_data->pending_pattern_len += pending_data_len;
+ }
+ }
+
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+}
+
+static int mpq_dmx_process_video_packet_no_framing(
+ struct dvb_demux_feed *feed,
+ const u8 *buf,
+ u64 curr_stc)
+{
+ int bytes_avail;
+ u32 ts_payload_offset;
+ struct mpq_video_feed_info *feed_data;
+ const struct ts_packet_header *ts_header;
+ struct mpq_streambuffer *stream_buffer;
+ struct pes_packet_header *pes_header;
+ struct mpq_demux *mpq_demux;
+ struct mpq_feed *mpq_feed;
+ int discontinuity_indicator = 0;
+ struct dmx_data_ready data;
+ int cookie;
+ int ret;
+
+ mpq_demux = feed->demux->priv;
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers). Mutex on the video-feed cannot be held here
+ * since SW demux holds a spin-lock while calling write_to_decoder
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: video_buffer released\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ ts_header = (const struct ts_packet_header *)buf;
+
+ pes_header = &feed_data->pes_header;
+
+ /* Make sure this TS packet has a payload and not scrambled */
+ if ((ts_header->sync_byte != 0x47) ||
+ (ts_header->adaptation_field_control == 0) ||
+ (ts_header->adaptation_field_control == 2) ||
+ (ts_header->transport_scrambling_control)) {
+ /* continue to next packet */
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+ if (feed->pusi_seen) { /* Did we see PUSI before? */
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_adapter_video_meta_data meta_data;
+
+			/*
+			 * Close the previous PES and push a new packet to
+			 * the meta-data buffer. Double check that we are not
+			 * in the middle of parsing the previous PES header.
+			 */
+
+ if (feed_data->pes_header_left_bytes == 0) {
+ packet.raw_data_len = feed->peslen;
+ mpq_streambuffer_get_buffer_handle(
+ stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+ packet.raw_data_offset =
+ feed_data->frame_offset;
+ packet.user_data_len =
+ sizeof(struct
+ mpq_adapter_video_meta_data);
+
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.pes.pts_dts_info));
+
+ /* Mark that we detected start of new PES */
+ feed_data->first_pts_dts_copy = 1;
+
+ meta_data.packet_type = DMX_PES_PACKET;
+ meta_data.info.pes.stc = feed_data->prev_stc;
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ cookie = mpq_streambuffer_pkt_write(
+ stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (cookie < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, cookie);
+ } else {
+ /*
+ * Save write offset where new PES
+ * will begin
+ */
+ mpq_streambuffer_get_data_rw_offset(
+ stream_buffer,
+ NULL,
+ &feed_data->frame_offset);
+
+ mpq_dmx_prepare_es_event_data(
+ &packet, &meta_data,
+ feed_data,
+ stream_buffer, &data, cookie);
+
+ feed->data_ready_cb.ts(&feed->feed.ts,
+ &data);
+ }
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: received PUSI while handling PES header of previous PES\n",
+ __func__);
+ }
+
+ /* Reset PES info */
+ feed->peslen = 0;
+ feed_data->pes_header_offset = 0;
+ feed_data->pes_header_left_bytes =
+ PES_MANDATORY_FIELDS_LEN;
+ } else {
+ feed->pusi_seen = 1;
+ }
+
+ feed_data->prev_stc = curr_stc;
+ }
+
+	/*
+	 * Parse PES data only if a PUSI was encountered;
+	 * otherwise the data is dropped
+	 */
+ if (!feed->pusi_seen) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0; /* drop and wait for next packets */
+ }
+
+ ts_payload_offset = sizeof(struct ts_packet_header);
+
+	/*
+	 * Skip the adaptation field if it exists.
+	 * Save the discontinuity indicator if present.
+	 */
+ if (ts_header->adaptation_field_control == 3) {
+ const struct ts_adaptation_field *adaptation_field =
+ (const struct ts_adaptation_field *)(buf +
+ ts_payload_offset);
+
+ discontinuity_indicator =
+ adaptation_field->discontinuity_indicator;
+ ts_payload_offset += buf[ts_payload_offset] + 1;
+ }
+
+ bytes_avail = TS_PACKET_SIZE - ts_payload_offset;
+
+ /* Get the mandatory fields of the video PES header */
+ if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+	/*
+	 * If we reached here, we are now at the PES payload data
+	 */
+ if (bytes_avail == 0) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+	/*
+	 * Back up the PTS/DTS information from the start
+	 * of the new PES
+	 */
+ if (feed_data->first_pts_dts_copy) {
+ mpq_dmx_save_pts_dts(feed_data);
+ feed_data->first_pts_dts_copy = 0;
+ }
+
+ /* Update error counters based on TS header */
+ feed_data->ts_packets_num++;
+ feed_data->tei_errs += ts_header->transport_error_indicator;
+ mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors +=
+ ts_header->transport_error_indicator;
+ mpq_dmx_check_continuity(feed_data,
+ ts_header->continuity_counter,
+ discontinuity_indicator);
+ mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors +=
+ feed_data->continuity_errs;
+
+ ret = mpq_streambuffer_data_write(stream_buffer, buf+ts_payload_offset,
+ bytes_avail);
+ if (ret < 0) {
+ mpq_demux->decoder_stat
+ [feed_data->stream_interface].drop_count += bytes_avail;
+ feed_data->ts_dropped_bytes += bytes_avail;
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ feed->peslen += bytes_avail;
+ }
+
+ spin_unlock(&feed_data->video_buffer_lock);
+
+ return 0;
+}
+
+int mpq_dmx_decoder_buffer_status(struct dvb_demux_feed *feed,
+ struct dmx_buffer_status *dmx_buffer_status)
+{
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct mpq_video_feed_info *feed_data;
+ struct mpq_streambuffer *video_buff;
+ struct mpq_feed *mpq_feed;
+
+ if (!dvb_dmx_is_video_feed(feed)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid feed type %d\n",
+ __func__,
+ feed->pes_type);
+ return -EINVAL;
+ }
+
+ mutex_lock(&mpq_demux->mutex);
+
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+ video_buff = feed_data->video_buffer;
+ if (!video_buff) {
+ mutex_unlock(&mpq_demux->mutex);
+ return -EINVAL;
+ }
+
+ dmx_buffer_status->error = video_buff->raw_data.error;
+
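+	/*
+	 * In linear buffer mode fullness is counted in whole decoder
+	 * buffers (pending buffers times buffer size); in ring buffer
+	 * mode it is counted in bytes.
+	 */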
+ if (video_buff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) {
+ dmx_buffer_status->fullness =
+ video_buff->buffers[0].size *
+ video_buff->pending_buffers_count;
+ dmx_buffer_status->free_bytes =
+ video_buff->buffers[0].size *
+ (video_buff->buffers_num -
+ video_buff->pending_buffers_count);
+ dmx_buffer_status->size =
+ video_buff->buffers[0].size *
+ video_buff->buffers_num;
+ } else {
+ dmx_buffer_status->fullness =
+ mpq_streambuffer_data_avail(video_buff);
+ dmx_buffer_status->free_bytes =
+ mpq_streambuffer_data_free(video_buff);
+ dmx_buffer_status->size = video_buff->buffers[0].size;
+ }
+
+ mpq_streambuffer_get_data_rw_offset(
+ video_buff,
+ &dmx_buffer_status->read_offset,
+ &dmx_buffer_status->write_offset);
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ return 0;
+}
+
+int mpq_dmx_process_video_packet(
+ struct dvb_demux_feed *feed,
+ const u8 *buf)
+{
+ u64 curr_stc;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
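+	/*
+	 * The HW appends a 24-bit timestamp to each TS packet (MSB last);
+	 * when playing from DVR without the 192-tail packet format no such
+	 * timestamp exists, so the STC is reported as 0.
+	 */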
+ if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+ (mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+ curr_stc = 0;
+ } else {
+ curr_stc = buf[STC_LOCATION_IDX + 2] << 16;
+ curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+ curr_stc += buf[STC_LOCATION_IDX];
+		curr_stc *= 256; /* convert from 105.47 kHz to 27 MHz */
+ }
+
+ if (!video_framing)
+ return mpq_dmx_process_video_packet_no_framing(feed, buf,
+ curr_stc);
+ else
+ return mpq_dmx_process_video_packet_framing(feed, buf,
+ curr_stc);
+}
+
+int mpq_dmx_extract_pcr_and_dci(const u8 *buf, u64 *pcr, int *dci)
+{
+ const struct ts_packet_header *ts_header;
+ const struct ts_adaptation_field *adaptation_field;
+
+ if (buf == NULL || pcr == NULL || dci == NULL)
+ return 0;
+
+ ts_header = (const struct ts_packet_header *)buf;
+
+	/* Make sure this TS packet has an adaptation field */
+ if ((ts_header->sync_byte != 0x47) ||
+ (ts_header->adaptation_field_control == 0) ||
+ (ts_header->adaptation_field_control == 1) ||
+ ts_header->transport_error_indicator)
+ return 0;
+
+ adaptation_field = (const struct ts_adaptation_field *)
+ (buf + sizeof(struct ts_packet_header));
+
+ if ((!adaptation_field->adaptation_field_length) ||
+ (!adaptation_field->PCR_flag))
+ return 0; /* 0 adaptation field or no PCR */
+
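+	/*
+	 * Reassemble the 33-bit PCR base (90 kHz units) from its five
+	 * split bitfields, then fold in the 9-bit extension:
+	 * PCR = base * 300 + ext gives the clock in 27 MHz units
+	 * (ISO/IEC 13818-1).
+	 */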
+ *pcr = ((u64)adaptation_field->program_clock_reference_base_1) << 25;
+ *pcr += ((u64)adaptation_field->program_clock_reference_base_2) << 17;
+ *pcr += ((u64)adaptation_field->program_clock_reference_base_3) << 9;
+ *pcr += ((u64)adaptation_field->program_clock_reference_base_4) << 1;
+ *pcr += adaptation_field->program_clock_reference_base_5;
+ *pcr *= 300;
+ *pcr += (((u64)adaptation_field->program_clock_reference_ext_1) << 8) +
+ adaptation_field->program_clock_reference_ext_2;
+
+ *dci = adaptation_field->discontinuity_indicator;
+
+ return 1;
+}
+
+int mpq_dmx_process_pcr_packet(
+ struct dvb_demux_feed *feed,
+ const u8 *buf)
+{
+ u64 stc;
+ struct dmx_data_ready data;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ if (mpq_dmx_extract_pcr_and_dci(buf, &data.pcr.pcr,
+ &data.pcr.disc_indicator_set) == 0)
+ return 0;
+
+	/*
+	 * When we play from the front-end, we configure the HW
+	 * to output the extra timestamp. When playing from DVR,
+	 * there is no timestamp unless the packet format is
+	 * 192-tail.
+	 */
+ if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+ (mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+ stc = 0;
+ } else {
+ stc = buf[STC_LOCATION_IDX + 2] << 16;
+ stc += buf[STC_LOCATION_IDX + 1] << 8;
+ stc += buf[STC_LOCATION_IDX];
+		stc *= 256; /* convert from 105.47 kHz to 27 MHz */
+ }
+
+ data.data_length = 0;
+ data.pcr.stc = stc;
+ data.status = DMX_OK_PCR;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+
+ return 0;
+}
+
+int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed)
+{
+ struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_streambuffer_packet_header oob_packet;
+ struct mpq_adapter_video_meta_data oob_meta_data;
+ int ret;
+
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ memset(&oob_packet, 0, sizeof(oob_packet));
+ oob_packet.user_data_len = sizeof(oob_meta_data);
+ oob_meta_data.packet_type = DMX_EOS_PACKET;
+
+ ret = mpq_streambuffer_pkt_write(stream_buffer, &oob_packet,
+ (u8 *)&oob_meta_data);
+
+ spin_unlock(&feed_data->video_buffer_lock);
+ return (ret < 0) ? ret : 0;
+}
+
+void mpq_dmx_convert_tts(struct dvb_demux_feed *feed,
+ const u8 timestamp[TIMESTAMP_LEN],
+ u64 *timestampIn27Mhz)
+{
+ if (unlikely(!timestampIn27Mhz))
+ return;
+
+ *timestampIn27Mhz = timestamp[2] << 16;
+ *timestampIn27Mhz += timestamp[1] << 8;
+ *timestampIn27Mhz += timestamp[0];
+	*timestampIn27Mhz *= 256; /* convert from 105.47 kHz to 27 MHz */
+}
+
+int mpq_sdmx_open_session(struct mpq_demux *mpq_demux)
+{
+ enum sdmx_status ret = SDMX_SUCCESS;
+ enum sdmx_proc_mode proc_mode;
+ enum sdmx_pkt_format pkt_format;
+
+ MPQ_DVB_DBG_PRINT("%s: ref_count %d\n",
+ __func__, mpq_demux->sdmx_session_ref_count);
+
+ if (mpq_demux->sdmx_session_ref_count) {
+ /* session is already open */
+ mpq_demux->sdmx_session_ref_count++;
+ return ret;
+ }
+
+ proc_mode = (mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) ?
+ SDMX_PUSH_MODE : SDMX_PULL_MODE;
+ MPQ_DVB_DBG_PRINT(
+ "%s: Proc mode = %s\n",
+ __func__, SDMX_PUSH_MODE == proc_mode ? "Push" : "Pull");
+
+ if (mpq_demux->source < DMX_SOURCE_DVR0) {
+ pkt_format = SDMX_192_BYTE_PKT;
+ } else if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_188) {
+ pkt_format = SDMX_188_BYTE_PKT;
+ } else if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_192_TAIL) {
+ pkt_format = SDMX_192_BYTE_PKT;
+ } else {
+ MPQ_DVB_ERR_PRINT("%s: invalid tsp format\n", __func__);
+ return -EINVAL;
+ }
+
+ MPQ_DVB_DBG_PRINT("%s: (%s) source, packet format: %d\n",
+ __func__,
+ (mpq_demux->source < DMX_SOURCE_DVR0) ?
+ "frontend" : "DVR", pkt_format);
+
+ /* open session and set configuration */
+ ret = sdmx_open_session(&mpq_demux->sdmx_session_handle);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT("%s: Could not open session. ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ MPQ_DVB_DBG_PRINT("%s: new session_handle = %d\n",
+ __func__, mpq_demux->sdmx_session_handle);
+
+ ret = sdmx_set_session_cfg(mpq_demux->sdmx_session_handle,
+ proc_mode,
+ SDMX_PKT_ENC_MODE,
+ pkt_format,
+ mpq_sdmx_scramble_odd,
+ mpq_sdmx_scramble_even);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT("%s: Could not set session config. ret=%d\n",
+ __func__, ret);
+ sdmx_close_session(mpq_demux->sdmx_session_handle);
+ mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+ return -EINVAL;
+ }
+
+ ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle,
+ mpq_demux->sdmx_log_level);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT("%s: Could not set log level. ret=%d\n",
+ __func__, ret);
+		/* Don't fail session open if only log level setting failed */
+ ret = 0;
+ }
+
+ mpq_demux->sdmx_process_count = 0;
+ mpq_demux->sdmx_process_time_sum = 0;
+ mpq_demux->sdmx_process_time_average = 0;
+ mpq_demux->sdmx_process_time_max = 0;
+ mpq_demux->sdmx_process_packets_sum = 0;
+ mpq_demux->sdmx_process_packets_average = 0;
+ mpq_demux->sdmx_process_packets_min = 0;
+
+ mpq_demux->sdmx_session_ref_count++;
+ return ret;
+}
+
+int mpq_sdmx_close_session(struct mpq_demux *mpq_demux)
+{
+ int ret = 0;
+ enum sdmx_status status;
+
+ MPQ_DVB_DBG_PRINT("%s: session_handle = %d, ref_count %d\n",
+ __func__,
+ mpq_demux->sdmx_session_handle,
+ mpq_demux->sdmx_session_ref_count);
+
+ if (!mpq_demux->sdmx_session_ref_count)
+ return -EINVAL;
+
+ if (mpq_demux->sdmx_session_ref_count == 1) {
+ status = sdmx_close_session(mpq_demux->sdmx_session_handle);
+ if (status != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT("%s: sdmx_close_session failed %d\n",
+ __func__, status);
+ }
+ mpq_demux->sdmx_eos = 0;
+ mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+ }
+
+ mpq_demux->sdmx_session_ref_count--;
+
+ return ret;
+}
+
+static int mpq_sdmx_get_buffer_chunks(struct mpq_demux *mpq_demux,
+ struct ion_handle *buff_handle,
+ u32 actual_buff_size,
+ struct sdmx_buff_descr buff_chunks[SDMX_MAX_PHYSICAL_CHUNKS])
+{
+ int i;
+ struct sg_table *sg_ptr;
+ struct scatterlist *sg;
+ u32 chunk_size;
+ int ret;
+
+ memset(buff_chunks, 0,
+ sizeof(struct sdmx_buff_descr) * SDMX_MAX_PHYSICAL_CHUNKS);
+
+ sg_ptr = ion_sg_table(mpq_demux->ion_client, buff_handle);
+ if (IS_ERR_OR_NULL(sg_ptr)) {
+ ret = PTR_ERR(sg_ptr);
+ MPQ_DVB_ERR_PRINT("%s: ion_sg_table failed, ret=%d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -EINVAL;
+ return ret;
+ }
+
+ if (sg_ptr->nents == 0) {
+ MPQ_DVB_ERR_PRINT("%s: num of scattered entries is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (sg_ptr->nents > SDMX_MAX_PHYSICAL_CHUNKS) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: num of scattered entries %d greater than max supported %d\n",
+ __func__, sg_ptr->nents, SDMX_MAX_PHYSICAL_CHUNKS);
+ return -EINVAL;
+ }
+
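+	/*
+	 * Walk the scatter-gather list, clamping each chunk to the
+	 * remaining buffer size so that the chunk sizes sum up to
+	 * actual_buff_size.
+	 */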
+ sg = sg_ptr->sgl;
+ for (i = 0; i < sg_ptr->nents; i++) {
+ buff_chunks[i].base_addr = (u64)sg_dma_address(sg);
+
+ if (sg->length > actual_buff_size)
+ chunk_size = actual_buff_size;
+ else
+ chunk_size = sg->length;
+
+ buff_chunks[i].size = chunk_size;
+ sg = sg_next(sg);
+ actual_buff_size -= chunk_size;
+ }
+
+ return 0;
+}
+
+static int mpq_sdmx_init_data_buffer(struct mpq_demux *mpq_demux,
+ struct mpq_feed *feed, u32 *num_buffers,
+ struct sdmx_data_buff_descr buf_desc[DMX_MAX_DECODER_BUFFER_NUM],
+ enum sdmx_buf_mode *buf_mode)
+{
+ struct dvb_demux_feed *dvbdmx_feed = feed->dvb_demux_feed;
+ struct dvb_ringbuffer *buffer;
+ struct mpq_video_feed_info *feed_data = &feed->video_info;
+ struct ion_handle *sdmx_buff;
+ int ret;
+ int i;
+
+ *buf_mode = SDMX_RING_BUF;
+
+ if (dvb_dmx_is_video_feed(feed->dvb_demux_feed)) {
+ if (feed_data->buffer_desc.decoder_buffers_num > 1)
+ *buf_mode = SDMX_LINEAR_GROUP_BUF;
+ *num_buffers = feed_data->buffer_desc.decoder_buffers_num;
+
+ for (i = 0; i < *num_buffers; i++) {
+ buf_desc[i].length =
+ feed_data->buffer_desc.desc[i].size;
+
+ ret = mpq_sdmx_get_buffer_chunks(mpq_demux,
+ feed_data->buffer_desc.ion_handle[i],
+ buf_desc[i].length,
+ buf_desc[i].buff_chunks);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_get_buffer_chunks failed\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+ }
+
+ *num_buffers = 1;
+ if (dvb_dmx_is_sec_feed(dvbdmx_feed) ||
+ dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
+ buffer = &feed->sdmx_buf;
+ sdmx_buff = feed->sdmx_buf_handle;
+ } else {
+ buffer = (struct dvb_ringbuffer *)
+ dvbdmx_feed->feed.ts.buffer.ringbuff;
+ sdmx_buff = dvbdmx_feed->feed.ts.buffer.priv_handle;
+ }
+
+ if (sdmx_buff == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid buffer allocation\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ buf_desc[0].length = buffer->size;
+ ret = mpq_sdmx_get_buffer_chunks(mpq_demux, sdmx_buff,
+ buf_desc[0].length,
+ buf_desc[0].buff_chunks);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_get_buffer_chunks failed\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mpq_sdmx_filter_setup(struct mpq_demux *mpq_demux,
+ struct dvb_demux_feed *dvbdmx_feed)
+{
+ int ret = 0;
+ struct mpq_feed *feed;
+ struct mpq_feed *main_rec_feed = NULL;
+ struct dvb_demux_feed *tmp;
+ struct sdmx_buff_descr metadata_buff_desc;
+ struct sdmx_data_buff_descr *data_buff_desc = NULL;
+ u32 data_buf_num = DMX_MAX_DECODER_BUFFER_NUM;
+ enum sdmx_buf_mode buf_mode;
+ enum sdmx_raw_out_format ts_out_format = SDMX_188_OUTPUT;
+ u32 filter_flags = 0;
+
+ feed = dvbdmx_feed->priv;
+
+ if (dvb_dmx_is_sec_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_SECTION_FILTER;
+ if (dvbdmx_feed->feed.sec.check_crc)
+ filter_flags |= SDMX_FILTER_FLAG_VERIFY_SECTION_CRC;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_SECTION_FILTER\n", __func__);
+ } else if (dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_PCR_FILTER;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_PCR_FILTER\n", __func__);
+ } else if (dvb_dmx_is_video_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_SEPARATED_PES_FILTER;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_SEPARATED_PES_FILTER\n", __func__);
+ } else if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_RAW_FILTER;
+ switch (dvbdmx_feed->tsp_out_format) {
+ case (DMX_TSP_FORMAT_188):
+ ts_out_format = SDMX_188_OUTPUT;
+ break;
+ case (DMX_TSP_FORMAT_192_HEAD):
+ ts_out_format = SDMX_192_HEAD_OUTPUT;
+ break;
+ case (DMX_TSP_FORMAT_192_TAIL):
+ ts_out_format = SDMX_192_TAIL_OUTPUT;
+ break;
+ default:
+ MPQ_DVB_ERR_PRINT(
+ "%s: Unsupported TS output format %d\n",
+ __func__, dvbdmx_feed->tsp_out_format);
+ return -EINVAL;
+ }
+ MPQ_DVB_DBG_PRINT("%s: SDMX_RAW_FILTER\n", __func__);
+ } else {
+ feed->filter_type = SDMX_PES_FILTER;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_PES_FILTER\n", __func__);
+ }
+
+ data_buff_desc = vmalloc(
+ sizeof(*data_buff_desc)*DMX_MAX_DECODER_BUFFER_NUM);
+ if (!data_buff_desc) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: failed to allocate memory for data buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+	/*
+	 * Recording feed sdmx filter handle lookup:
+	 * In case this is a recording filter with multiple feeds,
+	 * this feed is either the first feed of a new recording filter,
+	 * or another feed of an existing filter for which an sdmx filter
+	 * was already opened. In the latter case, we need to look up the
+	 * feed pool for an allocated feed with the same output buffer
+	 * (meaning they belong to the same filter) and re-use the already
+	 * allocated sdmx filter handle.
+	 */
+ if (feed->filter_type == SDMX_RAW_FILTER) {
+ tmp = mpq_dmx_peer_rec_feed(dvbdmx_feed);
+ if (tmp)
+ main_rec_feed = tmp->priv;
+ }
+
+ /*
+ * If this PID is not part of existing recording filter,
+ * configure a new filter to SDMX.
+ */
+ if (!main_rec_feed) {
+ feed->secondary_feed = 0;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Adding new sdmx filter, pid %d, flags=0x%X, ts_out_format=%d\n",
+ __func__, dvbdmx_feed->pid, filter_flags,
+ ts_out_format);
+
+		/*
+		 * Meta-data initialization.
+		 * Recording filters do not need meta-data buffers.
+		 */
+ if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
+ metadata_buff_desc.base_addr = 0;
+ metadata_buff_desc.size = 0;
+ } else {
+ ret = mpq_sdmx_init_metadata_buffer(mpq_demux, feed,
+ &metadata_buff_desc);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to initialize metadata buffer. ret=%d\n",
+ __func__, ret);
+ goto sdmx_filter_setup_failed;
+ }
+ }
+
+ ret = mpq_sdmx_init_data_buffer(mpq_demux, feed, &data_buf_num,
+ data_buff_desc, &buf_mode);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to initialize data buffer. ret=%d\n",
+ __func__, ret);
+ mpq_sdmx_terminate_metadata_buffer(feed);
+ goto sdmx_filter_setup_failed;
+ }
+ ret = sdmx_add_filter(mpq_demux->sdmx_session_handle,
+ dvbdmx_feed->pid,
+ feed->filter_type,
+ &metadata_buff_desc,
+ buf_mode,
+ data_buf_num,
+ data_buff_desc,
+ &feed->sdmx_filter_handle,
+ ts_out_format,
+ filter_flags);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: SDMX_add_filter failed. ret = %d\n",
+ __func__, ret);
+ ret = -ENODEV;
+ mpq_sdmx_terminate_metadata_buffer(feed);
+ goto sdmx_filter_setup_failed;
+ }
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: feed=0x%p, filter pid=%d, handle=%d, data buffer(s)=%d, size=%d\n",
+ __func__, feed, dvbdmx_feed->pid,
+ feed->sdmx_filter_handle,
+ data_buf_num, data_buff_desc[0].length);
+
+ mpq_demux->sdmx_filter_count++;
+ } else {
+ MPQ_DVB_DBG_PRINT(
+ "%s: Adding RAW pid to sdmx, pid %d\n",
+ __func__, dvbdmx_feed->pid);
+
+ feed->secondary_feed = 1;
+ feed->sdmx_filter_handle = main_rec_feed->sdmx_filter_handle;
+ ret = sdmx_add_raw_pid(mpq_demux->sdmx_session_handle,
+ feed->sdmx_filter_handle, dvbdmx_feed->pid);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to add raw pid, ret=%d\n",
+ __func__, ret);
+ ret = -ENODEV;
+ goto sdmx_filter_setup_failed;
+ }
+ }
+
+	/*
+	 * If the PID has a key ladder id associated with it,
+	 * we need to pass it on to SDMX.
+	 */
+ if (dvbdmx_feed->secure_mode.is_secured &&
+ dvbdmx_feed->cipher_ops.operations_count) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: set key-ladder %d to PID %d\n",
+ __func__,
+ dvbdmx_feed->cipher_ops.operations[0].key_ladder_id,
+ dvbdmx_feed->cipher_ops.pid);
+
+ ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
+ dvbdmx_feed->cipher_ops.pid,
+ dvbdmx_feed->cipher_ops.operations[0].key_ladder_id);
+
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to set key ladder, ret=%d\n",
+ __func__, ret);
+ }
+ }
+
+ vfree(data_buff_desc);
+ return 0;
+
+sdmx_filter_setup_failed:
+ vfree(data_buff_desc);
+ return ret;
+}
+
+/**
+ * mpq_sdmx_init_feed - initialize secure demux related elements of mpq feed
+ *
+ * @mpq_demux: mpq_demux object
+ * @mpq_feed: mpq_feed object
+ *
+ * Note: the function assumes mpq_demux->mutex locking is done by the caller.
+ */
+static int mpq_sdmx_init_feed(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ int ret;
+
+ ret = mpq_sdmx_open_session(mpq_demux);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_open_session failed, ret=%d\n",
+ __func__, ret);
+
+ ret = -ENODEV;
+ goto init_sdmx_feed_failed;
+ }
+
+	/* PCR and section feeds have an internal buffer for SDMX */
+ if (dvb_dmx_is_pcr_feed(mpq_feed->dvb_demux_feed))
+ ret = mpq_sdmx_alloc_data_buf(mpq_feed, SDMX_PCR_BUFFER_SIZE);
+ else if (dvb_dmx_is_sec_feed(mpq_feed->dvb_demux_feed))
+ ret = mpq_sdmx_alloc_data_buf(mpq_feed,
+ SDMX_SECTION_BUFFER_SIZE);
+ else
+ ret = 0;
+
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: init buffer failed, ret=%d\n",
+ __func__, ret);
+ goto init_sdmx_feed_failed_free_sdmx;
+ }
+
+ ret = mpq_sdmx_filter_setup(mpq_demux, mpq_feed->dvb_demux_feed);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_filter_setup failed, ret=%d\n",
+ __func__, ret);
+ goto init_sdmx_feed_failed_free_data_buff;
+ }
+
+ mpq_demux->num_secure_feeds++;
+ return 0;
+
+init_sdmx_feed_failed_free_data_buff:
+ mpq_sdmx_free_data_buf(mpq_feed);
+init_sdmx_feed_failed_free_sdmx:
+ mpq_sdmx_close_session(mpq_demux);
+init_sdmx_feed_failed:
+ return ret;
+}
+
+int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed)
+{
+ int ret = 0;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct mpq_feed *mpq_feed = feed->priv;
+
+ if (mutex_lock_interruptible(&mpq_demux->mutex))
+ return -ERESTARTSYS;
+
+ mpq_feed->sdmx_buf_handle = NULL;
+ mpq_feed->metadata_buf_handle = NULL;
+ mpq_feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE;
+
+ if (feed->type != DMX_TYPE_SEC)
+ feed->feed.ts.flush_buffer = mpq_dmx_flush_buffer;
+
+ if (dvb_dmx_is_video_feed(feed)) {
+ ret = mpq_dmx_init_video_feed(mpq_feed);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_init_video_feed failed, ret=%d\n",
+ __func__, ret);
+ goto init_mpq_feed_end;
+ }
+ }
+
+ /*
+ * sdmx is not relevant for recording filters, which always use
+ * regular filters (non-sdmx)
+ */
+ if (!mpq_sdmx_is_loaded() || !feed->secure_mode.is_secured ||
+ dvb_dmx_is_rec_feed(feed)) {
+ if (!mpq_sdmx_is_loaded())
+ mpq_demux->sdmx_session_handle =
+ SDMX_INVALID_SESSION_HANDLE;
+ goto init_mpq_feed_end;
+ }
+
+ /* Initialization of secure demux filters (PES/PCR/Video/Section) */
+ ret = mpq_sdmx_init_feed(mpq_demux, mpq_feed);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_init_feed failed, ret=%d\n",
+ __func__, ret);
+ if (dvb_dmx_is_video_feed(feed))
+ mpq_dmx_terminate_video_feed(mpq_feed);
+ }
+
+init_mpq_feed_end:
+ if (!ret) {
+ mpq_demux->num_active_feeds++;
+ mpq_feed->session_id++;
+ }
+ mutex_unlock(&mpq_demux->mutex);
+ return ret;
+}
+
+/**
+ * Note: Called only when filter is in "GO" state - after feed has been started.
+ */
+int mpq_dmx_set_cipher_ops(struct dvb_demux_feed *feed,
+ struct dmx_cipher_operations *cipher_ops)
+{
+ struct mpq_feed *mpq_feed;
+ struct mpq_demux *mpq_demux;
+ int ret = 0;
+
+ if (!feed || !feed->priv || !cipher_ops) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid parameters\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ MPQ_DVB_DBG_PRINT("%s(%d, %d, %d)\n",
+ __func__, cipher_ops->pid,
+ cipher_ops->operations_count,
+ cipher_ops->operations[0].key_ladder_id);
+
+ if ((cipher_ops->operations_count > 1) ||
+ (cipher_ops->operations_count &&
+ cipher_ops->operations[0].encrypt)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid cipher operations, count=%d, encrypt=%d\n",
+ __func__, cipher_ops->operations_count,
+ cipher_ops->operations[0].encrypt);
+ return -EINVAL;
+ }
+
+ if (!feed->secure_mode.is_secured) {
+		/*
+		 * The filter is not configured as secured; setting cipher
+		 * operations is not allowed.
+		 */
+ MPQ_DVB_ERR_PRINT(
+ "%s: Cannot set cipher operations to non-secure filter\n",
+ __func__);
+ return -EPERM;
+ }
+
+ mpq_feed = feed->priv;
+ mpq_demux = mpq_feed->mpq_demux;
+
+ mutex_lock(&mpq_demux->mutex);
+
+	/*
+	 * The feed is running in secure mode; this secure-mode request
+	 * updates the key ladder id
+	 */
+ if ((mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) &&
+ cipher_ops->operations_count) {
+ ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
+ cipher_ops->pid,
+ cipher_ops->operations[0].key_ladder_id);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to set key ladder, ret=%d\n",
+ __func__, ret);
+ ret = -ENODEV;
+ }
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ return ret;
+}
+
+static int mpq_sdmx_invalidate_buffer(struct mpq_feed *mpq_feed)
+{
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_ringbuffer *buffer;
+ struct ion_handle *ion_handle;
+ int ret = 0;
+ int i;
+
+ if (!dvb_dmx_is_video_feed(feed)) {
+ if (dvb_dmx_is_sec_feed(feed) ||
+ dvb_dmx_is_pcr_feed(feed)) {
+ buffer = (struct dvb_ringbuffer *)
+ &mpq_feed->sdmx_buf;
+ ion_handle = mpq_feed->sdmx_buf_handle;
+ } else {
+ buffer = (struct dvb_ringbuffer *)
+ feed->feed.ts.buffer.ringbuff;
+ ion_handle = feed->feed.ts.buffer.priv_handle;
+ }
+
+ ret = msm_ion_do_cache_op(mpq_feed->mpq_demux->ion_client,
+ ion_handle, buffer->data,
+ buffer->size, ION_IOC_INV_CACHES);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: msm_ion_do_cache_op failed, ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Video buffers */
+ feed_data = &mpq_feed->video_info;
+ for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+ if (feed_data->buffer_desc.desc[i].base) {
+ /* Non-secured buffer */
+ ret = msm_ion_do_cache_op(
+ mpq_feed->mpq_demux->ion_client,
+ feed_data->buffer_desc.ion_handle[i],
+ feed_data->buffer_desc.desc[i].base,
+ feed_data->buffer_desc.desc[i].size,
+ ION_IOC_INV_CACHES);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: msm_ion_do_cache_op failed, ret = %d\n",
+ __func__, ret);
+ }
+ }
+
+ return ret;
+}
+
+static void mpq_sdmx_prepare_filter_status(struct mpq_demux *mpq_demux,
+ struct sdmx_filter_status *filter_sts,
+ struct mpq_feed *mpq_feed)
+{
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct mpq_video_feed_info *feed_data;
+ struct mpq_streambuffer *sbuff;
+
+ filter_sts->filter_handle = mpq_feed->sdmx_filter_handle;
+ filter_sts->metadata_fill_count =
+ dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ filter_sts->metadata_write_offset = mpq_feed->metadata_buf.pwrite;
+ filter_sts->error_indicators = 0;
+ filter_sts->status_indicators = 0;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Filter meta-data buffer status: fill count = %d, write_offset = %d\n",
+ __func__, filter_sts->metadata_fill_count,
+ filter_sts->metadata_write_offset);
+
+ if (!dvb_dmx_is_video_feed(feed)) {
+ struct dvb_ringbuffer *buffer;
+
+ if (dvb_dmx_is_sec_feed(feed) ||
+ dvb_dmx_is_pcr_feed(feed)) {
+ buffer = (struct dvb_ringbuffer *)
+ &mpq_feed->sdmx_buf;
+ } else {
+ buffer = (struct dvb_ringbuffer *)
+ feed->feed.ts.buffer.ringbuff;
+ }
+
+ filter_sts->data_fill_count = dvb_ringbuffer_avail(buffer);
+ filter_sts->data_write_offset = buffer->pwrite;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Filter buffers status: fill count = %d, write_offset = %d\n",
+ __func__, filter_sts->data_fill_count,
+ filter_sts->data_write_offset);
+
+ return;
+ }
+
+ /* Video feed - decoder buffers */
+ feed_data = &mpq_feed->video_info;
+
+ spin_lock(&mpq_feed->video_info.video_buffer_lock);
+ sbuff = feed_data->video_buffer;
+ if (sbuff == NULL) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: video_buffer released\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return;
+ }
+
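+	/*
+	 * In linear mode the raw-data ring holds buffer descriptors, so
+	 * fill count is reported in pending buffers and the byte write
+	 * offset is converted to a descriptor index.
+	 */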
+ if (feed_data->buffer_desc.decoder_buffers_num > 1) {
+ /* linear mode */
+ filter_sts->data_fill_count = sbuff->pending_buffers_count;
+ filter_sts->data_write_offset =
+ sbuff->raw_data.pwrite /
+ sizeof(struct mpq_streambuffer_buffer_desc);
+ } else {
+ /* ring buffer mode */
+ filter_sts->data_fill_count =
+ mpq_streambuffer_data_avail(sbuff);
+ mpq_streambuffer_get_data_rw_offset(sbuff, NULL,
+ &filter_sts->data_write_offset);
+
+ }
+
+ spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Decoder buffers filter status: fill count = %d, write_offset = %d\n",
+ __func__, filter_sts->data_fill_count,
+ filter_sts->data_write_offset);
+}
+
+static int mpq_sdmx_section_filtering(struct mpq_feed *mpq_feed,
+ struct dvb_demux_filter *f,
+ struct sdmx_metadata_header *header)
+{
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ int ret;
+ u8 neq = 0;
+ u8 xor;
+ u8 tmp;
+ int i;
+
+ if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Mutex should have been locked\n",
+ __func__);
+ return -EINVAL;
+ }
+
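+	/*
+	 * Standard DVB section filtering: reject the section if any
+	 * mask-and-mode bit of the filter value differs from the section
+	 * header; with doneq set, additionally require at least one
+	 * mask-and-not-mode bit to differ.
+	 */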
+ for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) {
+ tmp = DVB_RINGBUFFER_PEEK(&mpq_feed->sdmx_buf, i);
+ xor = f->filter.filter_value[i] ^ tmp;
+
+ if (f->maskandmode[i] & xor)
+ return 0;
+
+ neq |= f->maskandnotmode[i] & xor;
+ }
+
+ if (f->doneq && !neq)
+ return 0;
+
+ if (feed->demux->playback_mode == DMX_PB_MODE_PULL) {
+ mutex_unlock(&mpq_feed->mpq_demux->mutex);
+
+ ret = feed->demux->buffer_ctrl.sec(&f->filter,
+ header->payload_length, 1);
+
+ mutex_lock(&mpq_feed->mpq_demux->mutex);
+
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: buffer_ctrl.sec aborted\n",
+ __func__);
+ return ret;
+ }
+
+ if (mpq_feed->sdmx_filter_handle ==
+ SDMX_INVALID_FILTER_HANDLE) {
+ MPQ_DVB_DBG_PRINT("%s: filter was stopped\n",
+ __func__);
+ return -ENODEV;
+ }
+ }
+
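+	/*
+	 * The section payload may wrap around the end of the ring buffer;
+	 * in that case deliver it to the section callback in two chunks.
+	 */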
+ if (mpq_feed->sdmx_buf.pread + header->payload_length <
+ mpq_feed->sdmx_buf.size) {
+ feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread],
+ header->payload_length,
+ NULL, 0, &f->filter, DMX_OK);
+ } else {
+ int split = mpq_feed->sdmx_buf.size - mpq_feed->sdmx_buf.pread;
+
+ feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread],
+ split,
+ &mpq_feed->sdmx_buf.data[0],
+ header->payload_length - split,
+ &f->filter, DMX_OK);
+ }
+
+ return 0;
+}
+
+static int mpq_sdmx_check_ts_stall(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts,
+ size_t req,
+ int events_only)
+{
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ int ret;
+
+ if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Mutex should have been locked\n",
+ __func__);
+ return -EINVAL;
+ }
+
+	/*
+	 * For PULL mode we need to verify there is enough space for the
+	 * dmxdev event. Also, if the data buffer is full we want to stall
+	 * until some data is removed from it, to avoid calling sdmx when
+	 * it cannot output data to the still-full buffer.
+	 */
+ if (mpq_demux->demux.playback_mode == DMX_PB_MODE_PULL) {
+ MPQ_DVB_DBG_PRINT("%s: Stalling for events and %zu bytes\n",
+ __func__, req);
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ ret = mpq_demux->demux.buffer_ctrl.ts(&feed->feed.ts, req, 1);
+ MPQ_DVB_DBG_PRINT("%s: stall result = %d\n",
+ __func__, ret);
+
+ mutex_lock(&mpq_demux->mutex);
+
+ if (mpq_feed->sdmx_filter_handle ==
+ SDMX_INVALID_FILTER_HANDLE) {
+ MPQ_DVB_DBG_PRINT("%s: filter was stopped\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Handle filter results for filters with no extra meta-data */
+static void mpq_sdmx_pes_filter_results(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts)
+{
+ int ret;
+ struct sdmx_metadata_header header;
+ struct sdmx_pes_counters counters;
+ struct dmx_data_ready data_event;
+ struct dmx_data_ready pes_event;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dvb_ringbuffer *buf = (struct dvb_ringbuffer *)
+ feed->feed.ts.buffer.ringbuff;
+ ssize_t bytes_avail;
+
+ if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+ goto pes_filter_check_overflow;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Meta: fill=%u, write=%u. Data: fill=%u, write=%u\n",
+ __func__, sts->metadata_fill_count, sts->metadata_write_offset,
+ sts->data_fill_count, sts->data_write_offset);
+
+ mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+
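+	/*
+	 * Data buffer filled up before a complete PES was closed (no
+	 * metadata yet): in pull mode, stall until enough space frees up
+	 * to hold a maximal PES (the 2KB slack presumably covers headers).
+	 */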
+ if ((sts->metadata_fill_count == 0) &&
+ (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+ ssize_t free = dvb_ringbuffer_free(buf);
+
+ ret = 0;
+ if ((free + SZ_2K) < MAX_PES_LENGTH)
+ ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+ free + SZ_2K, 0);
+ else
+ MPQ_DVB_ERR_PRINT(
+ "%s: Cannot stall when free space bigger than max PES size\n",
+ __func__);
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_sdmx_check_ts_stall aborted\n",
+ __func__);
+ return;
+ }
+ }
+
+ while (sts->metadata_fill_count) {
+ bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ if (bytes_avail < (sizeof(header) + sizeof(counters))) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: metadata_fill_count is %d less than required %zu bytes\n",
+ __func__,
+ sts->metadata_fill_count,
+ sizeof(header) + sizeof(counters));
+
+ /* clean-up remaining bytes to try to recover */
+ DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+ bytes_avail);
+ sts->metadata_fill_count = 0;
+ break;
+ }
+
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&header,
+ sizeof(header));
+ MPQ_DVB_DBG_PRINT(
+ "%s: metadata header: start=%u, length=%u\n",
+ __func__, header.payload_start, header.payload_length);
+ sts->metadata_fill_count -= sizeof(header);
+
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&counters,
+ sizeof(counters));
+ sts->metadata_fill_count -= sizeof(counters);
+
+ /* Notify new data in buffer */
+ data_event.status = DMX_OK;
+ data_event.data_length = header.payload_length;
+ ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+ data_event.data_length, 0);
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_sdmx_check_ts_stall aborted\n",
+ __func__);
+ return;
+ }
+
+ feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+
+ /* Notify new complete PES */
+ pes_event.status = DMX_OK_PES_END;
+ pes_event.pes_end.actual_length = header.payload_length;
+ pes_event.pes_end.start_gap = 0;
+ pes_event.data_length = 0;
+
+ /* Parse error indicators */
+ if (sts->error_indicators & SDMX_FILTER_ERR_INVALID_PES_LEN)
+ pes_event.pes_end.pes_length_mismatch = 1;
+ else
+ pes_event.pes_end.pes_length_mismatch = 0;
+
+ pes_event.pes_end.disc_indicator_set = 0;
+
+ pes_event.pes_end.stc = 0;
+ pes_event.pes_end.tei_counter = counters.transport_err_count;
+ pes_event.pes_end.cont_err_counter =
+ counters.continuity_err_count;
+ pes_event.pes_end.ts_packets_num =
+ counters.pes_ts_count;
+
+ ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts, 0, 1);
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_sdmx_check_ts_stall aborted\n",
+ __func__);
+ return;
+ }
+ feed->data_ready_cb.ts(&feed->feed.ts, &pes_event);
+ }
+
+pes_filter_check_overflow:
+ if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+ (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+ MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+ mpq_dmx_notify_overflow(feed);
+ }
+
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ data_event.data_length = 0;
+ data_event.status = DMX_OK_EOS;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+ }
+}
+
+static void mpq_sdmx_section_filter_results(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts)
+{
+ struct sdmx_metadata_header header;
+ struct dmx_data_ready event;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dvb_demux_filter *f;
+ struct dmx_section_feed *sec = &feed->feed.sec;
+ ssize_t bytes_avail;
+
+ /* Parse error indicators */
+ if (sts->error_indicators & SDMX_FILTER_ERR_SEC_VERIF_CRC32_FAIL) {
+ MPQ_DVB_DBG_PRINT("%s: Notify CRC err event\n", __func__);
+ event.status = DMX_CRC_ERROR;
+ event.data_length = 0;
+ dvb_dmx_notify_section_event(feed, &event, 1);
+ }
+
+ if (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)
+ MPQ_DVB_ERR_PRINT("%s: internal section buffer overflowed!\n",
+ __func__);
+
+ if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+ goto section_filter_check_eos;
+
+ mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+ mpq_feed->sdmx_buf.pwrite = sts->data_write_offset;
+
+ while (sts->metadata_fill_count) {
+ bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ if (bytes_avail < sizeof(header)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: metadata_fill_count is %d less than required %zu bytes\n",
+ __func__,
+ sts->metadata_fill_count,
+ sizeof(header));
+
+ /* clean-up remaining bytes to try to recover */
+ DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+ bytes_avail);
+ sts->metadata_fill_count = 0;
+ break;
+ }
+
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *) &header,
+ sizeof(header));
+ sts->metadata_fill_count -= sizeof(header);
+ MPQ_DVB_DBG_PRINT(
+ "%s: metadata header: start=%u, length=%u\n",
+ __func__, header.payload_start, header.payload_length);
+
+ f = feed->filter;
+ do {
+ if (mpq_sdmx_section_filtering(mpq_feed, f, &header))
+ return;
+ } while ((f = f->next) && sec->is_filtering);
+
+ DVB_RINGBUFFER_SKIP(&mpq_feed->sdmx_buf, header.payload_length);
+ }
+
+section_filter_check_eos:
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ event.data_length = 0;
+ event.status = DMX_OK_EOS;
+ dvb_dmx_notify_section_event(feed, &event, 1);
+ }
+}
+
+static void mpq_sdmx_decoder_filter_results(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts)
+{
+ struct sdmx_metadata_header header;
+ struct sdmx_pes_counters counters;
+ int pes_header_offset;
+ struct ts_packet_header *ts_header;
+ struct ts_adaptation_field *ts_adapt;
+ struct pes_packet_header *pes_header;
+ u8 metadata_buf[MAX_SDMX_METADATA_LENGTH];
+ struct mpq_streambuffer *sbuf;
+ int ret;
+ struct dmx_data_ready data_event;
+ struct dmx_data_ready data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ ssize_t bytes_avail;
+
+ if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+ goto decoder_filter_check_flags;
+
+ /* Update meta data buffer write pointer */
+ mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+
+ if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PULL) &&
+ (sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) {
+ MPQ_DVB_DBG_PRINT("%s: Decoder stall...\n", __func__);
+
+ ret = mpq_dmx_decoder_fullness_check(
+ mpq_feed->dvb_demux_feed, 0, 0);
+ if (ret) {
+ /* we reach here if demuxing was aborted */
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_dmx_decoder_fullness_check aborted\n",
+ __func__);
+ return;
+ }
+ }
+
+ while (sts->metadata_fill_count) {
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_adapter_video_meta_data meta_data;
+
+ bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ if (bytes_avail < (sizeof(header) + sizeof(counters))) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: metadata_fill_count is %d less than required %zu bytes\n",
+ __func__,
+ sts->metadata_fill_count,
+ sizeof(header) + sizeof(counters));
+
+ /* clean-up remaining bytes to try to recover */
+ DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+ bytes_avail);
+ sts->metadata_fill_count = 0;
+ break;
+ }
+
+ /* Read metadata header */
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&header,
+ sizeof(header));
+ sts->metadata_fill_count -= sizeof(header);
+ MPQ_DVB_DBG_PRINT(
+ "%s: metadata header: start=%u, length=%u, metadata=%u\n",
+ __func__, header.payload_start, header.payload_length,
+ header.metadata_length);
+
+ /* Read metadata - PES counters */
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&counters,
+ sizeof(counters));
+ sts->metadata_fill_count -= sizeof(counters);
+
+ /* Read metadata - TS & PES headers */
+ bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ if ((header.metadata_length < MAX_SDMX_METADATA_LENGTH) &&
+ (header.metadata_length >= sizeof(counters)) &&
+ (bytes_avail >=
+ (header.metadata_length - sizeof(counters)))) {
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf,
+ metadata_buf,
+ header.metadata_length - sizeof(counters));
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: meta-data size %d larger than available meta-data %zd or max allowed %d\n",
+ __func__, header.metadata_length,
+ bytes_avail,
+ MAX_SDMX_METADATA_LENGTH);
+
+ /* clean-up remaining bytes to try to recover */
+ DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+ bytes_avail);
+ sts->metadata_fill_count = 0;
+ break;
+ }
+
+ sts->metadata_fill_count -=
+ (header.metadata_length - sizeof(counters));
+
+ ts_header = (struct ts_packet_header *)&metadata_buf[0];
+ if (ts_header->adaptation_field_control == 1) {
+ ts_adapt = NULL;
+ pes_header_offset = sizeof(*ts_header);
+ } else {
+ ts_adapt = (struct ts_adaptation_field *)
+ &metadata_buf[sizeof(*ts_header)];
+ pes_header_offset = sizeof(*ts_header) + 1 +
+ ts_adapt->adaptation_field_length;
+ }
+ pes_header = (struct pes_packet_header *)
+ &metadata_buf[pes_header_offset];
+ meta_data.packet_type = DMX_PES_PACKET;
+ /* TODO - set to real STC when SDMX supports it */
+ meta_data.info.pes.stc = 0;
+
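+		/*
+		 * Reassemble the 33-bit PTS/DTS from the five split
+		 * bitfields of the PES header (ISO/IEC 13818-1 encoding).
+		 */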
+ if (pes_header->pts_dts_flag & 0x2) {
+ meta_data.info.pes.pts_dts_info.pts_exist = 1;
+ meta_data.info.pes.pts_dts_info.pts =
+ ((u64)pes_header->pts_1 << 30) |
+ ((u64)pes_header->pts_2 << 22) |
+ ((u64)pes_header->pts_3 << 15) |
+ ((u64)pes_header->pts_4 << 7) |
+ (u64)pes_header->pts_5;
+ } else {
+ meta_data.info.pes.pts_dts_info.pts_exist = 0;
+ }
+
+ if (pes_header->pts_dts_flag & 0x1) {
+ meta_data.info.pes.pts_dts_info.dts_exist = 1;
+ meta_data.info.pes.pts_dts_info.dts =
+ ((u64)pes_header->dts_1 << 30) |
+ ((u64)pes_header->dts_2 << 22) |
+ ((u64)pes_header->dts_3 << 15) |
+ ((u64)pes_header->dts_4 << 7) |
+ (u64)pes_header->dts_5;
+ } else {
+ meta_data.info.pes.pts_dts_info.dts_exist = 0;
+ }
+
+ spin_lock(&mpq_feed->video_info.video_buffer_lock);
+
+ mpq_feed->video_info.tei_errs =
+ counters.transport_err_count;
+ mpq_feed->video_info.continuity_errs =
+ counters.continuity_err_count;
+ mpq_feed->video_info.ts_packets_num =
+ counters.pes_ts_count;
+ mpq_feed->video_info.ts_dropped_bytes =
+ counters.drop_count *
+ mpq_demux->demux.ts_packet_size;
+
+ sbuf = mpq_feed->video_info.video_buffer;
+ if (sbuf == NULL) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: video_buffer released\n",
+ __func__);
+ spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+ return;
+ }
+
+ if (!header.payload_length) {
+ MPQ_DVB_DBG_PRINT(
+				"%s: warning - video frame with 0 length, dropping\n",
+ __func__);
+ spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+ continue;
+ }
+
+ packet.raw_data_len = header.payload_length;
+ packet.user_data_len = sizeof(meta_data);
+ mpq_streambuffer_get_buffer_handle(sbuf, 0,
+ &packet.raw_data_handle);
+ mpq_streambuffer_get_data_rw_offset(sbuf,
+ NULL, &packet.raw_data_offset);
+ ret = mpq_streambuffer_data_write_deposit(sbuf,
+ header.payload_length);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_data_write_deposit failed. ret=%d\n",
+ __func__, ret);
+ }
+ mpq_dmx_update_decoder_stat(mpq_feed);
+ ret = mpq_streambuffer_pkt_write(sbuf, &packet,
+ (u8 *)&meta_data);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, ret);
+ } else {
+ mpq_dmx_prepare_es_event_data(
+ &packet, &meta_data, &mpq_feed->video_info,
+ sbuf, &data, ret);
+ MPQ_DVB_DBG_PRINT("%s: Notify ES Event\n", __func__);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ }
+
+ spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+ }
+
+decoder_filter_check_flags:
+ if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+ (sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) {
+ MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+ mpq_dmx_notify_overflow(mpq_feed->dvb_demux_feed);
+ }
+
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ /* Notify decoder via the stream buffer */
+ ret = mpq_dmx_decoder_eos_cmd(mpq_feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to notify decoder on EOS, ret=%d\n",
+ __func__, ret);
+
+ /* Notify user filter */
+ data_event.data_length = 0;
+ data_event.status = DMX_OK_EOS;
+ mpq_feed->dvb_demux_feed->data_ready_cb.ts(
+ &mpq_feed->dvb_demux_feed->feed.ts, &data_event);
+ }
+}
+
+static void mpq_sdmx_pcr_filter_results(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts)
+{
+ int ret;
+ struct sdmx_metadata_header header;
+ struct dmx_data_ready data;
+ struct dvb_ringbuffer *rbuff = &mpq_feed->sdmx_buf;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ u8 buf[TS_PACKET_HEADER_LENGTH + MAX_TSP_ADAPTATION_LENGTH +
+ TIMESTAMP_LEN];
+ size_t stc_len = 0;
+ ssize_t bytes_avail;
+
+ if (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)
+ MPQ_DVB_ERR_PRINT("%s: internal PCR buffer overflowed!\n",
+ __func__);
+
+ if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+ goto pcr_filter_check_eos;
+
+ if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_192_TAIL)
+ stc_len = 4;
+
+ mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+ rbuff->pwrite = sts->data_write_offset;
+
+ while (sts->metadata_fill_count) {
+ bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ if (bytes_avail < sizeof(header)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: metadata_fill_count is %d less than required %zu bytes\n",
+ __func__,
+ sts->metadata_fill_count,
+ sizeof(header));
+
+ /* clean-up remaining bytes to try to recover */
+ DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+ bytes_avail);
+ sts->metadata_fill_count = 0;
+ break;
+ }
+
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *) &header,
+ sizeof(header));
+ MPQ_DVB_DBG_PRINT(
+ "%s: metadata header: start=%u, length=%u\n",
+ __func__, header.payload_start, header.payload_length);
+ sts->metadata_fill_count -= sizeof(header);
+
+ dvb_ringbuffer_read(rbuff, buf, header.payload_length);
+
+ if (mpq_dmx_extract_pcr_and_dci(buf, &data.pcr.pcr,
+ &data.pcr.disc_indicator_set)) {
+
+ if (stc_len) {
+ data.pcr.stc =
+ buf[header.payload_length-2] << 16;
+ data.pcr.stc +=
+ buf[header.payload_length-3] << 8;
+ data.pcr.stc += buf[header.payload_length-4];
+				/* convert from 105.47 kHz to 27 MHz */
+ data.pcr.stc *= 256;
+ } else {
+ data.pcr.stc = 0;
+ }
+
+ data.data_length = 0;
+ data.status = DMX_OK_PCR;
+ ret = mpq_sdmx_check_ts_stall(
+ mpq_demux, mpq_feed, sts, 0, 1);
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_sdmx_check_ts_stall aborted\n",
+ __func__);
+ return;
+ }
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ }
+ }
+
+pcr_filter_check_eos:
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ data.data_length = 0;
+ data.status = DMX_OK_EOS;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ }
+}
+
+static void mpq_sdmx_raw_filter_results(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts)
+{
+ int ret;
+ ssize_t new_data;
+ struct dmx_data_ready data_event;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dvb_ringbuffer *buf = (struct dvb_ringbuffer *)
+ feed->feed.ts.buffer.ringbuff;
+
+ if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+ goto raw_filter_check_flags;
+
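+	/*
+	 * Compute how much new data sdmx wrote to the output ring buffer,
+	 * accounting for write-pointer wrap-around.
+	 */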
+ new_data = sts->data_write_offset -
+ buf->pwrite;
+ if (new_data < 0)
+ new_data += buf->size;
+
+ ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+ new_data + feed->demux->ts_packet_size, 0);
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_sdmx_check_ts_stall aborted\n",
+ __func__);
+ return;
+ }
+
+ data_event.status = DMX_OK;
+ data_event.data_length = new_data;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+ MPQ_DVB_DBG_PRINT("%s: Callback DMX_OK, size=%d\n",
+ __func__, data_event.data_length);
+
+raw_filter_check_flags:
+ if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+ (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+ MPQ_DVB_DBG_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+ mpq_dmx_notify_overflow(feed);
+ }
+
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ data_event.data_length = 0;
+ data_event.status = DMX_OK_EOS;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+ }
+}
+
+static void mpq_sdmx_process_results(struct mpq_demux *mpq_demux)
+{
+ int i;
+ int sdmx_filters;
+ struct sdmx_filter_status *sts;
+ struct mpq_feed *mpq_feed;
+ u8 mpq_feed_idx;
+
+ sdmx_filters = mpq_demux->sdmx_filter_count;
+ for (i = 0; i < sdmx_filters; i++) {
+ sts = &mpq_demux->sdmx_filters_state.status[i];
+ MPQ_DVB_DBG_PRINT(
+ "%s: Filter: handle=%d, status=0x%x, errors=0x%x\n",
+ __func__, sts->filter_handle, sts->status_indicators,
+ sts->error_indicators);
+ MPQ_DVB_DBG_PRINT("%s: Metadata fill count=%d (write=%d)\n",
+ __func__, sts->metadata_fill_count,
+ sts->metadata_write_offset);
+ MPQ_DVB_DBG_PRINT("%s: Data fill count=%d (write=%d)\n",
+ __func__, sts->data_fill_count, sts->data_write_offset);
+
+ mpq_feed_idx = mpq_demux->sdmx_filters_state.mpq_feed_idx[i];
+ mpq_feed = &mpq_demux->feeds[mpq_feed_idx];
+ if ((mpq_feed->dvb_demux_feed->state != DMX_STATE_GO) ||
+ (sts->filter_handle != mpq_feed->sdmx_filter_handle) ||
+ mpq_feed->secondary_feed ||
+ (mpq_demux->sdmx_filters_state.session_id[i] !=
+ mpq_feed->session_id))
+ continue;
+
+ /* Invalidate output buffer before processing the results */
+ mpq_sdmx_invalidate_buffer(mpq_feed);
+
+ if (sts->error_indicators & SDMX_FILTER_ERR_MD_BUF_FULL)
+ MPQ_DVB_ERR_PRINT(
+ "%s: meta-data buff for pid %d overflowed!\n",
+ __func__, mpq_feed->dvb_demux_feed->pid);
+
+ switch (mpq_feed->filter_type) {
+ case SDMX_PCR_FILTER:
+ mpq_sdmx_pcr_filter_results(mpq_demux, mpq_feed, sts);
+ break;
+ case SDMX_PES_FILTER:
+ mpq_sdmx_pes_filter_results(mpq_demux, mpq_feed,
+ sts);
+ break;
+ case SDMX_SEPARATED_PES_FILTER:
+ mpq_sdmx_decoder_filter_results(mpq_demux, mpq_feed,
+ sts);
+ break;
+ case SDMX_SECTION_FILTER:
+ mpq_sdmx_section_filter_results(mpq_demux, mpq_feed,
+ sts);
+ break;
+ case SDMX_RAW_FILTER:
+ mpq_sdmx_raw_filter_results(mpq_demux, mpq_feed, sts);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int mpq_sdmx_process_buffer(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *input,
+ u32 fill_count,
+ u32 read_offset)
+{
+ struct sdmx_filter_status *sts;
+ struct mpq_feed *mpq_feed;
+ u8 flags = 0;
+ u32 errors;
+ u32 status;
+ u32 prev_read_offset;
+ u32 prev_fill_count;
+ enum sdmx_status sdmx_res;
+ int i;
+ int filter_index = 0;
+ int bytes_read;
+ struct timespec process_start_time;
+ struct timespec process_end_time;
+
+ mutex_lock(&mpq_demux->mutex);
+
+	/*
+	 * All active filters may have been closed, in which case the
+	 * sdmx session is terminated and there is nothing to process
+	 */
+ if (mpq_demux->sdmx_session_handle == SDMX_INVALID_SESSION_HANDLE) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: sdmx filters aborted, filter-count %d, session %d\n",
+ __func__, mpq_demux->sdmx_filter_count,
+ mpq_demux->sdmx_session_handle);
+ mutex_unlock(&mpq_demux->mutex);
+ return 0;
+ }
+
+ /* Set input flags */
+ if (mpq_demux->sdmx_eos)
+ flags |= SDMX_INPUT_FLAG_EOS;
+ if (mpq_sdmx_debug)
+ flags |= SDMX_INPUT_FLAG_DBG_ENABLE;
+
+ /* Build up to date filter status array */
+ for (i = 0; i < MPQ_MAX_DMX_FILES; i++) {
+ mpq_feed = &mpq_demux->feeds[i];
+ if ((mpq_feed->sdmx_filter_handle != SDMX_INVALID_FILTER_HANDLE)
+ && (!mpq_feed->secondary_feed)) {
+ sts = mpq_demux->sdmx_filters_state.status +
+ filter_index;
+ mpq_sdmx_prepare_filter_status(mpq_demux, sts,
+ mpq_feed);
+ mpq_demux->sdmx_filters_state.mpq_feed_idx[filter_index]
+ = i;
+ mpq_demux->sdmx_filters_state.session_id[filter_index] =
+ mpq_feed->session_id;
+ filter_index++;
+ }
+ }
+
+ /* Sanity check */
+ if (filter_index != mpq_demux->sdmx_filter_count) {
+ mutex_unlock(&mpq_demux->mutex);
+ MPQ_DVB_ERR_PRINT(
+ "%s: Updated %d SDMX filters status but should be %d\n",
+ __func__, filter_index, mpq_demux->sdmx_filter_count);
+ return -ERESTART;
+ }
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Before SDMX_process: input read_offset=%u, fill count=%u\n",
+ __func__, read_offset, fill_count);
+
+ process_start_time = current_kernel_time();
+
+ prev_read_offset = read_offset;
+ prev_fill_count = fill_count;
+ sdmx_res = sdmx_process(mpq_demux->sdmx_session_handle, flags, input,
+ &fill_count, &read_offset, &errors, &status,
+ mpq_demux->sdmx_filter_count,
+ mpq_demux->sdmx_filters_state.status);
+
+ process_end_time = current_kernel_time();
+ bytes_read = prev_fill_count - fill_count;
+
+ mpq_dmx_update_sdmx_stat(mpq_demux, bytes_read,
+ &process_start_time, &process_end_time);
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: SDMX result=%d, input_fill_count=%u, read_offset=%u, read %d bytes from input, status=0x%X, errors=0x%X\n",
+ __func__, sdmx_res, fill_count, read_offset, bytes_read,
+ status, errors);
+
+ if ((sdmx_res == SDMX_SUCCESS) ||
+ (sdmx_res == SDMX_STATUS_STALLED_IN_PULL_MODE)) {
+ if (sdmx_res == SDMX_STATUS_STALLED_IN_PULL_MODE)
+ MPQ_DVB_DBG_PRINT("%s: SDMX stalled for PULL mode\n",
+ __func__);
+
+ mpq_sdmx_process_results(mpq_demux);
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: SDMX Process returned %d\n",
+ __func__, sdmx_res);
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ return bytes_read;
+}
+
+int mpq_sdmx_process(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *input,
+ u32 fill_count,
+ u32 read_offset,
+ size_t tsp_size)
+{
+ int ret;
+ int todo;
+ int total_bytes_read = 0;
+ int limit = mpq_sdmx_proc_limit * tsp_size;
+
+ MPQ_DVB_DBG_PRINT(
+ "\n\n%s: read_offset=%u, fill_count=%u, tsp_size=%zu\n",
+ __func__, read_offset, fill_count, tsp_size);
+
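+	/*
+	 * Feed the input to sdmx in chunks of at most mpq_sdmx_proc_limit
+	 * packets, advancing the read offset (with wrap-around) by the
+	 * number of bytes actually consumed, until less than one whole TS
+	 * packet remains or an error occurs.
+	 */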
+ while (fill_count >= tsp_size) {
+ todo = fill_count > limit ? limit : fill_count;
+ ret = mpq_sdmx_process_buffer(mpq_demux, input, todo,
+ read_offset);
+
+ if (mpq_demux->demux.sw_filter_abort) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Demuxing from DVR was aborted\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (ret > 0) {
+ total_bytes_read += ret;
+ fill_count -= ret;
+ read_offset += ret;
+ if (read_offset >= input->size)
+ read_offset -= input->size;
+ } else {
+ /*
+ * ret < 0: some error occurred
+ * ret == 0: not enough data (less than 1 TS packet)
+ */
+ if (ret < 0)
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_process_buffer failed, returned %d\n",
+ __func__, ret);
+ break;
+ }
+ }
+
+ return total_bytes_read;
+}
+
+static int mpq_sdmx_write(struct mpq_demux *mpq_demux,
+ struct ion_handle *input_handle,
+ const char *buf,
+ size_t count)
+{
+ struct ion_handle *ion_handle =
+ mpq_demux->demux.dmx.dvr_input.priv_handle;
+ struct dvb_ringbuffer *rbuf = (struct dvb_ringbuffer *)
+ mpq_demux->demux.dmx.dvr_input.ringbuff;
+ struct sdmx_buff_descr buf_desc;
+ u32 read_offset;
+ int ret;
+
+ if (mpq_demux == NULL || input_handle == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to init input buffer descriptor. ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+ read_offset = mpq_demux->demux.dmx.dvr_input.ringbuff->pread;
+
+	/*
+	 * We must flush the buffer before SDMX starts reading from it
+	 * so that it reads valid data from memory.
+	 */
+ ret = msm_ion_do_cache_op(mpq_demux->ion_client,
+ ion_handle, rbuf->data,
+ rbuf->size, ION_IOC_CLEAN_CACHES);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: msm_ion_do_cache_op failed, ret = %d\n",
+ __func__, ret);
+
+ return mpq_sdmx_process(mpq_demux, &buf_desc, count,
+ read_offset, mpq_demux->demux.ts_packet_size);
+}
+
+int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count)
+{
+ struct dvb_demux *dvb_demux;
+ struct mpq_demux *mpq_demux;
+ int ret = count;
+
+ if (demux == NULL)
+ return -EINVAL;
+
+ dvb_demux = demux->priv;
+ mpq_demux = dvb_demux->priv;
+
+ /* Route through secure demux - process secure feeds if any exist */
+ if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+ ret = mpq_sdmx_write(mpq_demux,
+ demux->dvr_input.priv_handle,
+ buf,
+ count);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_write failed. ret = %d\n",
+ __func__, ret);
+ ret = count;
+ }
+ }
+
+	/*
+	 * Route through sw filter - process non-secure feeds if any exist.
+	 * The sw filter should process the same number of bytes the sdmx
+	 * process managed to consume, unless some sdmx error occurred, in
+	 * which case the whole buffer is processed
+	 */
+ if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
+ dvb_dmx_swfilter_format(dvb_demux, buf, ret,
+ dvb_demux->tsp_format);
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ return ret;
+}
+
+int mpq_sdmx_is_loaded(void)
+{
+ static int sdmx_load_checked;
+
+ if (!sdmx_load_checked) {
+ mpq_sdmx_check_app_loaded();
+ sdmx_load_checked = 1;
+ }
+
+ return mpq_dmx_info.secure_demux_app_loaded;
+}
+
+int mpq_dmx_oob_command(struct dvb_demux_feed *feed,
+ struct dmx_oob_command *cmd)
+{
+ struct mpq_feed *mpq_feed = feed->priv;
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+ struct dmx_data_ready event;
+ int ret = 0;
+
+ mutex_lock(&mpq_demux->mutex);
+ mpq_feed = feed->priv;
+
+ if (!dvb_dmx_is_video_feed(feed) && !dvb_dmx_is_pcr_feed(feed) &&
+ !feed->secure_mode.is_secured) {
+ mutex_unlock(&mpq_demux->mutex);
+ return 0;
+ }
+
+ event.data_length = 0;
+
+ switch (cmd->type) {
+ case DMX_OOB_CMD_EOS:
+ event.status = DMX_OK_EOS;
+ if (!feed->secure_mode.is_secured) {
+ if (dvb_dmx_is_video_feed(feed)) {
+ if (!video_framing)
+ mpq_dmx_decoder_pes_closure(mpq_demux,
+ mpq_feed);
+ else
+ mpq_dmx_decoder_frame_closure(mpq_demux,
+ mpq_feed);
+ ret = mpq_dmx_decoder_eos_cmd(mpq_feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: Couldn't write oob eos packet\n",
+ __func__);
+ }
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &event);
+ } else if (!mpq_demux->sdmx_eos) {
+ struct sdmx_buff_descr buf_desc;
+
+ mpq_demux->sdmx_eos = 1;
+ ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc);
+ if (!ret) {
+ mutex_unlock(&mpq_demux->mutex);
+ mpq_sdmx_process_buffer(mpq_demux, &buf_desc,
+ 0, 0);
+ return 0;
+ }
+ }
+ break;
+ case DMX_OOB_CMD_MARKER:
+ event.status = DMX_OK_MARKER;
+ event.marker.id = cmd->params.marker.id;
+
+ if (feed->type == DMX_TYPE_SEC)
+ ret = dvb_dmx_notify_section_event(feed, &event, 1);
+ else
+ /* MPQ_TODO: Notify decoder via the stream buffer */
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &event);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+ return ret;
+}
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
new file mode 100644
index 000000000000..f36e9e7e7a23
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -0,0 +1,1027 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_DMX_PLUGIN_COMMON_H
+#define _MPQ_DMX_PLUGIN_COMMON_H
+
+#include <linux/msm_ion.h>
+
+#include "dvbdev.h"
+#include "dmxdev.h"
+#include "demux.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "mpq_adapter.h"
+#include "mpq_sdmx.h"
+
+#define TS_PACKET_SYNC_BYTE (0x47)
+#define TS_PACKET_SIZE (188)
+#define TS_PACKET_HEADER_LENGTH (4)
+
+/* Length of mandatory fields that must exist in header of video PES */
+#define PES_MANDATORY_FIELDS_LEN 9
+
+/*
+ * Space for 500 PES header packets in the meta-data buffer;
+ * should be more than enough.
+ */
+#define VIDEO_NUM_OF_PES_PACKETS 500
+
+#define VIDEO_META_DATA_PACKET_SIZE \
+ (DVB_RINGBUFFER_PKTHDRSIZE + \
+ sizeof(struct mpq_streambuffer_packet_header) + \
+ sizeof(struct mpq_adapter_video_meta_data))
+
+#define VIDEO_META_DATA_BUFFER_SIZE \
+ (VIDEO_NUM_OF_PES_PACKETS * VIDEO_META_DATA_PACKET_SIZE)
+
+/* Max number of open() requests that can be made on a demux device */
+#define MPQ_MAX_DMX_FILES 128
+
+/**
+ * TSIF alias name length
+ */
+#define TSIF_NAME_LENGTH 20
+
+/**
+ * struct ts_packet_header - Transport packet header
+ * as defined in MPEG2 transport stream standard.
+ */
+struct ts_packet_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned sync_byte:8;
+ unsigned transport_error_indicator:1;
+ unsigned payload_unit_start_indicator:1;
+ unsigned transport_priority:1;
+ unsigned pid_msb:5;
+ unsigned pid_lsb:8;
+ unsigned transport_scrambling_control:2;
+ unsigned adaptation_field_control:2;
+ unsigned continuity_counter:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned sync_byte:8;
+ unsigned pid_msb:5;
+ unsigned transport_priority:1;
+ unsigned payload_unit_start_indicator:1;
+ unsigned transport_error_indicator:1;
+ unsigned pid_lsb:8;
+ unsigned continuity_counter:4;
+ unsigned adaptation_field_control:2;
+ unsigned transport_scrambling_control:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
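+
+/*
+ * Illustrative sketch (helper not used elsewhere in this driver): the
+ * 13-bit PID is split across pid_msb/pid_lsb above, so a caller would
+ * reassemble it roughly as follows.
+ */
+static inline u16 ts_packet_header_get_pid(const struct ts_packet_header *ts)
+{
+ return ((u16)ts->pid_msb << 8) | ts->pid_lsb;
+}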
+
+/**
+ * struct ts_adaptation_field - Adaptation field prefix
+ * as defined in MPEG2 transport stream standard.
+ */
+struct ts_adaptation_field {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned adaptation_field_length:8;
+ unsigned discontinuity_indicator:1;
+ unsigned random_access_indicator:1;
+ unsigned elementary_stream_priority_indicator:1;
+ unsigned PCR_flag:1;
+ unsigned OPCR_flag:1;
+ unsigned splicing_point_flag:1;
+ unsigned transport_private_data_flag:1;
+ unsigned adaptation_field_extension_flag:1;
+ unsigned program_clock_reference_base_1:8;
+ unsigned program_clock_reference_base_2:8;
+ unsigned program_clock_reference_base_3:8;
+ unsigned program_clock_reference_base_4:8;
+ unsigned program_clock_reference_base_5:1;
+ unsigned reserved:6;
+ unsigned program_clock_reference_ext_1:1;
+ unsigned program_clock_reference_ext_2:8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned adaptation_field_length:8;
+ unsigned adaptation_field_extension_flag:1;
+ unsigned transport_private_data_flag:1;
+ unsigned splicing_point_flag:1;
+ unsigned OPCR_flag:1;
+ unsigned PCR_flag:1;
+ unsigned elementary_stream_priority_indicator:1;
+ unsigned random_access_indicator:1;
+ unsigned discontinuity_indicator:1;
+ unsigned program_clock_reference_base_1:8;
+ unsigned program_clock_reference_base_2:8;
+ unsigned program_clock_reference_base_3:8;
+ unsigned program_clock_reference_base_4:8;
+ unsigned program_clock_reference_ext_1:1;
+ unsigned reserved:6;
+ unsigned program_clock_reference_base_5:1;
+ unsigned program_clock_reference_ext_2:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
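+
+/*
+ * Illustrative sketch (helper not used elsewhere in this driver): the
+ * 33-bit PCR base and 9-bit PCR extension are scattered across the
+ * bitfields above; per the MPEG2 standard, the 27MHz PCR value is
+ * base * 300 + extension, recovered roughly as below.
+ */
+static inline u64 ts_adaptation_field_get_pcr(
+ const struct ts_adaptation_field *af)
+{
+ u64 base = ((u64)af->program_clock_reference_base_1 << 25) |
+ ((u64)af->program_clock_reference_base_2 << 17) |
+ ((u64)af->program_clock_reference_base_3 << 9) |
+ ((u64)af->program_clock_reference_base_4 << 1) |
+ af->program_clock_reference_base_5;
+ u64 ext = ((u64)af->program_clock_reference_ext_1 << 8) |
+ af->program_clock_reference_ext_2;
+
+ return base * 300 + ext;
+}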
+
+
+/*
+ * PES packet header containing dts and/or pts values
+ * as defined in MPEG2 transport stream standard.
+ */
+struct pes_packet_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned packet_start_code_prefix_1:8;
+ unsigned packet_start_code_prefix_2:8;
+ unsigned packet_start_code_prefix_3:8;
+ unsigned stream_id:8;
+ unsigned pes_packet_length_msb:8;
+ unsigned pes_packet_length_lsb:8;
+ unsigned reserved_bits0:2;
+ unsigned pes_scrambling_control:2;
+ unsigned pes_priority:1;
+ unsigned data_alignment_indicator:1;
+ unsigned copyright:1;
+ unsigned original_or_copy:1;
+ unsigned pts_dts_flag:2;
+ unsigned escr_flag:1;
+ unsigned es_rate_flag:1;
+ unsigned dsm_trick_mode_flag:1;
+ unsigned additional_copy_info_flag:1;
+ unsigned pes_crc_flag:1;
+ unsigned pes_extension_flag:1;
+ unsigned pes_header_data_length:8;
+ unsigned reserved_bits1:4;
+ unsigned pts_1:3;
+ unsigned marker_bit0:1;
+ unsigned pts_2:8;
+ unsigned pts_3:7;
+ unsigned marker_bit1:1;
+ unsigned pts_4:8;
+ unsigned pts_5:7;
+ unsigned marker_bit2:1;
+ unsigned reserved_bits2:4;
+ unsigned dts_1:3;
+ unsigned marker_bit3:1;
+ unsigned dts_2:8;
+ unsigned dts_3:7;
+ unsigned marker_bit4:1;
+ unsigned dts_4:8;
+ unsigned dts_5:7;
+ unsigned marker_bit5:1;
+ unsigned reserved_bits3:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned packet_start_code_prefix_1:8;
+ unsigned packet_start_code_prefix_2:8;
+ unsigned packet_start_code_prefix_3:8;
+ unsigned stream_id:8;
+ unsigned pes_packet_length_lsb:8;
+ unsigned pes_packet_length_msb:8;
+ unsigned original_or_copy:1;
+ unsigned copyright:1;
+ unsigned data_alignment_indicator:1;
+ unsigned pes_priority:1;
+ unsigned pes_scrambling_control:2;
+ unsigned reserved_bits0:2;
+ unsigned pes_extension_flag:1;
+ unsigned pes_crc_flag:1;
+ unsigned additional_copy_info_flag:1;
+ unsigned dsm_trick_mode_flag:1;
+ unsigned es_rate_flag:1;
+ unsigned escr_flag:1;
+ unsigned pts_dts_flag:2;
+ unsigned pes_header_data_length:8;
+ unsigned marker_bit0:1;
+ unsigned pts_1:3;
+ unsigned reserved_bits1:4;
+ unsigned pts_2:8;
+ unsigned marker_bit1:1;
+ unsigned pts_3:7;
+ unsigned pts_4:8;
+ unsigned marker_bit2:1;
+ unsigned pts_5:7;
+ unsigned marker_bit3:1;
+ unsigned dts_1:3;
+ unsigned reserved_bits2:4;
+ unsigned dts_2:8;
+ unsigned marker_bit4:1;
+ unsigned dts_3:7;
+ unsigned dts_4:8;
+ unsigned marker_bit5:1;
+ unsigned dts_5:7;
+ unsigned reserved_bits3:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
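+
+/*
+ * Illustrative sketch (helper not used elsewhere in this driver): the
+ * 33-bit PTS is carried in the five pts_N fields above (3+8+7+8+7
+ * bits) and would be reassembled roughly as below; the same pattern
+ * applies to the dts_N fields.
+ */
+static inline u64 pes_header_get_pts(const struct pes_packet_header *pes)
+{
+ return ((u64)pes->pts_1 << 30) | ((u64)pes->pts_2 << 22) |
+ ((u64)pes->pts_3 << 15) | ((u64)pes->pts_4 << 7) |
+ pes->pts_5;
+}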
+
+/**
+ * mpq_decoder_buffers_desc - decoder buffer(s) management information.
+ *
+ * @desc: Array of buffer descriptors as they are passed to mpq_streambuffer
+ * upon its initialization. These descriptors must remain valid as long as
+ * the mpq_streambuffer object is used.
+ * @ion_handle: Array of ION handles, one for each decoder buffer, used for
+ * kernel memory mapping or allocation. Handles are saved in order to release
+ * resources properly later on.
+ * @decoder_buffers_num: number of buffers that are managed, either externally
+ * or internally by the mpq_streambuffer object
+ * @shared_file: File handle of internally allocated video buffer shared
+ * with video consumer.
+ */
+struct mpq_decoder_buffers_desc {
+ struct mpq_streambuffer_buffer_desc desc[DMX_MAX_DECODER_BUFFER_NUM];
+ struct ion_handle *ion_handle[DMX_MAX_DECODER_BUFFER_NUM];
+ u32 decoder_buffers_num;
+ struct file *shared_file;
+};
+
+/*
+ * mpq_video_feed_info - private data used for video feed.
+ *
+ * @video_buffer: Holds the streamer buffer shared with
+ * the decoder for feeds having the data going to the decoder.
+ * @video_buffer_lock: Lock protecting the video output buffer.
+ * The lock protects against API calls that manipulate the output buffer
+ * (initialize, free, re-use buffers) racing with dvb-sw demux parsing of
+ * the video data through mpq_dmx_process_video_packet().
+ * @buffer_desc: Holds decoder buffer(s) information used for stream buffer.
+ * @pes_header: Used for feeds that output data to decoder,
+ * holds PES header of current processed PES.
+ * @pes_header_left_bytes: Used for feeds that output data to decoder,
+ * holds remaining PES header bytes of current processed PES.
+ * @pes_header_offset: Holds the offset within the current processed
+ * pes header.
+ * @fullness_wait_cancel: Flag used to signal to abort waiting for
+ * decoder's fullness.
+ * @stream_interface: The ID of the video stream interface registered
+ * with this stream buffer.
+ * @patterns: pointer to the framing patterns to look for.
+ * @patterns_num: number of framing patterns.
+ * @prev_pattern: holds the trailing data of the last processed video packet.
+ * @frame_offset: Saves data buffer offset to which a new frame will be written
+ * @last_pattern_offset: Holds the previous pattern offset
+ * @pending_pattern_len: Accumulated number of data bytes that will be
+ * reported for this frame.
+ * @last_framing_match_type: Used for saving the type of
+ * the previous pattern match found in this video feed.
+ * @last_framing_match_stc: Used for saving the STC attached to TS packet
+ * of the previous pattern match found in this video feed.
+ * @found_sequence_header_pattern: Flag used to note that an MPEG-2
+ * Sequence Header, H.264 SPS or VC-1 Sequence Header pattern
+ * (whichever is relevant according to the video standard) had already
+ * been found.
+ * @prefix_size: a bit mask representing the size(s) of possible prefixes
+ * to the pattern, already found in the previous buffer. If bit 0 is set,
+ * a prefix of size 1 was found. If bit 1 is set, a prefix of size 2 was
+ * found, etc. This supports a prefix size of up to 32, which is more
+ * than we need. The search function updates prefix_size as needed
+ * for the next buffer search.
+ * @first_prefix_size: used to save the prefix size used to find the first
+ * pattern written to the stream buffer.
+ * @saved_pts_dts_info: used to save PTS/DTS information until it is written.
+ * @new_pts_dts_info: used to store PTS/DTS information from current PES header.
+ * @saved_info_used: indicates if saved PTS/DTS information was used.
+ * @new_info_exists: indicates if new PTS/DTS information exists in
+ * new_pts_dts_info that should be saved to saved_pts_dts_info.
+ * @first_pts_dts_copy: a flag used to indicate if PTS/DTS information needs
+ * to be copied from the currently parsed PES header to the saved_pts_dts_info.
+ * @tei_errs: Transport stream Transport Error Indicator (TEI) counter.
+ * @last_continuity: last continuity counter value found in TS packet header.
+ * Initialized to -1.
+ * @continuity_errs: Transport stream continuity error counter.
+ * @ts_packets_num: TS packets counter.
+ * @ts_dropped_bytes: counts the number of bytes dropped due to insufficient
+ * buffer space.
+ * @prev_stc: STC attached to the previous video TS packet
+ */
+struct mpq_video_feed_info {
+ struct mpq_streambuffer *video_buffer;
+ spinlock_t video_buffer_lock;
+ struct mpq_decoder_buffers_desc buffer_desc;
+ struct pes_packet_header pes_header;
+ u32 pes_header_left_bytes;
+ u32 pes_header_offset;
+ int fullness_wait_cancel;
+ enum mpq_adapter_stream_if stream_interface;
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
+ int patterns_num;
+ char prev_pattern[DVB_DMX_MAX_PATTERN_LEN];
+ u32 frame_offset;
+ u32 last_pattern_offset;
+ u32 pending_pattern_len;
+ u64 last_framing_match_type;
+ u64 last_framing_match_stc;
+ int found_sequence_header_pattern;
+ struct dvb_dmx_video_prefix_size_masks prefix_size;
+ u32 first_prefix_size;
+ struct dmx_pts_dts_info saved_pts_dts_info;
+ struct dmx_pts_dts_info new_pts_dts_info;
+ int saved_info_used;
+ int new_info_exists;
+ int first_pts_dts_copy;
+ u32 tei_errs;
+ int last_continuity;
+ u32 continuity_errs;
+ u32 ts_packets_num;
+ u32 ts_dropped_bytes;
+ u64 prev_stc;
+};
+
+/**
+ * struct mpq_feed - mpq common plugin feed information
+ *
+ * @dvb_demux_feed: Back pointer to dvb demux level feed object
+ * @mpq_demux: Pointer to common mpq demux object
+ * @plugin_priv: Plugin specific private data
+ * @sdmx_filter_handle: Secure demux filter handle. Recording feed may share
+ * same filter handle
+ * @secondary_feed: Specifies if this feed shares filter handle with
+ * other feeds
+ * @filter_type: Type of the secure demux filter allocated for this feed
+ * @metadata_buf: Ring buffer object for managing the metadata buffer
+ * @metadata_buf_handle: Allocation handle for the metadata buffer
+ * @session_id: Counter that is incremented every time feed is initialized
+ * through mpq_dmx_init_mpq_feed
+ * @sdmx_buf: Ring buffer object for intermediate output data from the sdmx
+ * @sdmx_buf_handle: Allocation handle for the sdmx intermediate data buffer
+ * @video_info: Video feed specific information
+ */
+struct mpq_feed {
+ struct dvb_demux_feed *dvb_demux_feed;
+ struct mpq_demux *mpq_demux;
+ void *plugin_priv;
+
+ /* Secure demux related */
+ int sdmx_filter_handle;
+ int secondary_feed;
+ enum sdmx_filter filter_type;
+ struct dvb_ringbuffer metadata_buf;
+ struct ion_handle *metadata_buf_handle;
+
+ u8 session_id;
+ struct dvb_ringbuffer sdmx_buf;
+ struct ion_handle *sdmx_buf_handle;
+
+ struct mpq_video_feed_info video_info;
+};
+
+/**
+ * struct mpq_demux - mpq demux information
+ * @idx: Instance index
+ * @demux: The dvb_demux instance used by mpq_demux
+ * @dmxdev: The dmxdev instance used by mpq_demux
+ * @fe_memory: Handle of front-end memory source to mpq_demux
+ * @source: The current source connected to the demux
+ * @is_initialized: Indicates whether this demux device was
+ * initialized or not.
+ * @ion_client: ION demux client used to allocate memory from ION.
+ * @mutex: Lock used to protect against private feed data
+ * @feeds: mpq common feed object pool
+ * @num_active_feeds: Number of active mpq feeds
+ * @num_secure_feeds: Number of currently allocated secure feeds (feeds
+ * that have an associated sdmx filter). Used before each call to
+ * sdmx_process() to build up-to-date state.
+ * @sdmx_session_handle: Secure demux open session handle
+ * @sdmx_session_ref_count: Reference count of the secure demux session
+ * @sdmx_filter_count: Number of active secure demux filters
+ * @sdmx_eos: End-of-stream indication flag for current sdmx session
+ * @sdmx_filters_state: Array holding buffers status for each secure
+ * demux filter.
+ * @decoder_alloc_flags: ION flags to be used when allocating internally
+ * @plugin_priv: Underlying plugin's own private data
+ * @mpq_dmx_plugin_release: Underlying plugin's release function
+ * @hw_notification_interval: Notification interval in msec,
+ * exposed in debugfs.
+ * @hw_notification_min_interval: Minimum notification interval in msec,
+ * exposed in debugfs.
+ * @hw_notification_count: Notification count, exposed in debugfs.
+ * @hw_notification_size: Notification size in bytes, exposed in debugfs.
+ * @hw_notification_min_size: Minimum notification size in bytes,
+ * exposed in debugfs.
+ * @decoder_stat: Decoder output statistics, exposed in debug-fs.
+ * @sdmx_process_count: Total number of times sdmx_process is called.
+ * @sdmx_process_time_sum: Total time sdmx_process takes.
+ * @sdmx_process_time_average: Average time sdmx_process takes.
+ * @sdmx_process_time_max: Max time sdmx_process takes.
+ * @sdmx_process_packets_sum: Total packets number sdmx_process handled.
+ * @sdmx_process_packets_average: Average packets number sdmx_process handled.
+ * @sdmx_process_packets_min: Minimum packets number sdmx_process handled.
+ * @last_notification_time: Time of last HW notification.
+ */
+struct mpq_demux {
+ int idx;
+ struct dvb_demux demux;
+ struct dmxdev dmxdev;
+ struct dmx_frontend fe_memory;
+ dmx_source_t source;
+ int is_initialized;
+ struct ion_client *ion_client;
+ struct mutex mutex;
+ struct mpq_feed feeds[MPQ_MAX_DMX_FILES];
+ u32 num_active_feeds;
+ u32 num_secure_feeds;
+ int sdmx_session_handle;
+ int sdmx_session_ref_count;
+ int sdmx_filter_count;
+ int sdmx_eos;
+ struct {
+ /* SDMX filters status */
+ struct sdmx_filter_status status[MPQ_MAX_DMX_FILES];
+
+ /* Index of the feed respective to SDMX filter */
+ u8 mpq_feed_idx[MPQ_MAX_DMX_FILES];
+
+ /*
+ * Snapshot of session_id of the feed
+ * when SDMX process was called. This is used
+ * to identify whether the feed has been
+ * restarted when processing SDMX results.
+ * May happen when demux is stalled in playback
+ * from memory with PULL mode.
+ */
+ u8 session_id[MPQ_MAX_DMX_FILES];
+ } sdmx_filters_state;
+
+ unsigned int decoder_alloc_flags;
+
+ /* HW plugin specific */
+ void *plugin_priv;
+ int (*mpq_dmx_plugin_release)(struct mpq_demux *mpq_demux);
+
+ /* debug-fs */
+ u32 hw_notification_interval;
+ u32 hw_notification_min_interval;
+ u32 hw_notification_count;
+ u32 hw_notification_size;
+ u32 hw_notification_min_size;
+
+ struct {
+ /*
+ * Accumulated number of bytes
+ * dropped due to decoder buffer fullness.
+ */
+ u32 drop_count;
+
+ /* Counter incremented for each video frame output by demux */
+ u32 out_count;
+
+ /*
+ * Sum of intervals (msec) holding the time
+ * between two successive video frames output.
+ */
+ u32 out_interval_sum;
+
+ /*
+ * Average interval (msec) between two
+ * successive video frames output.
+ */
+ u32 out_interval_average;
+
+ /*
+ * Max interval (msec) between two
+ * successive video frames output.
+ */
+ u32 out_interval_max;
+
+ /* Counter for number of decoder packets with TEI bit set */
+ u32 ts_errors;
+
+ /*
+ * Counter for number of decoder packets
+ * with continuity counter errors.
+ */
+ u32 cc_errors;
+
+ /* Time of last video frame output */
+ struct timespec out_last_time;
+ } decoder_stat[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+
+ u32 sdmx_process_count;
+ u32 sdmx_process_time_sum;
+ u32 sdmx_process_time_average;
+ u32 sdmx_process_time_max;
+ u32 sdmx_process_packets_sum;
+ u32 sdmx_process_packets_average;
+ u32 sdmx_process_packets_min;
+ enum sdmx_log_level sdmx_log_level;
+
+ struct timespec last_notification_time;
+};
+
+/**
+ * mpq_dmx_init - initialization and registration function of
+ * a single MPQ demux device
+ *
+ * @adapter: The adapter to register mpq_demux to
+ * @mpq_demux: The mpq demux to initialize
+ *
+ * Every HW plug-in must provide an implementation of this function,
+ * which is called for each demux device during module initialization.
+ * The function mpq_dmx_plugin_init should be called during the HW
+ * plug-in module initialization.
+ */
+typedef int (*mpq_dmx_init)(struct dvb_adapter *mpq_adapter,
+ struct mpq_demux *demux);
+
+/**
+ * mpq_dmx_plugin_init - Initialize demux devices and register
+ * them to the dvb adapter.
+ *
+ * @dmx_init_func: Pointer to the function to be used
+ * to initialize demux of the underlying HW plugin.
+ *
+ * Return error code
+ *
+ * Should be called at the HW plugin module initialization.
+ */
+int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func);
+
+/**
+ * mpq_dmx_plugin_exit - terminate demux devices.
+ *
+ * Should be called at the HW plugin module termination.
+ */
+void mpq_dmx_plugin_exit(void);
+
+/**
+ * mpq_dmx_set_source - implementation of the set_source routine.
+ *
+ * @demux: The demux device to set its source.
+ * @src: The source to be set.
+ *
+ * Return error code
+ *
+ * Can be used by the underlying plugins to implement kernel
+ * demux API set_source routine.
+ */
+int mpq_dmx_set_source(struct dmx_demux *demux, const dmx_source_t *src);
+
+/**
+ * mpq_dmx_map_buffer - map user-space buffer into kernel space.
+ *
+ * @demux: The demux device.
+ * @dmx_buffer: The demux buffer from user-space, assumes that
+ * buffer handle is ION file-handle.
+ * @priv_handle: Saves ION-handle of the buffer imported by this function.
+ * @kernel_mem: Saves kernel mapped address of the buffer.
+ *
+ * Return error code
+ *
+ * The function maps the buffer into kernel memory only if the buffer
+ * was not allocated with secure flag, otherwise the returned kernel
+ * memory address is set to NULL.
+ */
+int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer,
+ void **priv_handle, void **kernel_mem);
+
+/**
+ * mpq_dmx_unmap_buffer - unmap user-space buffer from kernel space memory.
+ *
+ * @demux: The demux device.
+ * @priv_handle: ION-handle of the buffer returned from mpq_dmx_map_buffer.
+ *
+ * Return error code
+ *
+ * The function unmaps the buffer from kernel memory only if the buffer
+ * was not allocated with secure flag.
+ */
+int mpq_dmx_unmap_buffer(struct dmx_demux *demux, void *priv_handle);
+
+/**
+ * mpq_dmx_decoder_fullness_init - Initialize waiting
+ * mechanism on decoder's buffer fullness.
+ *
+ * @feed: The decoder's feed
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_fullness_wait - Checks whether the decoder buffer
+ * has the required free space and, if not, waits for it.
+ *
+ * @feed: The decoder's feed
+ * @required_space: the required free space to wait for
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_fullness_wait(struct dvb_demux_feed *feed,
+ size_t required_space);
+
+/**
+ * mpq_dmx_decoder_fullness_abort - Aborts any wait currently in
+ * progress on the decoder's buffer fullness. After calling this,
+ * the user must call mpq_dmx_decoder_fullness_init before waiting
+ * again.
+ *
+ * @feed: The decoder's feed
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_buffer_status - Returns the
+ * status of the decoder's buffer.
+ *
+ * @feed: The decoder's feed
+ * @dmx_buffer_status: Status of decoder's buffer
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_buffer_status(struct dvb_demux_feed *feed,
+ struct dmx_buffer_status *dmx_buffer_status);
+
+/**
+ * mpq_dmx_reuse_decoder_buffer - release buffer passed to decoder for reuse
+ * by the stream-buffer.
+ *
+ * @feed: The decoder's feed.
+ * @cookie: stream-buffer handle of the buffer.
+ *
+ * Return error code
+ *
+ * The function releases the buffer provided by the stream-buffer
+ * connected to the decoder back to the stream-buffer for reuse.
+ */
+int mpq_dmx_reuse_decoder_buffer(struct dvb_demux_feed *feed, int cookie);
+
+/**
+ * mpq_dmx_process_video_packet - Assemble PES data and output it
+ * to the stream-buffer connected to the decoder.
+ *
+ * @feed: The feed used for the video TS packets
+ * @buf: The buffer holding video TS packet.
+ *
+ * Return error code.
+ *
+ * The function assumes it receives a buffer with a single TS packet
+ * of the relevant PID.
+ * If the output buffer is full during assembly, the function drops
+ * the packet and does not write it to the output buffer.
+ * Scrambled packets are bypassed.
+ */
+int mpq_dmx_process_video_packet(struct dvb_demux_feed *feed, const u8 *buf);
+
+/**
+ * mpq_dmx_process_pcr_packet - Extract PCR/STC pairs from
+ * a 192 bytes packet.
+ *
+ * @feed: The feed used for the PCR TS packets
+ * @buf: The buffer holding pcr/stc packet.
+ *
+ * Return error code.
+ *
+ * The function assumes it receives a buffer with a single TS packet
+ * of the relevant PID, carrying a 4-byte suffix with an extra
+ * timestamp in the following format:
+ *
+ * Byte3: TSIF flags
+ * Byte0-2: TTS, 0..2^24-1 at 105.47 kHz (27*10^6/256).
+ *
+ * The function calls back into dmxdev after extracting the pcr/stc
+ * pair.
+ */
+int mpq_dmx_process_pcr_packet(struct dvb_demux_feed *feed, const u8 *buf);
+
+/**
+ * mpq_dmx_extract_pcr_and_dci() - Extract the PCR field and discontinuity
+ * indicator from a TS packet buffer.
+ *
+ * @buf: TS packet buffer
+ * @pcr: returned PCR value
+ * @dci: returned discontinuity indicator
+ *
+ * Returns 1 if PCR was extracted, 0 otherwise.
+ */
+int mpq_dmx_extract_pcr_and_dci(const u8 *buf, u64 *pcr, int *dci);
+
+/**
+ * mpq_dmx_init_debugfs_entries -
+ * Extend dvb-demux debugfs with mpq related entries (HW statistics and secure
+ * demux log level).
+ *
+ * @mpq_demux: The mpq_demux device to initialize.
+ */
+void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_update_hw_statistics -
+ * Update dvb-demux debugfs with HW notification statistics.
+ *
+ * @mpq_demux: The mpq_demux device to update.
+ */
+void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_set_cipher_ops - Handles setting of cipher operations
+ *
+ * @feed: The feed to set its cipher operations
+ * @cipher_ops: Cipher operations to be set
+ *
+ * This common function handles only the secure-demux case; when
+ * working with secure demux, a single decrypt cipher operation is
+ * allowed.
+ *
+ * Return error code
+ */
+int mpq_dmx_set_cipher_ops(struct dvb_demux_feed *feed,
+ struct dmx_cipher_operations *cipher_ops);
+
+/**
+ * mpq_dmx_convert_tts - Convert timestamp attached by HW to each TS
+ * packet to 27MHz.
+ *
+ * @feed: The feed with TTS attached
+ * @timestamp: Buffer holding the timestamp attached by the HW
+ * @timestampIn27Mhz: Timestamp result in 27MHz
+ */
+void mpq_dmx_convert_tts(struct dvb_demux_feed *feed,
+ const u8 timestamp[TIMESTAMP_LEN],
+ u64 *timestampIn27Mhz);
+
+/**
+ * mpq_sdmx_open_session - Handle the details of opening a new secure demux
+ * session for the specified mpq demux instance. Multiple calls to this
+ * are allowed; reference counting ensures the session is opened only
+ * when needed.
+ *
+ * @mpq_demux: mpq demux instance
+ *
+ * Return error code
+ */
+int mpq_sdmx_open_session(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_sdmx_close_session - Closes secure demux session. The session
+ * is closed only if reference counter of the session reaches 0.
+ *
+ * @mpq_demux: mpq demux instance
+ *
+ * Return error code
+ */
+int mpq_sdmx_close_session(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_init_mpq_feed - Initialize an mpq feed object.
+ * The function allocates an mpq_feed object and saves it in the
+ * dvb_demux_feed priv field.
+ *
+ * @feed: A dvb demux level feed parent object
+ *
+ * Return error code
+ */
+int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_terminate_feed - Destroy an mpq feed object
+ *
+ * @feed: A dvb demux level feed parent object
+ *
+ * Return error code
+ */
+int mpq_dmx_terminate_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_init_video_feed() - Initializes video related data structures
+ *
+ * @mpq_feed: mpq_feed object to initialize
+ *
+ * Return error code
+ */
+int mpq_dmx_init_video_feed(struct mpq_feed *mpq_feed);
+
+/**
+ * mpq_dmx_terminate_video_feed() - Release video related feed resources
+ *
+ * @mpq_feed: mpq_feed object to terminate
+ *
+ * Return error code
+ */
+int mpq_dmx_terminate_video_feed(struct mpq_feed *mpq_feed);
+
+/**
+ * mpq_dmx_write - demux write() function implementation.
+ *
+ * A wrapper function used for writing new data into the demux via DVR.
+ * It checks where new data should actually go, the secure demux or the normal
+ * dvb demux software demux.
+ *
+ * @demux: demux interface
+ * @buf: input buffer
+ * @count: number of data bytes in input buffer
+ *
+ * Return number of bytes processed or error code
+ */
+int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count);
+
+/**
+ * mpq_sdmx_process - Perform demuxing process on the specified input buffer
+ * in the secure demux instance
+ *
+ * @mpq_demux: mpq demux instance
+ * @input: input buffer descriptor
+ * @fill_count: number of data bytes in input buffer that can be read
+ * @read_offset: offset in buffer for reading
+ * @tsp_size: size of single TS packet
+ *
+ * Return number of bytes read or error code
+ */
+int mpq_sdmx_process(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *input,
+ u32 fill_count,
+ u32 read_offset,
+ size_t tsp_size);
+
+/**
+ * mpq_sdmx_is_loaded - Returns 1 if the secure demux application is loaded,
+ * 0 otherwise. This function should be used to determine whether or not
+ * processing should take place in the SDMX.
+ */
+int mpq_sdmx_is_loaded(void);
+
+/**
+ * mpq_dmx_oob_command - Handles OOB command from dvb-demux.
+ *
+ * OOB marker commands trigger callback to the dmxdev.
+ * Handling of EOS command may trigger current (last on stream) PES/Frame to
+ * be reported, in addition to callback to the dmxdev.
+ * In case secure demux is active for the feed, EOS command is passed to the
+ * secure demux for handling.
+ *
+ * @feed: dvb demux feed object
+ * @cmd: oob command data
+ *
+ * returns 0 on success or error
+ */
+int mpq_dmx_oob_command(struct dvb_demux_feed *feed,
+ struct dmx_oob_command *cmd);
+
+/**
+ * mpq_dmx_peer_rec_feed() - For a recording filter with multiple feed
+ * objects, search for a feed object that shares the same filter as the
+ * specified feed object and return it.
+ * This can be used to test whether the specified feed object is the
+ * first feed allocated for the recording filter, in which case the
+ * return value is NULL.
+ *
+ * @feed: dvb demux feed object
+ *
+ * Return the dvb_demux_feed sharing the same filter's buffer or NULL if no
+ * such is found.
+ */
+struct dvb_demux_feed *mpq_dmx_peer_rec_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_eos_cmd() - Report EOS event to the mpq_streambuffer
+ *
+ * @mpq_feed: Video mpq_feed object for notification
+ *
+ * Return error code
+ */
+int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed);
+
+/**
+ * mpq_dmx_parse_mandatory_pes_header() - Parse non-optional PES header fields
+ * from TS packet buffer and save results in the feed object.
+ *
+ * @feed: Video dvb demux feed object
+ * @feed_data: Structure where results will be saved
+ * @pes_header: Saved PES header
+ * @buf: Input buffer containing TS packet with the PES header
+ * @ts_payload_offset: Offset in 'buf' where payload begins
+ * @bytes_avail: Length of actual payload
+ *
+ * Return error code
+ */
+int mpq_dmx_parse_mandatory_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail);
+
+/**
+ * mpq_dmx_parse_remaining_pes_header() - Parse optional PES header fields
+ * from TS packet buffer and save results in the feed object.
+ * This function depends on mpq_dmx_parse_mandatory_pes_header being called
+ * first for state to be valid.
+ *
+ * @feed: Video dvb demux feed object
+ * @feed_data: Structure where results will be saved
+ * @pes_header: Saved PES header
+ * @buf: Input buffer containing TS packet with the PES header
+ * @ts_payload_offset: Offset in 'buf' where payload begins
+ * @bytes_avail: Length of actual payload
+ *
+ * Return error code
+ */
+int mpq_dmx_parse_remaining_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail);
+
+/**
+ * mpq_dmx_flush_stream_buffer() - Flush video stream buffer object of the
+ * specific video feed, both meta-data packets and data.
+ *
+ * @feed: dvb demux video feed object
+ *
+ * Return error code
+ */
+int mpq_dmx_flush_stream_buffer(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_save_pts_dts() - Save the current PTS/DTS data
+ *
+ * @feed_data: Video feed structure where PTS/DTS is saved
+ */
+static inline void mpq_dmx_save_pts_dts(struct mpq_video_feed_info *feed_data)
+{
+ if (feed_data->new_info_exists) {
+ feed_data->saved_pts_dts_info.pts_exist =
+ feed_data->new_pts_dts_info.pts_exist;
+ feed_data->saved_pts_dts_info.pts =
+ feed_data->new_pts_dts_info.pts;
+ feed_data->saved_pts_dts_info.dts_exist =
+ feed_data->new_pts_dts_info.dts_exist;
+ feed_data->saved_pts_dts_info.dts =
+ feed_data->new_pts_dts_info.dts;
+
+ feed_data->new_info_exists = 0;
+ feed_data->saved_info_used = 0;
+ }
+}
+
+/**
+ * mpq_dmx_write_pts_dts() - Write out the saved PTS/DTS data and mark as used
+ *
+ * @feed_data: Video feed structure where PTS/DTS was saved
+ * @info: PTS/DTS structure to write to
+ */
+static inline void mpq_dmx_write_pts_dts(struct mpq_video_feed_info *feed_data,
+ struct dmx_pts_dts_info *info)
+{
+ if (!feed_data->saved_info_used) {
+ info->pts_exist = feed_data->saved_pts_dts_info.pts_exist;
+ info->pts = feed_data->saved_pts_dts_info.pts;
+ info->dts_exist = feed_data->saved_pts_dts_info.dts_exist;
+ info->dts = feed_data->saved_pts_dts_info.dts;
+
+ feed_data->saved_info_used = 1;
+ } else {
+ info->pts_exist = 0;
+ info->dts_exist = 0;
+ }
+}
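+
+/*
+ * Typical flow (sketch): PES header parsing fills new_pts_dts_info,
+ * mpq_dmx_save_pts_dts() then latches it into saved_pts_dts_info, and
+ * mpq_dmx_write_pts_dts() copies it out exactly once, reporting no
+ * PTS/DTS on subsequent calls until new information is saved.
+ */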
+
+/*
+ * mpq_dmx_calc_time_delta -
+ * Calculate delta in msec between two time snapshots.
+ *
+ * @curr_time: value of current time
+ * @prev_time: value of previous time
+ *
+ * Return time-delta in msec
+ */
+static inline u32 mpq_dmx_calc_time_delta(struct timespec *curr_time,
+ struct timespec *prev_time)
+{
+ struct timespec delta_time;
+ u64 delta_time_ms;
+
+ delta_time = timespec_sub(*curr_time, *prev_time);
+
+ delta_time_ms = ((u64)delta_time.tv_sec * MSEC_PER_SEC) +
+ delta_time.tv_nsec / NSEC_PER_MSEC;
+
+ return (u32)delta_time_ms;
+}
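+
+/*
+ * Worked example (sketch): curr_time = {2, 100000000} and
+ * prev_time = {1, 900000000} give delta_time = {0, 200000000},
+ * i.e. 0 * 1000 + 200000000 / 1000000 = 200 msec.
+ */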
+
+void mpq_dmx_update_decoder_stat(struct mpq_feed *mpq_feed);
+
+/* Return the common module parameter tsif_mode */
+int mpq_dmx_get_param_tsif_mode(void);
+
+/* Return the common module parameter clock_inv */
+int mpq_dmx_get_param_clock_inv(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_odd */
+int mpq_dmx_get_param_scramble_odd(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_even */
+int mpq_dmx_get_param_scramble_even(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_default_discard */
+int mpq_dmx_get_param_scramble_default_discard(void);
+
+
+#endif /* _MPQ_DMX_PLUGIN_COMMON_H */
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
new file mode 100644
index 000000000000..b350e4f4144a
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
@@ -0,0 +1,280 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+
+
+static int mpq_sw_dmx_start_filtering(struct dvb_demux_feed *feed)
+{
+ int ret = -EINVAL;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ MPQ_DVB_DBG_PRINT("%s(pid=%d) executed\n", __func__, feed->pid);
+
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid mpq_demux handle\n", __func__);
+ goto out;
+ }
+
+ if (mpq_demux->source < DMX_SOURCE_DVR0) {
+ MPQ_DVB_ERR_PRINT("%s: only DVR source is supported (%d)\n",
+ __func__, mpq_demux->source);
+ goto out;
+ }
+
+ /*
+ * Always feed sections/PES starting from a new one and do not
+ * partially transfer data from an older one
+ */
+ feed->pusi_seen = 0;
+
+ ret = mpq_dmx_init_mpq_feed(feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT("%s: mpq_dmx_init_mpq_feed failed(%d)\n",
+ __func__, ret);
+out:
+ return ret;
+}
+
+static int mpq_sw_dmx_stop_filtering(struct dvb_demux_feed *feed)
+{
+ int ret;
+
+ MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid);
+
+ ret = mpq_dmx_terminate_feed(feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT("%s: mpq_dmx_terminate_feed failed(%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int mpq_sw_dmx_write_to_decoder(struct dvb_demux_feed *feed,
+ const u8 *buf, size_t len)
+{
+ /*
+ * It is assumed that this function is called once for each
+ * TS packet of the relevant feed.
+ */
+ if (len > (TIMESTAMP_LEN + TS_PACKET_SIZE))
+ MPQ_DVB_DBG_PRINT(
+ "%s: warnning - len larger than one packet\n",
+ __func__);
+
+ if (dvb_dmx_is_video_feed(feed))
+ return mpq_dmx_process_video_packet(feed, buf);
+
+ if (dvb_dmx_is_pcr_feed(feed))
+ return mpq_dmx_process_pcr_packet(feed, buf);
+
+ return 0;
+}
+
+static int mpq_sw_dmx_set_source(struct dmx_demux *demux,
+ const dmx_source_t *src)
+{
+ int ret = -EINVAL;
+
+ if (demux == NULL || demux->priv == NULL || src == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ goto out;
+ }
+
+ if (*src >= DMX_SOURCE_DVR0 && *src <= DMX_SOURCE_DVR3) {
+ ret = mpq_dmx_set_source(demux, src);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_set_source(%d) failed, ret=%d\n",
+ __func__, *src, ret);
+ } else {
+ MPQ_DVB_ERR_PRINT("%s: not a DVR source\n", __func__);
+ }
+
+out:
+ return ret;
+}
+
+static int mpq_sw_dmx_get_caps(struct dmx_demux *demux, struct dmx_caps *caps)
+{
+ struct dvb_demux *dvb_demux = demux->priv;
+
+ if (dvb_demux == NULL || caps == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA |
+ DMX_CAP_TS_INSERTION | DMX_CAP_VIDEO_INDEXING |
+ DMX_CAP_AUTO_BUFFER_FLUSH;
+ caps->recording_max_video_pids_indexed = 0;
+ caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+ caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+ caps->num_pid_filters = MPQ_MAX_DMX_FILES;
+ caps->num_section_filters = dvb_demux->filternum;
+ caps->num_section_filters_per_pid = dvb_demux->filternum;
+ caps->section_filter_length = DMX_FILTER_SIZE;
+ caps->num_demod_inputs = 0;
+ caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+ caps->max_bitrate = 192;
+ caps->demod_input_max_bitrate = 96;
+ caps->memory_input_max_bitrate = 96;
+ caps->num_cipher_ops = 1;
+
+ /* No STC support */
+ caps->max_stc = 0;
+
+ /* Buffer requirements */
+ caps->section.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->section.max_buffer_num = 1;
+ caps->section.max_size = 0xFFFFFFFF;
+ caps->section.size_alignment = 0;
+ caps->pes.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->pes.max_buffer_num = 1;
+ caps->pes.max_size = 0xFFFFFFFF;
+ caps->pes.size_alignment = 0;
+ caps->recording_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->recording_188_tsp.max_buffer_num = 1;
+ caps->recording_188_tsp.max_size = 0xFFFFFFFF;
+ caps->recording_188_tsp.size_alignment = 0;
+ caps->recording_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->recording_192_tsp.max_buffer_num = 1;
+ caps->recording_192_tsp.max_size = 0xFFFFFFFF;
+ caps->recording_192_tsp.size_alignment = 0;
+ caps->playback_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->playback_188_tsp.max_buffer_num = 1;
+ caps->playback_188_tsp.max_size = 0xFFFFFFFF;
+ caps->playback_188_tsp.size_alignment = 188;
+ caps->playback_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->playback_192_tsp.max_buffer_num = 1;
+ caps->playback_192_tsp.max_size = 0xFFFFFFFF;
+ caps->playback_192_tsp.size_alignment = 192;
+ caps->decoder.flags =
+ DMX_BUFFER_SECURED_IF_DECRYPTED |
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_LINEAR_GROUP_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM;
+ caps->decoder.max_size = 0xFFFFFFFF;
+ caps->decoder.size_alignment = SZ_4K;
+
+ return 0;
+}
+
+static int mpq_sw_dmx_init(struct dvb_adapter *mpq_adapter,
+ struct mpq_demux *mpq_demux)
+{
+ int ret;
+ struct dvb_demux *dvb_demux = &mpq_demux->demux;
+
+ /* Set the kernel-demux object capabilities */
+ mpq_demux->demux.dmx.capabilities =
+ DMX_TS_FILTERING |
+ DMX_PES_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING |
+ DMX_CRC_CHECKING |
+ DMX_TS_DESCRAMBLING;
+
+ mpq_demux->decoder_alloc_flags = ION_FLAG_CACHED;
+
+ /* Set dvb-demux "virtual" function pointers */
+ dvb_demux->priv = (void *)mpq_demux;
+ dvb_demux->filternum = MPQ_MAX_DMX_FILES;
+ dvb_demux->feednum = MPQ_MAX_DMX_FILES;
+ dvb_demux->start_feed = mpq_sw_dmx_start_filtering;
+ dvb_demux->stop_feed = mpq_sw_dmx_stop_filtering;
+ dvb_demux->write_to_decoder = mpq_sw_dmx_write_to_decoder;
+ dvb_demux->decoder_fullness_init = mpq_dmx_decoder_fullness_init;
+ dvb_demux->decoder_fullness_wait = mpq_dmx_decoder_fullness_wait;
+ dvb_demux->decoder_fullness_abort = mpq_dmx_decoder_fullness_abort;
+ dvb_demux->decoder_buffer_status = mpq_dmx_decoder_buffer_status;
+ dvb_demux->reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer;
+ dvb_demux->set_cipher_op = mpq_dmx_set_cipher_ops;
+ dvb_demux->oob_command = mpq_dmx_oob_command;
+ dvb_demux->convert_ts = mpq_dmx_convert_tts;
+ dvb_demux->flush_decoder_buffer = NULL;
+
+ /* Initialize dvb_demux object */
+ ret = dvb_dmx_init(dvb_demux);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed, ret=%d\n",
+ __func__, ret);
+ goto init_failed;
+ }
+
+ /* Now initialize the dmx-dev object */
+ mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
+ mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
+ mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX;
+
+ mpq_demux->dmxdev.demux->set_source = mpq_sw_dmx_set_source;
+ mpq_demux->dmxdev.demux->get_stc = NULL;
+ mpq_demux->dmxdev.demux->get_caps = mpq_sw_dmx_get_caps;
+ mpq_demux->dmxdev.demux->map_buffer = mpq_dmx_map_buffer;
+ mpq_demux->dmxdev.demux->unmap_buffer = mpq_dmx_unmap_buffer;
+ mpq_demux->dmxdev.demux->write = mpq_dmx_write;
+ ret = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed, ret=%d\n",
+ __func__, ret);
+ goto init_failed_dmx_release;
+ }
+
+ /* Extend dvb-demux debugfs with mpq demux statistics. */
+ mpq_dmx_init_debugfs_entries(mpq_demux);
+
+ return 0;
+
+init_failed_dmx_release:
+ dvb_dmx_release(dvb_demux);
+init_failed:
+ return ret;
+}
+
+static int __init mpq_dmx_sw_plugin_init(void)
+{
+ return mpq_dmx_plugin_init(mpq_sw_dmx_init);
+}
+
+static void __exit mpq_dmx_sw_plugin_exit(void)
+{
+ mpq_dmx_plugin_exit();
+}
+
+
+module_init(mpq_dmx_sw_plugin_init);
+module_exit(mpq_dmx_sw_plugin_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. demux software plugin");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
new file mode 100644
index 000000000000..be88bc1bf19f
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
@@ -0,0 +1,1968 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/qcom_tspp.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+
+#define TSIF_COUNT 2
+
+/* Max number of PID filters */
+#define TSPP_MAX_PID_FILTER_NUM 128
+
+/* Max number of user-defined HW PID filters */
+#define TSPP_MAX_HW_PID_FILTER_NUM 15
+
+/* HW index of the last entry in the TSPP HW filter table */
+#define TSPP_LAST_HW_FILTER_INDEX 15
+
+/* Number of filters required to accept all packets except NULL packets */
+#define TSPP_BLOCK_NULLS_FILTERS_NUM 13
+
+/* Max number of section filters */
+#define TSPP_MAX_SECTION_FILTER_NUM 128
+
+/* For each TSIF we use a single pipe holding the data after PID filtering */
+#define TSPP_CHANNEL 0
+
+/* The channel_id passed to the TSPP driver, based on TSIF number and channel type */
+#define TSPP_CHANNEL_ID(tsif, ch) ((tsif << 1) + ch)
+#define TSPP_GET_TSIF_NUM(ch_id) (ch_id >> 1)
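+
+/*
+ * Example (sketch): for TSIF1 the data channel is
+ * TSPP_CHANNEL_ID(1, TSPP_CHANNEL) = (1 << 1) + 0 = 2, and
+ * TSPP_GET_TSIF_NUM(2) = 2 >> 1 = 1 recovers the TSIF number.
+ */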
+
+/* Mask covering all bits of the PID filter */
+#define TSPP_PID_MASK 0x1FFF
+
+/* dvb-demux defines pid 0x2000 as full capture pid */
+#define TSPP_PASS_THROUGH_PID 0x2000
+
+/* NULL packets pid */
+#define TSPP_NULL_PACKETS_PID 0x1FFF
+
+#define TSPP_RAW_TTS_SIZE 192
+#define TSPP_RAW_SIZE 188
+
+#define MAX_BAM_DESCRIPTOR_SIZE (32 * 1024 - 1)
+
+#define MAX_BAM_DESCRIPTOR_COUNT (8 * 1024 - 2)
+
+#define TSPP_BUFFER_SIZE (500 * 1024) /* 500KB */
+
+#define TSPP_DEFAULT_DESCRIPTOR_SIZE (TSPP_RAW_TTS_SIZE)
+
+#define TSPP_BUFFER_COUNT(buffer_size) \
+ ((buffer_size) / tspp_desc_size)
+
+/* Controls when TSPP notifies the demux that new packets were received,
+ * using the max descriptor size (170 packets).
+ * Assuming a 20MBit/sec stream with 170 packets per descriptor,
+ * there would be about 82 descriptors per second, meaning about
+ * 82 notifications per second.
+ */
+#define TSPP_NOTIFICATION_SIZE(desc_size) \
+ (MAX_BAM_DESCRIPTOR_SIZE / (desc_size))
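+
+/*
+ * Example (sketch): with the default 192-byte (single packet)
+ * descriptors, the default notification size works out to
+ * TSPP_NOTIFICATION_SIZE(192) = 32767 / 192 = 170.
+ */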
+
+/* Channel timeout in msec */
+#define TSPP_CHANNEL_TIMEOUT 100
+
+enum mem_buffer_allocation_mode {
+ MPQ_DMX_TSPP_INTERNAL_ALLOC = 0,
+ MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC = 1
+};
+
+/* module parameters for load time configuration */
+static int allocation_mode = MPQ_DMX_TSPP_INTERNAL_ALLOC;
+static int tspp_out_buffer_size = TSPP_BUFFER_SIZE;
+static int tspp_desc_size = TSPP_DEFAULT_DESCRIPTOR_SIZE;
+static int tspp_notification_size =
+ TSPP_NOTIFICATION_SIZE(TSPP_DEFAULT_DESCRIPTOR_SIZE);
+static int tspp_channel_timeout = TSPP_CHANNEL_TIMEOUT;
+static int tspp_out_ion_heap = ION_QSECOM_HEAP_ID;
+
+module_param(allocation_mode, int, S_IRUGO | S_IWUSR);
+module_param(tspp_out_buffer_size, int, S_IRUGO | S_IWUSR);
+module_param(tspp_desc_size, int, S_IRUGO | S_IWUSR);
+module_param(tspp_notification_size, int, S_IRUGO | S_IWUSR);
+module_param(tspp_channel_timeout, int, S_IRUGO | S_IWUSR);
+module_param(tspp_out_ion_heap, int, S_IRUGO | S_IWUSR);
+
+/* The following structure holds the singleton information
+ * required for dmx implementation on top of TSPP.
+ */
+static struct
+{
+ /* Information for each TSIF input processing */
+ struct {
+ /*
+ * TSPP pipe holding all TS packets after PID filtering.
+ * The following is reference count for number of feeds
+ * allocated on that pipe.
+ */
+ int channel_ref;
+
+ /* Counter for data notifications on the pipe */
+ atomic_t data_cnt;
+
+ /* flag to indicate control operation is in progress */
+ atomic_t control_op;
+
+ /* ION handle used for TSPP data buffer allocation */
+ struct ion_handle *ch_mem_heap_handle;
+
+ /* TSPP data buffer heap virtual base address */
+ void *ch_mem_heap_virt_base;
+
+ /* TSPP data buffer heap physical base address */
+ ion_phys_addr_t ch_mem_heap_phys_base;
+
+ /* Buffer allocation index */
+ int buff_index;
+
+ /* Number of buffers */
+ u32 buffer_count;
+
+ /*
+ * Array holding the IDs of the TSPP buffer descriptors in the
+ * current aggregate, in order to release these descriptors at
+ * the end of processing.
+ */
+ int *aggregate_ids;
+
+ /*
+ * Holds PIDs of allocated filters along with
+ * how many feeds are opened on the same PID. For
+ * TSPP HW filters, holds also the filter table index.
+ * When pid == -1, the entry is free.
+ */
+ struct {
+ int pid;
+ int ref_count;
+ int hw_index;
+ } filters[TSPP_MAX_PID_FILTER_NUM];
+
+ /* Indicates available/allocated filter table indexes */
+ int hw_indexes[TSPP_MAX_HW_PID_FILTER_NUM];
+
+ /* Number of currently allocated PID filters */
+ u16 current_filter_count;
+
+ /*
+ * Flag to indicate whether the user added a filter to accept
+ * NULL packets (PID = 0x1FFF)
+ */
+ int pass_nulls_flag;
+
+ /*
+ * Flag to indicate whether the user added a filter to accept
+ * all packets (PID = 0x2000)
+ */
+ int pass_all_flag;
+
+ /*
+ * Flag to indicate whether the filter that accepts
+ * all packets has already been added and is
+ * currently enabled
+ */
+ int accept_all_filter_exists_flag;
+
+ /* Thread processing TS packets from TSPP */
+ struct task_struct *thread;
+ wait_queue_head_t wait_queue;
+
+ /* TSIF alias */
+ char name[TSIF_NAME_LENGTH];
+
+ /* Pointer to the demux connected to this TSIF */
+ struct mpq_demux *mpq_demux;
+
+ /* Mutex protecting the data-structure */
+ struct mutex mutex;
+ } tsif[TSIF_COUNT];
+
+ /* ION client used for TSPP data buffer allocation */
+ struct ion_client *ion_client;
+} mpq_dmx_tspp_info;
+
+static void *tspp_mem_allocator(int channel_id, u32 size,
+ phys_addr_t *phys_base, void *user)
+{
+ void *virt_addr = NULL;
+ int i = TSPP_GET_TSIF_NUM(channel_id);
+
+ if (mpq_dmx_tspp_info.tsif[i].buff_index ==
+ mpq_dmx_tspp_info.tsif[i].buffer_count)
+ return NULL;
+
+ virt_addr =
+ (mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base +
+ (mpq_dmx_tspp_info.tsif[i].buff_index * size));
+
+ *phys_base =
+ (mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base +
+ (mpq_dmx_tspp_info.tsif[i].buff_index * size));
+
+ mpq_dmx_tspp_info.tsif[i].buff_index++;
+
+ return virt_addr;
+}
+
+static void tspp_mem_free(int channel_id, u32 size,
+ void *virt_base, phys_addr_t phys_base, void *user)
+{
+ int i = TSPP_GET_TSIF_NUM(channel_id);
+
+ /*
+ * The actual buffer heap free is done in mpq_dmx_tspp_plugin_exit().
+ * We only update the index here, so if this function is called
+ * repeatedly for all the buffers, tspp_mem_allocator() can be
+ * called again afterwards.
+ * Note: it would be incorrect to call tspp_mem_allocator()
+ * a few times, then call tspp_mem_free(), then call
+ * tspp_mem_allocator() again.
+ */
+ if (mpq_dmx_tspp_info.tsif[i].buff_index > 0)
+ mpq_dmx_tspp_info.tsif[i].buff_index--;
+}
+
+/**
+ * Returns a free HW filter index that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return HW filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_allocate_hw_filter_index(int tsif)
+{
+ int i;
+
+ for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+ if (mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] == 0) {
+ mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] = 1;
+ return i;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * Releases a HW filter index for future reuse.
+ *
+ * @tsif: The TSIF from which the filter should be released
+ * @hw_index: The HW index to release
+ *
+ */
+static inline void mpq_tspp_release_hw_filter_index(int tsif, int hw_index)
+{
+ if ((hw_index >= 0) && (hw_index < TSPP_MAX_HW_PID_FILTER_NUM))
+ mpq_dmx_tspp_info.tsif[tsif].hw_indexes[hw_index] = 0;
+}
+
+
+/**
+ * Returns a free filter slot that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_get_free_filter_slot(int tsif)
+{
+ int slot;
+
+ for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+ return slot;
+
+ return -ENOMEM;
+}
+
+/**
+ * Returns filter index of specific pid.
+ *
+ * @tsif: The TSIF to which the pid is allocated
+ * @pid: The pid to search for
+ *
+ * Return filter index or -EINVAL if no filter is allocated for the pid
+ */
+static int mpq_tspp_get_filter_slot(int tsif, int pid)
+{
+ int slot;
+
+ for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == pid)
+ return slot;
+
+ return -EINVAL;
+}
+
+/**
+ * mpq_dmx_tspp_swfilter_desc - helper function
+ *
+ * Takes a tspp buffer descriptor and sends it to the SW filter for demuxing,
+ * one TS packet at a time.
+ *
+ * @mpq_demux - mpq demux object
+ * @tspp_data_desc - tspp buffer descriptor
+ */
+static inline void mpq_dmx_tspp_swfilter_desc(struct mpq_demux *mpq_demux,
+ const struct tspp_data_descriptor *tspp_data_desc)
+{
+ u32 notif_size;
+ int i;
+
+ notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+ for (i = 0; i < notif_size; i++)
+ dvb_dmx_swfilter_packet(&mpq_demux->demux,
+ ((u8 *)tspp_data_desc->virt_base) +
+ i * TSPP_RAW_TTS_SIZE,
+ ((u8 *)tspp_data_desc->virt_base) +
+ i * TSPP_RAW_TTS_SIZE + TSPP_RAW_SIZE);
+}
+
+/**
+ * Demux TS packets from TSPP using the secure demux.
+ * The function assumes the buffer is physically contiguous
+ * and that TSPP descriptors are contiguous in memory.
+ *
+ * @tsif: The TSIF interface to process its packets
+ * @channel_id: the TSPP output pipe with the TS packets
+ */
+static void mpq_dmx_tspp_aggregated_process(int tsif, int channel_id)
+{
+ const struct tspp_data_descriptor *tspp_data_desc;
+ struct mpq_demux *mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+ struct sdmx_buff_descr input;
+ size_t aggregate_len = 0;
+ size_t aggregate_count = 0;
+ phys_addr_t buff_start_addr_phys;
+ phys_addr_t buff_current_addr_phys = 0;
+ u32 notif_size;
+ int i;
+
+ while ((tspp_data_desc = tspp_get_buffer(0, channel_id)) != NULL) {
+ if (aggregate_count == 0)
+ buff_current_addr_phys = tspp_data_desc->phys_base;
+ notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[aggregate_count] =
+ tspp_data_desc->id;
+ aggregate_len += tspp_data_desc->size;
+ aggregate_count++;
+ mpq_demux->hw_notification_size += notif_size;
+
+ /* Let SW filter process only if it might be relevant */
+ if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
+ mpq_dmx_tspp_swfilter_desc(mpq_demux, tspp_data_desc);
+
+ }
+
+ if (!aggregate_count)
+ return;
+
+ buff_start_addr_phys =
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base;
+
+ input.base_addr = (u64)buff_start_addr_phys;
+ input.size = mpq_dmx_tspp_info.tsif[tsif].buffer_count * tspp_desc_size;
+
+ if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: SDMX Processing %zu descriptors: %zu bytes at start address 0x%llx, read offset %d\n",
+ __func__, aggregate_count, aggregate_len,
+ input.base_addr,
+ (int)(buff_current_addr_phys - buff_start_addr_phys));
+
+ mpq_sdmx_process(mpq_demux, &input, aggregate_len,
+ buff_current_addr_phys - buff_start_addr_phys,
+ TSPP_RAW_TTS_SIZE);
+ }
+
+ for (i = 0; i < aggregate_count; i++)
+ tspp_release_buffer(0, channel_id,
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[i]);
+}
+
+
+/**
+ * Demux thread function handling data from a specific TSIF.
+ *
+ * @arg: TSIF number
+ */
+static int mpq_dmx_tspp_thread(void *arg)
+{
+ int tsif = (int)(uintptr_t)arg;
+ struct mpq_demux *mpq_demux;
+ const struct tspp_data_descriptor *tspp_data_desc;
+ atomic_t *data_cnt;
+ u32 notif_size;
+ int channel_id;
+ int ref_count;
+ int ret;
+
+ do {
+ ret = wait_event_interruptible(
+ mpq_dmx_tspp_info.tsif[tsif].wait_queue,
+ (atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) &&
+ !atomic_read(&mpq_dmx_tspp_info.tsif[tsif].control_op))
+ || kthread_should_stop());
+
+ if ((ret < 0) || kthread_should_stop()) {
+ MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
+ break;
+ }
+
+ /* Lock against the TSPP filters data-structure */
+ if (mutex_lock_interruptible(
+ &mpq_dmx_tspp_info.tsif[tsif].mutex))
+ return -ERESTARTSYS;
+
+ channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+
+ ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+ data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;
+
+ /* Make sure channel is still active */
+ if (ref_count == 0) {
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ continue;
+ }
+
+ atomic_dec(data_cnt);
+
+ mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+ mpq_demux->hw_notification_size = 0;
+
+ if (allocation_mode != MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC &&
+ mpq_sdmx_is_loaded())
+ pr_err_once(
+ "%s: TSPP Allocation mode does not support secure demux.\n",
+ __func__);
+
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC &&
+ mpq_sdmx_is_loaded()) {
+ mpq_dmx_tspp_aggregated_process(tsif, channel_id);
+ } else {
+ /*
+ * Go through all filled descriptors
+ * and perform demuxing on them
+ */
+ do {
+ if (atomic_read(&mpq_dmx_tspp_info.tsif[tsif].
+ control_op)) {
+ /* restore for next iteration */
+ atomic_inc(data_cnt);
+ break;
+ }
+ tspp_data_desc = tspp_get_buffer(0, channel_id);
+ if (!tspp_data_desc)
+ break;
+
+ notif_size = tspp_data_desc->size /
+ TSPP_RAW_TTS_SIZE;
+ mpq_demux->hw_notification_size += notif_size;
+
+ mpq_dmx_tspp_swfilter_desc(mpq_demux,
+ tspp_data_desc);
+ /*
+ * Notify TSPP that the buffer
+ * is no longer needed
+ */
+ tspp_release_buffer(0, channel_id,
+ tspp_data_desc->id);
+ } while (1);
+ }
+
+ if (mpq_demux->hw_notification_size &&
+ (mpq_demux->hw_notification_size <
+ mpq_demux->hw_notification_min_size))
+ mpq_demux->hw_notification_min_size =
+ mpq_demux->hw_notification_size;
+
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ } while (1);
+
+ return 0;
+}
+
+/**
+ * Callback function from TSPP when new data is ready.
+ *
+ * @channel_id: Channel with new TS packets
+ * @user: user-data holding TSIF number
+ */
+static void mpq_tspp_callback(int channel_id, void *user)
+{
+ int tsif = (int)(uintptr_t)user;
+ struct mpq_demux *mpq_demux;
+
+ /* Save statistics on TSPP notifications */
+ mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+ mpq_dmx_update_hw_statistics(mpq_demux);
+
+ atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].data_cnt);
+ wake_up(&mpq_dmx_tspp_info.tsif[tsif].wait_queue);
+}
+
+/**
+ * Free memory of channel output of specific TSIF.
+ *
+ * @tsif: The TSIF id to which memory should be freed.
+ */
+static void mpq_dmx_channel_mem_free(int tsif)
+{
+ MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);
+
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base = 0;
+
+ if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
+ if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
+ ch_mem_heap_virt_base))
+ ion_unmap_kernel(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].
+ ch_mem_heap_handle);
+
+ ion_free(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
+ }
+
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base = NULL;
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle = NULL;
+}
+
+/**
+ * Allocate memory for channel output of specific TSIF.
+ *
+ * @tsif: The TSIF id to which memory should be allocated.
+ *
+ * Return error status
+ */
+static int mpq_dmx_channel_mem_alloc(int tsif)
+{
+ int result;
+ size_t len;
+
+ MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);
+
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle =
+ ion_alloc(mpq_dmx_tspp_info.ion_client,
+ (mpq_dmx_tspp_info.tsif[tsif].buffer_count * tspp_desc_size),
+ SZ_4K,
+ ION_HEAP(tspp_out_ion_heap),
+ 0); /* non-cached */
+
+ if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
+ MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n", __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return -ENOMEM;
+ }
+
+ /* save virtual base address of heap */
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base =
+ ion_map_kernel(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
+ if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
+ ch_mem_heap_virt_base)) {
+ MPQ_DVB_ERR_PRINT("%s: ion_map_kernel() failed\n", __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return -ENOMEM;
+ }
+
+ /* save physical base address of heap */
+ result = ion_phys(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle,
+ &(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base), &len);
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n", __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * Add a filter to accept all packets as the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return error status
+ */
+static int mpq_tspp_add_accept_all_filter(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+ int ret;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+ __func__, channel_id, source);
+
+ if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag) {
+ MPQ_DVB_DBG_PRINT("%s: accept all filter already exists\n",
+ __func__);
+ return 0;
+ }
+
+ /* This filter will be the last entry in the table */
+ tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX;
+ /* Pass all pids - set mask to 0 */
+ tspp_filter.pid = 0;
+ tspp_filter.mask = 0;
+ /*
+	 * Include TTS in RAW packets; if you change this to
+ * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+ * accordingly.
+ */
+ tspp_filter.mode = TSPP_MODE_RAW;
+ tspp_filter.source = source;
+ tspp_filter.decrypt = 0;
+
+ ret = tspp_add_filter(0, channel_id, &tspp_filter);
+ if (!ret) {
+ mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 1;
+ MPQ_DVB_DBG_PRINT(
+ "%s: accept all filter added successfully\n",
+ __func__);
+ }
+
+ return ret;
+}
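+
+/*
+ * Illustrative sketch (not part of the original patch): a TSPP HW filter
+ * is assumed to match a packet when the packet PID, ANDed with the filter
+ * mask, equals the filter PID. Under that assumption mask = 0 with
+ * pid = 0 matches every PID, which is why the entry added above accepts
+ * all packets. A hypothetical helper modeling the match rule:
+ */
+static inline bool mpq_tspp_hw_filter_matches(u16 packet_pid, u16 filter_pid,
+ u16 filter_mask)
+{
+ /* With filter_mask = 0 and filter_pid = 0 this is true for any PID */
+ return (packet_pid & filter_mask) == filter_pid;
+}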
+
+/**
+ * Remove the filter that accepts all packets from the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return error status
+ */
+static int mpq_tspp_remove_accept_all_filter(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+ int ret;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+ __func__, channel_id, source);
+
+ if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag == 0) {
+ MPQ_DVB_DBG_PRINT("%s: accept all filter doesn't exist\n",
+ __func__);
+ return 0;
+ }
+
+ tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX;
+
+ ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+ if (!ret) {
+ mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 0;
+ MPQ_DVB_DBG_PRINT(
+ "%s: accept all filter removed successfully\n",
+ __func__);
+ }
+
+ return ret;
+}
+
+/**
+ * Add filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ * This function is called after user-defined filters were removed,
+ * so it assumes that the first 13 HW filters in the TSPP filter
+ * table are free for use.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -1 otherwise
+ */
+static int mpq_tspp_add_null_blocking_filters(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int ret = 0;
+ int i, j;
+ u16 full_pid_mask = 0x1FFF;
+ u8 mask_shift;
+ u8 pid_shift;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+
+ MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+ __func__, channel_id, source);
+
+ /*
+ * Add a total of 13 filters that will accept packets with
+ * every PID other than 0x1FFF, which is the NULL PID.
+ *
+ * Filter 0: accept all PIDs with bit 12 clear, i.e.
+ * PID = 0x0000 .. 0x0FFF (4096 PIDs in total):
+ * Mask = 0x1000, PID = 0x0000.
+ *
+ * Filter 12: Accept PID 0x1FFE:
+ * Mask = 0x1FFF, PID = 0x1FFE.
+ *
+ * In general: For N = 0 .. 12,
+ * Filter <N>: accept all PIDs whose <N> MSBits are set and whose next bit is clear.
+ * Filter <N> Mask = N+1 MSBits set, others clear.
+ * Filter <N> PID = <N> MSBits set, others clear.
+ */
+
+ /*
+	 * Include TTS in RAW packets; if you change this to
+ * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+ * accordingly.
+ */
+ tspp_filter.mode = TSPP_MODE_RAW;
+ tspp_filter.source = source;
+ tspp_filter.decrypt = 0;
+
+ for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+ tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+ if (tspp_filter.priority != i) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: got unexpected HW index %d, expected %d\n",
+ __func__, tspp_filter.priority, i);
+ ret = -1;
+ break;
+ }
+ mask_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - 1 - i);
+ pid_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - i);
+ tspp_filter.mask =
+ ((full_pid_mask >> mask_shift) << mask_shift);
+ tspp_filter.pid = ((full_pid_mask >> pid_shift) << pid_shift);
+
+ if (tspp_add_filter(0, channel_id, &tspp_filter)) {
+ ret = -1;
+ break;
+ }
+ }
+
+ if (ret) {
+ /* cleanup on failure */
+ for (j = 0; j < i; j++) {
+ tspp_filter.priority = j;
+ mpq_tspp_release_hw_filter_index(tsif, j);
+ tspp_remove_filter(0, channel_id, &tspp_filter);
+ }
+ } else {
+ MPQ_DVB_DBG_PRINT(
+ "%s: NULL blocking filters added successfully\n",
+ __func__);
+ }
+
+ return ret;
+}
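+
+/*
+ * Illustrative sketch (not part of the original patch): the mask/PID pairs
+ * built above can be tabulated directly for the 13-bit PID space:
+ *   N = 0:  mask = 0x1000, pid = 0x0000 (any PID with the MSBit clear)
+ *   N = 1:  mask = 0x1800, pid = 0x1000 (MSBit set, next bit clear)
+ *   ...
+ *   N = 12: mask = 0x1FFF, pid = 0x1FFE (exactly PID 0x1FFE)
+ * A hypothetical helper reproducing the computation used in the loop:
+ */
+static inline void mpq_tspp_null_block_rule(int n, u16 *mask, u16 *pid)
+{
+ u16 full_pid_mask = 0x1FFF;
+ u8 mask_shift = TSPP_BLOCK_NULLS_FILTERS_NUM - 1 - n;
+ u8 pid_shift = TSPP_BLOCK_NULLS_FILTERS_NUM - n;
+
+ *mask = (full_pid_mask >> mask_shift) << mask_shift;
+ *pid = (full_pid_mask >> pid_shift) << pid_shift;
+}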
+
+/**
+ * Remove filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_null_blocking_filters(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+ int ret = 0;
+ int i;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+ __func__, channel_id, source);
+
+ for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+ tspp_filter.priority = i;
+ if (tspp_remove_filter(0, channel_id, &tspp_filter)) {
+ MPQ_DVB_ERR_PRINT("%s: failed to remove filter %d\n",
+ __func__, i);
+ ret = -1;
+ }
+
+ mpq_tspp_release_hw_filter_index(tsif, i);
+ }
+
+ return ret;
+}
+
+/**
+ * Add all current user-defined filters (up to 15) as HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -1 otherwise
+ */
+static int mpq_tspp_add_all_user_filters(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+ int slot;
+ u16 added_count = 0;
+ u16 total_filters_count = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+ /*
+	 * Include TTS in RAW packets; if you change this to
+ * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+ * accordingly.
+ */
+ tspp_filter.mode = TSPP_MODE_RAW;
+ tspp_filter.source = source;
+ tspp_filter.decrypt = 0;
+
+ for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++) {
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+ continue;
+
+ /*
+ * count total number of user filters to verify that it is
+ * exactly TSPP_MAX_HW_PID_FILTER_NUM as expected.
+ */
+ total_filters_count++;
+
+		if (added_count >= TSPP_MAX_HW_PID_FILTER_NUM)
+ continue;
+
+ tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid ==
+ TSPP_PASS_THROUGH_PID) {
+ /* pass all pids */
+ tspp_filter.pid = 0;
+ tspp_filter.mask = 0;
+ } else {
+ tspp_filter.pid =
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid;
+ tspp_filter.mask = TSPP_PID_MASK;
+ }
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: adding HW filter, PID = %d, mask = 0x%X, index = %d\n",
+ __func__, tspp_filter.pid, tspp_filter.mask,
+ tspp_filter.priority);
+
+ if (!tspp_add_filter(0, channel_id, &tspp_filter)) {
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+ tspp_filter.priority;
+ added_count++;
+ } else {
+ MPQ_DVB_ERR_PRINT("%s: tspp_add_filter failed\n",
+ __func__);
+ }
+ }
+
+ if ((added_count != TSPP_MAX_HW_PID_FILTER_NUM) ||
+ (added_count != total_filters_count))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * Remove all user-defined HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_all_user_filters(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int ret = 0;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+ int i;
+
+ MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+ for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+ tspp_filter.priority = i;
+ MPQ_DVB_DBG_PRINT("%s: Removing HW filter %d\n",
+ __func__, tspp_filter.priority);
+ if (tspp_remove_filter(0, channel_id, &tspp_filter))
+ ret = -1;
+
+ mpq_tspp_release_hw_filter_index(tsif, i);
+ mpq_dmx_tspp_info.tsif[tsif].filters[i].hw_index = -1;
+ }
+
+ return ret;
+}
+
+/**
+ * Configure TSPP channel to filter the PID of new feed.
+ *
+ * @feed: The feed to configure the channel with
+ *
+ * Return error status
+ *
+ * The function checks if the new PID can be added to an already
+ * allocated channel, if not, a new channel is allocated and configured.
+ */
+static int mpq_tspp_dmx_add_channel(struct dvb_demux_feed *feed)
+{
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct tspp_select_source tspp_source;
+ struct tspp_filter tspp_filter;
+ int tsif;
+ int tsif_mode = mpq_dmx_get_param_tsif_mode();
+ int ret = 0;
+ int slot;
+ int channel_id;
+ int *channel_ref_count;
+ u32 buffer_size;
+ int restore_user_filters = 0;
+ int remove_accept_all_filter = 0;
+ int remove_null_blocking_filters = 0;
+ size_t agg_size;
+
+ tspp_source.clk_inverse = mpq_dmx_get_param_clock_inv();
+ tspp_source.data_inverse = 0;
+ tspp_source.sync_inverse = 0;
+ tspp_source.enable_inverse = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
+ switch (tsif_mode) {
+ case 1:
+ tspp_source.mode = TSPP_TSIF_MODE_1;
+ break;
+ case 2:
+ tspp_source.mode = TSPP_TSIF_MODE_2;
+ break;
+ default:
+ tspp_source.mode = TSPP_TSIF_MODE_LOOPBACK;
+ break;
+ }
+
+ /* determine the TSIF we are reading from */
+ if (mpq_demux->source == DMX_SOURCE_FRONT0) {
+ tsif = 0;
+ tspp_source.source = TSPP_SOURCE_TSIF0;
+ } else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
+ tsif = 1;
+ tspp_source.source = TSPP_SOURCE_TSIF1;
+ } else {
+ /* invalid source */
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid input source (%d)\n",
+ __func__,
+ mpq_demux->source);
+
+ return -EINVAL;
+ }
+
+ atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) {
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return -ERESTARTSYS;
+ }
+
+ /*
+ * It is possible that this PID was already requested before.
+	 * This can happen if we play and record the same PES, or a PCR
+	 * piggybacked on a video packet.
+ */
+ slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+ if (slot >= 0) {
+ /* PID already configured */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+ goto out;
+ }
+
+ channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+ channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+
+ /*
+ * Recalculate 'tspp_notification_size' and buffer count in case
+ * 'tspp_desc_size' or 'tspp_out_buffer_size' parameters have changed.
+ */
+ buffer_size = tspp_desc_size;
+ tspp_notification_size = TSPP_NOTIFICATION_SIZE(tspp_desc_size);
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count =
+ TSPP_BUFFER_COUNT(tspp_out_buffer_size);
+ if (mpq_dmx_tspp_info.tsif[tsif].buffer_count >
+ MAX_BAM_DESCRIPTOR_COUNT)
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count =
+ MAX_BAM_DESCRIPTOR_COUNT;
+
+ /* check if required TSPP pipe is already allocated or not */
+ if (*channel_ref_count == 0) {
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ agg_size = mpq_dmx_tspp_info.tsif[tsif].buffer_count *
+ sizeof(int);
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids =
+ vzalloc(agg_size);
+ if (!mpq_dmx_tspp_info.tsif[tsif].aggregate_ids) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to allocate memory for buffer descriptors aggregation\n",
+ __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mpq_dmx_channel_mem_alloc(tsif);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_channel_mem_alloc(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_failed;
+ }
+ }
+
+ ret = tspp_open_channel(0, channel_id);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_open_channel(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_failed;
+ }
+
+ /* set TSPP source */
+ ret = tspp_open_stream(0, channel_id, &tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_select_source(%d,%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ tspp_source.source,
+ ret);
+
+ goto add_channel_close_ch;
+ }
+
+ /* register notification on TS packets */
+ tspp_register_notification(0,
+ channel_id,
+ mpq_tspp_callback,
+ (void *)(uintptr_t)tsif,
+ tspp_channel_timeout);
+
+ /*
+ * Register allocator and provide allocation function
+ * that allocates from contiguous memory so that we can have
+ * big notification size, smallest descriptor, and still provide
+ * TZ with single big buffer based on notification size.
+ */
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ ret = tspp_allocate_buffers(0, channel_id,
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count,
+ buffer_size, tspp_notification_size,
+ tspp_mem_allocator, tspp_mem_free, NULL);
+ } else {
+ ret = tspp_allocate_buffers(0, channel_id,
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count,
+ buffer_size, tspp_notification_size,
+ NULL, NULL, NULL);
+ }
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_allocate_buffers(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_unregister_notif;
+ }
+
+ mpq_dmx_tspp_info.tsif[tsif].mpq_demux = mpq_demux;
+ }
+
+ /* add new PID to the existing pipe */
+ slot = mpq_tspp_get_free_filter_slot(tsif);
+ if (slot < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_get_free_filter_slot(%d) failed\n",
+ __func__, tsif);
+
+ goto add_channel_unregister_notif;
+ }
+
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
+
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+ tspp_filter.priority = -1;
+
+ if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <
+ TSPP_MAX_HW_PID_FILTER_NUM) {
+ /* HW filtering mode */
+ tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+ if (tspp_filter.priority < 0)
+ goto add_channel_free_filter_slot;
+
+ if (feed->pid == TSPP_PASS_THROUGH_PID) {
+ /* pass all pids */
+ tspp_filter.pid = 0;
+ tspp_filter.mask = 0;
+ } else {
+ tspp_filter.pid = feed->pid;
+ tspp_filter.mask = TSPP_PID_MASK;
+ }
+
+ /*
+		 * Include TTS in RAW packets; if you change this to
+ * TSPP_MODE_RAW_NO_SUFFIX you must also change
+ * TSPP_RAW_TTS_SIZE accordingly.
+ */
+ tspp_filter.mode = TSPP_MODE_RAW;
+ tspp_filter.source = tspp_source.source;
+ tspp_filter.decrypt = 0;
+ ret = tspp_add_filter(0, channel_id, &tspp_filter);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_add_filter(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_free_filter_slot;
+ }
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+ tspp_filter.priority;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: HW filtering mode: added TSPP HW filter, PID = %d, mask = 0x%X, index = %d\n",
+ __func__, tspp_filter.pid, tspp_filter.mask,
+ tspp_filter.priority);
+ } else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+ TSPP_MAX_HW_PID_FILTER_NUM) {
+ /* Crossing the threshold - from HW to SW filtering mode */
+
+ /* Add a temporary filter to accept all packets */
+ ret = mpq_tspp_add_accept_all_filter(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id, tspp_source.source);
+
+ goto add_channel_free_filter_slot;
+ }
+
+ /* Remove all existing user filters */
+ ret = mpq_tspp_remove_all_user_filters(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_all_user_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source.source);
+
+ restore_user_filters = 1;
+ remove_accept_all_filter = 1;
+
+ goto add_channel_free_filter_slot;
+ }
+
+ /* Add HW filters to block NULL packets */
+ ret = mpq_tspp_add_null_blocking_filters(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_null_blocking_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source.source);
+
+ restore_user_filters = 1;
+ remove_accept_all_filter = 1;
+
+ goto add_channel_free_filter_slot;
+ }
+
+		/* Remove the filter that accepts all packets, if necessary */
+ if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+ (mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+ ret = mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id,
+ tspp_source.source);
+
+ remove_null_blocking_filters = 1;
+ restore_user_filters = 1;
+ remove_accept_all_filter = 1;
+
+ goto add_channel_free_filter_slot;
+ }
+ }
+ } else {
+ /* Already working in SW filtering mode */
+ if (mpq_dmx_tspp_info.tsif[tsif].pass_all_flag ||
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag) {
+
+ ret = mpq_tspp_add_accept_all_filter(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id,
+ tspp_source.source);
+
+ goto add_channel_free_filter_slot;
+ }
+ }
+ }
+
+ (*channel_ref_count)++;
+ mpq_dmx_tspp_info.tsif[tsif].current_filter_count++;
+
+ MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+ __func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
+ goto out;
+
+add_channel_free_filter_slot:
+ /* restore internal database state */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+ /* release HW index if we allocated one */
+ if (tspp_filter.priority >= 0) {
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+ mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+ }
+
+ /* restore HW filter table state if necessary */
+ if (remove_null_blocking_filters)
+ mpq_tspp_remove_null_blocking_filters(channel_id,
+ tspp_source.source);
+
+ if (restore_user_filters)
+ mpq_tspp_add_all_user_filters(channel_id, tspp_source.source);
+
+ if (remove_accept_all_filter)
+ mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source.source);
+
+ /* restore flags. we can only get here if we changed the flags. */
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
+add_channel_unregister_notif:
+ if (*channel_ref_count == 0) {
+ tspp_unregister_notification(0, channel_id);
+ tspp_close_stream(0, channel_id);
+ }
+add_channel_close_ch:
+ if (*channel_ref_count == 0)
+ tspp_close_channel(0, channel_id);
+add_channel_failed:
+ if (*channel_ref_count == 0)
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ vfree(mpq_dmx_tspp_info.tsif[tsif].aggregate_ids);
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = NULL;
+ mpq_dmx_channel_mem_free(tsif);
+ }
+
+out:
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return ret;
+}
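+
+/*
+ * Illustrative sketch (not part of the original patch): the branches above
+ * implement a threshold between HW and SW filtering based on the current
+ * filter count. A hypothetical enum and helper naming the two modes:
+ */
+enum mpq_tspp_filtering_mode {
+ MPQ_TSPP_HW_FILTERING, /* per-PID HW filters in the TSPP table */
+ MPQ_TSPP_SW_FILTERING /* HW table exhausted; accept-all + SW demux */
+};
+
+static inline enum mpq_tspp_filtering_mode
+mpq_tspp_current_mode(int current_filter_count)
+{
+ return (current_filter_count < TSPP_MAX_HW_PID_FILTER_NUM) ?
+ MPQ_TSPP_HW_FILTERING : MPQ_TSPP_SW_FILTERING;
+}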
+
+/**
+ * Removes filter from TSPP.
+ *
+ * @feed: The feed to remove
+ *
+ * Return error status
+ *
+ * The function checks if this is the only PID allocated within
+ * the channel, if so, the channel is closed as well.
+ */
+static int mpq_tspp_dmx_remove_channel(struct dvb_demux_feed *feed)
+{
+ int tsif;
+ int ret = 0;
+ int channel_id;
+ int slot;
+ atomic_t *data_cnt;
+ int *channel_ref_count;
+ enum tspp_source tspp_source;
+ struct tspp_filter tspp_filter;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ int restore_null_blocking_filters = 0;
+ int remove_accept_all_filter = 0;
+ int remove_user_filters = 0;
+ int accept_all_filter_existed = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
+ /* determine the TSIF we are reading from */
+ if (mpq_demux->source == DMX_SOURCE_FRONT0) {
+ tsif = 0;
+ tspp_source = TSPP_SOURCE_TSIF0;
+ } else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
+ tsif = 1;
+ tspp_source = TSPP_SOURCE_TSIF1;
+ } else {
+ /* invalid source */
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid input source (%d)\n",
+ __func__,
+ mpq_demux->source);
+
+ return -EINVAL;
+ }
+
+ atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) {
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return -ERESTARTSYS;
+ }
+
+ channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+ channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+ data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;
+
+ /* check if required TSPP pipe is already allocated or not */
+ if (*channel_ref_count == 0) {
+ /* invalid feed provided as the channel is not allocated */
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid feed (%d)\n",
+ __func__,
+ channel_id);
+
+ ret = -EINVAL;
+ goto out;
+ }
+
+ slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+
+ if (slot < 0) {
+ /* invalid feed provided as it has no filter allocated */
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_get_filter_slot failed (%d,%d)\n",
+ __func__,
+ feed->pid,
+ tsif);
+
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* since filter was found, ref_count > 0 so it's ok to decrement it */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count) {
+ /*
+ * there are still references to this pid, do not
+ * remove the filter yet
+ */
+ goto out;
+ }
+
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+
+ if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <=
+ TSPP_MAX_HW_PID_FILTER_NUM) {
+ /* staying in HW filtering mode */
+ tspp_filter.priority =
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index;
+ ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_remove_filter failed (%d,%d)\n",
+ __func__,
+ channel_id,
+ tspp_filter.priority);
+
+ goto remove_channel_failed_restore_count;
+ }
+ mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: HW filtering mode: Removed TSPP HW filter, PID = %d, index = %d\n",
+ __func__, feed->pid, tspp_filter.priority);
+ } else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+ (TSPP_MAX_HW_PID_FILTER_NUM + 1)) {
+ /* Crossing the threshold - from SW to HW filtering mode */
+
+ accept_all_filter_existed =
+ mpq_dmx_tspp_info.tsif[tsif].
+ accept_all_filter_exists_flag;
+
+ /* Add a temporary filter to accept all packets */
+ ret = mpq_tspp_add_accept_all_filter(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ goto remove_channel_failed_restore_count;
+ }
+
+ ret = mpq_tspp_remove_null_blocking_filters(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_null_blocking_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ restore_null_blocking_filters = 1;
+ if (!accept_all_filter_existed)
+ remove_accept_all_filter = 1;
+
+ goto remove_channel_failed_restore_count;
+ }
+
+ ret = mpq_tspp_add_all_user_filters(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_all_user_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ remove_user_filters = 1;
+ restore_null_blocking_filters = 1;
+ if (!accept_all_filter_existed)
+ remove_accept_all_filter = 1;
+
+ goto remove_channel_failed_restore_count;
+ }
+
+ ret = mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ remove_user_filters = 1;
+ restore_null_blocking_filters = 1;
+ if (!accept_all_filter_existed)
+ remove_accept_all_filter = 1;
+
+ goto remove_channel_failed_restore_count;
+ }
+ } else {
+ /* staying in SW filtering mode */
+ if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+ (mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+ ret = mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id,
+ tspp_source);
+
+ goto remove_channel_failed_restore_count;
+ }
+ }
+ }
+
+ mpq_dmx_tspp_info.tsif[tsif].current_filter_count--;
+ (*channel_ref_count)--;
+
+ MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+ __func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
+ if (*channel_ref_count == 0) {
+ /* channel is not used any more, release it */
+ tspp_unregister_notification(0, channel_id);
+ tspp_close_stream(0, channel_id);
+ tspp_close_channel(0, channel_id);
+ atomic_set(data_cnt, 0);
+
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ vfree(mpq_dmx_tspp_info.tsif[tsif].aggregate_ids);
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = NULL;
+ mpq_dmx_channel_mem_free(tsif);
+ }
+ }
+
+ goto out;
+
+remove_channel_failed_restore_count:
+ /* restore internal database state */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+ if (remove_user_filters)
+ mpq_tspp_remove_all_user_filters(channel_id, tspp_source);
+
+ if (restore_null_blocking_filters)
+ mpq_tspp_add_null_blocking_filters(channel_id, tspp_source);
+
+ if (remove_accept_all_filter)
+ mpq_tspp_remove_accept_all_filter(channel_id, tspp_source);
+
+ /* restore flags. we can only get here if we changed the flags. */
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
+
+out:
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return ret;
+}
+
+static int mpq_tspp_dmx_start_filtering(struct dvb_demux_feed *feed)
+{
+ int ret;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s(pid=%d) executed\n",
+ __func__,
+ feed->pid);
+
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid mpq_demux handle\n",
+ __func__);
+
+ return -EINVAL;
+ }
+
+ if (mpq_demux->source < DMX_SOURCE_DVR0) {
+ /* source from TSPP, need to configure tspp pipe */
+ ret = mpq_tspp_dmx_add_channel(feed);
+
+ if (ret < 0) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_tspp_dmx_add_channel failed(%d)\n",
+ __func__,
+ ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Always feed sections/PES starting from a new one and
+	 * do not partially transfer data from an older one
+ */
+ feed->pusi_seen = 0;
+
+ ret = mpq_dmx_init_mpq_feed(feed);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_init_mpq_feed failed(%d)\n",
+ __func__,
+ ret);
+ if (mpq_demux->source < DMX_SOURCE_DVR0)
+ mpq_tspp_dmx_remove_channel(feed);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mpq_tspp_dmx_stop_filtering(struct dvb_demux_feed *feed)
+{
+ int ret = 0;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid);
+
+ mpq_dmx_terminate_feed(feed);
+
+ if (mpq_demux->source < DMX_SOURCE_DVR0) {
+ /* source from TSPP, need to configure tspp pipe */
+ ret = mpq_tspp_dmx_remove_channel(feed);
+ }
+
+ return ret;
+}
+
+static int mpq_tspp_dmx_write_to_decoder(
+ struct dvb_demux_feed *feed,
+ const u8 *buf,
+ size_t len)
+{
+ /*
+ * It is assumed that this function is called once for each
+ * TS packet of the relevant feed.
+ */
+ if (len > TSPP_RAW_TTS_SIZE)
+ MPQ_DVB_DBG_PRINT(
+ "%s: warnning - len larger than one packet\n",
+ __func__);
+
+ if (dvb_dmx_is_video_feed(feed))
+ return mpq_dmx_process_video_packet(feed, buf);
+
+ if (dvb_dmx_is_pcr_feed(feed))
+ return mpq_dmx_process_pcr_packet(feed, buf);
+
+ return 0;
+}
+
+/**
+ * Returns demux capabilities of TSPPv1 plugin
+ *
+ * @demux: demux device
+ * @caps: Returned capabilities
+ *
+ * Return error code
+ */
+static int mpq_tspp_dmx_get_caps(struct dmx_demux *demux,
+ struct dmx_caps *caps)
+{
+ struct dvb_demux *dvb_demux = demux->priv;
+
+ if ((dvb_demux == NULL) || (caps == NULL)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid parameters\n",
+ __func__);
+
+ return -EINVAL;
+ }
+
+ caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA |
+ DMX_CAP_TS_INSERTION | DMX_CAP_VIDEO_INDEXING |
+ DMX_CAP_AUTO_BUFFER_FLUSH;
+ caps->recording_max_video_pids_indexed = 0;
+ caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+ caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+ caps->num_pid_filters = TSPP_MAX_PID_FILTER_NUM;
+ caps->num_section_filters = dvb_demux->filternum;
+ caps->num_section_filters_per_pid = dvb_demux->filternum;
+ caps->section_filter_length = DMX_FILTER_SIZE;
+ caps->num_demod_inputs = TSIF_COUNT;
+ caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+ caps->max_bitrate = 192;
+ caps->demod_input_max_bitrate = 96;
+ caps->memory_input_max_bitrate = 96;
+ caps->num_cipher_ops = 1;
+
+	/* TSIF reports a 3-byte STC in units of 27MHz/256 */
+ caps->max_stc = (u64)0xFFFFFF * 256;
+
+ /* Buffer requirements */
+ caps->section.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->section.max_buffer_num = 1;
+ caps->section.max_size = 0xFFFFFFFF;
+ caps->section.size_alignment = 0;
+ caps->pes.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->pes.max_buffer_num = 1;
+ caps->pes.max_size = 0xFFFFFFFF;
+ caps->pes.size_alignment = 0;
+ caps->recording_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->recording_188_tsp.max_buffer_num = 1;
+ caps->recording_188_tsp.max_size = 0xFFFFFFFF;
+ caps->recording_188_tsp.size_alignment = 0;
+ caps->recording_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->recording_192_tsp.max_buffer_num = 1;
+ caps->recording_192_tsp.max_size = 0xFFFFFFFF;
+ caps->recording_192_tsp.size_alignment = 0;
+ caps->playback_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->playback_188_tsp.max_buffer_num = 1;
+ caps->playback_188_tsp.max_size = 0xFFFFFFFF;
+ caps->playback_188_tsp.size_alignment = 188;
+ caps->playback_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->playback_192_tsp.max_buffer_num = 1;
+ caps->playback_192_tsp.max_size = 0xFFFFFFFF;
+ caps->playback_192_tsp.size_alignment = 192;
+ caps->decoder.flags =
+ DMX_BUFFER_SECURED_IF_DECRYPTED |
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_LINEAR_GROUP_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM;
+ caps->decoder.max_size = 0xFFFFFFFF;
+ caps->decoder.size_alignment = SZ_4K;
+
+ return 0;
+}
+
+/**
+ * Reads TSIF STC from TSPP
+ *
+ * @demux: demux device
+ * @num: STC number. 0 for TSIF0 and 1 for TSIF1.
+ * @stc: STC value
+ * @base: divisor to get 90KHz value
+ *
+ * Return error code
+ */
+static int mpq_tspp_dmx_get_stc(struct dmx_demux *demux, unsigned int num,
+ u64 *stc, unsigned int *base)
+{
+ enum tspp_source source;
+ u32 tcr_counter;
+
+ if (!demux || !stc || !base)
+ return -EINVAL;
+
+ if (num == 0)
+ source = TSPP_SOURCE_TSIF0;
+ else if (num == 1)
+ source = TSPP_SOURCE_TSIF1;
+ else
+ return -EINVAL;
+
+ tspp_get_ref_clk_counter(0, source, &tcr_counter);
+
+ *stc = ((u64)tcr_counter) * 256; /* conversion to 27MHz */
+ *base = 300; /* divisor to get 90KHz clock from stc value */
+
+ return 0;
+}
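+
+/*
+ * Illustrative sketch (not part of the original patch): the TCR counter is
+ * a 3-byte value ticking at 27MHz/256, so multiplying by 256 (as above)
+ * yields 27MHz units, and dividing by the reported base of 300 yields a
+ * 90KHz clock, since 27000000 / 300 = 90000. A hypothetical conversion
+ * helper:
+ */
+static inline u64 mpq_tspp_stc_to_90khz(u64 stc_27mhz)
+{
+ /* do_div() divides in place and avoids 64-bit division on 32-bit ARM */
+ do_div(stc_27mhz, 300);
+ return stc_27mhz;
+}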
+
+static int mpq_tspp_dmx_init(
+ struct dvb_adapter *mpq_adapter,
+ struct mpq_demux *mpq_demux)
+{
+ int result;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ mpq_dmx_tspp_info.ion_client = mpq_demux->ion_client;
+
+ /* Set the kernel-demux object capabilities */
+ mpq_demux->demux.dmx.capabilities =
+ DMX_TS_FILTERING |
+ DMX_PES_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING |
+ DMX_CRC_CHECKING |
+ DMX_TS_DESCRAMBLING;
+
+ mpq_demux->decoder_alloc_flags = ION_FLAG_CACHED;
+
+ /* Set dvb-demux "virtual" function pointers */
+ mpq_demux->demux.priv = (void *)mpq_demux;
+ mpq_demux->demux.filternum = TSPP_MAX_SECTION_FILTER_NUM;
+ mpq_demux->demux.feednum = MPQ_MAX_DMX_FILES;
+ mpq_demux->demux.start_feed = mpq_tspp_dmx_start_filtering;
+ mpq_demux->demux.stop_feed = mpq_tspp_dmx_stop_filtering;
+ mpq_demux->demux.write_to_decoder = mpq_tspp_dmx_write_to_decoder;
+ mpq_demux->demux.decoder_fullness_init = mpq_dmx_decoder_fullness_init;
+ mpq_demux->demux.decoder_fullness_wait = mpq_dmx_decoder_fullness_wait;
+ mpq_demux->demux.decoder_fullness_abort =
+ mpq_dmx_decoder_fullness_abort;
+ mpq_demux->demux.decoder_buffer_status = mpq_dmx_decoder_buffer_status;
+ mpq_demux->demux.reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer;
+ mpq_demux->demux.set_cipher_op = mpq_dmx_set_cipher_ops;
+ mpq_demux->demux.oob_command = mpq_dmx_oob_command;
+ mpq_demux->demux.convert_ts = mpq_dmx_convert_tts;
+ mpq_demux->demux.flush_decoder_buffer = NULL;
+
+ /* Initialize dvb_demux object */
+ result = dvb_dmx_init(&mpq_demux->demux);
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed\n", __func__);
+ goto init_failed;
+ }
+
+	/* Now initialize the dmx-dev object */
+ mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
+ mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
+ mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX;
+
+ mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
+ mpq_demux->dmxdev.demux->get_stc = mpq_tspp_dmx_get_stc;
+ mpq_demux->dmxdev.demux->get_caps = mpq_tspp_dmx_get_caps;
+ mpq_demux->dmxdev.demux->map_buffer = mpq_dmx_map_buffer;
+ mpq_demux->dmxdev.demux->unmap_buffer = mpq_dmx_unmap_buffer;
+ mpq_demux->dmxdev.demux->write = mpq_dmx_write;
+ result = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed (errno=%d)\n",
+ __func__,
+ result);
+ goto init_failed_dmx_release;
+ }
+
+ /* Extend dvb-demux debugfs with TSPP statistics. */
+ mpq_dmx_init_debugfs_entries(mpq_demux);
+
+ return 0;
+
+init_failed_dmx_release:
+ dvb_dmx_release(&mpq_demux->demux);
+init_failed:
+ return result;
+}
+
+static int __init mpq_dmx_tspp_plugin_init(void)
+{
+ int i;
+ int j;
+ int ret;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ for (i = 0; i < TSIF_COUNT; i++) {
+ mpq_dmx_tspp_info.tsif[i].aggregate_ids = NULL;
+ mpq_dmx_tspp_info.tsif[i].channel_ref = 0;
+ mpq_dmx_tspp_info.tsif[i].buff_index = 0;
+ mpq_dmx_tspp_info.tsif[i].ch_mem_heap_handle = NULL;
+ mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base = NULL;
+ mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base = 0;
+ atomic_set(&mpq_dmx_tspp_info.tsif[i].data_cnt, 0);
+ atomic_set(&mpq_dmx_tspp_info.tsif[i].control_op, 0);
+
+ for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) {
+ mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1;
+ mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0;
+ mpq_dmx_tspp_info.tsif[i].filters[j].hw_index = -1;
+ }
+
+ for (j = 0; j < TSPP_MAX_HW_PID_FILTER_NUM; j++)
+ mpq_dmx_tspp_info.tsif[i].hw_indexes[j] = 0;
+
+ mpq_dmx_tspp_info.tsif[i].current_filter_count = 0;
+ mpq_dmx_tspp_info.tsif[i].pass_nulls_flag = 0;
+ mpq_dmx_tspp_info.tsif[i].pass_all_flag = 0;
+ mpq_dmx_tspp_info.tsif[i].accept_all_filter_exists_flag = 0;
+
+ snprintf(mpq_dmx_tspp_info.tsif[i].name,
+ TSIF_NAME_LENGTH,
+ "dmx_tsif%d",
+ i);
+
+ init_waitqueue_head(&mpq_dmx_tspp_info.tsif[i].wait_queue);
+ mpq_dmx_tspp_info.tsif[i].thread =
+ kthread_run(
+ mpq_dmx_tspp_thread, (void *)(uintptr_t)i,
+ mpq_dmx_tspp_info.tsif[i].name);
+
+ if (IS_ERR(mpq_dmx_tspp_info.tsif[i].thread)) {
+ for (j = 0; j < i; j++) {
+ kthread_stop(mpq_dmx_tspp_info.tsif[j].thread);
+ mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
+ }
+
+ MPQ_DVB_ERR_PRINT(
+ "%s: kthread_run failed\n",
+ __func__);
+
+ return -ENOMEM;
+ }
+
+ mutex_init(&mpq_dmx_tspp_info.tsif[i].mutex);
+ }
+
+ ret = mpq_dmx_plugin_init(mpq_tspp_dmx_init);
+
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_plugin_init failed (errno=%d)\n",
+ __func__,
+ ret);
+
+ for (i = 0; i < TSIF_COUNT; i++) {
+ kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
+ mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
+ }
+ }
+
+ return ret;
+}
+
+static void __exit mpq_dmx_tspp_plugin_exit(void)
+{
+ int i;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ for (i = 0; i < TSIF_COUNT; i++) {
+ mutex_lock(&mpq_dmx_tspp_info.tsif[i].mutex);
+
+ /*
+ * Note: tspp_close_channel will also free the TSPP buffers
+ * even if we allocated them ourselves,
+ * using our free function.
+ */
+ if (mpq_dmx_tspp_info.tsif[i].channel_ref) {
+ tspp_unregister_notification(0,
+ TSPP_CHANNEL_ID(i, TSPP_CHANNEL));
+ tspp_close_channel(0,
+ TSPP_CHANNEL_ID(i, TSPP_CHANNEL));
+
+ if (allocation_mode ==
+ MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids);
+ mpq_dmx_tspp_info.tsif[i].aggregate_ids = NULL;
+ mpq_dmx_channel_mem_free(i);
+ }
+ }
+
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[i].mutex);
+ kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
+ mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
+ }
+
+ mpq_dmx_plugin_exit();
+}
+
+module_init(mpq_dmx_tspp_plugin_init);
+module_exit(mpq_dmx_tspp_plugin_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. demux TSPP version 1 HW Plugin");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
new file mode 100644
index 000000000000..625609a07f02
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
@@ -0,0 +1,1023 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "qseecom_kernel.h"
+#include "mpq_sdmx.h"
+
+static struct qseecom_handle *sdmx_qseecom_handles[SDMX_MAX_SESSIONS];
+static struct mutex sdmx_lock[SDMX_MAX_SESSIONS];
+
+#define QSEECOM_SBUFF_SIZE SZ_128K
+
+enum sdmx_cmd_id {
+ SDMX_OPEN_SESSION_CMD,
+ SDMX_CLOSE_SESSION_CMD,
+ SDMX_SET_SESSION_CFG_CMD,
+ SDMX_ADD_FILTER_CMD,
+ SDMX_REMOVE_FILTER_CMD,
+ SDMX_SET_KL_IDX_CMD,
+ SDMX_ADD_RAW_PID_CMD,
+ SDMX_REMOVE_RAW_PID_CMD,
+ SDMX_PROCESS_CMD,
+ SDMX_GET_DBG_COUNTERS_CMD,
+ SDMX_RESET_DBG_COUNTERS_CMD,
+ SDMX_GET_VERSION_CMD,
+ SDMX_INVALIDATE_KL_CMD,
+ SDMX_SET_LOG_LEVEL_CMD
+};
+
+#pragma pack(push, sdmx, 1)
+
+struct sdmx_proc_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u8 flags;
+ struct sdmx_buff_descr in_buf_descr;
+ u32 inp_fill_cnt;
+ u32 in_rd_offset;
+ u32 num_filters;
+ struct sdmx_filter_status filters_status[];
+};
+
+struct sdmx_proc_rsp {
+ enum sdmx_status ret;
+ u32 inp_fill_cnt;
+ u32 in_rd_offset;
+ u32 err_indicators;
+ u32 status_indicators;
+};
+
+struct sdmx_open_ses_req {
+ enum sdmx_cmd_id cmd_id;
+};
+
+struct sdmx_open_ses_rsp {
+ enum sdmx_status ret;
+ u32 session_handle;
+};
+
+struct sdmx_close_ses_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+};
+
+struct sdmx_close_ses_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_ses_cfg_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ enum sdmx_proc_mode process_mode;
+ enum sdmx_inp_mode input_mode;
+ enum sdmx_pkt_format packet_len;
+ u8 odd_scramble_bits;
+ u8 even_scramble_bits;
+};
+
+struct sdmx_ses_cfg_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_set_kl_ind_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 pid;
+ u32 kl_index;
+};
+
+struct sdmx_set_kl_ind_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_add_filt_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 pid;
+ enum sdmx_filter filter_type;
+ struct sdmx_buff_descr meta_data_buf;
+ enum sdmx_buf_mode buffer_mode;
+ enum sdmx_raw_out_format ts_out_format;
+ u32 flags;
+ u32 num_data_bufs;
+ struct sdmx_data_buff_descr data_bufs[];
+};
+
+struct sdmx_add_filt_rsp {
+ enum sdmx_status ret;
+ u32 filter_handle;
+};
+
+struct sdmx_rem_filt_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 filter_handle;
+};
+
+struct sdmx_rem_filt_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_add_raw_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 filter_handle;
+ u32 pid;
+};
+
+struct sdmx_add_raw_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_rem_raw_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 filter_handle;
+ u32 pid;
+};
+
+struct sdmx_rem_raw_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_get_counters_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 num_filters;
+};
+
+struct sdmx_get_counters_rsp {
+ enum sdmx_status ret;
+ struct sdmx_session_dbg_counters session_counters;
+ u32 num_filters;
+ struct sdmx_filter_dbg_counters filter_counters[];
+};
+
+struct sdmx_rst_counters_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+};
+
+struct sdmx_rst_counters_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_get_version_req {
+ enum sdmx_cmd_id cmd_id;
+};
+
+struct sdmx_get_version_rsp {
+ enum sdmx_status ret;
+ int32_t version;
+};
+
+struct sdmx_set_log_level_req {
+ enum sdmx_cmd_id cmd_id;
+ enum sdmx_log_level level;
+ u32 session_handle;
+};
+
+struct sdmx_set_log_level_rsp {
+ enum sdmx_status ret;
+};
+
+#pragma pack(pop, sdmx)
+
+static int get_cmd_rsp_buffers(int handle_index,
+ void **cmd,
+ int *cmd_len,
+ void **rsp,
+ int *rsp_len)
+{
+ if (*cmd_len & QSEECOM_ALIGN_MASK)
+ *cmd_len = QSEECOM_ALIGN(*cmd_len);
+
+ if (*rsp_len & QSEECOM_ALIGN_MASK)
+ *rsp_len = QSEECOM_ALIGN(*rsp_len);
+
+ if ((*rsp_len + *cmd_len) > QSEECOM_SBUFF_SIZE) {
+ pr_err("%s: shared buffer too small to hold cmd=%d and rsp=%d\n",
+ __func__, *cmd_len, *rsp_len);
+ return SDMX_STATUS_OUT_OF_MEM;
+ }
+
+ *cmd = sdmx_qseecom_handles[handle_index]->sbuf;
+ *rsp = sdmx_qseecom_handles[handle_index]->sbuf + *cmd_len;
+ return SDMX_SUCCESS;
+}
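+
+/*
+ * Illustrative sketch (not part of the original patch): QSEECOM_ALIGN()
+ * rounds a length up to the QSEECom alignment boundary so that the
+ * response buffer starts at an aligned offset within the shared buffer.
+ * For example, assuming a 64-byte boundary (the real value comes from
+ * qseecom_kernel.h), a 20-byte command is padded to 64 and the response
+ * is placed at sbuf + 64. A hypothetical helper mirroring the rounding:
+ */
+static inline int sdmx_align_len(int len)
+{
+ /* Same rounding as performed in get_cmd_rsp_buffers() above */
+ return (len & QSEECOM_ALIGN_MASK) ? QSEECOM_ALIGN(len) : len;
+}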
+
+/*
+ * Returns the version of the secure demux app.
+ *
+ * @session_handle: handle of an open secure demux instance.
+ * @version: returned version value. Must not be NULL.
+ * Return error code
+ */
+int sdmx_get_version(int session_handle, int32_t *version)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_get_version_req *cmd;
+ struct sdmx_get_version_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+ (version == NULL))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_get_version_req);
+ rsp_len = sizeof(struct sdmx_get_version_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_GET_VERSION_CMD;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+ *version = rsp->version;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_get_version);
+
+/*
+ * Initializes a new secure demux instance and returns a handle to it.
+ *
+ * @session_handle: returned instance handle. Must not be NULL.
+ * Return error code
+ */
+int sdmx_open_session(int *session_handle)
+{
+ int res, cmd_len, rsp_len;
+ enum sdmx_status ret, version_ret;
+ struct sdmx_open_ses_req *cmd;
+ struct sdmx_open_ses_rsp *rsp;
+ struct qseecom_handle *qseecom_handle = NULL;
+ int32_t version;
+
+ /* Input validation */
+ if (session_handle == NULL)
+ return SDMX_STATUS_GENERAL_FAILURE;
+
+ /* Start the TZ app */
+ res = qseecom_start_app(&qseecom_handle, "securemm",
+ QSEECOM_SBUFF_SIZE);
+
+ if (res < 0)
+ return SDMX_STATUS_GENERAL_FAILURE;
+
+ cmd_len = sizeof(struct sdmx_open_ses_req);
+ rsp_len = sizeof(struct sdmx_open_ses_rsp);
+
+ /* Get command and response buffers */
+ cmd = (struct sdmx_open_ses_req *)qseecom_handle->sbuf;
+
+ if (cmd_len & QSEECOM_ALIGN_MASK)
+ cmd_len = QSEECOM_ALIGN(cmd_len);
+
+	rsp = (struct sdmx_open_ses_rsp *)(qseecom_handle->sbuf + cmd_len);
+
+ if (rsp_len & QSEECOM_ALIGN_MASK)
+ rsp_len = QSEECOM_ALIGN(rsp_len);
+
+ /* Will be later overridden by SDMX response */
+ *session_handle = SDMX_INVALID_SESSION_HANDLE;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_OPEN_SESSION_CMD;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(qseecom_handle, (void *)cmd, cmd_len,
+ (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ qseecom_shutdown_app(&qseecom_handle);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ /* Parse response struct */
+ *session_handle = rsp->session_handle;
+
+ /* Initialize handle and mutex */
+ sdmx_qseecom_handles[*session_handle] = qseecom_handle;
+ mutex_init(&sdmx_lock[*session_handle]);
+ ret = rsp->ret;
+
+ /* Get and print the app version */
+ version_ret = sdmx_get_version(*session_handle, &version);
+ if (version_ret == SDMX_SUCCESS)
+ pr_info("TZ SDMX version is %x.%x\n", version >> 8,
+ version & 0xFF);
+ else
+ pr_err("Error reading TZ SDMX version\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_open_session);
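+
+/*
+ * Illustrative usage sketch (not part of the original patch): a typical
+ * caller opens a session, configures it, and closes it on teardown. The
+ * configuration values below are hypothetical placeholders; real values
+ * come from the enums in mpq_sdmx.h.
+ */
+static int __maybe_unused sdmx_usage_example(void)
+{
+ int session = SDMX_INVALID_SESSION_HANDLE;
+ enum sdmx_proc_mode proc_mode = 0; /* placeholder */
+ enum sdmx_inp_mode inp_mode = 0; /* placeholder */
+ enum sdmx_pkt_format pkt_format = 0; /* placeholder */
+ int ret;
+
+ ret = sdmx_open_session(&session);
+ if (ret != SDMX_SUCCESS)
+ return -ENODEV;
+
+ ret = sdmx_set_session_cfg(session, proc_mode, inp_mode, pkt_format,
+ 0x3 /* odd scramble bits */, 0x2 /* even scramble bits */);
+
+ sdmx_close_session(session);
+ return ret == SDMX_SUCCESS ? 0 : -EINVAL;
+}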
+
+/*
+ * Closes a secure demux instance.
+ *
+ * @session_handle: handle of a secure demux instance to close.
+ * Return error code
+ */
+int sdmx_close_session(int session_handle)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_close_ses_req *cmd;
+ struct sdmx_close_ses_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_close_ses_req);
+ rsp_len = sizeof(struct sdmx_close_ses_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_CLOSE_SESSION_CMD;
+ cmd->session_handle = session_handle;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+
+ /* Shutdown the TZ app (or at least free the current handle) */
+ res = qseecom_shutdown_app(&sdmx_qseecom_handles[session_handle]);
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ sdmx_qseecom_handles[session_handle] = NULL;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_close_session);
+
+/*
+ * Configures an open secure demux instance.
+ *
+ * @session_handle: secure demux instance
+ * @proc_mode: Defines secure demux's behavior in case of output
+ * buffer overflow.
+ * @inp_mode: Defines the input encryption settings.
+ * @pkt_format: TS packet length in input buffer.
+ * @odd_scramble_bits: Value of the scramble bits indicating the ODD key.
+ * @even_scramble_bits: Value of the scramble bits indicating the EVEN key.
+ * Return error code
+ */
+int sdmx_set_session_cfg(int session_handle,
+ enum sdmx_proc_mode proc_mode,
+ enum sdmx_inp_mode inp_mode,
+ enum sdmx_pkt_format pkt_format,
+ u8 odd_scramble_bits,
+ u8 even_scramble_bits)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_ses_cfg_req *cmd;
+ struct sdmx_ses_cfg_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_ses_cfg_req);
+ rsp_len = sizeof(struct sdmx_ses_cfg_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_SET_SESSION_CFG_CMD;
+ cmd->session_handle = session_handle;
+ cmd->process_mode = proc_mode;
+ cmd->input_mode = inp_mode;
+ cmd->packet_len = pkt_format;
+ cmd->odd_scramble_bits = odd_scramble_bits;
+ cmd->even_scramble_bits = even_scramble_bits;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_set_session_cfg);
+
+/*
+ * Creates a new secure demux filter and returns a filter handle
+ *
+ * @session_handle: secure demux instance
+ * @pid: pid to filter
+ * @filter_type: type of filtering
+ * @meta_data_buf: meta data buffer descriptor
+ * @d_buf_mode: data buffer mode (ring/linear)
+ * @num_data_bufs: number of data buffers (use 1 for a ring buffer)
+ * @data_bufs: data buffers descriptors array
+ * @filter_handle: returned filter handle
+ * @ts_out_format: output format for raw filters
+ * @flags: optional flags for filter
+ * (currently only clear section CRC verification is supported)
+ *
+ * Return error code
+ */
+int sdmx_add_filter(int session_handle,
+ u16 pid,
+	enum sdmx_filter filter_type,
+ struct sdmx_buff_descr *meta_data_buf,
+ enum sdmx_buf_mode d_buf_mode,
+ u32 num_data_bufs,
+ struct sdmx_data_buff_descr *data_bufs,
+ int *filter_handle,
+ enum sdmx_raw_out_format ts_out_format,
+ u32 flags)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_add_filt_req *cmd;
+ struct sdmx_add_filt_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+ (filter_handle == NULL))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_add_filt_req)
+ + num_data_bufs * sizeof(struct sdmx_data_buff_descr);
+ rsp_len = sizeof(struct sdmx_add_filt_rsp);
+
+ /* Will be later overridden by SDMX response */
+ *filter_handle = SDMX_INVALID_FILTER_HANDLE;
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_ADD_FILTER_CMD;
+ cmd->session_handle = session_handle;
+ cmd->pid = (u32)pid;
+	cmd->filter_type = filter_type;
+ cmd->ts_out_format = ts_out_format;
+ cmd->flags = flags;
+ if (meta_data_buf != NULL)
+ memcpy(&(cmd->meta_data_buf), meta_data_buf,
+ sizeof(struct sdmx_buff_descr));
+ else
+ memset(&(cmd->meta_data_buf), 0, sizeof(cmd->meta_data_buf));
+
+ cmd->buffer_mode = d_buf_mode;
+ cmd->num_data_bufs = num_data_bufs;
+ memcpy(cmd->data_bufs, data_bufs,
+ num_data_bufs * sizeof(struct sdmx_data_buff_descr));
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ /* Parse response struct */
+ *filter_handle = rsp->filter_handle;
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_add_filter);
+
+/*
+ * Removes a secure demux filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: filter handle to remove
+ *
+ * Return error code
+ */
+int sdmx_remove_filter(int session_handle, int filter_handle)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_rem_filt_req *cmd;
+ struct sdmx_rem_filt_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_rem_filt_req);
+ rsp_len = sizeof(struct sdmx_rem_filt_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_REMOVE_FILTER_CMD;
+ cmd->session_handle = session_handle;
+ cmd->filter_handle = filter_handle;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_remove_filter);
+
+/*
+ * Associates a key ladder index for the specified pid
+ *
+ * @session_handle: secure demux instance
+ * @pid: pid
+ * @key_ladder_index: key ladder index to associate to the pid
+ *
+ * Return error code
+ *
+ * Note: if the pid already has a key ladder index associated with it, the
+ * index will be overridden.
+ */
+int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_set_kl_ind_req *cmd;
+ struct sdmx_set_kl_ind_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_set_kl_ind_req);
+ rsp_len = sizeof(struct sdmx_set_kl_ind_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_SET_KL_IDX_CMD;
+ cmd->session_handle = session_handle;
+ cmd->pid = (u32)pid;
+ cmd->kl_index = key_ladder_index;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_set_kl_ind);
+
+/*
+ * Adds the specified pid to an existing raw (recording) filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: raw filter handle
+ * @pid: pid
+ *
+ * Return error code
+ */
+int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_add_raw_req *cmd;
+ struct sdmx_add_raw_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_add_raw_req);
+ rsp_len = sizeof(struct sdmx_add_raw_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_ADD_RAW_PID_CMD;
+ cmd->session_handle = session_handle;
+ cmd->filter_handle = filter_handle;
+ cmd->pid = (u32)pid;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_add_raw_pid);
+
+/*
+ * Removes the specified pid from a raw (recording) filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: raw filter handle
+ * @pid: pid
+ *
+ * Return error code
+ */
+int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_rem_raw_req *cmd;
+ struct sdmx_rem_raw_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_rem_raw_req);
+ rsp_len = sizeof(struct sdmx_rem_raw_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_REMOVE_RAW_PID_CMD;
+ cmd->session_handle = session_handle;
+ cmd->filter_handle = filter_handle;
+ cmd->pid = (u32)pid;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_remove_raw_pid);
+
+/*
+ * Call secure demux to perform processing on the specified input buffer
+ *
+ * @session_handle: secure demux instance
+ * @flags: input flags. Currently only EOS marking is supported.
+ * @input_buf_desc: input buffer descriptor
+ * @input_fill_count: number of bytes available in input buffer
+ * @input_read_offset: offset inside input buffer where data starts
+ * @error_indicators: returned general error indicators
+ * @status_indicators: returned general status indicators
+ * @num_filters: number of filters in filter status array
+ * @filter_status: filter status descriptor array
+ *
+ * Return error code
+ */
+int sdmx_process(int session_handle, u8 flags,
+ struct sdmx_buff_descr *input_buf_desc,
+ u32 *input_fill_count,
+ u32 *input_read_offset,
+ u32 *error_indicators,
+ u32 *status_indicators,
+ u32 num_filters,
+ struct sdmx_filter_status *filter_status)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_proc_req *cmd;
+ struct sdmx_proc_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+ (input_buf_desc == NULL) ||
+ (input_fill_count == NULL) || (input_read_offset == NULL) ||
+ (error_indicators == NULL) || (status_indicators == NULL) ||
+ (filter_status == NULL))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_proc_req)
+ + num_filters * sizeof(struct sdmx_filter_status);
+ rsp_len = sizeof(struct sdmx_proc_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_PROCESS_CMD;
+ cmd->session_handle = session_handle;
+ cmd->flags = flags;
+ cmd->in_buf_descr.base_addr = input_buf_desc->base_addr;
+ cmd->in_buf_descr.size = input_buf_desc->size;
+ cmd->inp_fill_cnt = *input_fill_count;
+ cmd->in_rd_offset = *input_read_offset;
+ cmd->num_filters = num_filters;
+ memcpy(cmd->filters_status, filter_status,
+ num_filters * sizeof(struct sdmx_filter_status));
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ /* Parse response struct */
+ *input_fill_count = rsp->inp_fill_cnt;
+ *input_read_offset = rsp->in_rd_offset;
+ *error_indicators = rsp->err_indicators;
+ *status_indicators = rsp->status_indicators;
+ memcpy(filter_status, cmd->filters_status,
+ num_filters * sizeof(struct sdmx_filter_status));
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_process);
+
+/*
+ * Returns session-level & filter-level debug counters
+ *
+ * @session_handle: secure demux instance
+ * @session_counters: returned session-level debug counters
+ * @num_filters: returned number of filters reported in filter_counters
+ * @filter_counters: returned filter-level debug counters array
+ *
+ * Return error code
+ */
+int sdmx_get_dbg_counters(int session_handle,
+ struct sdmx_session_dbg_counters *session_counters,
+ u32 *num_filters,
+ struct sdmx_filter_dbg_counters *filter_counters)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_get_counters_req *cmd;
+ struct sdmx_get_counters_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+ (session_counters == NULL) || (num_filters == NULL) ||
+ (filter_counters == NULL))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_get_counters_req);
+ rsp_len = sizeof(struct sdmx_get_counters_rsp)
+ + *num_filters * sizeof(struct sdmx_filter_dbg_counters);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_GET_DBG_COUNTERS_CMD;
+ cmd->session_handle = session_handle;
+ cmd->num_filters = *num_filters;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ /* Parse response struct */
+ *session_counters = rsp->session_counters;
+ *num_filters = rsp->num_filters;
+ memcpy(filter_counters, rsp->filter_counters,
+ *num_filters * sizeof(struct sdmx_filter_dbg_counters));
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_get_dbg_counters);
+
+/*
+ * Reset debug counters
+ *
+ * @session_handle: secure demux instance
+ *
+ * Return error code
+ */
+int sdmx_reset_dbg_counters(int session_handle)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_rst_counters_req *cmd;
+ struct sdmx_rst_counters_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_rst_counters_req);
+ rsp_len = sizeof(struct sdmx_rst_counters_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_RESET_DBG_COUNTERS_CMD;
+ cmd->session_handle = session_handle;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_reset_dbg_counters);
+
+/*
+ * Set debug log verbosity level
+ *
+ * @session_handle: secure demux instance
+ * @level: requested log level
+ *
+ * Return error code
+ */
+int sdmx_set_log_level(int session_handle, enum sdmx_log_level level)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_set_log_level_req *cmd;
+ struct sdmx_set_log_level_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_set_log_level_req);
+ rsp_len = sizeof(struct sdmx_set_log_level_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_SET_LOG_LEVEL_CMD;
+ cmd->session_handle = session_handle;
+ cmd->level = level;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+ ret = rsp->ret;
+out:
+ /* Unlock */
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return ret;
+}
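Taken together, the exported entry points above form a small session lifecycle: open, configure, add filters, process input, tear down. A minimal usage sketch of hypothetical caller code (the pid 0x100 is arbitrary, and meta_buf / data_buf are assumed to be sdmx_buff_descr / sdmx_data_buff_descr descriptors already populated with valid physical addresses):

	int session = SDMX_INVALID_SESSION_HANDLE;
	int filter = SDMX_INVALID_FILTER_HANDLE;
	int ret;

	ret = sdmx_open_session(&session);
	if (ret != SDMX_SUCCESS)
		return ret;

	ret = sdmx_set_session_cfg(session, SDMX_PULL_MODE, SDMX_CLEAR_MODE,
				   SDMX_188_BYTE_PKT, 0, 0);
	if (ret == SDMX_SUCCESS)
		ret = sdmx_add_filter(session, 0x100, SDMX_PES_FILTER,
				      &meta_buf, SDMX_RING_BUF, 1, &data_buf,
				      &filter, SDMX_188_OUTPUT, 0);

	/* ... feed data repeatedly through sdmx_process() ... */

	if (filter != SDMX_INVALID_FILTER_HANDLE)
		sdmx_remove_filter(session, filter);
	sdmx_close_session(session);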
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
new file mode 100644
index 000000000000..799f688d186a
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
@@ -0,0 +1,368 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_SDMX_H
+#define _MPQ_SDMX_H
+
+#include <linux/types.h>
+
+/* Constant declarations */
+#define SDMX_MAX_SESSIONS (4)
+#define SDMX_LOOPBACK_PID (0x2000)
+
+#define SDMX_MAX_PHYSICAL_CHUNKS (256)
+
+/* Filter-level error indicators */
+#define SDMX_FILTER_SUCCESS (0)
+#define SDMX_FILTER_ERR_MD_BUF_FULL BIT(0)
+#define SDMX_FILTER_ERR_D_BUF_FULL BIT(1)
+#define SDMX_FILTER_ERR_D_LIN_BUFS_FULL BIT(2)
+#define SDMX_FILTER_ERR_INVALID_SCRAMBLE_BITS BIT(3)
+#define SDMX_FILTER_ERR_KL_IND_NOT_SET BIT(4)
+#define SDMX_FILTER_ERR_CAS_DECRYPT_ERROR BIT(5)
+#define SDMX_FILTER_ERR_SEC_VERIF_CRC32_FAIL BIT(6)
+#define SDMX_FILTER_ERR_SEC_INTERNAL_MALLOC_FAIL BIT(7)
+#define SDMX_FILTER_ERR_SEC_LEN_INVALID BIT(8)
+#define SDMX_FILTER_ERR_SEC_PUSI_PTR_INVALID BIT(9)
+#define SDMX_FILTER_ERR_TS_SYNC_BYTE_INVALID BIT(10)
+#define SDMX_FILTER_ERR_TS_TRANSPORT_ERR BIT(11)
+#define SDMX_FILTER_ERR_CONT_CNT_INVALID BIT(12)
+#define SDMX_FILTER_ERR_CONT_CNT_DUPLICATE BIT(13)
+#define SDMX_FILTER_ERR_INVALID_PES_HDR BIT(14)
+#define SDMX_FILTER_ERR_INVALID_PES_LEN BIT(15)
+#define SDMX_FILTER_ERR_INVALID_PES_ENCRYPTION BIT(16)
+#define SDMX_FILTER_ERR_SECURITY_FAULT BIT(17)
+#define SDMX_FILTER_ERR_IN_NS_BUFFER BIT(18)
+
+/* Filter-level status indicators */
+#define SDMX_FILTER_STATUS_EOS BIT(0)
+#define SDMX_FILTER_STATUS_WR_PTR_CHANGED BIT(1)
+
+/* Filter-level flags */
+#define SDMX_FILTER_FLAG_VERIFY_SECTION_CRC BIT(0)
+
+#define SDMX_INVALID_SESSION_HANDLE (-1)
+#define SDMX_INVALID_FILTER_HANDLE (-1)
+
+/* Input flags */
+#define SDMX_INPUT_FLAG_EOS BIT(0)
+#define SDMX_INPUT_FLAG_DBG_ENABLE BIT(1)
+
+
+enum sdmx_buf_mode {
+ SDMX_RING_BUF,
+ SDMX_LINEAR_GROUP_BUF,
+};
+
+enum sdmx_proc_mode {
+ SDMX_PUSH_MODE,
+ SDMX_PULL_MODE,
+};
+
+enum sdmx_inp_mode {
+ SDMX_PKT_ENC_MODE,
+ SDMX_BULK_ENC_MODE,
+ SDMX_CLEAR_MODE,
+};
+
+enum sdmx_pkt_format {
+ SDMX_188_BYTE_PKT = 188,
+ SDMX_192_BYTE_PKT = 192,
+ SDMX_195_BYTE_PKT = 195,
+};
+
+enum sdmx_log_level {
+ SDMX_LOG_NO_PRINT,
+ SDMX_LOG_MSG_ERROR,
+ SDMX_LOG_DEBUG,
+ SDMX_LOG_VERBOSE
+};
+
+enum sdmx_status {
+ SDMX_SUCCESS = 0,
+ SDMX_STATUS_GENERAL_FAILURE = -1,
+ SDMX_STATUS_MAX_OPEN_SESSIONS_REACHED = -2,
+ SDMX_STATUS_INVALID_SESSION_HANDLE = -3,
+ SDMX_STATUS_INVALID_INPUT_PARAMS = -4,
+ SDMX_STATUS_UNSUPPORTED_MODE = -5,
+ SDMX_STATUS_INVALID_PID = -6,
+ SDMX_STATUS_OUT_OF_MEM = -7,
+ SDMX_STATUS_FILTER_EXISTS = -8,
+ SDMX_STATUS_INVALID_FILTER_HANDLE = -9,
+ SDMX_STATUS_MAX_RAW_PIDS_REACHED = -10,
+ SDMX_STATUS_SINGLE_PID_RAW_FILTER = -11,
+ SDMX_STATUS_INP_BUF_INVALID_PARAMS = -12,
+ SDMX_STATUS_INVALID_FILTER_CFG = -13,
+ SDMX_STATUS_STALLED_IN_PULL_MODE = -14,
+ SDMX_STATUS_SECURITY_FAULT = -15,
+ SDMX_STATUS_NS_BUFFER_ERROR = -16,
+};
+
+enum sdmx_filter {
+ SDMX_PES_FILTER, /* Other PES */
+ SDMX_SEPARATED_PES_FILTER, /* Separated PES (for decoder) */
+ SDMX_SECTION_FILTER, /* Section */
+ SDMX_PCR_FILTER, /* PCR */
+ SDMX_RAW_FILTER, /* Recording */
+};
+
+enum sdmx_raw_out_format {
+ SDMX_188_OUTPUT,
+ SDMX_192_HEAD_OUTPUT,
+ SDMX_192_TAIL_OUTPUT
+};
+
+#pragma pack(push, sdmx, 1)
+
+struct sdmx_session_dbg_counters {
+ /* Total number of TS-packets input to SDMX. */
+ u32 ts_pkt_in;
+
+ /* Total number of TS-packets filtered out by SDMX. */
+ u32 ts_pkt_out;
+};
+
+struct sdmx_filter_dbg_counters {
+ int filter_handle;
+
+ /* Number of TS-packets filtered. */
+ u32 ts_pkt_count;
+
+ /* Number of TS-packets with adaptation field only (no payload). */
+ u32 ts_pkt_no_payload;
+
+ /* Number of TS-packets with the discontinuity indicator set. */
+ u32 ts_pkt_discont;
+
+ /* Number of duplicate TS-packets detected. */
+ u32 ts_pkt_dup;
+
+ /* Number of packets not decrypted because the key wasn't ready. */
+ u32 ts_pkt_key_not_ready;
+};
+
+struct sdmx_pes_counters {
+ /* Number of TS packets with the TEI flag set */
+ u32 transport_err_count;
+
+ /* Number of TS packets with continuity counter errors */
+ u32 continuity_err_count;
+
+ /* Number of TS packets composing this PES frame */
+ u32 pes_ts_count;
+
+ /* Number of TS packets dropped due to full buffer */
+ u32 drop_count;
+};
+
+struct sdmx_buff_descr {
+ /* Physical address where buffer starts */
+ u64 base_addr;
+
+ /* Size of buffer */
+ u32 size;
+};
+
+struct sdmx_data_buff_descr {
+ /* Physical chunks of the buffer */
+ struct sdmx_buff_descr buff_chunks[SDMX_MAX_PHYSICAL_CHUNKS];
+
+ /* Length of buffer */
+ u32 length;
+};
+
+/*
+ * Data payload residing in the data buffers is described using this meta-data
+ * header. The meta-data header specifies where the payload is located in the
+ * data buffer and how big it is.
+ * Additional relevant meta-data may optionally follow the meta-data header
+ * immediately.
+ */
+struct sdmx_metadata_header {
+ /*
+ * Payload start offset inside data buffer. In case data is managed
+ * as a linear buffer group, this specifies buffer index.
+ */
+ u32 payload_start;
+
+ /* Payload length */
+ u32 payload_length;
+
+ /* Number of meta data bytes immediately following this header */
+ u32 metadata_length;
+};
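A consumer walking the meta-data buffer reads one such header at a time and uses it to locate the payload. A hedged sketch, where md is a hypothetical byte cursor into the meta-data buffer, data_buf_base is the mapped start of the data buffer, and consume() stands in for the real payload handler (ring-buffer mode; wrap-around handling omitted):

	struct sdmx_metadata_header hdr;

	memcpy(&hdr, md, sizeof(hdr));
	/* payload bytes live in the data buffer at payload_start */
	consume(data_buf_base + hdr.payload_start, hdr.payload_length);
	/* optional extra meta-data immediately follows the header */
	md += sizeof(hdr) + hdr.metadata_length;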
+
+
+struct sdmx_filter_status {
+ /* Secure demux filter handle */
+ int filter_handle;
+
+ /*
+ * Number of pending bytes in filter's output data buffer.
+ * For linear buffer mode, this is the number of buffers pending.
+ */
+ u32 data_fill_count;
+
+ /*
+ * Offset in data buffer for next data payload to be written.
+ * For linear buffer mode, this is a buffer index.
+ */
+ u32 data_write_offset;
+
+ /* Number of pending bytes in filter's output meta data buffer */
+ u32 metadata_fill_count;
+
+ /* Offset in meta data buffer for next metadata header to be written */
+ u32 metadata_write_offset;
+
+ /* Errors (bitmap) reported by secure demux for this filter */
+ u32 error_indicators;
+
+ /* General status (bitmap) reported by secure demux for this filter */
+ u32 status_indicators;
+};
+#pragma pack(pop, sdmx)
+
+#ifdef CONFIG_QSEECOM
+
+int sdmx_open_session(int *session_handle);
+
+int sdmx_close_session(int session_handle);
+
+int sdmx_get_version(int session_handle, int32_t *version);
+
+int sdmx_set_session_cfg(int session_handle, enum sdmx_proc_mode proc_mode,
+ enum sdmx_inp_mode inp_mode, enum sdmx_pkt_format pkt_format,
+ u8 odd_scramble_bits, u8 even_scramble_bits);
+
+int sdmx_add_filter(int session_handle, u16 pid, enum sdmx_filter filter_type,
+ struct sdmx_buff_descr *meta_data_buf, enum sdmx_buf_mode data_buf_mode,
+ u32 num_data_bufs, struct sdmx_data_buff_descr *data_bufs,
+ int *filter_handle, enum sdmx_raw_out_format ts_out_format, u32 flags);
+
+int sdmx_remove_filter(int session_handle, int filter_handle);
+
+int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index);
+
+int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid);
+
+int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid);
+
+int sdmx_process(int session_handle, u8 flags,
+ struct sdmx_buff_descr *input_buf_desc,
+ u32 *input_fill_count, u32 *input_read_offset,
+ u32 *error_indicators,
+ u32 *status_indicators,
+ u32 num_filters,
+ struct sdmx_filter_status *filter_status);
+
+int sdmx_get_dbg_counters(int session_handle,
+ struct sdmx_session_dbg_counters *session_counters,
+ u32 *num_filters,
+ struct sdmx_filter_dbg_counters *filter_counters);
+
+int sdmx_reset_dbg_counters(int session_handle);
+
+int sdmx_set_log_level(int session_handle, enum sdmx_log_level level);
+
+#else
+
+static inline int sdmx_open_session(int *session_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_close_session(int session_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_get_version(int session_handle, int32_t *version)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_session_cfg(int session_handle,
+ enum sdmx_proc_mode proc_mode,
+ enum sdmx_inp_mode inp_mode, enum sdmx_pkt_format pkt_format,
+ u8 odd_scramble_bits, u8 even_scramble_bits)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_add_filter(int session_handle, u16 pid,
+ enum sdmx_filter filter_type,
+ struct sdmx_buff_descr *meta_data_buf, enum sdmx_buf_mode data_buf_mode,
+ u32 num_data_bufs, struct sdmx_data_buff_descr *data_bufs,
+ int *filter_handle, enum sdmx_raw_out_format ts_out_format, u32 flags)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_remove_filter(int session_handle, int filter_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_kl_ind(int session_handle, u16 pid,
+ u32 key_ladder_index)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_add_raw_pid(int session_handle, int filter_handle,
+ u16 pid)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_remove_raw_pid(int session_handle, int filter_handle,
+ u16 pid)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_process(int session_handle, u8 flags,
+ struct sdmx_buff_descr *input_buf_desc,
+ u32 *input_fill_count, u32 *input_read_offset,
+ u32 *error_indicators,
+ u32 *status_indicators,
+ u32 num_filters,
+ struct sdmx_filter_status *filter_status)
+{
+ *status_indicators = 0;
+ *error_indicators = 0;
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_get_dbg_counters(int session_handle,
+ struct sdmx_session_dbg_counters *session_counters,
+ u32 *num_filters,
+ struct sdmx_filter_dbg_counters *filter_counters)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_reset_dbg_counters(int session_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_log_level(int session_handle,
+ enum sdmx_log_level level)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+#endif
+
+#endif /* _MPQ_SDMX_H */
diff --git a/drivers/media/platform/msm/dvb/include/mpq_adapter.h b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
new file mode 100644
index 000000000000..54e671c3b38d
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
@@ -0,0 +1,199 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_ADAPTER_H
+#define _MPQ_ADAPTER_H
+
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "mpq_stream_buffer.h"
+
+
+
+/** IDs of interfaces holding stream-buffers */
+enum mpq_adapter_stream_if {
+ /** Interface holding stream-buffer for video0 stream */
+ MPQ_ADAPTER_VIDEO0_STREAM_IF = 0,
+
+ /** Interface holding stream-buffer for video1 stream */
+ MPQ_ADAPTER_VIDEO1_STREAM_IF = 1,
+
+ /** Interface holding stream-buffer for video2 stream */
+ MPQ_ADAPTER_VIDEO2_STREAM_IF = 2,
+
+ /** Interface holding stream-buffer for video3 stream */
+ MPQ_ADAPTER_VIDEO3_STREAM_IF = 3,
+
+ /** Maximum number of interfaces holding stream-buffers */
+ MPQ_ADAPTER_MAX_NUM_OF_INTERFACES,
+};
+
+enum dmx_packet_type {
+ DMX_PES_PACKET,
+ DMX_FRAMING_INFO_PACKET,
+ DMX_EOS_PACKET,
+ DMX_MARKER_PACKET
+};
+
+struct dmx_pts_dts_info {
+ /** Indication whether PTS exist */
+ int pts_exist;
+
+ /** Indication whether DTS exist */
+ int dts_exist;
+
+ /** PTS value associated with the PES data if any */
+ u64 pts;
+
+ /** DTS value associated with the PES data if any */
+ u64 dts;
+};
+
+struct dmx_framing_packet_info {
+ /** framing pattern type, one of DMX_IDX_* definitions */
+ u64 pattern_type;
+
+ /** PTS/DTS information */
+ struct dmx_pts_dts_info pts_dts_info;
+
+ /** STC value attached to first TS packet holding the pattern */
+ u64 stc;
+
+ /*
+ * Number of TS packets with Transport Error Indicator (TEI)
+ * found while constructing the frame.
+ */
+ __u32 transport_error_indicator_counter;
+
+ /* Number of continuity errors found while constructing the frame */
+ __u32 continuity_error_counter;
+
+ /*
+ * Number of dropped bytes due to insufficient buffer space,
+ * since last reported frame.
+ */
+ __u32 ts_dropped_bytes;
+
+ /* Total number of TS packets holding the frame */
+ __u32 ts_packets_num;
+};
+
+struct dmx_pes_packet_info {
+ /** PTS/DTS information */
+ struct dmx_pts_dts_info pts_dts_info;
+
+ /** STC value attached to first TS packet holding the PES */
+ u64 stc;
+};
+
+struct dmx_marker_info {
+ /* marker id */
+ u64 id;
+};
+
+/** The meta-data used for video interface */
+struct mpq_adapter_video_meta_data {
+ /** meta-data packet type */
+ enum dmx_packet_type packet_type;
+
+ /** packet-type specific information */
+ union {
+ struct dmx_framing_packet_info framing;
+ struct dmx_pes_packet_info pes;
+ struct dmx_marker_info marker;
+ } info;
+} __packed;
+
+
+/** Callback function to notify on registrations of specific interfaces */
+typedef void (*mpq_adapter_stream_if_callback)(
+ enum mpq_adapter_stream_if interface_id,
+ void *user_param);
+
+
+/**
+ * mpq_adapter_get - Returns pointer to Qualcomm Technologies Inc. DVB adapter
+ *
+ * Return dvb adapter or NULL if it does not exist.
+ */
+struct dvb_adapter *mpq_adapter_get(void);
+
+
+/**
+ * mpq_adapter_register_stream_if - Register a stream interface.
+ *
+ * @interface_id: The interface id
+ * @stream_buffer: The buffer used for the interface
+ *
+ * Return error status
+ *
+ * A stream interface is used to connect two units in tunneling
+ * mode using the mpq_streambuffer implementation.
+ * The producer of the interface should register the new interface;
+ * a consumer may then obtain it using mpq_adapter_get_stream_if.
+ *
+ * Note that the function holds a pointer to this interface; the
+ * stream_buffer pointer is assumed to remain valid as long as the
+ * interface is active.
+ */
+int mpq_adapter_register_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer *stream_buffer);
+
+
+/**
+ * mpq_adapter_unregister_stream_if - Un-register a stream interface.
+ *
+ * @interface_id: The interface id
+ *
+ * Return error status
+ */
+int mpq_adapter_unregister_stream_if(
+ enum mpq_adapter_stream_if interface_id);
+
+
+/**
+ * mpq_adapter_get_stream_if - Get buffer used for a stream interface.
+ *
+ * @interface_id: The interface id
+ * @stream_buffer: The returned stream buffer
+ *
+ * Return error status
+ */
+int mpq_adapter_get_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer **stream_buffer);
+
+
+/**
+ * mpq_adapter_notify_stream_if - Register notification
+ * to be triggered when a stream interface is registered.
+ *
+ * @interface_id: The interface id
+ * @callback: The callback to be triggered when the interface is registered
+ * @user_param: A parameter that is passed back to the callback function
+ * when triggered.
+ *
+ * Return error status
+ *
+ * A producer may use this to register for notification when the desired
+ * interface is registered in the system, and query its information
+ * afterwards using mpq_adapter_get_stream_if.
+ * To remove the callback, call this function with a NULL value in the
+ * callback parameter.
+ */
+int mpq_adapter_notify_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ mpq_adapter_stream_if_callback callback,
+ void *user_param);
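A unit that may start before the interface it depends on is registered can combine the last two calls above; a sketch under the assumption that a zero return value indicates success (if_ready and start_using are hypothetical):

	static void if_ready(enum mpq_adapter_stream_if id, void *user)
	{
		struct mpq_streambuffer *sbuf;

		if (!mpq_adapter_get_stream_if(id, &sbuf))
			start_using(sbuf);
	}

	/* try now; if not yet registered, ask to be notified later */
	if (mpq_adapter_get_stream_if(MPQ_ADAPTER_VIDEO0_STREAM_IF, &sbuf))
		mpq_adapter_notify_stream_if(MPQ_ADAPTER_VIDEO0_STREAM_IF,
					     if_ready, NULL);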
+
+#endif /* _MPQ_ADAPTER_H */
diff --git a/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h b/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h
new file mode 100644
index 000000000000..720b1b9cb70b
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_DVB_DEBUG_H
+#define _MPQ_DVB_DEBUG_H
+
+/* Enable this line if you want to output debug printouts */
+#define MPG_DVB_DEBUG_ENABLE
+
+#undef MPQ_DVB_DBG_PRINT /* undef it, just in case */
+
+#ifdef MPG_DVB_DEBUG_ENABLE
+#define MPQ_DVB_ERR_PRINT(fmt, args...) pr_err(fmt, ## args)
+#define MPQ_DVB_WARN_PRINT(fmt, args...) pr_warn(fmt, ## args)
+#define MPQ_DVB_NOTICE_PRINT(fmt, args...) pr_notice(fmt, ## args)
+#define MPQ_DVB_DBG_PRINT(fmt, args...) pr_debug(fmt, ## args)
+#else /* MPG_DVB_DEBUG_ENABLE */
+#define MPQ_DVB_ERR_PRINT(fmt, args...)
+#define MPQ_DVB_WARN_PRINT(fmt, args...)
+#define MPQ_DVB_NOTICE_PRINT(fmt, args...)
+#define MPQ_DVB_DBG_PRINT(fmt, args...)
+#endif /* MPG_DVB_DEBUG_ENABLE */
+
+
+/*
+ * The following can be used to disable specific printout
+ * by adding a letter to the end of MPQ_DVB_DBG_PRINT
+ */
+#undef MPQ_DVB_DBG_PRINTT
+#define MPQ_DVB_DBG_PRINTT(fmt, args...)
+
+#endif /* _MPQ_DVB_DEBUG_H */
diff --git a/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h b/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h
new file mode 100644
index 000000000000..b24dc1f3b5ff
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h
@@ -0,0 +1,462 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_STREAM_BUFFER_H
+#define _MPQ_STREAM_BUFFER_H
+
+#include "dvb_ringbuffer.h"
+
+
+/**
+ * DOC: MPQ Stream Buffer
+ *
+ * A stream buffer implementation is used to transfer data between two units
+ * such as demux and decoders. The implementation relies on dvb_ringbuffer
+ * implementation. Refer to dvb_ringbuffer.h for details.
+ *
+ * The implementation uses two dvb_ringbuffers, one to pass the
+ * raw-data (PES payload for example) and the other to pass
+ * meta-data (information from PES header for example).
+ *
+ * The meta-data uses dvb_ringbuffer packet interface. Each meta-data
+ * packet points to the data buffer, and includes the offset to the data in the
+ * buffer, the size of raw-data described by the meta-data packet, and also the
+ * size of user's own parameters if any required.
+ *
+ * Data can be managed in two ways: ring-buffer & linear buffers, as specified
+ * in initialization when calling the mpq_streambuffer_init function.
+ * For managing data as a ring buffer exactly 1 data buffer descriptor must be
+ * specified in initialization. For this mode, dvb_ringbuffer is used "as-is".
+ * For managing data in several linear buffers, an array of buffer descriptors
+ * must be passed.
+ * For both modes, the data descriptor(s) must remain valid throughout the
+ * life span of the mpq_streambuffer object.
+ * Apart from initialization, the API remains the same for both modes.
+ *
+ * Contrary to dvb_ringbuffer implementation, this API makes sure there's
+ * enough data to read/write when making read/write operations.
+ * Users interested to flush/reset specific buffer, check for bytes
+ * ready or space available for write should use the respective services
+ * in dvb_ringbuffer (dvb_ringbuffer_avail, dvb_ringbuffer_free,
+ * dvb_ringbuffer_reset, dvb_ringbuffer_flush,
+ * dvb_ringbuffer_flush_spinlock_wakeup).
+ *
+ * Concurrency protection is handled in the same manner as in
+ * dvb_ringbuffer implementation.
+ *
+ * Typical call flow from producer:
+ *
+ * - Start writing the raw-data of new packet, the following call is
+ * repeated until end of data of the specific packet
+ *
+ * mpq_streambuffer_data_write(...)
+ *
+ * - Now write a new packet describing the new available raw-data
+ * mpq_streambuffer_pkt_write(...)
+ *
+ * For linear buffer mode, writing a new packet with data size > 0, causes the
+ * current buffer to be marked as pending for reading, and triggers moving to
+ * the next available buffer, that shall now be the current write buffer.
+ *
+ * Typical call flow from consumer:
+ *
+ * - Poll for next available packet:
+ * mpq_streambuffer_pkt_next(&streambuff,-1,&len)
+ *
+ * Alternatively, the consumer can wait on an event for new data and then
+ * call mpq_streambuffer_pkt_next. Waiting for data can be done as follows:
+ *
+ *   wait_event_interruptible(
+ *      streambuff->packet_data.queue,
+ *      !dvb_ringbuffer_empty(&streambuff->packet_data) ||
+ *      (streambuff->packet_data.error != 0));
+ *
+ * - Get the new packet information:
+ * mpq_streambuffer_pkt_read(..)
+ *
+ * - Read the raw-data of the new packet. Here you can use two methods:
+ *
+ * 1. Read the data to a user supplied buffer:
+ * mpq_streambuffer_data_read()
+ *
+ * In this case a memory copy is done and the read pointer is updated in
+ * the raw data buffer; the amount of raw-data is provided as part of the
+ * packet's information. The user should then call mpq_streambuffer_pkt_dispose
+ * with dispose_data set to 0 as the raw-data was already disposed.
+ * Note that a secure buffer cannot be accessed directly and an error will
+ * occur.
+ *
+ * 2. Access the data directly using the raw-data address. The address
+ * of the raw data is provided part of the packet's information. User
+ * then should call mpq_streambuffer_pkt_dispose with dispose_data set
+ * to 1 to dispose the packet along with it's raw-data.
+ *
+ * - Disposal of packets:
+ * mpq_streambuffer_pkt_dispose(...)
+ *
+ * For linear buffer mode, disposing of a packet with data size > 0,
+ * regardless of the 'dispose_data' parameter, causes the current buffer's
+ * data to be disposed and marked as free for writing, and triggers moving to
+ * the next available buffer, that shall now be the current read buffer.
+ */
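A compact consumer loop matching the call flow above might look like the following sketch (handle_payload is hypothetical, waiting and error paths are omitted, and user_data is assumed large enough for the producer's private data):

	struct mpq_streambuffer_packet_header pkt;
	u8 user_data[64];
	size_t len;
	ssize_t idx;

	idx = mpq_streambuffer_pkt_next(sbuff, -1, &len);
	while (idx >= 0) {
		if (mpq_streambuffer_pkt_read(sbuff, idx, &pkt, user_data) >= 0)
			handle_payload(sbuff, &pkt);
		/* dispose_data=1 also releases the packet's raw-data */
		mpq_streambuffer_pkt_dispose(sbuff, idx, 1);
		idx = mpq_streambuffer_pkt_next(sbuff, -1, &len);
	}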
+
+struct mpq_streambuffer;
+struct mpq_streambuffer_packet_header;
+
+typedef void (*mpq_streambuffer_dispose_cb) (
+ struct mpq_streambuffer *sbuff,
+ u32 offset,
+ size_t len,
+ void *user_data);
+
+enum mpq_streambuffer_mode {
+ MPQ_STREAMBUFFER_BUFFER_MODE_RING,
+ MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR
+};
+
+/**
+ * struct mpq_streambuffer - mpq stream buffer representation
+ *
+ * @raw_data: The buffer used to hold raw-data, or linear buffer descriptors
+ * @packet_data: The buffer used to hold the meta-data
+ * @buffers: array of buffer descriptor(s) holding buffer initial & dynamic
+ * buffer information
+ * @mode: mpq_streambuffer buffer management work mode - Ring-buffer or Linear
+ * buffers
+ * @buffers_num: number of data buffers to manage
+ * @pending_buffers_count: for linear buffer management, counts the number of
+ * buffers that are pending for reading
+ * @cb: optional callback invoked when raw-data is disposed
+ * @cb_user_data: user data passed to the disposal callback
+ */
+struct mpq_streambuffer {
+ struct dvb_ringbuffer raw_data;
+ struct dvb_ringbuffer packet_data;
+ struct mpq_streambuffer_buffer_desc *buffers;
+ enum mpq_streambuffer_mode mode;
+ u32 buffers_num;
+ u32 pending_buffers_count;
+ mpq_streambuffer_dispose_cb cb;
+ void *cb_user_data;
+};
+
+/**
+ * struct mpq_streambuffer_buffer_desc - data buffer descriptor
+ * @handle: ION handle's file descriptor of buffer
+ * @base: kernel mapped address to start of buffer.
+ * Can be NULL for secured buffers
+ * @size: size of buffer
+ * @read_ptr: initial read pointer value (should normally be 0)
+ * @write_ptr: initial write pointer value (should normally be 0)
+ */
+struct mpq_streambuffer_buffer_desc {
+ int handle;
+ void *base;
+ u32 size;
+ u32 read_ptr;
+ u32 write_ptr;
+};
+
+/**
+ * struct mpq_streambuffer_packet_header - packet header saved in packet buffer
+ * @user_data_len: length of private user (meta) data
+ * @raw_data_handle: ION handle's file descriptor of raw-data buffer
+ * @raw_data_offset: offset of raw-data from start of buffer (0 for linear)
+ * @raw_data_len: size of raw-data in the raw-data buffer (can be 0)
+ *
+ * The packet structure that is saved in each packet-buffer:
+ * user_data_len
+ * raw_data_handle
+ * raw_data_offset
+ * raw_data_len
+ * private user-data bytes
+ */
+struct mpq_streambuffer_packet_header {
+ u32 user_data_len;
+ int raw_data_handle;
+ u32 raw_data_offset;
+ u32 raw_data_len;
+} __packed;
+
+/**
+ * mpq_streambuffer_init - Initialize a new stream buffer
+ *
+ * @sbuff: The buffer to initialize
+ * @data_buffers: array of data buffer descriptor(s).
+ * Data descriptor(s) must remain valid throughout the life
+ * span of the mpq_streambuffer object
+ * @data_buff_num: number of data buffer in array
+ * @packet_buff: The buffer holding meta-data
+ * @packet_buff_size: Size of meta-data buffer
+ *
+ * Return Error status, -EINVAL if any of the arguments are invalid
+ *
+ * Note:
+ * for data_buff_num > 1, the mpq_streambuffer object manages these buffers as
+ * a separate set of linear buffers. A linear buffer cannot wrap around, and one
+ * can only write as many data bytes as the buffer's size; a single write will
+ * not spill over into the next free buffer.
+ */
+int mpq_streambuffer_init(
+ struct mpq_streambuffer *sbuff,
+ enum mpq_streambuffer_mode mode,
+ struct mpq_streambuffer_buffer_desc *data_buffers,
+ u32 data_buff_num,
+ void *packet_buff,
+ size_t packet_buff_size);
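For ring-buffer mode exactly one descriptor is passed; a sketch under the assumption that buf_base, buf_size, the ION fd and the packet buffer were all allocated by the caller (the descriptor must outlive the mpq_streambuffer, per the validity note above):

	struct mpq_streambuffer_buffer_desc desc = {
		.handle = buf_fd,	/* hypothetical ION handle fd */
		.base = buf_base,
		.size = buf_size,
		.read_ptr = 0,
		.write_ptr = 0,
	};

	ret = mpq_streambuffer_init(&sbuff, MPQ_STREAMBUFFER_BUFFER_MODE_RING,
				    &desc, 1, pkt_buf, pkt_buf_size);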
+
+/**
+ * mpq_streambuffer_terminate - Terminate stream buffer
+ *
+ * @sbuff: The buffer to terminate
+ *
+ * The function sets the buffers' error flags to ENODEV
+ * and wakes up any threads waiting on the buffer queues.
+ * Threads waiting on the buffer queues should check if
+ * error was set.
+ */
+void mpq_streambuffer_terminate(struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_packet_next - Returns index of next available packet.
+ *
+ * @sbuff: The stream buffer
+ * @idx: Previous packet index or -1 to return index of the first
+ * available packet.
+ * @pktlen: The length of the ready packet
+ *
+ * Return index to the packet-buffer, -1 if buffer is empty
+ *
+ * After getting the index, the user of this function can either
+ * access the packet buffer directly using the returned index
+ * or ask to read the data back from the buffer using mpq_streambuffer_pkt_read
+ */
+ssize_t mpq_streambuffer_pkt_next(
+ struct mpq_streambuffer *sbuff,
+ ssize_t idx, size_t *pktlen);
+
+/**
+ * mpq_streambuffer_pkt_read - Reads out the packet from the provided index.
+ *
+ * @sbuff: The stream buffer
+ * @idx: The index of the packet to be read
+ * @packet: The read packet's header
+ * @user_data: The read private user data
+ *
+ * Return The actual number of bytes read, -EINVAL if the packet is
+ * already disposed or the packet-data is invalid.
+ *
+ * The packet is not disposed after this function is called, to dispose it
+ * along with the raw-data it points to use mpq_streambuffer_pkt_dispose.
+ * If there is no private user-data, the user-data pointer can be NULL.
+ * The caller of this function must make sure that the private user-data
+ * buffer has enough space for the private user-data length.
+ */
+ssize_t mpq_streambuffer_pkt_read(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data);
+
+/**
+ * mpq_streambuffer_pkt_dispose - Disposes a packet from the packet buffer
+ *
+ * @sbuff: The stream buffer
+ * @idx: The index of the packet to be disposed
+ * @dispose_data: Indicates whether to update the read pointer inside the
+ * raw-data buffer for the respective data pointed to by the packet.
+ *
+ * Return error status, -EINVAL if the packet-data is invalid
+ *
+ * The function updates the read pointer inside the raw-data buffer
+ * for the respective data pointed to by the packet if dispose_data is set.
+ */
+int mpq_streambuffer_pkt_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ int dispose_data);
+
+/**
+ * mpq_streambuffer_pkt_write - Write a new packet to the packet buffer.
+ *
+ * @sbuff: The stream buffer
+ * @packet: The packet header to write
+ * @user_data: The private user-data to be written
+ *
+ * Return error status, -ENOSPC if there's no space to write the packet
+ */
+int mpq_streambuffer_pkt_write(
+ struct mpq_streambuffer *sbuff,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data);
+
+/**
+ * mpq_streambuffer_data_write - Write data to raw-data buffer
+ *
+ * @sbuff: The stream buffer
+ * @buf: The buffer holding the data to be written
+ * @len: The length of the data buffer
+ *
+ * Return The actual number of bytes written or -ENOSPC if
+ * there is no space to write the data
+ */
+ssize_t mpq_streambuffer_data_write(
+ struct mpq_streambuffer *sbuff,
+ const u8 *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_write_deposit - Advances the raw-buffer write pointer.
+ * Assumes the raw-data was written by the user directly
+ *
+ * @sbuff: The stream buffer
+ * @len: The length of the raw-data that was already written
+ *
+ * Return error status
+ */
+int mpq_streambuffer_data_write_deposit(
+ struct mpq_streambuffer *sbuff,
+ size_t len);
+
+/**
+ * mpq_streambuffer_data_read - Reads out raw-data to the provided buffer.
+ *
+ * @sbuff: The stream buffer
+ * @buf: The buffer to read the raw-data data to
+ * @len: The length of the buffer that will hold the raw-data
+ *
+ * Return The actual number of bytes read or error code
+ *
+ * This function copies the data from the ring-buffer to the
+ * provided buf parameter. The user can save the extra copy by accessing
+ * the data pointer directly and reading from it, then update the
+ * read pointer by the amount of data that was read using
+ * mpq_streambuffer_data_read_dispose
+ */
+ssize_t mpq_streambuffer_data_read(
+ struct mpq_streambuffer *sbuff,
+ u8 *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_read_user
+ *
+ * Same as mpq_streambuffer_data_read except data can be copied to user-space
+ * buffer.
+ */
+ssize_t mpq_streambuffer_data_read_user(
+ struct mpq_streambuffer *sbuff,
+ u8 __user *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_read_dispose - Advances the raw-buffer read pointer.
+ * Assumes the raw-data was read by the user directly.
+ *
+ * @sbuff: The stream buffer
+ * @len: The length of the raw-data to be disposed
+ *
+ * Return error status, -EINVAL if there is not enough data in the buffer
+ * to be disposed
+ *
+ * The user can instead dispose a packet along with the data in the
+ * raw-data buffer using mpq_streambuffer_pkt_dispose.
+ */
+int mpq_streambuffer_data_read_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t len);
+/**
+ * mpq_streambuffer_get_buffer_handle - Returns the current linear buffer
+ * ION handle.
+ * @sbuff: The stream buffer
+ * @read_buffer: when set, a read buffer handle is requested; when cleared,
+ * a write buffer handle is requested.
+ * For linear buffer mode read & write buffers may be different
+ * buffers. For ring buffer mode, the same (single) buffer handle
+ * is returned.
+ * @handle: returned handle
+ *
+ * Return error status
+ * -EINVAL if arguments are invalid.
+ * -EPERM if stream buffer specified was not initialized with linear support.
+ */
+int mpq_streambuffer_get_buffer_handle(
+ struct mpq_streambuffer *sbuff,
+ int read_buffer,
+ int *handle);
+
+/**
+ * mpq_streambuffer_data_free - Returns number of free bytes in data buffer.
+ * @sbuff: The stream buffer object
+ *
+ * Note: for linear buffer management this returns the number of free bytes in the
+ * current write buffer only.
+ */
+ssize_t mpq_streambuffer_data_free(
+ struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_data_avail - Returns number of bytes in data buffer that
+ * can be read.
+ * @sbuff: The stream buffer object
+ *
+ * Note: for linear buffer management this returns the number of data bytes in the
+ * current read buffer only.
+ */
+ssize_t mpq_streambuffer_data_avail(
+ struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_register_data_dispose - Registers a callback to notify on
+ * data disposal events.
+ * @sbuff: The stream buffer object
+ * @cb_func: user callback function
+ * @user_data: user data to be passed to callback function.
+ *
+ * Returns error status
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_register_data_dispose(
+ struct mpq_streambuffer *sbuff,
+ mpq_streambuffer_dispose_cb cb_func,
+ void *user_data);
+
+/**
+ * mpq_streambuffer_get_data_rw_offset - returns read/write offsets of current data
+ * buffer.
+ * @sbuff: The stream buffer object
+ * @read_offset: returned read offset
+ * @write_offset: returned write offset
+ *
+ * Note: read offset or write offset may be NULL if not required.
+ * Returns error status
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_get_data_rw_offset(
+ struct mpq_streambuffer *sbuff,
+ u32 *read_offset,
+ u32 *write_offset);
+
+/**
+ * mpq_streambuffer_metadata_free - returns number of free bytes in the meta
+ * data buffer, or error status.
+ * @sbuff: the stream buffer object
+ */
+ssize_t mpq_streambuffer_metadata_free(struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_flush - flush both pending packets and data in buffer
+ *
+ * @sbuff: the stream buffer object
+ *
+ * Returns error status
+ */
+int mpq_streambuffer_flush(struct mpq_streambuffer *sbuff);
+
+#endif /* _MPQ_STREAM_BUFFER_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 041a8219e145..594bac6c5902 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -41,8 +41,12 @@
#define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
/* default pixel per clock ratio */
-#define ROT_PIXEL_PER_CLK_NUMERATOR 4
-#define ROT_PIXEL_PER_CLK_DENOMINATOR 1
+#define ROT_PIXEL_PER_CLK_NUMERATOR 36
+#define ROT_PIXEL_PER_CLK_DENOMINATOR 10
+#define ROT_FUDGE_FACTOR_NUMERATOR 105
+#define ROT_FUDGE_FACTOR_DENOMINATOR 100
+#define ROT_OVERHEAD_NUMERATOR 27
+#define ROT_OVERHEAD_DENOMINATOR 10000
/*
* Max rotator hw blocks possible. Used for upper array limits instead of
@@ -998,12 +1002,33 @@ static u32 sde_rotator_calc_buf_bw(struct sde_mdp_format_params *fmt,
return bw;
}
+static int sde_rotator_find_max_fps(struct sde_rot_mgr *mgr)
+{
+ struct sde_rot_file_private *priv;
+ struct sde_rot_perf *perf;
+ int max_fps = 0;
+
+ list_for_each_entry(priv, &mgr->file_list, list) {
+ list_for_each_entry(perf, &priv->perf_list, list) {
+ if (perf->config.frame_rate > max_fps)
+ max_fps = perf->config.frame_rate;
+ }
+ }
+
+ SDEROT_DBG("Max fps:%d\n", max_fps);
+ return max_fps;
+}
+
static int sde_rotator_calc_perf(struct sde_rot_mgr *mgr,
struct sde_rot_perf *perf)
{
struct sde_rotation_config *config = &perf->config;
u32 read_bw, write_bw;
struct sde_mdp_format_params *in_fmt, *out_fmt;
+ struct sde_rotator_device *rot_dev;
+ int max_fps;
+
+ rot_dev = platform_get_drvdata(mgr->pdev);
in_fmt = sde_get_format_params(config->input.format);
if (!in_fmt) {
@@ -1016,17 +1041,44 @@ static int sde_rotator_calc_perf(struct sde_rot_mgr *mgr,
return -EINVAL;
}
+ /*
+ * rotator processes 4 pixels per clock, but the actual throughput
+ * is 3.6. We also need to account for overhead time. The final
+ * equation is:
+ * W x H / throughput / (1/fps - overhead) * fudge_factor
+ */
+ max_fps = sde_rotator_find_max_fps(mgr);
perf->clk_rate = config->input.width * config->input.height;
- perf->clk_rate *= config->frame_rate;
- /* rotator processes 4 pixels per clock */
perf->clk_rate = (perf->clk_rate * mgr->pixel_per_clk.denom) /
mgr->pixel_per_clk.numer;
+ perf->clk_rate *= max_fps;
+ perf->clk_rate = (perf->clk_rate * mgr->fudge_factor.numer) /
+ mgr->fudge_factor.denom;
+ perf->clk_rate *= mgr->overhead.denom;
+
+ /*
+ * check for override overhead default value
+ */
+ if (rot_dev->min_overhead_us > (mgr->overhead.numer * 100))
+ perf->clk_rate = DIV_ROUND_UP_ULL(perf->clk_rate,
+ (mgr->overhead.denom - max_fps *
+ (rot_dev->min_overhead_us / 100)));
+ else
+ perf->clk_rate = DIV_ROUND_UP_ULL(perf->clk_rate,
+ (mgr->overhead.denom - max_fps *
+ mgr->overhead.numer));
+
+ /*
+ * check for override clock calculation
+ */
+ if (rot_dev->min_rot_clk > perf->clk_rate)
+ perf->clk_rate = rot_dev->min_rot_clk;
read_bw = sde_rotator_calc_buf_bw(in_fmt, config->input.width,
- config->input.height, config->frame_rate);
+ config->input.height, max_fps);
write_bw = sde_rotator_calc_buf_bw(out_fmt, config->output.width,
- config->output.height, config->frame_rate);
+ config->output.height, max_fps);
read_bw = sde_apply_comp_ratio_factor(read_bw, in_fmt,
&config->input.comp_ratio);
@@ -1035,13 +1087,22 @@ static int sde_rotator_calc_perf(struct sde_rot_mgr *mgr,
perf->bw = read_bw + write_bw;
+ /*
+ * check for override bw calculation
+ */
+ if (rot_dev->min_bw > perf->bw)
+ perf->bw = rot_dev->min_bw;
+
perf->rdot_limit = sde_mdp_get_ot_limit(
config->input.width, config->input.height,
- config->input.format, config->frame_rate, true);
+ config->input.format, max_fps, true);
perf->wrot_limit = sde_mdp_get_ot_limit(
config->input.width, config->input.height,
- config->input.format, config->frame_rate, false);
+ config->input.format, max_fps, false);
+ SDEROT_DBG("clk:%lu, rdBW:%d, wrBW:%d, rdOT:%d, wrOT:%d\n",
+ perf->clk_rate, read_bw, write_bw, perf->rdot_limit,
+ perf->wrot_limit);
return 0;
}
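As a worked example of this formula with the defaults set in sde_rotator_core_init() below (3.6 pixels per clock, 1.05 fudge factor, 27/10000 s overhead), a hypothetical 1920x1080 session at 60 fps comes out roughly as:

	1920 * 1080 / 3.6       = 576,000 clocks per frame
	* 60 fps                = 34,560,000
	* 1.05 (fudge factor)   = 36,288,000
	/ (1 - 60 * 0.0027)     ~ 43.3 MHz

The 0.0027 term is the default 2700 us overhead expressed in seconds; a larger min_overhead_us override shrinks the denominator and raises the computed clock floor.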
@@ -1747,16 +1808,9 @@ static int sde_rotator_open_session(struct sde_rot_mgr *mgr,
config.session_id = session_id;
perf->config = config;
- perf->last_wb_idx = -1;
+ perf->last_wb_idx = 0;
INIT_LIST_HEAD(&perf->list);
-
- ret = sde_rotator_calc_perf(mgr, perf);
- if (ret) {
- SDEROT_ERR("error setting the session %d\n", ret);
- goto copy_user_err;
- }
-
list_add(&perf->list, &private->perf_list);
ret = sde_rotator_resource_ctrl(mgr, true);
@@ -1777,25 +1831,17 @@ static int sde_rotator_open_session(struct sde_rot_mgr *mgr,
goto enable_clk_err;
}
- ret = sde_rotator_update_perf(mgr);
- if (ret) {
- SDEROT_ERR("fail to open session, not enough clk/bw\n");
- goto perf_err;
- }
SDEROT_DBG("open session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
config.session_id, config.input.width, config.input.height,
config.input.format, config.output.width, config.output.height,
config.output.format);
goto done;
-perf_err:
- sde_rotator_clk_ctrl(mgr, false);
enable_clk_err:
update_clk_err:
sde_rotator_resource_ctrl(mgr, false);
resource_err:
list_del_init(&perf->list);
-copy_user_err:
devm_kfree(&mgr->pdev->dev, perf->work_distribution);
alloc_err:
devm_kfree(&mgr->pdev->dev, perf);
@@ -1867,11 +1913,23 @@ static int sde_rotator_config_session(struct sde_rot_mgr *mgr,
}
ret = sde_rotator_update_perf(mgr);
+ if (ret) {
+ SDEROT_ERR("error in updating perf: %d\n", ret);
+ goto done;
+ }
+
+ ret = sde_rotator_update_clk(mgr);
+ if (ret) {
+ SDEROT_ERR("error in updating the rotator clk: %d\n", ret);
+ goto done;
+ }
- SDEROT_DBG("reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
+ SDEROT_DBG(
+ "reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u fps:%d clk:%lu, bw:%llu\n",
config->session_id, config->input.width, config->input.height,
config->input.format, config->output.width,
- config->output.height, config->output.format);
+ config->output.height, config->output.format,
+ config->frame_rate, perf->clk_rate, perf->bw);
done:
return ret;
}
@@ -2386,6 +2444,10 @@ int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
mgr->queue_count = 1;
mgr->pixel_per_clk.numer = ROT_PIXEL_PER_CLK_NUMERATOR;
mgr->pixel_per_clk.denom = ROT_PIXEL_PER_CLK_DENOMINATOR;
+ mgr->fudge_factor.numer = ROT_FUDGE_FACTOR_NUMERATOR;
+ mgr->fudge_factor.denom = ROT_FUDGE_FACTOR_DENOMINATOR;
+ mgr->overhead.numer = ROT_OVERHEAD_NUMERATOR;
+ mgr->overhead.denom = ROT_OVERHEAD_DENOMINATOR;
mutex_init(&mgr->lock);
atomic_set(&mgr->device_suspended, 0);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index aa17341de7c2..781b03e1b974 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -281,6 +281,8 @@ struct sde_rot_mgr {
u32 hwacquire_timeout;
struct sde_mult_factor pixel_per_clk;
+ struct sde_mult_factor fudge_factor;
+ struct sde_mult_factor overhead;
int (*ops_config_hw)(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index c609dbd2036e..94223b557990 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -914,6 +914,34 @@ static int sde_rotator_evtlog_create_debugfs(
return 0;
}
+
+static int sde_rotator_perf_create_debugfs(
+ struct sde_rotator_device *rot_dev,
+ struct dentry *debugfs_root)
+{
+ rot_dev->perf_root = debugfs_create_dir("perf", debugfs_root);
+ if (IS_ERR_OR_NULL(rot_dev->perf_root)) {
+ pr_err("debugfs_create_dir for perf failed, error %ld\n",
+ PTR_ERR(rot_dev->perf_root));
+ rot_dev->perf_root = NULL;
+ return -ENODEV;
+ }
+
+ rot_dev->min_rot_clk = 0;
+ debugfs_create_u32("min_rot_clk", S_IRUGO | S_IWUSR,
+ rot_dev->perf_root, &rot_dev->min_rot_clk);
+
+ rot_dev->min_bw = 0;
+ debugfs_create_u32("min_bw", S_IRUGO | S_IWUSR,
+ rot_dev->perf_root, &rot_dev->min_bw);
+
+ rot_dev->min_overhead_us = 0;
+ debugfs_create_u32("min_overhead_us", S_IRUGO | S_IWUSR,
+ rot_dev->perf_root, &rot_dev->min_overhead_us);
+
+ return 0;
+}
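These nodes act as runtime overrides for the perf calculation: min_rot_clk and min_bw are floors applied to the computed clock rate and bandwidth, while min_overhead_us replaces the default overhead term whenever it exceeds the built-in 2700 us. With debugfs mounted they can be adjusted on the fly; the rotator directory name depends on how debugfs_root was created elsewhere in the driver, so a path such as /sys/kernel/debug/<rotator>/perf/min_rot_clk is illustrative only.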
+
/*
* struct sde_rotator_stat_ops - processed statistics file operations
*/
@@ -1006,6 +1034,12 @@ struct dentry *sde_rotator_create_debugfs(
return NULL;
}
+ if (sde_rotator_perf_create_debugfs(rot_dev, debugfs_root)) {
+ SDEROT_ERR("fail create perf debugfs\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
return debugfs_root;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index d34623a531ba..b88f03ce89ae 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -546,22 +546,8 @@ static struct vb2_mem_ops sde_rotator_vb2_mem_ops = {
static int sde_rotator_s_ctx_ctrl(struct sde_rotator_ctx *ctx,
s32 *ctx_ctrl, struct v4l2_ctrl *ctrl)
{
- struct sde_rotator_device *rot_dev = ctx->rot_dev;
- struct sde_rotation_config config;
- s32 prev_val;
- int ret;
-
- prev_val = *ctx_ctrl;
*ctx_ctrl = ctrl->val;
- sde_rotator_get_config_from_ctx(ctx, &config);
- ret = sde_rotator_session_config(rot_dev->mgr, ctx->private, &config);
- if (ret) {
- SDEDEV_WARN(rot_dev->dev, "fail %s:%d s:%d\n",
- ctrl->name, ctrl->val, ctx->session_id);
- *ctx_ctrl = prev_val;
- }
-
- return ret;
+ return 0;
}
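With session_config removed from the per-control path, control writes now only cache the value; the aggregate configuration is presumably pushed to the rotator manager once, later in the pipeline (for example at stream start). A hedged sketch of such a single flush point, built from helpers that do appear in this file:

/* Hypothetical flush point: push the accumulated ctx state once,
 * instead of on every control write.
 */
static int sde_rotator_commit_config(struct sde_rotator_ctx *ctx)
{
	struct sde_rotation_config config;
	int ret;

	sde_rot_mgr_lock(ctx->rot_dev->mgr);
	sde_rotator_get_config_from_ctx(ctx, &config);
	ret = sde_rotator_session_config(ctx->rot_dev->mgr,
			ctx->private, &config);
	sde_rot_mgr_unlock(ctx->rot_dev->mgr);
	return ret;
}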
/*
@@ -1137,6 +1123,13 @@ static int sde_rotator_try_fmt_vid_cap(struct file *file,
struct sde_rotation_config config;
int ret;
+ if ((f->fmt.pix.width == 0) || (f->fmt.pix.height == 0)) {
+ SDEDEV_WARN(ctx->rot_dev->dev,
+ "Not supporting 0 width/height: %dx%d\n",
+ f->fmt.pix.width, f->fmt.pix.height);
+ return -EINVAL;
+ }
+
sde_rot_mgr_lock(rot_dev->mgr);
sde_rotator_get_config_from_ctx(ctx, &config);
config.output.format = f->fmt.pix.pixelformat;
@@ -1176,6 +1169,13 @@ static int sde_rotator_try_fmt_vid_out(struct file *file,
struct sde_rotation_config config;
int ret;
+ if ((f->fmt.pix.width == 0) || (f->fmt.pix.height == 0)) {
+ SDEDEV_WARN(ctx->rot_dev->dev,
+ "Not supporting 0 width/height: %dx%d\n",
+ f->fmt.pix.width, f->fmt.pix.height);
+ return -EINVAL;
+ }
+
sde_rot_mgr_lock(rot_dev->mgr);
sde_rotator_get_config_from_ctx(ctx, &config);
config.input.format = f->fmt.pix.pixelformat;
@@ -1213,7 +1213,6 @@ static int sde_rotator_s_fmt_vid_cap(struct file *file,
{
struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
struct sde_rotator_device *rot_dev = ctx->rot_dev;
- struct sde_rotation_config config;
int ret;
ret = sde_rotator_try_fmt_vid_cap(file, fh, f);
@@ -1235,12 +1234,6 @@ static int sde_rotator_s_fmt_vid_cap(struct file *file,
f->fmt.pix.field,
f->fmt.pix.width, f->fmt.pix.height);
- /* configure hal to current input/output setting */
- sde_rot_mgr_lock(rot_dev->mgr);
- sde_rotator_get_config_from_ctx(ctx, &config);
- sde_rotator_session_config(rot_dev->mgr, ctx->private, &config);
- sde_rot_mgr_unlock(rot_dev->mgr);
-
return 0;
}
@@ -1524,7 +1517,6 @@ static int sde_rotator_s_crop(struct file *file, void *fh,
{
struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
struct sde_rotator_device *rot_dev = ctx->rot_dev;
- struct sde_rotation_config config;
struct sde_rotation_item item;
struct v4l2_rect rect;
@@ -1597,12 +1589,6 @@ static int sde_rotator_s_crop(struct file *file, void *fh,
return -EINVAL;
}
- /* configure hal to current input/output setting */
- sde_rot_mgr_lock(rot_dev->mgr);
- sde_rotator_get_config_from_ctx(ctx, &config);
- sde_rotator_session_config(rot_dev->mgr, ctx->private, &config);
- sde_rot_mgr_unlock(rot_dev->mgr);
-
return 0;
}
@@ -2336,6 +2322,9 @@ static int sde_rotator_probe(struct platform_device *pdev)
rot_dev->early_submit = SDE_ROTATOR_EARLY_SUBMIT;
rot_dev->fence_timeout = SDE_ROTATOR_FENCE_TIMEOUT;
rot_dev->streamoff_timeout = SDE_ROTATOR_STREAM_OFF_TIMEOUT;
+ rot_dev->min_rot_clk = 0;
+ rot_dev->min_bw = 0;
+ rot_dev->min_overhead_us = 0;
rot_dev->drvdata = sde_rotator_get_drv_data(&pdev->dev);
rot_dev->pdev = pdev;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
index fd247d10128c..f3c904817296 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
@@ -160,6 +160,9 @@ struct sde_rotator_statistics {
* @session_id: Next context session identifier
* @fence_timeout: Timeout value in msec for fence wait
* @streamoff_timeout: Timeout value in msec for stream off
+ * @min_rot_clk: Debugfs override for the minimum rotator clock from the perf calculation
+ * @min_bw: Debugfs override for the minimum bandwidth from the perf calculation
+ * @min_overhead_us: Debugfs override for the minimum overhead in us from the perf calculation
* @debugfs_root: Pointer to debugfs directory entry.
* @stats: placeholder for rotator statistics
*/
@@ -177,8 +180,12 @@ struct sde_rotator_device {
u32 session_id;
u32 fence_timeout;
u32 streamoff_timeout;
+ u32 min_rot_clk;
+ u32 min_bw;
+ u32 min_overhead_us;
struct sde_rotator_statistics stats;
struct dentry *debugfs_root;
+ struct dentry *perf_root;
};
static inline
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index 7bbd8aa53342..c11c4b61d832 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -448,7 +448,6 @@ int sde_smmu_probe(struct platform_device *pdev)
struct sde_smmu_domain smmu_domain;
const struct of_device_id *match;
struct sde_module_power *mp;
- int disable_htw = 1;
char name[MAX_CLIENT_NAME_LEN];
if (!mdata) {
@@ -535,13 +534,6 @@ int sde_smmu_probe(struct platform_device *pdev)
goto disable_power;
}
- rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- SDEROT_ERR("couldn't disable coherent HTW\n");
- goto release_mapping;
- }
-
if (smmu_domain.domain == SDE_IOMMU_DOMAIN_ROT_SECURE) {
int secure_vmid = VMID_CP_PIXEL;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
index eed177ea5bab..60c4c81eddf2 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -355,13 +355,6 @@ int sde_mdp_get_plane_sizes(struct sde_mdp_format_params *fmt, u32 w, u32 h,
chroma_samp = fmt->chroma_sample;
- if (rotation) {
- if (chroma_samp == SDE_MDP_CHROMA_H2V1)
- chroma_samp = SDE_MDP_CHROMA_H1V2;
- else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
- chroma_samp = SDE_MDP_CHROMA_H2V1;
- }
-
sde_mdp_get_v_h_subsample_rate(chroma_samp,
&v_subsample, &h_subsample);
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index fdf6e1b1c5d0..becea0c59521 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -626,6 +626,11 @@ static u32 get_frame_size_compressed(int plane,
return (max_mbs_per_frame * size_per_mb * 3/2)/2;
}
+static u32 get_frame_size_nv12_ubwc_10bit(int plane, u32 height, u32 width)
+{
+ return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
+}
+
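A note on the new helper: COLOR_FMT_NV12_BPP10_UBWC is the TP10 (tightly packed 10-bit) UBWC layout, and VENUS_BUFFER_SIZE is expected to cover the compressed Y/UV data planes plus their UBWC metadata planes. The packing detail below is an assumption about the Venus buffer-size macros, not something this diff shows:

/* Assumption: TP10 packs three 10-bit samples into 4 bytes, so the
 * data-plane stride is roughly width * 4 / 3, aligned per UBWC rules.
 */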
static u32 get_frame_size(struct msm_vidc_inst *inst,
const struct msm_vidc_format *fmt,
int fmt_type, int plane)
@@ -662,7 +667,7 @@ static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst,
int rc = 0;
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDC_VIDEO_MVC_BUFFER_LAYOUT:
- if (inst->fmts[OUTPUT_PORT]->fourcc != V4L2_PIX_FMT_H264_MVC) {
+ if (inst->fmts[OUTPUT_PORT].fourcc != V4L2_PIX_FMT_H264_MVC) {
dprintk(VIDC_ERR, "Control %#x only valid for MVC\n",
ctrl->id);
rc = -ENOTSUPP;
@@ -670,7 +675,7 @@ static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst,
}
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- if (inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_H264_MVC &&
+ if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) {
dprintk(VIDC_ERR,
"Profile %#x not supported for MVC\n",
@@ -680,7 +685,7 @@ static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst,
}
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- if (inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_H264_MVC &&
+ if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
ctrl->val >= V4L2_MPEG_VIDEO_H264_LEVEL_5_2) {
dprintk(VIDC_ERR, "Level %#x not supported for MVC\n",
ctrl->val);
@@ -712,6 +717,14 @@ struct msm_vidc_format vdec_formats[] = {
.type = CAPTURE_PORT,
},
{
+ .name = "UBWC YCbCr Semiplanar 4:2:0 10bit",
+ .description = "UBWC Y/CbCr 4:2:0 10bit",
+ .fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC,
+ .num_planes = 2,
+ .get_frame_size = get_frame_size_nv12_ubwc_10bit,
+ .type = CAPTURE_PORT,
+ },
+ {
.name = "Mpeg4",
.description = "Mpeg4 compressed format",
.fourcc = V4L2_PIX_FMT_MPEG4,
@@ -883,10 +896,10 @@ int msm_vdec_prepare_buf(struct msm_vidc_inst *inst,
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) {
+ if (b->length != inst->fmts[CAPTURE_PORT].num_planes) {
dprintk(VIDC_ERR,
"Planes mismatch: needed: %d, allocated: %d\n",
- inst->fmts[CAPTURE_PORT]->num_planes,
+ inst->fmts[CAPTURE_PORT].num_planes,
b->length);
rc = -EINVAL;
break;
@@ -962,10 +975,10 @@ int msm_vdec_release_buf(struct msm_vidc_inst *inst,
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) {
+ if (b->length != inst->fmts[CAPTURE_PORT].num_planes) {
dprintk(VIDC_ERR,
"Planes mismatch: needed: %d, to release: %d\n",
- inst->fmts[CAPTURE_PORT]->num_planes, b->length);
+ inst->fmts[CAPTURE_PORT].num_planes, b->length);
rc = -EINVAL;
break;
}
@@ -1086,9 +1099,9 @@ int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
hdev = inst->core->device;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- fmt = inst->fmts[CAPTURE_PORT];
+ fmt = &inst->fmts[CAPTURE_PORT];
else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- fmt = inst->fmts[OUTPUT_PORT];
+ fmt = &inst->fmts[OUTPUT_PORT];
else
return -ENOTSUPP;
@@ -1237,6 +1250,8 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
rc = -EINVAL;
goto err_invalid_fmt;
}
+ memcpy(&inst->fmts[fmt->type], fmt,
+ sizeof(struct msm_vidc_format));
inst->prop.width[CAPTURE_PORT] = f->fmt.pix_mp.width;
inst->prop.height[CAPTURE_PORT] = f->fmt.pix_mp.height;
@@ -1244,7 +1259,6 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
msm_comm_get_hal_output_buffer(inst),
f->fmt.pix_mp.pixelformat);
- inst->fmts[fmt->type] = fmt;
if (msm_comm_get_stream_output_mode(inst) ==
HAL_VIDEO_DECODER_SECONDARY) {
frame_sz.buffer_type = HAL_BUFFER_OUTPUT2;
@@ -1259,10 +1273,10 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
}
f->fmt.pix_mp.plane_fmt[0].sizeimage =
- fmt->get_frame_size(0,
+ inst->fmts[fmt->type].get_frame_size(0,
f->fmt.pix_mp.height, f->fmt.pix_mp.width);
- extra_idx = EXTRADATA_IDX(fmt->num_planes);
+ extra_idx = EXTRADATA_IDX(inst->fmts[fmt->type].num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
VENUS_EXTRADATA_SIZE(
@@ -1270,8 +1284,8 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
inst->prop.width[CAPTURE_PORT]);
}
- f->fmt.pix_mp.num_planes = fmt->num_planes;
- for (i = 0; i < fmt->num_planes; ++i) {
+ f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes;
+ for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
f->fmt.pix_mp.plane_fmt[i].sizeimage;
}
@@ -1290,6 +1304,8 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
rc = -EINVAL;
goto err_invalid_fmt;
}
+ memcpy(&inst->fmts[fmt->type], fmt,
+ sizeof(struct msm_vidc_format));
rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE);
if (rc) {
@@ -1297,17 +1313,16 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
goto err_invalid_fmt;
}
- if (!(get_hal_codec(fmt->fourcc) &
+ if (!(get_hal_codec(inst->fmts[fmt->type].fourcc) &
inst->core->dec_codec_supported)) {
dprintk(VIDC_ERR,
"Codec(%#x) is not present in the supported codecs list(%#x)\n",
- get_hal_codec(fmt->fourcc),
+ get_hal_codec(inst->fmts[fmt->type].fourcc),
inst->core->dec_codec_supported);
rc = -EINVAL;
goto err_invalid_fmt;
}
- inst->fmts[fmt->type] = fmt;
rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
if (rc) {
dprintk(VIDC_ERR, "Failed to open instance\n");
@@ -1330,14 +1345,15 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
frame_sz.height);
msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
- max_input_size = get_frame_size(inst, fmt, f->type, 0);
+ max_input_size = get_frame_size(inst,
+ &inst->fmts[fmt->type], f->type, 0);
if (f->fmt.pix_mp.plane_fmt[0].sizeimage > max_input_size ||
!f->fmt.pix_mp.plane_fmt[0].sizeimage) {
f->fmt.pix_mp.plane_fmt[0].sizeimage = max_input_size;
}
- f->fmt.pix_mp.num_planes = fmt->num_planes;
- for (i = 0; i < fmt->num_planes; ++i) {
+ f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes;
+ for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[i] =
f->fmt.pix_mp.plane_fmt[i].sizeimage;
}
@@ -1451,20 +1467,20 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- *num_planes = inst->fmts[OUTPUT_PORT]->num_planes;
+ *num_planes = inst->fmts[OUTPUT_PORT].num_planes;
if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
for (i = 0; i < *num_planes; i++) {
sizes[i] = get_frame_size(inst,
- inst->fmts[OUTPUT_PORT], q->type, i);
+ &inst->fmts[OUTPUT_PORT], q->type, i);
}
rc = set_actual_buffer_count(inst, *num_buffers,
HAL_BUFFER_INPUT);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
dprintk(VIDC_DBG, "Getting bufreqs on capture plane\n");
- *num_planes = inst->fmts[CAPTURE_PORT]->num_planes;
+ *num_planes = inst->fmts[CAPTURE_PORT].num_planes;
rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
if (rc) {
dprintk(VIDC_ERR, "Failed to open instance\n");
@@ -1549,7 +1565,7 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
}
extra_idx =
- EXTRADATA_IDX(inst->fmts[CAPTURE_PORT]->num_planes);
+ EXTRADATA_IDX(inst->fmts[CAPTURE_PORT].num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
sizes[extra_idx] =
VENUS_EXTRADATA_SIZE(
@@ -1650,7 +1666,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst)
unsigned int buffer_size;
struct msm_vidc_format *fmt = NULL;
- fmt = inst->fmts[CAPTURE_PORT];
+ fmt = &inst->fmts[CAPTURE_PORT];
buffer_size = fmt->get_frame_size(0,
inst->prop.height[CAPTURE_PORT],
inst->prop.width[CAPTURE_PORT]);
@@ -1872,8 +1888,6 @@ int msm_vdec_inst_init(struct msm_vidc_inst *inst)
dprintk(VIDC_ERR, "Invalid input = %pK\n", inst);
return -EINVAL;
}
- inst->fmts[OUTPUT_PORT] = &vdec_formats[2];
- inst->fmts[CAPTURE_PORT] = &vdec_formats[0];
inst->prop.height[CAPTURE_PORT] = DEFAULT_HEIGHT;
inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH;
inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT;
@@ -1889,6 +1903,10 @@ int msm_vdec_inst_init(struct msm_vidc_inst *inst)
inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
inst->prop.fps = DEFAULT_FPS;
+ memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
+ sizeof(struct msm_vidc_format));
+ memcpy(&inst->fmts[CAPTURE_PORT], &vdec_formats[0],
+ sizeof(struct msm_vidc_format));
return rc;
}
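Storing the format by value (the memcpy from the static vdec_formats table, matching the s_fmt changes above) keeps per-instance tweaks out of the shared table. A minimal illustration of the hazard the old pointer scheme had (illustrative snippet, not driver code):

/* With pointers, two instances aliased one static template, so a
 * per-instance change was silently global.
 */
struct msm_vidc_format *fa = &vdec_formats[0];
struct msm_vidc_format *fb = &vdec_formats[0];

fa->num_planes = 2;	/* instance A's tweak is now visible to B */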
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 99f30d9cb97b..f071aae3ccab 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1400,6 +1400,49 @@ static struct msm_vidc_format venc_formats[] = {
},
};
+static void msm_venc_update_plane_count(struct msm_vidc_inst *inst, int type)
+{
+ struct v4l2_ctrl *ctrl = NULL;
+ u32 extradata = 0;
+
+ if (!inst)
+ return;
+
+ inst->fmts[type].num_planes = 1;
+
+ ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
+ V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
+
+ if (ctrl)
+ extradata = v4l2_ctrl_g_ctrl(ctrl);
+
+ if (type == CAPTURE_PORT) {
+ switch (extradata) {
+ case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
+ case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
+ case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
+ case V4L2_MPEG_VIDC_EXTRADATA_LTR:
+ case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
+ inst->fmts[CAPTURE_PORT].num_planes = 2;
+ break;
+ default:
+ break;
+ }
+ } else if (type == OUTPUT_PORT) {
+ switch (extradata) {
+ case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
+ case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
+ case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
+ case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
+ case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
+ case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
+ inst->fmts[OUTPUT_PORT].num_planes = 2;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static int msm_venc_set_csc(struct msm_vidc_inst *inst);
static int msm_venc_queue_setup(struct vb2_queue *q,
@@ -1414,8 +1457,7 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
enum hal_property property_id;
struct hfi_device *hdev;
struct hal_buffer_requirements *buff_req;
- struct v4l2_ctrl *ctrl = NULL;
- u32 extradata = 0, extra_idx = 0;
+ u32 extra_idx = 0;
struct hal_buffer_requirements *buff_req_buffer = NULL;
if (!q || !q->drv_priv) {
@@ -1471,21 +1513,8 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
temp, *num_buffers);
}
- ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
- V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
- if (ctrl)
- extradata = v4l2_ctrl_g_ctrl(ctrl);
- switch (extradata) {
- case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
- case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
- case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
- case V4L2_MPEG_VIDC_EXTRADATA_LTR:
- case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
- *num_planes = *num_planes + 1;
- default:
- break;
- }
- inst->fmts[CAPTURE_PORT]->num_planes = *num_planes;
+ msm_venc_update_plane_count(inst, CAPTURE_PORT);
+ *num_planes = inst->fmts[CAPTURE_PORT].num_planes;
for (i = 0; i < *num_planes; i++) {
int extra_idx = EXTRADATA_IDX(*num_planes);
@@ -1543,24 +1572,9 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
dprintk(VIDC_DBG, "actual input buffer count set to fw = %d\n",
*num_buffers);
- ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
- V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
- if (ctrl)
- extradata = v4l2_ctrl_g_ctrl(ctrl);
- switch (extradata) {
- case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
- case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
- case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
- case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
- case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
- case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
- *num_planes = *num_planes + 1;
- break;
- default:
- break;
- }
+ msm_venc_update_plane_count(inst, OUTPUT_PORT);
+ *num_planes = inst->fmts[OUTPUT_PORT].num_planes;
- inst->fmts[OUTPUT_PORT]->num_planes = *num_planes;
rc = call_hfi_op(hdev, session_set_property, inst->session,
property_id, &new_buf_count);
if (rc)
@@ -1570,12 +1584,12 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
inst->buff_req.buffer[0].buffer_size,
inst->buff_req.buffer[0].buffer_alignment,
inst->buff_req.buffer[0].buffer_count_actual);
- sizes[0] = inst->fmts[OUTPUT_PORT]->get_frame_size(
+ sizes[0] = inst->fmts[OUTPUT_PORT].get_frame_size(
0, inst->prop.height[OUTPUT_PORT],
inst->prop.width[OUTPUT_PORT]);
extra_idx =
- EXTRADATA_IDX(inst->fmts[OUTPUT_PORT]->num_planes);
+ EXTRADATA_IDX(inst->fmts[OUTPUT_PORT].num_planes);
if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
buff_req_buffer = get_buff_req_buffer(inst,
HAL_BUFFER_EXTRADATA_INPUT);
@@ -1610,7 +1624,7 @@ static int msm_venc_toggle_hier_p(struct msm_vidc_inst *inst, int layers)
return -EINVAL;
}
- if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_VP8)
+ if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_VP8)
return 0;
num_enh_layers = layers ? : 0;
@@ -2177,10 +2191,10 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD:
- if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_H264 &&
- inst->fmts[CAPTURE_PORT]->fourcc !=
+ if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 &&
+ inst->fmts[CAPTURE_PORT].fourcc !=
V4L2_PIX_FMT_H264_NO_SC &&
- inst->fmts[CAPTURE_PORT]->fourcc !=
+ inst->fmts[CAPTURE_PORT].fourcc !=
V4L2_PIX_FMT_HEVC) {
dprintk(VIDC_ERR,
"Control %#x only valid for H264 and HEVC\n",
@@ -2669,8 +2683,8 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE: {
bool codec_avc =
- inst->fmts[CAPTURE_PORT]->fourcc == V4L2_PIX_FMT_H264 ||
- inst->fmts[CAPTURE_PORT]->fourcc ==
+ inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+ inst->fmts[CAPTURE_PORT].fourcc ==
V4L2_PIX_FMT_H264_NO_SC;
temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
@@ -2696,8 +2710,8 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
cir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS);
is_cont_intra_supported =
- (inst->fmts[CAPTURE_PORT]->fourcc == V4L2_PIX_FMT_H264) ||
- (inst->fmts[CAPTURE_PORT]->fourcc == V4L2_PIX_FMT_HEVC);
+ (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) ||
+ (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC);
if (is_cont_intra_supported) {
if (ctrl->val != HAL_INTRA_REFRESH_NONE)
@@ -3054,7 +3068,7 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS:
- if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_HEVC) {
+ if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC) {
dprintk(VIDC_ERR, "Hier B supported for HEVC only\n");
rc = -ENOTSUPP;
break;
@@ -3483,8 +3497,6 @@ int msm_venc_inst_init(struct msm_vidc_inst *inst)
dprintk(VIDC_ERR, "Invalid input = %pK\n", inst);
return -EINVAL;
}
- inst->fmts[CAPTURE_PORT] = &venc_formats[4];
- inst->fmts[OUTPUT_PORT] = &venc_formats[0];
inst->prop.height[CAPTURE_PORT] = DEFAULT_HEIGHT;
inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH;
inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT;
@@ -3501,6 +3513,10 @@ int msm_venc_inst_init(struct msm_vidc_inst *inst)
inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
inst->prop.fps = DEFAULT_FPS;
inst->capability.pixelprocess_capabilities = 0;
+ memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
+ sizeof(struct msm_vidc_format));
+ memcpy(&inst->fmts[OUTPUT_PORT], &venc_formats[0],
+ sizeof(struct msm_vidc_format));
return rc;
}
@@ -3624,7 +3640,11 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
goto exit;
}
- inst->fmts[fmt->type] = fmt;
+ memcpy(&inst->fmts[fmt->type], fmt,
+ sizeof(struct msm_vidc_format));
+
+ msm_venc_update_plane_count(inst, CAPTURE_PORT);
+ fmt->num_planes = inst->fmts[CAPTURE_PORT].num_planes;
rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
if (rc) {
@@ -3676,7 +3696,11 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
rc = -EINVAL;
goto exit;
}
- inst->fmts[fmt->type] = fmt;
+ memcpy(&inst->fmts[fmt->type], fmt,
+ sizeof(struct msm_vidc_format));
+
+ msm_venc_update_plane_count(inst, OUTPUT_PORT);
+ fmt->num_planes = inst->fmts[OUTPUT_PORT].num_planes;
msm_comm_set_color_format(inst, HAL_BUFFER_INPUT, fmt->fourcc);
} else {
@@ -3717,12 +3741,12 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
struct hal_buffer_requirements *bufreq = NULL;
int extra_idx = 0;
- for (i = 0; i < fmt->num_planes; ++i) {
+ for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
f->fmt.pix_mp.plane_fmt[i].sizeimage =
- fmt->get_frame_size(i,
+ inst->fmts[fmt->type].get_frame_size(i,
f->fmt.pix_mp.height, f->fmt.pix_mp.width);
}
- extra_idx = EXTRADATA_IDX(fmt->num_planes);
+ extra_idx = EXTRADATA_IDX(inst->fmts[fmt->type].num_planes);
if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
bufreq = get_buff_req_buffer(inst,
HAL_BUFFER_EXTRADATA_INPUT);
@@ -3739,7 +3763,7 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
const struct msm_vidc_format *fmt = NULL;
int rc = 0;
int i;
- u32 height, width;
+ u32 height, width, num_planes;
unsigned int extra_idx = 0;
struct hal_buffer_requirements *bufreq = NULL;
@@ -3757,13 +3781,17 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
}
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- fmt = inst->fmts[CAPTURE_PORT];
+ fmt = &inst->fmts[CAPTURE_PORT];
height = inst->prop.height[CAPTURE_PORT];
width = inst->prop.width[CAPTURE_PORT];
+ msm_venc_update_plane_count(inst, CAPTURE_PORT);
+ num_planes = inst->fmts[CAPTURE_PORT].num_planes;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- fmt = inst->fmts[OUTPUT_PORT];
+ fmt = &inst->fmts[OUTPUT_PORT];
height = inst->prop.height[OUTPUT_PORT];
width = inst->prop.width[OUTPUT_PORT];
+ msm_venc_update_plane_count(inst, OUTPUT_PORT);
+ num_planes = inst->fmts[OUTPUT_PORT].num_planes;
} else {
dprintk(VIDC_ERR, "Invalid type: %x\n", f->type);
return -ENOTSUPP;
@@ -3772,10 +3800,10 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
f->fmt.pix_mp.pixelformat = fmt->fourcc;
f->fmt.pix_mp.height = height;
f->fmt.pix_mp.width = width;
- f->fmt.pix_mp.num_planes = fmt->num_planes;
+ f->fmt.pix_mp.num_planes = num_planes;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- for (i = 0; i < fmt->num_planes; ++i) {
+ for (i = 0; i < num_planes; ++i) {
f->fmt.pix_mp.plane_fmt[i].sizeimage =
fmt->get_frame_size(i, height, width);
}
@@ -3786,7 +3814,7 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
f->fmt.pix_mp.plane_fmt[0].sizeimage =
bufreq ? bufreq->buffer_size : 0;
}
- extra_idx = EXTRADATA_IDX(fmt->num_planes);
+ extra_idx = EXTRADATA_IDX(num_planes);
if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
bufreq = get_buff_req_buffer(inst,
@@ -3799,7 +3827,7 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
bufreq ? bufreq->buffer_size : 0;
}
- for (i = 0; i < fmt->num_planes; ++i) {
+ for (i = 0; i < num_planes; ++i) {
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[i] =
f->fmt.pix_mp.plane_fmt[i].sizeimage;
@@ -3864,10 +3892,10 @@ int msm_venc_prepare_buf(struct msm_vidc_inst *inst,
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) {
+ if (b->length != inst->fmts[CAPTURE_PORT].num_planes) {
dprintk(VIDC_ERR,
"Planes mismatch: needed: %d, allocated: %d\n",
- inst->fmts[CAPTURE_PORT]->num_planes,
+ inst->fmts[CAPTURE_PORT].num_planes,
b->length);
rc = -EINVAL;
break;
@@ -3935,10 +3963,10 @@ int msm_venc_release_buf(struct msm_vidc_inst *inst,
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
if (b->length !=
- inst->fmts[CAPTURE_PORT]->num_planes) {
+ inst->fmts[CAPTURE_PORT].num_planes) {
dprintk(VIDC_ERR,
"Planes mismatch: needed: %d, to release: %d\n",
- inst->fmts[CAPTURE_PORT]->num_planes,
+ inst->fmts[CAPTURE_PORT].num_planes,
b->length);
rc = -EINVAL;
break;
@@ -4053,4 +4081,3 @@ int msm_venc_ctrl_init(struct msm_vidc_inst *inst)
return msm_comm_ctrl_init(inst, msm_venc_ctrls,
ARRAY_SIZE(msm_venc_ctrls), &msm_venc_ctrl_ops);
}
-
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 437ad43e23e9..b12eeddc678f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -682,7 +682,7 @@ static bool valid_v4l2_buffer(struct v4l2_buffer *b,
MAX_PORT_NUM;
return port != MAX_PORT_NUM &&
- inst->fmts[port]->num_planes == b->length;
+ inst->fmts[port].num_planes == b->length;
}
int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b)
@@ -849,7 +849,7 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
dprintk(VIDC_DBG, "Queueing device address = %pa\n",
&binfo->device_addr[i]);
- if (inst->fmts[OUTPUT_PORT]->fourcc ==
+ if (inst->fmts[OUTPUT_PORT].fourcc ==
V4L2_PIX_FMT_HEVC_HYBRID && binfo->handle[i] &&
b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
rc = msm_comm_smem_cache_operations(inst,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index f0a3875a8f28..d1cc08d53017 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -521,12 +521,12 @@ static int msm_comm_vote_bus(struct msm_vidc_core *core)
struct v4l2_control ctrl;
codec = inst->session_type == MSM_VIDC_DECODER ?
- inst->fmts[OUTPUT_PORT]->fourcc :
- inst->fmts[CAPTURE_PORT]->fourcc;
+ inst->fmts[OUTPUT_PORT].fourcc :
+ inst->fmts[CAPTURE_PORT].fourcc;
yuv = inst->session_type == MSM_VIDC_DECODER ?
- inst->fmts[CAPTURE_PORT]->fourcc :
- inst->fmts[OUTPUT_PORT]->fourcc;
+ inst->fmts[CAPTURE_PORT].fourcc :
+ inst->fmts[OUTPUT_PORT].fourcc;
vote_data[i].domain = get_hal_domain(inst->session_type);
vote_data[i].codec = get_hal_codec(codec);
@@ -1004,8 +1004,8 @@ static void handle_session_init_done(enum hal_command_response cmd, void *data)
core = inst->core;
hdev = inst->core->device;
codec = inst->session_type == MSM_VIDC_DECODER ?
- inst->fmts[OUTPUT_PORT]->fourcc :
- inst->fmts[CAPTURE_PORT]->fourcc;
+ inst->fmts[OUTPUT_PORT].fourcc :
+ inst->fmts[CAPTURE_PORT].fourcc;
/* check if capabilities are available for this session */
for (i = 0; i < VIDC_MAX_SESSIONS; i++) {
@@ -1737,19 +1737,6 @@ static struct vb2_buffer *get_vb_from_device_addr(struct buf_queue *bufq,
return vb;
}
-static void msm_vidc_try_suspend(struct msm_vidc_inst *inst)
-{
- bool batch_mode;
-
- batch_mode = msm_comm_g_ctrl_for_id(inst, V4L2_CID_VIDC_QBUF_MODE)
- == V4L2_VIDC_QBUF_BATCHED;
- if (batch_mode) {
- dprintk(VIDC_DBG,
- "Trying to suspend Venus after finishing Batch\n");
- msm_comm_suspend(inst->core->id);
- }
-}
-
static void handle_ebd(enum hal_command_response cmd, void *data)
{
struct msm_vidc_cb_data_done *response = data;
@@ -1821,8 +1808,6 @@ static void handle_ebd(enum hal_command_response cmd, void *data)
msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_EBD);
}
- msm_vidc_try_suspend(inst);
-
put_inst(inst);
}
@@ -2043,7 +2028,7 @@ static void handle_fbd(enum hal_command_response cmd, void *data)
ns_to_timeval(time_usec * NSEC_PER_USEC);
vbuf->flags = 0;
extra_idx =
- EXTRADATA_IDX(inst->fmts[CAPTURE_PORT]->num_planes);
+ EXTRADATA_IDX(inst->fmts[CAPTURE_PORT].num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
vb->planes[extra_idx].m.userptr =
(unsigned long)fill_buf_done->extra_data_buffer;
@@ -2121,7 +2106,6 @@ static void handle_fbd(enum hal_command_response cmd, void *data)
msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD);
}
- msm_vidc_try_suspend(inst);
err_handle_fbd:
put_inst(inst);
}
@@ -2295,8 +2279,8 @@ int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
list_for_each_entry(inst, &core->instances, list) {
codec = inst->session_type == MSM_VIDC_DECODER ?
- inst->fmts[OUTPUT_PORT]->fourcc :
- inst->fmts[CAPTURE_PORT]->fourcc;
+ inst->fmts[OUTPUT_PORT].fourcc :
+ inst->fmts[CAPTURE_PORT].fourcc;
if (msm_comm_turbo_session(inst))
clk_scale_data.power_mode[num_sessions] =
@@ -2727,9 +2711,9 @@ static int msm_comm_session_init(int flipped_state,
goto exit;
}
if (inst->session_type == MSM_VIDC_DECODER) {
- fourcc = inst->fmts[OUTPUT_PORT]->fourcc;
+ fourcc = inst->fmts[OUTPUT_PORT].fourcc;
} else if (inst->session_type == MSM_VIDC_ENCODER) {
- fourcc = inst->fmts[CAPTURE_PORT]->fourcc;
+ fourcc = inst->fmts[CAPTURE_PORT].fourcc;
} else {
dprintk(VIDC_ERR, "Invalid session\n");
return -EINVAL;
@@ -3617,7 +3601,7 @@ static void populate_frame_data(struct vidc_frame_data *data,
data->buffer_type = msm_comm_get_hal_output_buffer(inst);
}
- extra_idx = EXTRADATA_IDX(inst->fmts[port]->num_planes);
+ extra_idx = EXTRADATA_IDX(inst->fmts[port].num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES &&
vb->planes[extra_idx].m.userptr) {
data->extradata_addr = vb->planes[extra_idx].m.userptr;
@@ -4808,9 +4792,20 @@ int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
return -EINVAL;
}
hdev = core->device;
- if (core->state == VIDC_CORE_INIT_DONE)
+ if (core->state == VIDC_CORE_INIT_DONE) {
+ /*
+ * In the current implementation, a user-initiated SSR triggers
+ * a fatal error from the hardware. However, there is no way to
+ * tell whether a given fatal error was caused by an SSR, so
+ * handle user-initiated SSR as non-fatal.
+ */
+ mutex_lock(&core->lock);
+ core->resources.debug_timeout = false;
+ mutex_unlock(&core->lock);
rc = call_hfi_op(hdev, core_trigger_ssr,
hdev->hfi_device_data, type);
+ }
+
return rc;
}
@@ -5281,7 +5276,7 @@ void msm_comm_print_inst_info(struct msm_vidc_inst *inst)
port = is_decode ? OUTPUT_PORT : CAPTURE_PORT;
dprintk(VIDC_ERR,
"%s session, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
- is_decode ? "Decode" : "Encode", inst->fmts[port]->name,
+ is_decode ? "Decode" : "Encode", inst->fmts[port].name,
inst->prop.height[port], inst->prop.width[port],
inst->prop.fps, inst->prop.bitrate,
!inst->bit_depth ? "8" : "10");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
index 9e67ef096c63..3cd1c38f8f37 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
@@ -237,8 +237,8 @@ void msm_dcvs_init_load(struct msm_vidc_inst *inst)
}
fourcc = inst->session_type == MSM_VIDC_DECODER ?
- inst->fmts[OUTPUT_PORT]->fourcc :
- inst->fmts[CAPTURE_PORT]->fourcc;
+ inst->fmts[OUTPUT_PORT].fourcc :
+ inst->fmts[CAPTURE_PORT].fourcc;
for (i = 0; i < num_rows; i++) {
bool matches = msm_dcvs_check_codec_supported(
@@ -589,7 +589,7 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
}
is_codec_supported =
msm_dcvs_check_codec_supported(
- inst->fmts[OUTPUT_PORT]->fourcc,
+ inst->fmts[OUTPUT_PORT].fourcc,
inst->dcvs.supported_codecs,
inst->session_type);
if (!is_codec_supported ||
@@ -599,15 +599,15 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
goto dcvs_decision_done;
}
if (msm_comm_turbo_session(inst) ||
- !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit ||
- instance_count > 1))
+ !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+ instance_count > 1)
is_dcvs_supported = false;
}
if (inst->session_type == MSM_VIDC_ENCODER) {
inst->dcvs.extra_buffer_count = DCVS_ENC_EXTRA_OUTPUT_BUFFERS;
is_codec_supported =
msm_dcvs_check_codec_supported(
- inst->fmts[CAPTURE_PORT]->fourcc,
+ inst->fmts[CAPTURE_PORT].fourcc,
inst->dcvs.supported_codecs,
inst->session_type);
if (!is_codec_supported ||
@@ -617,8 +617,8 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
goto dcvs_decision_done;
}
if (msm_comm_turbo_session(inst) ||
- !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit ||
- instance_count > 1))
+ !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+ instance_count > 1)
is_dcvs_supported = false;
}
dcvs_decision_done:
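The two hunks above fix a misplaced parenthesis: "dcvs_limit || instance_count > 1" was previously evaluated as one boolean and passed as the macro's limit argument, so the load check effectively compared against 0 or 1. A reduced, self-contained illustration (the stand-in macro body is hypothetical; the real IS_VALID_DCVS_SESSION may differ):

/* Reduced illustration of the parenthesis fix */
#define IS_VALID(load, limit)	((load) <= (limit))

int load = 3, limit = 4, count = 2;
bool buggy = !IS_VALID(load, limit || count > 1);	/* limit collapses to 0/1 */
bool fixed = !IS_VALID(load, limit) || count > 1;	/* limit kept intact */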
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index d3027c08d24e..efb90c69881f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -289,10 +289,10 @@ static ssize_t inst_info_read(struct file *file, char __user *buf,
for (i = 0; i < MAX_PORT_NUM; i++) {
write_str(&dbg_buf, "capability: %s\n", i == OUTPUT_PORT ?
"Output" : "Capture");
- write_str(&dbg_buf, "name : %s\n", inst->fmts[i]->name);
- write_str(&dbg_buf, "planes : %d\n", inst->fmts[i]->num_planes);
+ write_str(&dbg_buf, "name : %s\n", inst->fmts[i].name);
+ write_str(&dbg_buf, "planes : %d\n", inst->fmts[i].num_planes);
write_str(
- &dbg_buf, "type: %s\n", inst->fmts[i]->type == OUTPUT_PORT ?
+ &dbg_buf, "type: %s\n", inst->fmts[i].type == OUTPUT_PORT ?
"Output" : "Capture");
switch (inst->buffer_mode_set[i]) {
case HAL_BUFFER_MODE_STATIC:
@@ -311,7 +311,7 @@ static ssize_t inst_info_read(struct file *file, char __user *buf,
write_str(&dbg_buf, "count: %u\n",
inst->bufq[i].vb2_bufq.num_buffers);
- for (j = 0; j < inst->fmts[i]->num_planes; j++)
+ for (j = 0; j < inst->fmts[i].num_planes; j++)
write_str(&dbg_buf, "size for plane %d: %u\n", j,
inst->bufq[i].vb2_bufq.plane_sizes[j]);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index b6e74715ad07..161e94f99040 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -261,7 +261,7 @@ struct msm_vidc_inst {
void *session;
struct session_prop prop;
enum instance_state state;
- struct msm_vidc_format *fmts[MAX_PORT_NUM];
+ struct msm_vidc_format fmts[MAX_PORT_NUM];
struct buf_queue bufq[MAX_PORT_NUM];
struct msm_vidc_list pendingq;
struct msm_vidc_list scratchbufs;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 25fccab99fb3..a3080be8cd7a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -1166,7 +1166,6 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
struct device *dev)
{
int rc = 0;
- int disable_htw = 1;
int secure_vmid = VMID_INVAL;
struct bus_type *bus;
@@ -1192,14 +1191,6 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
goto remove_cb;
}
- rc = iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- dprintk(VIDC_ERR, "%s - disable coherent HTW failed: %s %d\n",
- __func__, dev_name(dev), rc);
- goto release_mapping;
- }
-
if (cb->is_secure) {
secure_vmid = get_secure_vmid(cb);
rc = iommu_domain_set_attr(cb->mapping->domain,
diff --git a/drivers/media/platform/msm/vidc/venus_boot.c b/drivers/media/platform/msm/vidc/venus_boot.c
index 925c97a5b6e8..85c3e15edded 100644
--- a/drivers/media/platform/msm/vidc/venus_boot.c
+++ b/drivers/media/platform/msm/vidc/venus_boot.c
@@ -190,8 +190,6 @@ static int pil_venus_auth_and_reset(void)
{
int rc;
- /* Need to enable this for new SMMU to set the device attribute */
- bool disable_htw = true;
phys_addr_t fw_bias = venus_data->resources->firmware_base;
void __iomem *reg_base = venus_data->reg_base;
u32 ver;
@@ -278,17 +276,6 @@ static int pil_venus_auth_and_reset(void)
if (iommu_present) {
phys_addr_t pa = fw_bias;
- /* Enable this for new SMMU to set the device attribute */
- rc = iommu_domain_set_attr(venus_data->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s: Failed to disable COHERENT_HTW: %s\n",
- __func__, dev_name(dev));
- goto release_mapping;
- }
-
rc = arm_iommu_attach_device(dev, venus_data->mapping);
if (rc) {
dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index e0fb31de38ff..8332c7f4db43 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -3336,7 +3336,6 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
{
bool local_packet = false;
enum vidc_msg_prio log_level = VIDC_FW;
- unsigned int pending_packet_count = 0;
if (!device) {
dprintk(VIDC_ERR, "%s: Invalid params\n", __func__);
@@ -3361,23 +3360,6 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
log_level = VIDC_ERR;
}
- /*
- * In FATAL situation, print all the pending messages in msg
- * queue. This is useful for debugging. At this time, message
- * queues may be corrupted. Hence don't trust them and just print
- * first max_packets packets.
- */
-
- if (local_packet) {
- dprintk(VIDC_ERR,
- "Printing all pending messages in message Queue\n");
- while (!__iface_msgq_read(device, packet) &&
- pending_packet_count < max_packets) {
- __dump_packet(packet, log_level);
- pending_packet_count++;
- }
- }
-
while (!__iface_dbgq_read(device, packet)) {
struct hfi_msg_sys_coverage_packet *pkt =
(struct hfi_msg_sys_coverage_packet *) packet;
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index b102264ca8fd..02ddf3225af8 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -1872,6 +1872,10 @@ static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
case WCD934X_CPE_SS_PWR_SYS_PSTATE_CTL_1:
case WCD934X_CPE_SS_CPAR_CTL:
case WCD934X_CPE_SS_STATUS:
+ case WCD934X_CODEC_RPM_RST_CTL:
+ case WCD934X_SIDO_NEW_VOUT_A_STARTUP:
+ case WCD934X_SIDO_NEW_VOUT_D_STARTUP:
+ case WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL:
return true;
}
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils.c b/drivers/misc/qcom/qdsp6v2/audio_utils.c
index 065b426ca6d0..840597314a5f 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils.c
@@ -601,6 +601,7 @@ long audio_in_compat_ioctl(struct file *file,
}
case AUDIO_GET_CONFIG_32: {
struct msm_audio_config32 cfg_32;
+ memset(&cfg_32, 0, sizeof(cfg_32));
cfg_32.buffer_size = audio->pcm_cfg.buffer_size;
cfg_32.buffer_count = audio->pcm_cfg.buffer_count;
cfg_32.channel_count = audio->pcm_cfg.channel_count;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index f7ad8f61f2e7..0c44f79549d4 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -692,7 +692,7 @@ static int audio_aio_events_pending(struct q6audio_aio *audio)
spin_lock_irqsave(&audio->event_queue_lock, flags);
empty = !list_empty(&audio->event_queue);
spin_unlock_irqrestore(&audio->event_queue_lock, flags);
- return empty || audio->event_abort;
+ return empty || audio->event_abort || audio->reset_event;
}
static long audio_aio_process_event_req_common(struct q6audio_aio *audio,
@@ -720,6 +720,12 @@ static long audio_aio_process_event_req_common(struct q6audio_aio *audio,
if (rc < 0)
return rc;
+ if (audio->reset_event) {
+ audio->reset_event = false;
+ pr_err("In SSR, post ENETRESET err\n");
+ return -ENETRESET;
+ }
+
if (audio->event_abort) {
audio->event_abort = 0;
return -ENODEV;
@@ -1327,6 +1333,7 @@ int audio_aio_open(struct q6audio_aio *audio, struct file *file)
audio->drv_ops.out_flush(audio);
audio->opened = 1;
+ audio->reset_event = false;
file->private_data = audio;
audio->codec_ioctl = audio_aio_ioctl;
audio->codec_compat_ioctl = audio_aio_compat_ioctl;
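Taken together, these changes make a subsystem restart (RESET_EVENTS from the DSP) visible to event waiters: reset_event wakes the wait queue and the event ioctl returns -ENETRESET exactly once. A hedged user-space sketch of the expected handling (ioctl name as used elsewhere in these drivers; the device path is deliberately left generic):

/* User-space sketch: on ENETRESET, tear down and reopen the session */
if (ioctl(fd, AUDIO_GET_EVENT, &event) < 0 && errno == ENETRESET) {
	close(fd);
	fd = open(dev_path, O_RDWR);	/* dev_path: driver-specific node */
}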
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.h b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.h
index d4a7c7e5483b..9c53f58b746f 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.h
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.h
@@ -1,6 +1,6 @@
/* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -199,6 +199,7 @@ struct q6audio_aio {
int feedback;
int rflush; /* Read flush */
int wflush; /* Write flush */
+ bool reset_event;
long (*codec_ioctl)(struct file *, unsigned int, unsigned long);
long (*codec_compat_ioctl)(struct file *, unsigned int, unsigned long);
};
diff --git a/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c b/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c
index f9534b2bfa7f..6e82c8051886 100644
--- a/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -107,8 +107,9 @@ void audio_aio_cb(uint32_t opcode, uint32_t token,
audio_aio_post_event(audio, AUDIO_EVENT_STREAM_INFO, e_payload);
break;
case RESET_EVENTS:
- pr_debug("%s: Received opcode:0x%x\n", __func__, opcode);
+ pr_err("%s: Received opcode:0x%x\n", __func__, opcode);
audio->stopped = 1;
+ audio->reset_event = true;
wake_up(&audio->event_wait);
break;
default:
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8c2bb77db049..b11fe09552bf 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -945,8 +945,6 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int ret;
-
/* MBIM backwards compatible function? */
if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
return -ENODEV;
@@ -955,16 +953,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
* Additionally, generic NCM devices are assumed to accept arbitrarily
* placed NDP.
*/
- ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
-
- /*
- * We should get an event when network connection is "connected" or
- * "disconnected". Set network connection in "disconnected" state
- * (carrier is OFF) during attach, so the IP network stack does not
- * start IPv6 negotiation and more.
- */
- usbnet_link_change(dev, 0, 0);
- return ret;
+ return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
}
static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
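The removed usbnet_link_change(dev, 0, 0) is compensated by the FLAG_LINK_INTR additions below: with that flag set, the usbnet core itself brings the interface up with carrier off at probe time and waits for the device's connection notification, so the explicit bind-time call is redundant.

/* FLAG_LINK_INTR (added to the driver_info flags below) makes usbnet
 * start with carrier off and toggle it from the interrupt endpoint's
 * NetworkConnection notifications.
 */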
@@ -1547,7 +1536,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
- .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_LINK_INTR,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
@@ -1560,7 +1550,7 @@ static const struct driver_info cdc_ncm_info = {
static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_WWAN,
+ | FLAG_LINK_INTR | FLAG_WWAN,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
@@ -1573,7 +1563,7 @@ static const struct driver_info wwan_info = {
static const struct driver_info wwan_noarp_info = {
.description = "Mobile Broadband Network Device (NO ARP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_WWAN | FLAG_NOARP,
+ | FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 81651c7dec72..a0f76581e6eb 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -19,6 +19,7 @@ wil6210-y += wil_platform.o
wil6210-y += ethtool.o
wil6210-y += wil_crash_dump.o
wil6210-y += p2p.o
+wil6210-y += ftm.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 18e95148e891..17b419d408cd 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -17,6 +17,7 @@
#include <linux/etherdevice.h>
#include "wil6210.h"
#include "wmi.h"
+#include "ftm.h"
#define WIL_MAX_ROC_DURATION_MS 5000
@@ -36,6 +37,90 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
/* channel 4 not supported yet */
};
+/* Vendor ID to be used in vendor-specific commands and events
+ * sent to user space.
+ * NOTE: The authoritative place for the definition of QCA_NL80211_VENDOR_ID,
+ * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and
+ * qca_wlan_vendor_attr is the open-source file src/common/qca-vendor.h in
+ * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that.
+ */
+
+#define QCA_NL80211_VENDOR_ID 0x001374
+
+enum qca_nl80211_vendor_subcmds {
+ QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA = 128,
+ QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION = 129,
+ QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION = 130,
+ QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT = 131,
+ QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE = 132,
+ QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER = 133,
+ QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS = 134,
+ QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS = 135,
+ QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT = 136,
+};
+
+/* vendor specific commands */
+static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_ftm_get_capabilities
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_ftm_start_session
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_ftm_abort_session
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_ftm_configure_responder
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_aoa_start_measurement
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_aoa_abort_measurement
+ },
+};
+
+/* vendor specific events */
+static const struct nl80211_vendor_cmd_info wil_nl80211_vendor_events[] = {
+ [QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX] = {
+ .vendor_id = QCA_NL80211_VENDOR_ID,
+ .subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT
+ },
+ [QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX] = {
+ .vendor_id = QCA_NL80211_VENDOR_ID,
+ .subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE
+ },
+ [QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX] = {
+ .vendor_id = QCA_NL80211_VENDOR_ID,
+ .subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT
+ },
+};
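The *_INDEX values used above are assumed to come from the new ftm.h as a plain enum mirroring this array's layout; a sketch of the assumed definition:

/* Assumed ftm.h definition (sketch; the actual header is not in this diff):
 * enum qca_nl80211_vendor_events_index {
 *	QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX,
 *	QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX,
 *	QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX,
 * };
 */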
+
static struct ieee80211_supported_band wil_band_60ghz = {
.channels = wil_60ghz_channels,
.n_channels = ARRAY_SIZE(wil_60ghz_channels),
@@ -354,10 +439,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
__func__, wdev, wdev->iftype);
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
+ mutex_unlock(&wil->p2p_wdev_mutex);
return -EAGAIN;
}
+ mutex_unlock(&wil->p2p_wdev_mutex);
/* check we are client side */
switch (wdev->iftype) {
@@ -760,14 +848,11 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
return rc;
}
-static struct wil_tid_crypto_rx_single *
-wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
- enum wmi_key_usage key_usage, const u8 *mac_addr)
+static struct wil_sta_info *
+wil_find_sta_by_key_usage(struct wil6210_priv *wil,
+ enum wmi_key_usage key_usage, const u8 *mac_addr)
{
int cid = -EINVAL;
- int tid = 0;
- struct wil_sta_info *s;
- struct wil_tid_crypto_rx *c;
if (key_usage == WMI_KEY_USE_TX_GROUP)
return NULL; /* not needed */
@@ -778,18 +863,72 @@ wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
else if (key_usage == WMI_KEY_USE_RX_GROUP)
cid = wil_find_cid_by_idx(wil, 0);
if (cid < 0) {
- wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
- key_usage_str[key_usage], key_index);
+ wil_err(wil, "No CID for %pM %s\n", mac_addr,
+ key_usage_str[key_usage]);
return ERR_PTR(cid);
}
- s = &wil->sta[cid];
- if (key_usage == WMI_KEY_USE_PAIRWISE)
- c = &s->tid_crypto_rx[tid];
- else
- c = &s->group_crypto_rx;
+ return &wil->sta[cid];
+}
+
+static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs,
+ struct key_params *params)
+{
+ struct wil_tid_crypto_rx_single *cc;
+ int tid;
- return &c->key_id[key_index];
+ if (!cs)
+ return;
+
+ switch (key_usage) {
+ case WMI_KEY_USE_PAIRWISE:
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ cc = &cs->tid_crypto_rx[tid].key_id[key_index];
+ if (params->seq)
+ memcpy(cc->pn, params->seq,
+ IEEE80211_GCMP_PN_LEN);
+ else
+ memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+ cc->key_set = true;
+ }
+ break;
+ case WMI_KEY_USE_RX_GROUP:
+ cc = &cs->group_crypto_rx.key_id[key_index];
+ if (params->seq)
+ memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
+ else
+ memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+ cc->key_set = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs)
+{
+ struct wil_tid_crypto_rx_single *cc;
+ int tid;
+
+ if (!cs)
+ return;
+
+ switch (key_usage) {
+ case WMI_KEY_USE_PAIRWISE:
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ cc = &cs->tid_crypto_rx[tid].key_id[key_index];
+ cc->key_set = false;
+ }
+ break;
+ case WMI_KEY_USE_RX_GROUP:
+ cc = &cs->group_crypto_rx.key_id[key_index];
+ cc->key_set = false;
+ break;
+ default:
+ break;
+ }
}
static int wil_cfg80211_add_key(struct wiphy *wiphy,
@@ -801,24 +940,26 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
- struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
- key_index,
- key_usage,
- mac_addr);
+ struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
+ mac_addr);
+
+ if (!params) {
+ wil_err(wil, "NULL params\n");
+ return -EINVAL;
+ }
wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
- if (IS_ERR(cc)) {
+ if (IS_ERR(cs)) {
wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
__func__, mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
- if (cc)
- cc->key_set = false;
+ wil_del_rx_key(key_index, key_usage, cs);
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
@@ -831,13 +972,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
params->key, key_usage);
- if ((rc == 0) && cc) {
- if (params->seq)
- memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
- else
- memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
- cc->key_set = true;
- }
+ if (!rc)
+ wil_set_crypto_rx(key_index, key_usage, cs, params);
return rc;
}
@@ -849,20 +985,18 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
- struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
- key_index,
- key_usage,
- mac_addr);
+ struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
+ mac_addr);
wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
key_usage_str[key_usage], key_index);
- if (IS_ERR(cc))
+ if (IS_ERR(cs))
wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
mac_addr, key_usage_str[key_usage], key_index);
- if (!IS_ERR_OR_NULL(cc))
- cc->key_set = false;
+ if (!IS_ERR_OR_NULL(cs))
+ wil_del_rx_key(key_index, key_usage, cs);
return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
@@ -1363,19 +1497,16 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- u8 started;
+ struct wil_p2p_info *p2p = &wil->p2p;
+
+ if (!p2p->p2p_dev_started)
+ return;
wil_dbg_misc(wil, "%s: entered\n", __func__);
mutex_lock(&wil->mutex);
- started = wil_p2p_stop_discovery(wil);
- if (started && wil->scan_request) {
- cfg80211_scan_done(wil->scan_request, 1);
- wil->scan_request = NULL;
- wil->radio_wdev = wil->wdev;
- }
+ wil_p2p_stop_radio_operations(wil);
+ p2p->p2p_dev_started = 0;
mutex_unlock(&wil->mutex);
-
- wil->p2p.p2p_dev_started = 0;
}
static struct cfg80211_ops wil_cfg80211_ops = {
@@ -1437,6 +1568,11 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+
+ wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
+ wiphy->vendor_commands = wil_nl80211_vendor_commands;
+ wiphy->vendor_events = wil_nl80211_vendor_events;
+ wiphy->n_vendor_events = ARRAY_SIZE(wil_nl80211_vendor_events);
}
struct wireless_dev *wil_cfg80211_init(struct device *dev)
@@ -1460,14 +1596,8 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev)
set_wiphy_dev(wdev->wiphy, dev);
wil_wiphy_init(wdev->wiphy);
- rc = wiphy_register(wdev->wiphy);
- if (rc < 0)
- goto out_failed_reg;
-
return wdev;
-out_failed_reg:
- wiphy_free(wdev->wiphy);
out:
kfree(wdev);
@@ -1483,7 +1613,6 @@ void wil_wdev_free(struct wil6210_priv *wil)
if (!wdev)
return;
- wiphy_unregister(wdev->wiphy);
wiphy_free(wdev->wiphy);
kfree(wdev);
}
@@ -1494,11 +1623,11 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
mutex_lock(&wil->p2p_wdev_mutex);
p2p_wdev = wil->p2p_wdev;
+ wil->p2p_wdev = NULL;
+ wil->radio_wdev = wil_to_wdev(wil);
+ mutex_unlock(&wil->p2p_wdev_mutex);
if (p2p_wdev) {
- wil->p2p_wdev = NULL;
- wil->radio_wdev = wil_to_wdev(wil);
cfg80211_unregister_wdev(p2p_wdev);
kfree(p2p_wdev);
}
- mutex_unlock(&wil->p2p_wdev_mutex);
}
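The reorder above narrows the p2p_wdev_mutex critical section: the pointer is detached under the lock, while cfg80211_unregister_wdev(), which can sleep, now runs outside it, presumably to avoid lock-ordering problems with paths that also take p2p_wdev_mutex.

/* Detach under the lock, unregister outside it: cfg80211_unregister_wdev()
 * can sleep and may re-enter code that takes p2p_wdev_mutex.
 */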
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index f8e36ba47ac7..d3e420f1b26b 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1559,6 +1559,56 @@ static const struct file_operations fops_led_blink_time = {
.open = simple_open,
};
+/*---------FW capabilities------------*/
+static int wil_fw_capabilities_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ seq_printf(s, "fw_capabilities : %*pb\n", WMI_FW_CAPABILITY_MAX,
+ wil->fw_capabilities);
+
+ return 0;
+}
+
+static int wil_fw_capabilities_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_fw_capabilities_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_fw_capabilities = {
+ .open = wil_fw_capabilities_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------FW version------------*/
+static int wil_fw_version_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ if (wil->fw_version[0])
+ seq_printf(s, "%s\n", wil->fw_version);
+ else
+ seq_puts(s, "N/A\n");
+
+ return 0;
+}
+
+static int wil_fw_version_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_fw_version_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_fw_version = {
+ .open = wil_fw_version_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1609,6 +1659,8 @@ static const struct {
{"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
{"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
{"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
+ {"fw_capabilities", S_IRUGO, &fops_fw_capabilities},
+ {"fw_version", S_IRUGO, &fops_fw_version},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1649,7 +1701,6 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(privacy, S_IRUGO, doff_u32),
WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
- WIL_FIELD(fw_version, S_IRUGO, doff_u32),
WIL_FIELD(hw_version, S_IRUGO, doff_x32),
WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
diff --git a/drivers/net/wireless/ath/wil6210/ftm.c b/drivers/net/wireless/ath/wil6210/ftm.c
new file mode 100644
index 000000000000..5cf07343a33c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/ftm.c
@@ -0,0 +1,903 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <net/netlink.h>
+#include "wil6210.h"
+#include "ftm.h"
+#include "wmi.h"
+
+/* FTM session ID we use with FW */
+#define WIL_FTM_FW_SESSION_ID 1
+
+/* fixed amount of spare room we reserve in NL messages we allocate */
+#define WIL_FTM_NL_EXTRA_ALLOC 32
+
+/* approx maximum length for FTM_MEAS_RESULT NL80211 event */
+#define WIL_FTM_MEAS_RESULT_MAX_LENGTH 2048
+
+/* timeout for waiting for standalone AOA measurement, milliseconds */
+#define WIL_AOA_MEASUREMENT_TIMEOUT 1000
+
+/* maximum number of allowed FTM measurements per burst */
+#define WIL_FTM_MAX_MEAS_PER_BURST 31
+
+/* initial token to use on non-secure FTM measurement */
+#define WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN 2
+
+#define WIL_TOF_FTM_MAX_LCI_LENGTH (240)
+#define WIL_TOF_FTM_MAX_LCR_LENGTH (240)
+
+static const struct
+nla_policy wil_nl80211_loc_policy[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1] = {
+ [QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE] = { .type = NLA_U64 },
+ [QCA_WLAN_VENDOR_ATTR_LOC_CAPA] = { .type = NLA_NESTED },
+ [QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS] = { .type = NLA_NESTED },
+ [QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS] = { .type = NLA_NESTED },
+ [QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE] = { .type = NLA_FLAG },
+ [QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS] = { .type = NLA_U32 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_AOA_TYPE] = { .type = NLA_U32 },
+ [QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK] = { .type = NLA_U32 },
+};
+
+static const struct
+nla_policy wil_nl80211_ftm_peer_policy[
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1] = {
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR] = { .len = ETH_ALEN },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS] = { .type = NLA_U32 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS] = { .type = NLA_NESTED },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID] = { .type = NLA_U8 },
+};
+
+static const struct
+nla_policy wil_nl80211_ftm_meas_param_policy[
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX + 1] = {
+ [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD] = { .type = NLA_U16 },
+};
+
+static int wil_ftm_parse_meas_params(struct wil6210_priv *wil,
+ struct nlattr *attr,
+ struct wil_ftm_meas_params *params)
+{
+ struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX + 1];
+ int rc;
+
+ if (!attr) {
+ /* temporary defaults for one-shot measurement */
+ params->meas_per_burst = 1;
+ params->burst_period = 5; /* 500 milliseconds */
+ return 0;
+ }
+ rc = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX,
+ attr, wil_nl80211_ftm_meas_param_policy);
+ if (rc) {
+ wil_err(wil, "invalid measurement params\n");
+ return rc;
+ }
+ if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST])
+ params->meas_per_burst = nla_get_u8(
+ tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST]);
+ if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP])
+ params->num_of_bursts_exp = nla_get_u8(
+ tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP]);
+ if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION])
+ params->burst_duration = nla_get_u8(
+ tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION]);
+ if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD])
+ params->burst_period = nla_get_u16(
+ tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD]);
+ return 0;
+}
+
+static int wil_ftm_validate_meas_params(struct wil6210_priv *wil,
+ struct wil_ftm_meas_params *params)
+{
+ /* temporarily allow only a single burst */
+ if (params->meas_per_burst > WIL_FTM_MAX_MEAS_PER_BURST ||
+ params->num_of_bursts_exp != 0) {
+ wil_err(wil, "invalid measurement params\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wil_ftm_append_meas_params(struct wil6210_priv *wil,
+ struct sk_buff *msg,
+ struct wil_ftm_meas_params *params)
+{
+ struct nlattr *nl_p;
+
+ nl_p = nla_nest_start(
+ msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS);
+ if (!nl_p)
+ goto out_put_failure;
+ if (nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST,
+ params->meas_per_burst) ||
+ nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP,
+ params->num_of_bursts_exp) ||
+ nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION,
+ params->burst_duration) ||
+ nla_put_u16(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD,
+ params->burst_period))
+ goto out_put_failure;
+ nla_nest_end(msg, nl_p);
+ return 0;
+out_put_failure:
+ return -ENOBUFS;
+}
+
+static int wil_ftm_append_peer_meas_res(struct wil6210_priv *wil,
+ struct sk_buff *msg,
+ struct wil_ftm_peer_meas_res *res)
+{
+ struct nlattr *nl_mres, *nl_f;
+ int i;
+
+ if (nla_put(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR,
+ ETH_ALEN, res->mac_addr) ||
+ nla_put_u32(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS,
+ res->flags) ||
+ nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS,
+ res->status))
+ goto out_put_failure;
+ if (res->status == QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED &&
+ nla_put_u8(msg,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS,
+ res->value_seconds))
+ goto out_put_failure;
+ if (res->has_params &&
+ wil_ftm_append_meas_params(wil, msg, &res->params))
+ goto out_put_failure;
+ nl_mres = nla_nest_start(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS);
+ if (!nl_mres)
+ goto out_put_failure;
+ for (i = 0; i < res->n_meas; i++) {
+ nl_f = nla_nest_start(msg, i);
+ if (!nl_f)
+ goto out_put_failure;
+ if (nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1,
+ res->meas[i].t1) ||
+ nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2,
+ res->meas[i].t2) ||
+ nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3,
+ res->meas[i].t3) ||
+ nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4,
+ res->meas[i].t4))
+ goto out_put_failure;
+ nla_nest_end(msg, nl_f);
+ }
+ nla_nest_end(msg, nl_mres);
+ return 0;
+out_put_failure:
+ wil_err(wil, "fail to append peer result\n");
+ return -ENOBUFS;
+}
+
+static void wil_ftm_send_meas_result(struct wil6210_priv *wil,
+ struct wil_ftm_peer_meas_res *res)
+{
+ struct sk_buff *vendor_event = NULL;
+ struct nlattr *nl_res;
+ int rc = 0;
+
+ wil_dbg_misc(wil, "sending %d results for peer %pM\n",
+ res->n_meas, res->mac_addr);
+
+ vendor_event = cfg80211_vendor_event_alloc(
+ wil_to_wiphy(wil),
+ wil->wdev,
+ WIL_FTM_MEAS_RESULT_MAX_LENGTH,
+ QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX,
+ GFP_KERNEL);
+ if (!vendor_event) {
+ wil_err(wil, "fail to allocate measurement result\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (nla_put_u64(
+ vendor_event, QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE,
+ wil->ftm.session_cookie)) {
+ rc = -ENOBUFS;
+ goto out;
+ }
+
+ nl_res = nla_nest_start(vendor_event,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS);
+ if (!nl_res) {
+ rc = -ENOBUFS;
+ goto out;
+ }
+
+ rc = wil_ftm_append_peer_meas_res(wil, vendor_event, res);
+ if (rc)
+ goto out;
+
+ nla_nest_end(vendor_event, nl_res);
+ cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+ vendor_event = NULL;
+out:
+ if (vendor_event)
+ kfree_skb(vendor_event);
+ if (rc)
+ wil_err(wil, "send peer result failed, err %d\n", rc);
+}
+
+static void wil_ftm_send_peer_res(struct wil6210_priv *wil)
+{
+ if (!wil->ftm.has_ftm_res || !wil->ftm.ftm_res)
+ return;
+
+ wil_ftm_send_meas_result(wil, wil->ftm.ftm_res);
+ wil->ftm.has_ftm_res = 0;
+ wil->ftm.ftm_res->n_meas = 0;
+}
+
+static void wil_aoa_measurement_timeout(struct work_struct *work)
+{
+ struct wil_ftm_priv *ftm = container_of(work, struct wil_ftm_priv,
+ aoa_timeout_work);
+ struct wil6210_priv *wil = container_of(ftm, struct wil6210_priv, ftm);
+ struct wil_aoa_meas_result res;
+
+ wil_dbg_misc(wil, "AOA measurement timeout\n");
+
+ memset(&res, 0, sizeof(res));
+ ether_addr_copy(res.mac_addr, wil->ftm.aoa_peer_mac_addr);
+ res.type = wil->ftm.aoa_type;
+ res.status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED;
+ wil_aoa_cfg80211_meas_result(wil, &res);
+}
+
+static int
+wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
+ struct wil_ftm_session_request *request)
+{
+ int rc = 0;
+ bool has_lci = false, has_lcr = false;
+ u8 max_meas = 0, *ptr;
+ u32 i, cmd_len;
+ struct wmi_tof_session_start_cmd *cmd;
+
+ mutex_lock(&wil->ftm.lock);
+ if (wil->ftm.session_started) {
+ wil_err(wil, "FTM session already running\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+ /* for now, allow measurement only to the associated AP */
+ if (!test_bit(wil_status_fwconnected, wil->status)) {
+ wil_err(wil, "must be associated\n");
+ rc = -ENOTSUPP;
+ goto out;
+ }
+
+ for (i = 0; i < request->n_peers; i++) {
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI)
+ has_lci = true;
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR)
+ has_lcr = true;
+ max_meas = max(max_meas,
+ request->peers[i].params.meas_per_burst);
+ }
+
+ wil->ftm.ftm_res = kzalloc(sizeof(*wil->ftm.ftm_res) +
+ max_meas * sizeof(struct wil_ftm_peer_meas) +
+ (has_lci ? WIL_TOF_FTM_MAX_LCI_LENGTH : 0) +
+ (has_lcr ? WIL_TOF_FTM_MAX_LCR_LENGTH : 0), GFP_KERNEL);
+ if (!wil->ftm.ftm_res) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ ptr = (u8 *)wil->ftm.ftm_res;
+ ptr += sizeof(struct wil_ftm_peer_meas_res) +
+ max_meas * sizeof(struct wil_ftm_peer_meas);
+ if (has_lci) {
+ wil->ftm.ftm_res->lci = ptr;
+ ptr += WIL_TOF_FTM_MAX_LCI_LENGTH;
+ }
+ if (has_lcr)
+ wil->ftm.ftm_res->lcr = ptr;
+ wil->ftm.max_ftm_meas = max_meas;
+
+ cmd_len = sizeof(struct wmi_tof_session_start_cmd) +
+ request->n_peers * sizeof(struct wmi_ftm_dest_info);
+ cmd = kzalloc(cmd_len, GFP_KERNEL);
+ if (!cmd) {
+ rc = -ENOMEM;
+ goto out_ftm_res;
+ }
+
+ cmd->session_id = cpu_to_le32(WIL_FTM_FW_SESSION_ID);
+ cmd->num_of_dest = cpu_to_le16(request->n_peers);
+ for (i = 0; i < request->n_peers; i++) {
+ ether_addr_copy(cmd->ftm_dest_info[i].dst_mac,
+ request->peers[i].mac_addr);
+ cmd->ftm_dest_info[i].channel = request->peers[i].channel;
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE) {
+ cmd->ftm_dest_info[i].flags |=
+ WMI_TOF_SESSION_START_FLAG_SECURED;
+ cmd->ftm_dest_info[i].initial_token =
+ request->peers[i].secure_token_id;
+ } else {
+ cmd->ftm_dest_info[i].initial_token =
+ WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN;
+ }
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP)
+ cmd->ftm_dest_info[i].flags |=
+ WMI_TOF_SESSION_START_FLAG_ASAP;
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI)
+ cmd->ftm_dest_info[i].flags |=
+ WMI_TOF_SESSION_START_FLAG_LCI_REQ;
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR)
+ cmd->ftm_dest_info[i].flags |=
+ WMI_TOF_SESSION_START_FLAG_LCR_REQ;
+ cmd->ftm_dest_info[i].num_of_ftm_per_burst =
+ request->peers[i].params.meas_per_burst;
+ cmd->ftm_dest_info[i].num_of_bursts_exp =
+ request->peers[i].params.num_of_bursts_exp;
+ cmd->ftm_dest_info[i].burst_duration =
+ request->peers[i].params.burst_duration;
+ cmd->ftm_dest_info[i].burst_period =
+ cpu_to_le16(request->peers[i].params.burst_period);
+ }
+
+ rc = wmi_send(wil, WMI_TOF_SESSION_START_CMDID, cmd, cmd_len);
+ kfree(cmd);
+
+ if (rc)
+ goto out_ftm_res;
+
+ wil->ftm.session_cookie = request->session_cookie;
+ wil->ftm.session_started = 1;
+
+out_ftm_res:
+ if (rc) {
+ kfree(wil->ftm.ftm_res);
+ wil->ftm.ftm_res = NULL;
+ }
+out:
+ mutex_unlock(&wil->ftm.lock);
+ return rc;
+}
+
+static void
+wil_ftm_cfg80211_session_ended(struct wil6210_priv *wil, u32 status)
+{
+ struct sk_buff *vendor_event = NULL;
+
+ mutex_lock(&wil->ftm.lock);
+
+ if (!wil->ftm.session_started) {
+ wil_dbg_misc(wil, "FTM session not started, ignoring event\n");
+ goto out;
+ }
+
+ /* finish the session */
+ wil_dbg_misc(wil, "finishing FTM session\n");
+
+ /* send left-over results if any */
+ wil_ftm_send_peer_res(wil);
+
+ wil->ftm.session_started = 0;
+ kfree(wil->ftm.ftm_res);
+ wil->ftm.ftm_res = NULL;
+
+ vendor_event = cfg80211_vendor_event_alloc(
+ wil_to_wiphy(wil),
+ wil->wdev,
+ WIL_FTM_NL_EXTRA_ALLOC,
+ QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX,
+ GFP_KERNEL);
+ if (!vendor_event)
+ goto out;
+
+ if (nla_put_u64(vendor_event,
+ QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE,
+ wil->ftm.session_cookie) ||
+ nla_put_u32(vendor_event,
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS, status)) {
+ wil_err(wil, "failed to fill session done event\n");
+ goto out;
+ }
+ cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+ vendor_event = NULL;
+out:
+ kfree_skb(vendor_event);
+ mutex_unlock(&wil->ftm.lock);
+}
+
+static void wil_aoa_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ wil_dbg_misc(wil, "AOA timer\n");
+ schedule_work(&wil->ftm.aoa_timeout_work);
+}
+
+static int
+wil_aoa_cfg80211_start_measurement(struct wil6210_priv *wil,
+ struct wil_aoa_meas_request *request)
+{
+ int rc = 0;
+ struct cfg80211_bss *bss;
+ struct wmi_aoa_meas_cmd cmd;
+
+ mutex_lock(&wil->ftm.lock);
+
+ if (wil->ftm.aoa_started) {
+ wil_err(wil, "AOA measurement already running\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+ if (request->type >= QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX) {
+ wil_err(wil, "invalid AOA type: %d\n", request->type);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ bss = cfg80211_get_bss(wil_to_wiphy(wil), NULL, request->mac_addr,
+ NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ if (!bss) {
+ wil_err(wil, "Unable to find BSS\n");
+ rc = -ENOENT;
+ goto out;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ ether_addr_copy(cmd.mac_addr, request->mac_addr);
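+ /* editor's note: the conversion below assumes the FW takes a
+ * 0-based channel index while hw_value is 1-based
+ */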
+ cmd.channel = bss->channel->hw_value - 1;
+ cmd.aoa_meas_type = request->type;
+
+ rc = wmi_send(wil, WMI_AOA_MEAS_CMDID, &cmd, sizeof(cmd));
+ if (rc)
+ goto out_bss;
+
+ ether_addr_copy(wil->ftm.aoa_peer_mac_addr, request->mac_addr);
+ mod_timer(&wil->ftm.aoa_timer,
+ jiffies + msecs_to_jiffies(WIL_AOA_MEASUREMENT_TIMEOUT));
+ wil->ftm.aoa_started = 1;
+out_bss:
+ cfg80211_put_bss(wil_to_wiphy(wil), bss);
+out:
+ mutex_unlock(&wil->ftm.lock);
+ return rc;
+}
+
+void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil,
+ struct wil_aoa_meas_result *result)
+{
+ struct sk_buff *vendor_event = NULL;
+
+ mutex_lock(&wil->ftm.lock);
+
+ if (!wil->ftm.aoa_started) {
+ wil_info(wil, "AOA not started, not sending result\n");
+ goto out;
+ }
+
+ wil_dbg_misc(wil, "sending AOA measurement result\n");
+
+ vendor_event = cfg80211_vendor_event_alloc(
+ wil_to_wiphy(wil),
+ wil->wdev,
+ result->length + WIL_FTM_NL_EXTRA_ALLOC,
+ QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX,
+ GFP_KERNEL);
+ if (!vendor_event) {
+ wil_err(wil, "fail to allocate measurement result\n");
+ goto out;
+ }
+
+ if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_MAC_ADDR,
+ ETH_ALEN, result->mac_addr) ||
+ nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_AOA_TYPE,
+ result->type) ||
+ nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS,
+ result->status) ||
+ nla_put_u32(vendor_event,
+ QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK,
+ result->antenna_array_mask)) {
+ wil_err(wil, "failed to fill vendor event\n");
+ goto out;
+ }
+
+ if (result->length > 0 &&
+ nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT,
+ result->length, result->data)) {
+ wil_err(wil, "failed to fill vendor event with AOA data\n");
+ goto out;
+ }
+
+ cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+
+ del_timer_sync(&wil->ftm.aoa_timer);
+ wil->ftm.aoa_started = 0;
+out:
+ mutex_unlock(&wil->ftm.lock);
+}
+
+void wil_ftm_evt_session_ended(struct wil6210_priv *wil,
+ struct wmi_tof_session_end_event *evt)
+{
+ u32 status;
+
+ switch (evt->status) {
+ case WMI_TOF_SESSION_END_NO_ERROR:
+ status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK;
+ break;
+ case WMI_TOF_SESSION_END_PARAMS_ERROR:
+ status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID;
+ break;
+ case WMI_TOF_SESSION_END_FAIL:
+ status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED;
+ break;
+ case WMI_TOF_SESSION_END_ABORTED:
+ status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED;
+ break;
+ default:
+ status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED;
+ break;
+ }
+
+ wil_ftm_cfg80211_session_ended(wil, status);
+}
+
+void wil_ftm_evt_per_dest_res(struct wil6210_priv *wil,
+ struct wmi_tof_ftm_per_dest_res_event *evt)
+{
+ u32 i, index;
+ __le64 tmp = 0;
+ u8 n_meas;
+
+ mutex_lock(&wil->ftm.lock);
+
+ if (!wil->ftm.session_started || !wil->ftm.ftm_res) {
+ wil_dbg_misc(wil, "Session not running, ignoring res event\n");
+ goto out;
+ }
+ if (wil->ftm.has_ftm_res &&
+ !ether_addr_equal(evt->dst_mac, wil->ftm.ftm_res->mac_addr)) {
+ wil_dbg_misc(wil,
+ "Results for previous peer not properly terminated\n");
+ wil_ftm_send_peer_res(wil);
+ }
+
+ if (!wil->ftm.has_ftm_res) {
+ ether_addr_copy(wil->ftm.ftm_res->mac_addr, evt->dst_mac);
+ wil->ftm.has_ftm_res = 1;
+ }
+
+ n_meas = evt->actual_ftm_per_burst;
+ switch (evt->status) {
+ case WMI_PER_DEST_RES_NO_ERROR:
+ wil->ftm.ftm_res->status =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK;
+ break;
+ case WMI_PER_DEST_RES_TX_RX_FAIL:
+ /* FW reports corrupted results here, discard. */
+ n_meas = 0;
+ wil->ftm.ftm_res->status =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK;
+ break;
+ case WMI_PER_DEST_RES_PARAM_DONT_MATCH:
+ wil->ftm.ftm_res->status =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID;
+ break;
+ default:
+ wil_err(wil, "unexpected status %d\n", evt->status);
+ wil->ftm.ftm_res->status =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID;
+ break;
+ }
+
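+ /* the FW timestamps are little-endian byte arrays that may be
+ * shorter than 8 bytes; stage each one through a zeroed __le64
+ * before converting to CPU byte order
+ */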
+ for (i = 0; i < n_meas; i++) {
+ index = wil->ftm.ftm_res->n_meas;
+ if (index >= wil->ftm.max_ftm_meas) {
+ wil_dbg_misc(wil, "Too many measurements, some lost\n");
+ break;
+ }
+ memcpy(&tmp, evt->responder_ftm_res[i].t1,
+ sizeof(evt->responder_ftm_res[i].t1));
+ wil->ftm.ftm_res->meas[index].t1 = le64_to_cpu(tmp);
+ memcpy(&tmp, evt->responder_ftm_res[i].t2,
+ sizeof(evt->responder_ftm_res[i].t2));
+ wil->ftm.ftm_res->meas[index].t2 = le64_to_cpu(tmp);
+ memcpy(&tmp, evt->responder_ftm_res[i].t3,
+ sizeof(evt->responder_ftm_res[i].t3));
+ wil->ftm.ftm_res->meas[index].t3 = le64_to_cpu(tmp);
+ memcpy(&tmp, evt->responder_ftm_res[i].t4,
+ sizeof(evt->responder_ftm_res[i].t4));
+ wil->ftm.ftm_res->meas[index].t4 = le64_to_cpu(tmp);
+ wil->ftm.ftm_res->n_meas++;
+ }
+
+ if (evt->flags & WMI_PER_DEST_RES_BURST_REPORT_END)
+ wil_ftm_send_peer_res(wil);
+out:
+ mutex_unlock(&wil->ftm.lock);
+}
+
+void wil_aoa_evt_meas(struct wil6210_priv *wil,
+ struct wmi_aoa_meas_event *evt,
+ int len)
+{
+ int data_len = len - offsetof(struct wmi_aoa_meas_event, meas_data);
+ struct wil_aoa_meas_result *res;
+
+ data_len = min_t(int, le16_to_cpu(evt->length), data_len);
+
+ res = kmalloc(sizeof(*res) + data_len, GFP_KERNEL);
+ if (!res)
+ return;
+
+ ether_addr_copy(res->mac_addr, evt->mac_addr);
+ res->type = evt->aoa_meas_type;
+ res->antenna_array_mask = le32_to_cpu(evt->meas_rf_mask);
+ res->status = evt->meas_status;
+ res->length = data_len;
+ memcpy(res->data, evt->meas_data, data_len);
+
+ wil_dbg_misc(wil, "AOA result status %d type %d mask %d length %d\n",
+ res->status, res->type,
+ res->antenna_array_mask, res->length);
+
+ wil_aoa_cfg80211_meas_result(wil, res);
+ kfree(res);
+}
+
+int wil_ftm_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct sk_buff *skb;
+ struct nlattr *attr;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ /* we should get the capabilities from the FW. For now,
+ * report dummy capabilities for one-shot measurement
+ */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 128);
+ if (!skb)
+ return -ENOMEM;
+ attr = nla_nest_start(skb, QCA_WLAN_VENDOR_ATTR_LOC_CAPA);
+ if (!attr ||
+ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER |
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR |
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP |
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA) ||
+ nla_put_u16(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS,
+ 1) ||
+ nla_put_u16(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS, 1) ||
+ nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP,
+ 0) ||
+ nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST,
+ 4) ||
+ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES,
+ BIT(QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE))) {
+ wil_err(wil, "fail to fill get_capabilities reply\n");
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ nla_nest_end(skb, attr);
+
+ return cfg80211_vendor_cmd_reply(skb);
+}
+
+int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil_ftm_session_request *request;
+ struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1];
+ struct nlattr *tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1];
+ struct nlattr *peer;
+ int rc, n_peers = 0, index = 0, tmp;
+ struct cfg80211_bss *bss;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ rc = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_LOC_MAX, data, data_len,
+ wil_nl80211_loc_policy);
+ if (rc) {
+ wil_err(wil, "Invalid ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS]) {
+ wil_err(wil, "no peers specified\n");
+ return -EINVAL;
+ }
+
+ if (!tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]) {
+ wil_err(wil, "session cookie not specified\n");
+ return -EINVAL;
+ }
+
+ nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS],
+ tmp)
+ n_peers++;
+
+ if (!n_peers) {
+ wil_err(wil, "empty peer list\n");
+ return -EINVAL;
+ }
+
+ /* for now only allow measurement for a single peer */
+ if (n_peers != 1) {
+ wil_err(wil, "only single peer allowed\n");
+ return -EINVAL;
+ }
+
+ request = kzalloc(sizeof(*request) +
+ n_peers * sizeof(struct wil_ftm_meas_peer_info),
+ GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->session_cookie =
+ nla_get_u64(tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]);
+ request->n_peers = n_peers;
+ nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS],
+ tmp) {
+ rc = nla_parse_nested(tb2, QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX,
+ peer, wil_nl80211_ftm_peer_policy);
+ if (rc) {
+ wil_err(wil, "Invalid peer ATTR\n");
+ goto out;
+ }
+ if (!tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR] ||
+ nla_len(tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR])
+ != ETH_ALEN) {
+ wil_err(wil, "Peer MAC address missing or invalid\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(request->peers[index].mac_addr,
+ nla_data(tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR]),
+ ETH_ALEN);
+ bss = cfg80211_get_bss(wiphy, NULL,
+ request->peers[index].mac_addr, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY);
+ if (!bss) {
+ wil_err(wil, "invalid bss at index %d\n", index);
+ rc = -ENOENT;
+ goto out;
+ }
+ request->peers[index].channel = bss->channel->hw_value - 1;
+ cfg80211_put_bss(wiphy, bss);
+ if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS])
+ request->peers[index].flags = nla_get_u32(
+ tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS]);
+ if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID])
+ request->peers[index].secure_token_id = nla_get_u8(
+ tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]);
+ rc = wil_ftm_parse_meas_params(
+ wil,
+ tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS],
+ &request->peers[index].params);
+ if (!rc)
+ rc = wil_ftm_validate_meas_params(
+ wil, &request->peers[index].params);
+ if (rc)
+ goto out;
+ index++;
+ }
+
+ rc = wil_ftm_cfg80211_start_session(wil, request);
+out:
+ kfree(request);
+ return rc;
+}
+
+int wil_ftm_abort_session(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "stub\n");
+ return -ENOTSUPP;
+}
+
+int wil_ftm_configure_responder(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "stub\n");
+ return -ENOTSUPP;
+}
+
+int wil_aoa_start_measurement(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil_aoa_meas_request request;
+ struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1];
+ int rc;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ wil_dbg_misc(wil, "AOA start measurement\n");
+
+ rc = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_LOC_MAX, data, data_len,
+ wil_nl80211_loc_policy);
+ if (rc) {
+ wil_err(wil, "Invalid ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_WLAN_VENDOR_ATTR_MAC_ADDR] ||
+ !tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]) {
+ wil_err(wil, "Must specify MAC address and type\n");
+ return -EINVAL;
+ }
+
+ memset(&request, 0, sizeof(request));
+ ether_addr_copy(request.mac_addr,
+ nla_data(tb[QCA_WLAN_VENDOR_ATTR_MAC_ADDR]));
+ request.type = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]);
+
+ rc = wil_aoa_cfg80211_start_measurement(wil, &request);
+ return rc;
+}
+
+int wil_aoa_abort_measurement(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "stub\n");
+ return -ENOTSUPP;
+}
+
+void wil_ftm_init(struct wil6210_priv *wil)
+{
+ mutex_init(&wil->ftm.lock);
+ setup_timer(&wil->ftm.aoa_timer, wil_aoa_timer_fn, (ulong)wil);
+ INIT_WORK(&wil->ftm.aoa_timeout_work, wil_aoa_measurement_timeout);
+}
+
+void wil_ftm_deinit(struct wil6210_priv *wil)
+{
+ del_timer_sync(&wil->ftm.aoa_timer);
+ cancel_work_sync(&wil->ftm.aoa_timeout_work);
+ kfree(wil->ftm.ftm_res);
+}
+
+void wil_ftm_stop_operations(struct wil6210_priv *wil)
+{
+ wil_ftm_cfg80211_session_ended(
+ wil, QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED);
+}
diff --git a/drivers/net/wireless/ath/wil6210/ftm.h b/drivers/net/wireless/ath/wil6210/ftm.h
new file mode 100644
index 000000000000..9721344579aa
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/ftm.h
@@ -0,0 +1,512 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL6210_FTM_H__
+#define __WIL6210_FTM_H__
+
+/**
+ * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID,
+ * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and
+ * qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in
+ * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that
+ */
+
+/**
+ * enum qca_wlan_vendor_attr_loc - attributes for FTM and AOA commands
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE: Session cookie, specified in
+ * %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION. It will be provided by driver
+ * events and can be used to identify events targeted for this session.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA: Nested attribute containing extra
+ * FTM/AOA capabilities, returned by %QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA.
+ * See %enum qca_wlan_vendor_attr_loc_capa.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS: array of nested attributes
+ * containing information about each peer in the measurement session
+ * request. See %enum qca_wlan_vendor_attr_ftm_peer_info for the
+ * supported attributes for each peer.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS: nested attribute containing
+ * measurement results for a peer. Reported by the
+ * %QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT event.
+ * See %enum qca_wlan_vendor_attr_ftm_peer_result for the list of
+ * supported attributes.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE: flag attribute for
+ * enabling or disabling responder functionality.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_LCI: used in the
+ * %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER command in order to
+ * specify the LCI report that will be sent by the responder during
+ * a measurement exchange. The format is defined in IEEE P802.11-REVmc/D5.0,
+ * 9.4.2.22.10
+ * @QCA_WLAN_VENDOR_ATTR_FTM_LCR: provided with the
+ * %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER command in order to
+ * specify the location civic report that will be sent by the responder during
+ * a measurement exchange. The format is defined in IEEE P802.11-REVmc/D5.0,
+ * 9.4.2.22.13
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS: session/measurement completion
+ * status code, reported in %QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE
+ * and %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT
+ * @QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN: initial dialog token used
+ * by responder (0 if not specified)
+ * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE: AOA measurement type. Requested in
+ * %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS and optionally in
+ * %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION if AOA measurements
+ * are needed as part of an FTM session.
+ * Reported by %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT.
+ * See %enum qca_wlan_vendor_attr_aoa_type.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK: bit mask indicating
+ * which antenna arrays were used in location measurement.
+ * Reported in %QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT and
+ * %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT
+ * @QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT: AOA measurement data.
+ * Its contents depends on the AOA type and antenna array mask:
+ * %QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: array of U16 values,
+ * phase of the strongest CIR path for each antenna in the measured
+ * array(s).
+ * %QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: array of 2 U16
+ * values, phase and amplitude of the strongest CIR path for each
+ * antenna in the measured array(s)
+ */
+enum qca_wlan_vendor_attr_loc {
+ /* we reuse these attributes */
+ QCA_WLAN_VENDOR_ATTR_MAC_ADDR = 6,
+ QCA_WLAN_VENDOR_ATTR_PAD = 13,
+ QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE = 14,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA = 15,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS = 16,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS = 17,
+ QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE = 18,
+ QCA_WLAN_VENDOR_ATTR_FTM_LCI = 19,
+ QCA_WLAN_VENDOR_ATTR_FTM_LCR = 20,
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS = 21,
+ QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN = 22,
+ QCA_WLAN_VENDOR_ATTR_AOA_TYPE = 23,
+ QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK = 24,
+ QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT = 25,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_LOC_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_LOC_MAX = QCA_WLAN_VENDOR_ATTR_LOC_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_loc_capa - indoor location capabilities
+ *
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS: various flags. See
+ * %enum qca_wlan_vendor_attr_loc_capa_flags
+ * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS: Maximum number
+ * of measurement sessions that can run concurrently.
+ * Default is one session (no session concurrency)
+ * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS: The total number of unique
+ * peers that are supported in running sessions. For example,
+ * if the value is 8 and maximum number of sessions is 2, you can
+ * have one session with 8 unique peers, or 2 sessions with 4 unique
+ * peers each, and so on.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP: Maximum number
+ * of bursts per peer, as an exponent (2^value). Default is 0,
+ * meaning no multi-burst support.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST: Maximum number
+ * of measurement exchanges allowed in a single burst
+ * @QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES: Supported AOA measurement
+ * types. A bit mask (unsigned 32 bit value), each bit corresponds
+ * to an AOA type as defined by %enum qca_wlan_vendor_attr_aoa_type.
+ */
+enum qca_wlan_vendor_attr_loc_capa {
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_INVALID,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST,
+ QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_MAX =
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_loc_capa_flags: Indoor location capability flags
+ *
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER: Set if driver
+ * can be configured as an FTM responder (for example, an AP that
+ * services FTM requests). %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER
+ * will be supported if set.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR: Set if driver
+ * can run FTM sessions. %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION
+ * will be supported if set.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP: Set if FTM responder
+ * supports immediate (ASAP) response.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA: Set if driver supports standalone
+ * AOA measurement using %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM: Set if driver supports
+ * requesting AOA measurements as part of an FTM session.
+ */
+enum qca_wlan_vendor_attr_loc_capa_flags {
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER = 1 << 0,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR = 1 << 1,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP = 1 << 2,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA = 1 << 3,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM = 1 << 4,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_info: information about
+ * a single peer in a measurement session.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR: The MAC address of the peer.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS: Various flags related
+ * to measurement. See %enum qca_wlan_vendor_attr_ftm_peer_meas_flags.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS: Nested attribute of
+ * FTM measurement parameters, as specified by IEEE P802.11-REVmc/D7.0,
+ * 9.4.2.167. See %enum qca_wlan_vendor_attr_ftm_meas_param for
+ * list of supported attributes.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID: Initial token ID for
+ * secure measurement
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD: Request AOA
+ * measurement every _value_ bursts. If 0 or not specified,
+ * AOA measurements will be disabled for this peer.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_info {
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_INVALID,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_meas_flags: Measurement request flags,
+ * per-peer
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP: If set, request
+ * immediate (ASAP) response from peer
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI: If set, request
+ * LCI report from peer. The LCI report includes the absolute
+ * location of the peer in "official" coordinates (similar to GPS).
+ * See IEEE P802.11-REVmc/D7.0, 11.24.6.7 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR: If set, request
+ * Location civic report from peer. The LCR includes the location
+ * of the peer in free-form format. See IEEE P802.11-REVmc/D7.0,
+ * 11.24.6.7 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE: If set,
+ * request a secure measurement.
+ * %QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID must also be provided.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_meas_flags {
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP = 1 << 0,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI = 1 << 1,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR = 1 << 2,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE = 1 << 3,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_meas_param: Measurement parameters
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST: Number of measurements
+ * to perform in a single burst.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP: Number of bursts to
+ * perform, specified as an exponent (2^value)
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION: Duration of burst
+ * instance, as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD: Time between bursts,
+ * as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167. Must
+ * be larger than %QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION
+ */
+enum qca_wlan_vendor_attr_ftm_meas_param {
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_INVALID,
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST,
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP,
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION,
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX =
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_result: Per-peer results
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR: MAC address of the reported
+ * peer
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS: Status of measurement
+ * request for this peer.
+ * See %enum qca_wlan_vendor_attr_ftm_peer_result_status
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS: Various flags related
+ * to measurement results for this peer.
+ * See %enum qca_wlan_vendor_attr_ftm_peer_result_flags
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS: Specified when the
+ * request failed and the peer asked that no additional request be sent
+ * for this number of seconds.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI: LCI report when received
+ * from peer. In the format specified by IEEE P802.11-REVmc/D7.0,
+ * 9.4.2.22.10
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR: Location civic report when
+ * received from peer, in the format specified by IEEE P802.11-REVmc/D7.0,
+ * 9.4.2.22.13
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS: Reported when the peer
+ * has overridden some measurement request parameters. See
+ * %enum qca_wlan_vendor_attr_ftm_meas_param.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS: AOA measurement
+ * for this peer. Same contents as %QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS: Array of measurement
+ * results. Each entry is a nested attribute defined
+ * by %enum qca_wlan_vendor_attr_ftm_meas.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_result {
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_INVALID,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAX =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_result_status
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK: Request was sent OK and
+ * results will be provided. The peer may have overridden some measurement
+ * parameters, in which case the overridden parameters will be reported
+ * by the %QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS attribute
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE: Peer is incapable
+ * of performing the measurement request. No more results will be sent
+ * for this peer in this session.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED: Peer reported that the
+ * request failed, and asked that no additional request be sent for the
+ * number of seconds specified by the
+ * %QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS attribute.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID: Request validation
+ * failed. Request was not sent over the air.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_result_status {
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_result_flags : Various flags
+ * for measurement result, per-peer
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE: If set,
+ * measurement completed for this peer. No more results will be reported
+ * for this peer in this session.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_result_flags {
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE = 1 << 0,
+};
+
+/**
+ * enum qca_vendor_attr_loc_session_status: Session completion status code
+ *
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK: Session completed
+ * successfully.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED: Session aborted
+ * by request
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID: Session request
+ * was invalid and was not started
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED: Session had an error
+ * and did not complete normally (for example, ran out of resources)
+ *
+ */
+enum qca_vendor_attr_loc_session_status {
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK,
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED,
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID,
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_meas: Single measurement data
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1: Time of departure (TOD) of the FTM packet as
+ * recorded by responder, in picoseconds.
+ * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2: Time of arrival (TOA) of the FTM packet at
+ * initiator, in picoseconds.
+ * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3: TOD of ACK packet as recorded by
+ * initiator, in picoseconds.
+ * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4: TOA of ACK packet at
+ * responder, in picoseconds.
+ * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI: RSSI (signal level) as recorded
+ * during this measurement exchange. Optional and will be provided if
+ * the hardware can measure it.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR: TOD error reported by
+ * responder. Not always provided.
+ * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR: TOA error reported by
+ * responder. Not always provided.
+ * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR: TOD error measured by
+ * initiator. Not always provided.
+ * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR: TOA error measured by
+ * initiator. Not always provided.
+ * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD: Dummy attribute for padding.
+ */
+enum qca_wlan_vendor_attr_ftm_meas {
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INVALID,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_MAX =
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_aoa_type: AOA measurement type
+ *
+ * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: Phase of the strongest
+ * CIR (channel impulse response) path for each antenna.
+ * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: Phase and amplitude
+ * of the strongest CIR path for each antenna.
+ */
+enum qca_wlan_vendor_attr_aoa_type {
+ QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE,
+ QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP,
+ QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX,
+};
+
+/* vendor event indices, used from both cfg80211.c and ftm.c */
+enum qca_nl80211_vendor_events_index {
+ QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX,
+ QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX,
+ QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX,
+};
+
+/* measurement parameters. Specified for each peer as part
+ * of the measurement request, or provided with the measurement
+ * results for a peer when the peer has overridden the parameters
+ */
+struct wil_ftm_meas_params {
+ u8 meas_per_burst;
+ u8 num_of_bursts_exp;
+ u8 burst_duration;
+ u16 burst_period;
+};
+
+/* measurement request for a single peer */
+struct wil_ftm_meas_peer_info {
+ u8 mac_addr[ETH_ALEN];
+ u8 channel;
+ u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_meas_flags */
+ struct wil_ftm_meas_params params;
+ u8 secure_token_id;
+};
+
+/* session request, passed to wil_ftm_cfg80211_start_session */
+struct wil_ftm_session_request {
+ u64 session_cookie;
+ u32 n_peers;
+ /* keep last, variable size according to n_peers */
+ struct wil_ftm_meas_peer_info peers[0];
+};
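+
+/* editor's note: a request for n peers is allocated in one shot thanks
+ * to the flexible array; sketch of the pattern wil_ftm_start_session uses:
+ *
+ *	req = kzalloc(sizeof(*req) + n * sizeof(req->peers[0]), GFP_KERNEL);
+ */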
+
+/* single measurement for a peer */
+struct wil_ftm_peer_meas {
+ u64 t1, t2, t3, t4;
+};
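+
+/* editor's sketch (illustrative helper, not part of the original patch):
+ * per IEEE P802.11-REVmc the four timestamps combine into a round-trip
+ * time of RTT = (t4 - t1) - (t3 - t2), in picoseconds
+ */
+static inline u64 wil_ftm_meas_rtt_ps(const struct wil_ftm_peer_meas *m)
+{
+	return (m->t4 - m->t1) - (m->t3 - m->t2);
+}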
+
+/* measurement results for a single peer */
+struct wil_ftm_peer_meas_res {
+ u8 mac_addr[ETH_ALEN];
+ u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_result_flags */
+ u8 status; /* enum qca_wlan_vendor_attr_ftm_peer_result_status */
+ u8 value_seconds;
+ bool has_params; /* true if params is valid */
+ struct wil_ftm_meas_params params; /* peer overridden params */
+ u8 *lci;
+ u8 lci_length;
+ u8 *lcr;
+ u8 lcr_length;
+ u32 n_meas;
+ /* keep last, variable size according to n_meas */
+ struct wil_ftm_peer_meas meas[0];
+};
+
+/* standalone AOA measurement request */
+struct wil_aoa_meas_request {
+ u8 mac_addr[ETH_ALEN];
+ u32 type;
+};
+
+/* AOA measurement result */
+struct wil_aoa_meas_result {
+ u8 mac_addr[ETH_ALEN];
+ u32 type;
+ u32 antenna_array_mask;
+ u32 status;
+ u32 length;
+ /* keep last, variable size according to length */
+ u8 data[0];
+};
+
+/* private data related to FTM. Part of the wil6210_priv structure */
+struct wil_ftm_priv {
+ struct mutex lock; /* protects the FTM data */
+ u8 session_started;
+ u64 session_cookie;
+ struct wil_ftm_peer_meas_res *ftm_res;
+ u8 has_ftm_res;
+ u32 max_ftm_meas;
+
+ /* standalone AOA measurement */
+ u8 aoa_started;
+ u8 aoa_peer_mac_addr[ETH_ALEN];
+ u32 aoa_type;
+ struct timer_list aoa_timer;
+ struct work_struct aoa_timeout_work;
+};
+
+int wil_ftm_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+int wil_ftm_abort_session(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+int wil_ftm_configure_responder(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+int wil_aoa_start_measurement(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+int wil_aoa_abort_measurement(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+
+#endif /* __WIL6210_FTM_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index 7a2c6c129ad5..2f2b910501ba 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -58,6 +58,15 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */
u8 data[0]; /* free-form data [data_size], see above */
} __packed;
+/* FW capabilities encoded inside a comment record */
+#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
+struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
+ /* identifies capabilities record */
+ __le32 magic;
+ /* capabilities (variable size), see enum wmi_fw_capability */
+ u8 capabilities[0];
+};
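+
+/* editor's note: a capabilities payload of, e.g., the hypothetical bytes
+ *	ba dc cd ab 01
+ * decodes as magic 0xabcddcba (little endian) followed by one capability
+ * byte with bit 0 set
+ */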
+
/* perform action
* data_size = @head.size - offsetof(struct wil_fw_record_action, data)
*/
@@ -93,6 +102,9 @@ struct wil_fw_record_verify { /* type == wil_fw_verify */
/* file header
* First record of every file
*/
+/* the FW version prefix in the comment */
+#define WIL_FW_VERSION_PREFIX "FW version: "
+#define WIL_FW_VERSION_PREFIX_LEN (sizeof(WIL_FW_VERSION_PREFIX) - 1)
struct wil_fw_record_file_header {
__le32 signature ; /* Wilocity signature */
__le32 reserved;
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index d30657ee7e83..8f40eb301924 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -118,6 +118,12 @@ static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size)
return (int)dlen;
}
+static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ return 0;
+}
+
static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
size_t size)
{
@@ -126,6 +132,27 @@ static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
return 0;
}
+static int
+fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_capabilities *rec = data;
+ size_t capa_size;
+
+ if (size < sizeof(*rec) ||
+ le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
+ return 0;
+
+ capa_size = size - offsetof(struct wil_fw_record_capabilities,
+ capabilities);
+ bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ memcpy(wil->fw_capabilities, rec->capabilities,
+ min(sizeof(wil->fw_capabilities), capa_size));
+ wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
+ rec->capabilities, capa_size, false);
+ return 0;
+}
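+
+/* editor's note: consumers test individual bits with the standard bitmap
+ * helpers, as the FTM code in this patch does:
+ *
+ *	if (test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ *		...
+ */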
+
static int fw_handle_data(struct wil6210_priv *wil, const void *data,
size_t size)
{
@@ -196,6 +223,13 @@ static int fw_handle_file_header(struct wil6210_priv *wil, const void *data,
wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment,
sizeof(d->comment), true);
+ if (!memcmp(d->comment, WIL_FW_VERSION_PREFIX,
+ WIL_FW_VERSION_PREFIX_LEN))
+ memcpy(wil->fw_version,
+ d->comment + WIL_FW_VERSION_PREFIX_LEN,
+ min(sizeof(d->comment) - WIL_FW_VERSION_PREFIX_LEN,
+ sizeof(wil->fw_version) - 1));
+
return 0;
}
@@ -383,42 +417,51 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
static const struct {
int type;
- int (*handler)(struct wil6210_priv *wil, const void *data, size_t size);
+ int (*load_handler)(struct wil6210_priv *wil, const void *data,
+ size_t size);
+ int (*parse_handler)(struct wil6210_priv *wil, const void *data,
+ size_t size);
} wil_fw_handlers[] = {
- {wil_fw_type_comment, fw_handle_comment},
- {wil_fw_type_data, fw_handle_data},
- {wil_fw_type_fill, fw_handle_fill},
+ {wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
+ {wil_fw_type_data, fw_handle_data, fw_ignore_section},
+ {wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
/* wil_fw_type_action */
/* wil_fw_type_verify */
- {wil_fw_type_file_header, fw_handle_file_header},
- {wil_fw_type_direct_write, fw_handle_direct_write},
- {wil_fw_type_gateway_data, fw_handle_gateway_data},
- {wil_fw_type_gateway_data4, fw_handle_gateway_data4},
+ {wil_fw_type_file_header, fw_handle_file_header,
+ fw_handle_file_header},
+ {wil_fw_type_direct_write, fw_handle_direct_write, fw_ignore_section},
+ {wil_fw_type_gateway_data, fw_handle_gateway_data, fw_ignore_section},
+ {wil_fw_type_gateway_data4, fw_handle_gateway_data4,
+ fw_ignore_section},
};
static int wil_fw_handle_record(struct wil6210_priv *wil, int type,
- const void *data, size_t size)
+ const void *data, size_t size, bool load)
{
int i;
- for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++) {
+ for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++)
if (wil_fw_handlers[i].type == type)
- return wil_fw_handlers[i].handler(wil, data, size);
- }
+ return load ?
+ wil_fw_handlers[i].load_handler(
+ wil, data, size) :
+ wil_fw_handlers[i].parse_handler(
+ wil, data, size);
wil_err_fw(wil, "unknown record type: %d\n", type);
return -EINVAL;
}
/**
- * wil_fw_load - load FW into device
- *
- * Load the FW and uCode code and data to the corresponding device
- * memory regions
+ * wil_fw_process - process the sections of the FW file
+ * If load is true, load the FW and uCode code and data to the
+ * corresponding device memory regions;
+ * otherwise, only parse and look for capabilities
*
* Return error code
*/
-static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
+static int wil_fw_process(struct wil6210_priv *wil, const void *data,
+ size_t size, bool load)
{
int rc = 0;
const struct wil_fw_record_head *hdr;
@@ -437,7 +480,7 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
return -EINVAL;
}
rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type),
- &hdr[1], hdr_sz);
+ &hdr[1], hdr_sz, load);
if (rc)
return rc;
}
@@ -456,13 +499,16 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
}
/**
- * wil_request_firmware - Request firmware and load to device
+ * wil_request_firmware - Request firmware
*
- * Request firmware image from the file and load it to device
+ * Request the firmware image from the file.
+ * If load is true, load the firmware to the device; otherwise,
+ * only parse the image and extract its capabilities
*
* Return error code
*/
-int wil_request_firmware(struct wil6210_priv *wil, const char *name)
+int wil_request_firmware(struct wil6210_priv *wil, const char *name,
+ bool load)
{
int rc, rc1;
const struct firmware *fw;
@@ -482,7 +528,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name)
rc = rc1;
goto out;
}
- rc = wil_fw_load(wil, d, rc1);
+ rc = wil_fw_process(wil, d, rc1, load);
if (rc < 0)
goto out;
}
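With the added bool, the same entry point now serves two phases: a
parse-only pass at probe time (see the wil_set_capabilities() hunk in
pcie_bus.c below) and the real load during reset. The resulting calling
pattern, as a sketch:

	/* probe: peek at the file to learn what the FW can do */
	wil_request_firmware(wil, WIL_FW_NAME, false);

	/* reset: actually program the device memory regions */
	rc = wil_request_firmware(wil, WIL_FW_NAME, true);
	if (rc)
		return rc;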
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index f10c47dcbde5..64046e0bd0a2 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -599,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
void wil6210_set_halp(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -607,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil)
void wil6210_clear_halp(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
BIT_DMA_EP_MISC_ICR_HALP);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 49bd9b45ce22..5285ebc8b9af 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -232,6 +232,9 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev;
+ if (unlikely(!ndev))
+ return;
+
might_sleep();
wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
reason_code, from_event ? "+" : "-");
@@ -515,6 +518,8 @@ int wil_priv_init(struct wil6210_priv *wil)
spin_lock_init(&wil->wmi_ev_lock);
init_waitqueue_head(&wil->wq);
+ wil_ftm_init(wil);
+
wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
if (!wil->wmi_wq)
return -EAGAIN;
@@ -562,6 +567,7 @@ void wil_priv_deinit(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_ftm_deinit(wil);
wil_set_recovery_state(wil, fw_recovery_idle);
del_timer_sync(&wil->scan_timer);
del_timer_sync(&wil->p2p.discovery_timer);
@@ -852,6 +858,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
bitmap_zero(wil->status, wil_status_last);
mutex_unlock(&wil->wmi_mutex);
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
@@ -859,6 +866,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
cfg80211_scan_done(wil->scan_request, true);
wil->scan_request = NULL;
}
+ mutex_unlock(&wil->p2p_wdev_mutex);
wil_mask_irq(wil);
@@ -887,11 +895,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
WIL_FW2_NAME);
wil_halt_cpu(wil);
+ memset(wil->fw_version, 0, sizeof(wil->fw_version));
/* Loading f/w from the file */
- rc = wil_request_firmware(wil, WIL_FW_NAME);
+ rc = wil_request_firmware(wil, WIL_FW_NAME, true);
if (rc)
return rc;
- rc = wil_request_firmware(wil, WIL_FW2_NAME);
+ rc = wil_request_firmware(wil, WIL_FW2_NAME, true);
if (rc)
return rc;
@@ -1034,10 +1043,10 @@ int wil_up(struct wil6210_priv *wil)
int __wil_down(struct wil6210_priv *wil)
{
- int rc;
-
WARN_ON(!mutex_is_locked(&wil->mutex));
+ set_bit(wil_status_resetting, wil->status);
+
if (wil->platform_ops.bus_request)
wil->platform_ops.bus_request(wil->platform_handle, 0);
@@ -1049,8 +1058,10 @@ int __wil_down(struct wil6210_priv *wil)
}
wil_enable_irq(wil);
- (void)wil_p2p_stop_discovery(wil);
+ wil_p2p_stop_radio_operations(wil);
+ wil_ftm_stop_operations(wil);
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
@@ -1058,18 +1069,7 @@ int __wil_down(struct wil6210_priv *wil)
cfg80211_scan_done(wil->scan_request, true);
wil->scan_request = NULL;
}
-
- if (test_bit(wil_status_fwconnected, wil->status) ||
- test_bit(wil_status_fwconnecting, wil->status)) {
-
- mutex_unlock(&wil->mutex);
- rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
- WMI_DISCONNECT_EVENTID, NULL, 0,
- WIL6210_DISCONNECT_TO_MS);
- mutex_lock(&wil->mutex);
- if (rc)
- wil_err(wil, "timeout waiting for disconnect\n");
- }
+ mutex_unlock(&wil->p2p_wdev_mutex);
wil_reset(wil, false);
@@ -1113,8 +1113,8 @@ void wil_halp_vote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
wil6210_set_halp(wil);
@@ -1124,15 +1124,15 @@ void wil_halp_vote(struct wil6210_priv *wil)
/* Mask HALP as done in case the interrupt is raised */
wil6210_mask_halp(wil);
} else {
- wil_dbg_misc(wil,
- "%s: HALP vote completed after %d ms\n",
- __func__,
- jiffies_to_msecs(to_jiffies - rc));
+ wil_dbg_irq(wil,
+ "%s: HALP vote completed after %d ms\n",
+ __func__,
+ jiffies_to_msecs(to_jiffies - rc));
}
}
- wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}
@@ -1143,16 +1143,16 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
if (--wil->halp.ref_cnt == 0) {
wil6210_clear_halp(wil);
- wil_dbg_misc(wil, "%s: HALP unvote\n", __func__);
+ wil_dbg_irq(wil, "%s: HALP unvote\n", __func__);
}
- wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index bd4c17ca2484..f4fca9d4eedf 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -185,13 +185,6 @@ void *wil_if_alloc(struct device *dev)
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
- netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
- WIL6210_NAPI_BUDGET);
- netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
- WIL6210_NAPI_BUDGET);
-
- netif_tx_stop_all_queues(ndev);
-
return wil;
out_priv:
@@ -222,25 +215,48 @@ void wil_if_free(struct wil6210_priv *wil)
int wil_if_add(struct wil6210_priv *wil)
{
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ struct wiphy *wiphy = wdev->wiphy;
struct net_device *ndev = wil_to_ndev(wil);
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "entered");
+
+ strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+
+ rc = wiphy_register(wiphy);
+ if (rc < 0) {
+ wil_err(wil, "failed to register wiphy, err %d\n", rc);
+ return rc;
+ }
+
+ netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
+ WIL6210_NAPI_BUDGET);
+ netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
+ WIL6210_NAPI_BUDGET);
+
+ netif_tx_stop_all_queues(ndev);
rc = register_netdev(ndev);
if (rc < 0) {
dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
- return rc;
+ goto out_wiphy;
}
return 0;
+
+out_wiphy:
+ wiphy_unregister(wdev->wiphy);
+ return rc;
}
void wil_if_remove(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil_to_wdev(wil);
wil_dbg_misc(wil, "%s()\n", __func__);
unregister_netdev(ndev);
+ wiphy_unregister(wdev->wiphy);
}
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index 213b8259638c..42148da111f1 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -259,3 +259,46 @@ void wil_p2p_search_expired(struct work_struct *work)
mutex_unlock(&wil->p2p_wdev_mutex);
}
}
+
+void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
+{
+ struct wil_p2p_info *p2p = &wil->p2p;
+
+ lockdep_assert_held(&wil->mutex);
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+
+ if (wil->radio_wdev != wil->p2p_wdev)
+ goto out;
+
+ if (!p2p->discovery_started) {
+ /* Regular scan on the p2p device */
+ if (wil->scan_request &&
+ wil->scan_request->wdev == wil->p2p_wdev) {
+ cfg80211_scan_done(wil->scan_request, 1);
+ wil->scan_request = NULL;
+ }
+ goto out;
+ }
+
+ /* Search or listen on p2p device */
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ wil_p2p_stop_discovery(wil);
+ mutex_lock(&wil->p2p_wdev_mutex);
+
+ if (wil->scan_request) {
+ /* search */
+ cfg80211_scan_done(wil->scan_request, 1);
+ wil->scan_request = NULL;
+ } else {
+ /* listen */
+ cfg80211_remain_on_channel_expired(wil->radio_wdev,
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ }
+
+out:
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 7b5c4222bc33..44746ca0d2e6 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include "wil6210.h"
+#include <linux/rtnetlink.h>
static bool use_msi = true;
module_param(use_msi, bool, S_IRUGO);
@@ -38,6 +39,7 @@ void wil_set_capabilities(struct wil6210_priv *wil)
u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
bitmap_zero(wil->hw_capabilities, hw_capability_last);
+ bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
switch (rev_id) {
case JTAG_DEV_ID_SPARROW_B0:
@@ -51,6 +53,9 @@ void wil_set_capabilities(struct wil6210_priv *wil)
}
wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+
+ /* extract FW capabilities from file without loading the FW */
+ wil_request_firmware(wil, WIL_FW_NAME, false);
}
void wil_disable_irq(struct wil6210_priv *wil)
@@ -293,6 +298,9 @@ static void wil_pcie_remove(struct pci_dev *pdev)
#endif /* CONFIG_PM */
wil6210_debugfs_remove(wil);
+ rtnl_lock();
+ wil_p2p_wdev_free(wil);
+ rtnl_unlock();
wil_if_remove(wil);
wil_if_pcie_disable(wil);
pci_iounmap(pdev, csr);
@@ -300,7 +308,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
if (wil->platform_ops.uninit)
wil->platform_ops.uninit(wil->platform_handle);
- wil_p2p_wdev_free(wil);
wil_if_free(wil);
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index f2f6a404d3d1..4c38520d4dd2 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -873,9 +873,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
rc = -EINVAL;
goto out_free;
}
- vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+ spin_lock_bh(&txdata->lock);
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
if (txdata->dot1x_open && (agg_wsize >= 0))
wil_addba_tx_request(wil, id, agg_wsize);
@@ -950,9 +953,11 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
rc = -EINVAL;
goto out_free;
}
- vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+ spin_lock_bh(&txdata->lock);
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
return 0;
out_free:
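Publishing hwtail and txdata->enabled inside txdata->lock closes a race
with the datapath, which tests enabled under the same lock before
touching the vring. The disable path presumably mirrors it, roughly:

	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;	/* datapath now sees a consistent vring */
	spin_unlock_bh(&txdata->lock);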
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 16e8ba570011..a19dba5b9e5f 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -17,6 +17,7 @@
#ifndef __WIL6210_H__
#define __WIL6210_H__
+#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/cfg80211.h>
@@ -24,6 +25,7 @@
#include <linux/types.h>
#include "wmi.h"
#include "wil_platform.h"
+#include "ftm.h"
extern bool no_fw_recovery;
extern unsigned int mtu_max;
@@ -579,10 +581,11 @@ struct wil6210_priv {
struct wireless_dev *wdev;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
- u32 fw_version;
+ u8 fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
const char *hw_name;
DECLARE_BITMAP(hw_capabilities, hw_capability_last);
+ DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
u8 n_mids; /* number of additional MIDs as reported by FW */
u32 recovery_count; /* num of FW recovery attempts in a short time */
u32 recovery_state; /* FW recovery state machine */
@@ -660,12 +663,14 @@ struct wil6210_priv {
/* P2P_DEVICE vif */
struct wireless_dev *p2p_wdev;
- struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
+ struct mutex p2p_wdev_mutex; /* protect @p2p_wdev and @scan_request */
struct wireless_dev *radio_wdev;
/* High Access Latency Policy voting */
struct wil_halp halp;
+ struct wil_ftm_priv ftm;
+
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
struct notifier_block pm_notify;
@@ -844,6 +849,7 @@ u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
void wil_p2p_listen_expired(struct work_struct *work);
void wil_p2p_search_expired(struct work_struct *work);
+void wil_p2p_stop_radio_operations(struct wil6210_priv *wil);
/* WMI for P2P */
int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
@@ -869,6 +875,8 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
u8 chan, u8 hidden_ssid, u8 is_go);
int wmi_pcp_stop(struct wil6210_priv *wil);
int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
+int wmi_aoa_meas(struct wil6210_priv *wil, const void *mac_addr, u8 chan,
+ u8 type);
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event);
void wil_probe_client_flush(struct wil6210_priv *wil);
@@ -897,7 +905,8 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
-int wil_request_firmware(struct wil6210_priv *wil, const char *name);
+int wil_request_firmware(struct wil6210_priv *wil, const char *name,
+ bool load);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
@@ -911,4 +920,18 @@ void wil_halp_unvote(struct wil6210_priv *wil);
void wil6210_set_halp(struct wil6210_priv *wil);
void wil6210_clear_halp(struct wil6210_priv *wil);
+void wil_ftm_init(struct wil6210_priv *wil);
+void wil_ftm_deinit(struct wil6210_priv *wil);
+void wil_ftm_stop_operations(struct wil6210_priv *wil);
+void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil,
+ struct wil_aoa_meas_result *result);
+
+void wil_ftm_evt_session_ended(struct wil6210_priv *wil,
+ struct wmi_tof_session_end_event *evt);
+void wil_ftm_evt_per_dest_res(struct wil6210_priv *wil,
+ struct wmi_tof_ftm_per_dest_res_event *evt);
+void wil_aoa_evt_meas(struct wil6210_priv *wil,
+ struct wmi_aoa_meas_event *evt,
+ int len);
+
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index c9fef36977ca..daa7a33d12d8 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -22,6 +22,7 @@
#include "txrx.h"
#include "wmi.h"
#include "trace.h"
+#include "ftm.h"
static uint max_assoc_sta = WIL6210_MAX_CID;
module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR);
@@ -312,14 +313,14 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
struct wireless_dev *wdev = wil->wdev;
struct wmi_ready_event *evt = d;
- wil->fw_version = le32_to_cpu(evt->sw_version);
wil->n_mids = evt->numof_additional_mids;
- wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+ wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n",
+ wil->fw_version, le32_to_cpu(evt->sw_version),
evt->mac, wil->n_mids);
/* ignore MAC address, we already have it from the boot loader */
- snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
- "%d", wil->fw_version);
+ strlcpy(wdev->wiphy->fw_version, wil->fw_version,
+ sizeof(wdev->wiphy->fw_version));
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_fwready, wil->status);
@@ -424,6 +425,7 @@ static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
void *d, int len)
{
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
bool aborted = (data->status != WMI_SCAN_SUCCESS);
@@ -433,14 +435,13 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
wil->scan_request, aborted);
del_timer_sync(&wil->scan_timer);
- mutex_lock(&wil->p2p_wdev_mutex);
cfg80211_scan_done(wil->scan_request, aborted);
wil->radio_wdev = wil->wdev;
- mutex_unlock(&wil->p2p_wdev_mutex);
wil->scan_request = NULL;
} else {
wil_err(wil, "SCAN_COMPLETE while not scanning\n");
}
+ mutex_unlock(&wil->p2p_wdev_mutex);
}
static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
@@ -772,6 +773,30 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
spin_unlock_bh(&sta->tid_rx_lock);
}
+static void wmi_evt_aoa_meas(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_aoa_meas_event *evt = d;
+
+ wil_aoa_evt_meas(wil, evt, len);
+}
+
+static void wmi_evt_ftm_session_ended(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_tof_session_end_event *evt = d;
+
+ wil_ftm_evt_session_ended(wil, evt);
+}
+
+static void wmi_evt_per_dest_res(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_tof_ftm_per_dest_res_event *evt = d;
+
+ wil_ftm_evt_per_dest_res(wil, evt);
+}
+
/**
* Some events are ignored on purpose and need not be interpreted as
* "unhandled events"
@@ -799,6 +824,13 @@ static const struct {
{WMI_DELBA_EVENTID, wmi_evt_delba},
{WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
+ {WMI_AOA_MEAS_EVENTID, wmi_evt_aoa_meas},
+ {WMI_TOF_SESSION_END_EVENTID, wmi_evt_ftm_session_ended},
+ {WMI_TOF_GET_CAPABILITIES_EVENTID, wmi_evt_ignore},
+ {WMI_TOF_SET_LCR_EVENTID, wmi_evt_ignore},
+ {WMI_TOF_SET_LCI_EVENTID, wmi_evt_ignore},
+ {WMI_TOF_FTM_PER_DEST_RES_EVENTID, wmi_evt_per_dest_res},
+ {WMI_TOF_CHANNEL_INFO_EVENTID, wmi_evt_ignore},
};
/*
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 685fe0ddea26..f430e8a80603 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -46,6 +46,16 @@ enum wmi_mid {
MID_BROADCAST = 0xFF,
};
+/* FW capability IDs
+ * Each ID maps to a bit in a 32-bit bitmask value provided by the FW to
+ * the host
+ */
+enum wmi_fw_capability {
+ WMI_FW_CAPABILITY_FTM = 0,
+ WMI_FW_CAPABILITY_PS_CONFIG = 1,
+ WMI_FW_CAPABILITY_MAX,
+};
+
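Consumers are expected to gate optional features on these bits rather
than on raw FW version numbers; a minimal sketch, assuming the
fw_capabilities bitmap has been populated:

	bool ftm_supported = test_bit(WMI_FW_CAPABILITY_FTM,
				      wil->fw_capabilities);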
/* WMI_CMD_HDR */
struct wmi_cmd_hdr {
u8 mid;
@@ -120,6 +130,8 @@ enum wmi_command_id {
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
+ WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
+ WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
WMI_MAINTAIN_PAUSE_CMDID = 0x850,
WMI_MAINTAIN_RESUME_CMDID = 0x851,
@@ -134,10 +146,15 @@ enum wmi_command_id {
WMI_BF_CTRL_CMDID = 0x862,
WMI_NOTIFY_REQ_CMDID = 0x863,
WMI_GET_STATUS_CMDID = 0x864,
+ WMI_GET_RF_STATUS_CMDID = 0x866,
+ WMI_GET_BASEBAND_TYPE_CMDID = 0x867,
WMI_UNIT_TEST_CMDID = 0x900,
WMI_HICCUP_CMDID = 0x901,
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
+ /* Power management */
+ WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
+ WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
WMI_P2P_CFG_CMDID = 0x910,
WMI_PORT_ALLOCATE_CMDID = 0x911,
@@ -150,6 +167,26 @@ enum wmi_command_id {
WMI_PCP_START_CMDID = 0x918,
WMI_PCP_STOP_CMDID = 0x919,
WMI_GET_PCP_FACTOR_CMDID = 0x91B,
+ /* Power Save Configuration Commands */
+ WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_CMDID = 0x91D,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_READ_CMDID = 0x91E,
+ /* Per MAC Power Save Configuration commands
+ * Not supported yet
+ */
+ WMI_PS_MID_CFG_CMDID = 0x91F,
+ /* Not supported yet */
+ WMI_PS_MID_CFG_READ_CMDID = 0x920,
+ WMI_RS_CFG_CMDID = 0x921,
+ WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
+ WMI_AOA_MEAS_CMDID = 0x923,
+ WMI_TOF_SESSION_START_CMDID = 0x991,
+ WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
+ WMI_TOF_SET_LCR_CMDID = 0x993,
+ WMI_TOF_SET_LCI_CMDID = 0x994,
+ WMI_TOF_CHANNEL_INFO_CMDID = 0x995,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
@@ -291,9 +328,8 @@ enum wmi_scan_type {
/* WMI_START_SCAN_CMDID */
struct wmi_start_scan_cmd {
u8 direct_scan_mac_addr[WMI_MAC_LEN];
- /* DMG Beacon frame is transmitted during active scanning */
+ /* run scan with discovery beacon. Relevant for ACTIVE scan only. */
u8 discovery_mode;
- /* reserved */
u8 reserved;
/* Max duration in the home channel(ms) */
__le32 dwell_time;
@@ -453,6 +489,12 @@ struct wmi_port_delete_cmd {
u8 reserved[3];
} __packed;
+/* WMI_TRAFFIC_DEFERRAL_CMDID */
+struct wmi_traffic_deferral_cmd {
+ /* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
+ u8 wakeup_trigger;
+} __packed;
+
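A sketch of issuing the command through the driver's generic wmi_send()
helper; the chosen wakeup policy is illustrative only:

	struct wmi_traffic_deferral_cmd cmd = {
		/* wake on both unicast and broadcast traffic */
		.wakeup_trigger = BIT(0) | BIT(1),
	};
	int rc = wmi_send(wil, WMI_TRAFFIC_DEFERRAL_CMDID,
			  &cmd, sizeof(cmd));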
/* WMI_P2P_CFG_CMDID */
enum wmi_discovery_mode {
WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00,
@@ -818,85 +860,193 @@ struct wmi_pmc_cmd {
__le64 mem_base;
} __packed;
+enum wmi_aoa_meas_type {
+ WMI_AOA_PHASE_MEAS = 0x00,
+ WMI_AOA_PHASE_AMP_MEAS = 0x01,
+};
+
+/* WMI_AOA_MEAS_CMDID */
+struct wmi_aoa_meas_cmd {
+ u8 mac_addr[WMI_MAC_LEN];
+ /* channels IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
+ u8 channel;
+ /* enum wmi_aoa_meas_type */
+ u8 aoa_meas_type;
+ __le32 meas_rf_mask;
+} __packed;
+
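The wmi_aoa_meas() helper declared in the wil6210.h hunk below
presumably wraps this command; an illustrative call (channel and
measurement type chosen arbitrarily):

	rc = wmi_aoa_meas(wil, peer_mac, 1 /* 60480 MHz */,
			  WMI_AOA_PHASE_MEAS);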
+enum wmi_tof_burst_duration {
+ WMI_TOF_BURST_DURATION_250_USEC = 2,
+ WMI_TOF_BURST_DURATION_500_USEC = 3,
+ WMI_TOF_BURST_DURATION_1_MSEC = 4,
+ WMI_TOF_BURST_DURATION_2_MSEC = 5,
+ WMI_TOF_BURST_DURATION_4_MSEC = 6,
+ WMI_TOF_BURST_DURATION_8_MSEC = 7,
+ WMI_TOF_BURST_DURATION_16_MSEC = 8,
+ WMI_TOF_BURST_DURATION_32_MSEC = 9,
+ WMI_TOF_BURST_DURATION_64_MSEC = 10,
+ WMI_TOF_BURST_DURATION_128_MSEC = 11,
+ WMI_TOF_BURST_DURATION_NO_PREFERENCES = 15,
+};
+
+enum wmi_tof_session_start_flags {
+ WMI_TOF_SESSION_START_FLAG_SECURED = 0x1,
+ WMI_TOF_SESSION_START_FLAG_ASAP = 0x2,
+ WMI_TOF_SESSION_START_FLAG_LCI_REQ = 0x4,
+ WMI_TOF_SESSION_START_FLAG_LCR_REQ = 0x8,
+};
+
+/* WMI_TOF_SESSION_START_CMDID */
+struct wmi_ftm_dest_info {
+ u8 channel;
+ /* wmi_tof_session_start_flags_e */
+ u8 flags;
+ u8 initial_token;
+ u8 num_of_ftm_per_burst;
+ u8 num_of_bursts_exp;
+ /* wmi_tof_burst_duration_e */
+ u8 burst_duration;
+ /* Burst period indicates the interval between two consecutive burst
+ * instances, in units of 100 ms
+ */
+ __le16 burst_period;
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 reserved;
+} __packed;
+
+/* WMI_TOF_SESSION_START_CMDID */
+struct wmi_tof_session_start_cmd {
+ __le32 session_id;
+ u8 num_of_aoa_measures;
+ u8 aoa_type;
+ __le16 num_of_dest;
+ u8 reserved[4];
+ struct wmi_ftm_dest_info ftm_dest_info[0];
+} __packed;
+
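Since ftm_dest_info[] is a zero-length tail, the command must be
allocated with room for the per-destination entries; a minimal sketch:

	size_t len = sizeof(struct wmi_tof_session_start_cmd) +
		     n_dest * sizeof(struct wmi_ftm_dest_info);
	struct wmi_tof_session_start_cmd *cmd = kzalloc(len, GFP_KERNEL);

	if (!cmd)
		return -ENOMEM;
	cmd->num_of_dest = cpu_to_le16(n_dest);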
+enum wmi_tof_channel_info_report_type {
+ WMI_TOF_CHANNEL_INFO_TYPE_CIR = 0x1,
+ WMI_TOF_CHANNEL_INFO_TYPE_RSSI = 0x2,
+ WMI_TOF_CHANNEL_INFO_TYPE_SNR = 0x4,
+ WMI_TOF_CHANNEL_INFO_TYPE_DEBUG_DATA = 0x8,
+ WMI_TOF_CHANNEL_INFO_TYPE_VENDOR_SPECIFIC = 0x10,
+};
+
+/* WMI_TOF_CHANNEL_INFO_CMDID */
+struct wmi_tof_channel_info_cmd {
+ /* wmi_tof_channel_info_report_type_e */
+ __le32 channel_info_report_request;
+} __packed;
+
/* WMI Events
* List of Events (target to host)
*/
enum wmi_event_id {
- WMI_READY_EVENTID = 0x1001,
- WMI_CONNECT_EVENTID = 0x1002,
- WMI_DISCONNECT_EVENTID = 0x1003,
- WMI_SCAN_COMPLETE_EVENTID = 0x100A,
- WMI_REPORT_STATISTICS_EVENTID = 0x100B,
- WMI_RD_MEM_RSP_EVENTID = 0x1800,
- WMI_FW_READY_EVENTID = 0x1801,
- WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
- WMI_ECHO_RSP_EVENTID = 0x1803,
- WMI_FS_TUNE_DONE_EVENTID = 0x180A,
- WMI_CORR_MEASURE_EVENTID = 0x180B,
- WMI_READ_RSSI_EVENTID = 0x180C,
- WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
- WMI_DC_CALIB_DONE_EVENTID = 0x180F,
- WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
- WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
- WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
- WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
- WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
- WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
- WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
- WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
- WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
- WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
- WMI_VRING_CFG_DONE_EVENTID = 0x1821,
- WMI_BA_STATUS_EVENTID = 0x1823,
- WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
- WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
- WMI_DELBA_EVENTID = 0x1826,
- WMI_GET_SSID_EVENTID = 0x1828,
- WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
- WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
- WMI_READ_MAC_RXQ_EVENTID = 0x1830,
- WMI_READ_MAC_TXQ_EVENTID = 0x1831,
- WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
- WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
- WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
- WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
- WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
- WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
- WMI_RS_MGMT_DONE_EVENTID = 0x1852,
- WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
- WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
- WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
- WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
- WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
- WMI_OTP_READ_RESULT_EVENTID = 0x1856,
- WMI_LED_CFG_DONE_EVENTID = 0x1858,
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID = 0x1002,
+ WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100A,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100B,
+ WMI_RD_MEM_RSP_EVENTID = 0x1800,
+ WMI_FW_READY_EVENTID = 0x1801,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
+ WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_FS_TUNE_DONE_EVENTID = 0x180A,
+ WMI_CORR_MEASURE_EVENTID = 0x180B,
+ WMI_READ_RSSI_EVENTID = 0x180C,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180F,
+ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
+ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
+ WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
+ WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
+ WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
+ WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
+ WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
+ WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
+ WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
+ WMI_VRING_CFG_DONE_EVENTID = 0x1821,
+ WMI_BA_STATUS_EVENTID = 0x1823,
+ WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
+ WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_DELBA_EVENTID = 0x1826,
+ WMI_GET_SSID_EVENTID = 0x1828,
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
+ WMI_READ_MAC_RXQ_EVENTID = 0x1830,
+ WMI_READ_MAC_TXQ_EVENTID = 0x1831,
+ WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
+ WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
+ WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
+ WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
+ WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_RS_MGMT_DONE_EVENTID = 0x1852,
+ WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
+ WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
+ WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
+ WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+ WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
+ WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842,
+ WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843,
+ WMI_OTP_READ_RESULT_EVENTID = 0x1856,
+ WMI_LED_CFG_DONE_EVENTID = 0x1858,
/* Performance monitoring events */
- WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
- WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
- WMI_BF_CTRL_DONE_EVENTID = 0x1862,
- WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
- WMI_GET_STATUS_DONE_EVENTID = 0x1864,
- WMI_VRING_EN_EVENTID = 0x1865,
- WMI_UNIT_TEST_EVENTID = 0x1900,
- WMI_FLASH_READ_DONE_EVENTID = 0x1902,
- WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+ WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
+ WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
+ WMI_BF_CTRL_DONE_EVENTID = 0x1862,
+ WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
+ WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+ WMI_VRING_EN_EVENTID = 0x1865,
+ WMI_GET_RF_STATUS_EVENTID = 0x1866,
+ WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
+ WMI_UNIT_TEST_EVENTID = 0x1900,
+ WMI_FLASH_READ_DONE_EVENTID = 0x1902,
+ WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+ /* Power management */
+ WMI_TRAFFIC_DEFERRAL_EVENTID = 0x1904,
+ WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
/* P2P */
- WMI_P2P_CFG_DONE_EVENTID = 0x1910,
- WMI_PORT_ALLOCATED_EVENTID = 0x1911,
- WMI_PORT_DELETED_EVENTID = 0x1912,
- WMI_LISTEN_STARTED_EVENTID = 0x1914,
- WMI_SEARCH_STARTED_EVENTID = 0x1915,
- WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
- WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
- WMI_PCP_STARTED_EVENTID = 0x1918,
- WMI_PCP_STOPPED_EVENTID = 0x1919,
- WMI_PCP_FACTOR_EVENTID = 0x191A,
- WMI_SET_CHANNEL_EVENTID = 0x9000,
- WMI_ASSOC_REQ_EVENTID = 0x9001,
- WMI_EAPOL_RX_EVENTID = 0x9002,
- WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
- WMI_FW_VER_EVENTID = 0x9004,
- WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
+ WMI_P2P_CFG_DONE_EVENTID = 0x1910,
+ WMI_PORT_ALLOCATED_EVENTID = 0x1911,
+ WMI_PORT_DELETED_EVENTID = 0x1912,
+ WMI_LISTEN_STARTED_EVENTID = 0x1914,
+ WMI_SEARCH_STARTED_EVENTID = 0x1915,
+ WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
+ WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
+ WMI_PCP_STARTED_EVENTID = 0x1918,
+ WMI_PCP_STOPPED_EVENTID = 0x1919,
+ WMI_PCP_FACTOR_EVENTID = 0x191A,
+ /* Power Save Configuration Events */
+ WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_EVENTID = 0x191D,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_READ_EVENTID = 0x191E,
+ /* Not supported yet */
+ WMI_PS_MID_CFG_EVENTID = 0x191F,
+ /* Not supported yet */
+ WMI_PS_MID_CFG_READ_EVENTID = 0x1920,
+ WMI_RS_CFG_DONE_EVENTID = 0x1921,
+ WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
+ WMI_AOA_MEAS_EVENTID = 0x1923,
+ WMI_TOF_SESSION_END_EVENTID = 0x1991,
+ WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992,
+ WMI_TOF_SET_LCR_EVENTID = 0x1993,
+ WMI_TOF_SET_LCI_EVENTID = 0x1994,
+ WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995,
+ WMI_TOF_CHANNEL_INFO_EVENTID = 0x1996,
+ WMI_SET_CHANNEL_EVENTID = 0x9000,
+ WMI_ASSOC_REQ_EVENTID = 0x9001,
+ WMI_EAPOL_RX_EVENTID = 0x9002,
+ WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
+ WMI_FW_VER_EVENTID = 0x9004,
+ WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
};
/* Events data structures */
@@ -943,10 +1093,85 @@ struct wmi_get_status_done_event {
/* WMI_FW_VER_EVENTID */
struct wmi_fw_ver_event {
- u8 major;
- u8 minor;
- __le16 subminor;
- __le16 build;
+ /* FW image version */
+ __le32 fw_major;
+ __le32 fw_minor;
+ __le32 fw_subminor;
+ __le32 fw_build;
+ /* FW image build time stamp */
+ __le32 hour;
+ __le32 minute;
+ __le32 second;
+ __le32 day;
+ __le32 month;
+ __le32 year;
+ /* Boot Loader image version */
+ __le32 bl_major;
+ __le32 bl_minor;
+ __le32 bl_subminor;
+ __le32 bl_build;
+ /* The number of entries in the FW capabilities array */
+ u8 fw_capabilities_len;
+ u8 reserved[3];
+ /* FW capabilities info
+ * Must be the last member of the struct
+ */
+ __le32 fw_capabilities[0];
+} __packed;
+
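A consumer of the extended event would clamp the trailing capability
words against the received payload length, much like
fw_handle_capabilities() above; a hedged sketch, with len being the WMI
payload length passed to the event handler:

	size_t hdr = offsetof(struct wmi_fw_ver_event, fw_capabilities);
	/* number of 32-bit capability words actually present */
	u32 n = min_t(u32, evt->fw_capabilities_len,
		      (len - hdr) / sizeof(__le32));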
+/* WMI_GET_RF_STATUS_EVENTID */
+enum rf_type {
+ RF_UNKNOWN = 0x00,
+ RF_MARLON = 0x01,
+ RF_SPARROW = 0x02,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum board_file_rf_type {
+ BF_RF_MARLON = 0x00,
+ BF_RF_SPARROW = 0x01,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum rf_status {
+ RF_OK = 0x00,
+ RF_NO_COMM = 0x01,
+ RF_WRONG_BOARD_FILE = 0x02,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+struct wmi_get_rf_status_event {
+ /* enum rf_type */
+ __le32 rf_type;
+ /* attached RFs bit vector */
+ __le32 attached_rf_vector;
+ /* enabled RFs bit vector */
+ __le32 enabled_rf_vector;
+ /* enum rf_status, refers to enabled RFs */
+ u8 rf_status[32];
+ /* enum board file RF type */
+ __le32 board_file_rf_type;
+ /* board file platform type */
+ __le32 board_file_platform_type;
+ /* board file version */
+ __le32 board_file_version;
+ __le32 reserved[2];
+} __packed;
+
+/* WMI_GET_BASEBAND_TYPE_EVENTID */
+enum baseband_type {
+ BASEBAND_UNKNOWN = 0x00,
+ BASEBAND_SPARROW_M_A0 = 0x03,
+ BASEBAND_SPARROW_M_A1 = 0x04,
+ BASEBAND_SPARROW_M_B0 = 0x05,
+ BASEBAND_SPARROW_M_C0 = 0x06,
+ BASEBAND_SPARROW_M_D0 = 0x07,
+};
+
+/* WMI_GET_BASEBAND_TYPE_EVENTID */
+struct wmi_get_baseband_type_event {
+ /* enum baseband_type */
+ __le32 baseband_type;
} __packed;
/* WMI_MAC_ADDR_RESP_EVENTID */
@@ -1410,4 +1635,553 @@ struct wmi_led_cfg_done_event {
__le32 status;
} __packed;
+#define WMI_NUM_MCS (13)
+
+/* Rate search parameters configuration per connection */
+struct wmi_rs_cfg {
+ /* The maximal allowed PER for each MCS
+ * MCS will be considered as failed if PER during RS is higher
+ */
+ u8 per_threshold[WMI_NUM_MCS];
+ /* Number of MPDUs for each MCS
+ * this is the minimal statistic required to make an educated
+ * decision
+ */
+ u8 min_frame_cnt[WMI_NUM_MCS];
+ /* stop threshold [0-100] */
+ u8 stop_th;
+ /* MCS1 stop threshold [0-100] */
+ u8 mcs1_fail_th;
+ u8 max_back_failure_th;
+ /* Debug feature for disabling internal RS trigger (which is
+ * currently triggered by BF Done)
+ */
+ u8 dbg_disable_internal_trigger;
+ __le32 back_failure_mask;
+ __le32 mcs_en_vec;
+} __packed;
+
+/* WMI_RS_CFG_CMDID */
+struct wmi_rs_cfg_cmd {
+ /* connection id */
+ u8 cid;
+ /* enable or disable rate search */
+ u8 rs_enable;
+ /* rate search configuration */
+ struct wmi_rs_cfg rs_cfg;
+} __packed;
+
+/* WMI_RS_CFG_DONE_EVENTID */
+struct wmi_rs_cfg_done_event {
+ u8 cid;
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_CMDID */
+struct wmi_get_detailed_rs_res_cmd {
+ /* connection id */
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/* RS results status */
+enum wmi_rs_results_status {
+ WMI_RS_RES_VALID = 0x00,
+ WMI_RS_RES_INVALID = 0x01,
+};
+
+/* Rate search results */
+struct wmi_rs_results {
+ /* number of sent MPDUs */
+ u8 num_of_tx_pkt[WMI_NUM_MCS];
+ /* number of non-acked MPDUs */
+ u8 num_of_non_acked_pkt[WMI_NUM_MCS];
+ /* RS timestamp */
+ __le32 tsf;
+ /* RS selected MCS */
+ u8 mcs;
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_EVENTID */
+struct wmi_get_detailed_rs_res_event {
+ u8 cid;
+ /* enum wmi_rs_results_status */
+ u8 status;
+ /* detailed rs results */
+ struct wmi_rs_results rs_results;
+ u8 reserved[3];
+} __packed;
+
+/* broadcast connection ID */
+#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
+
+/* Types wmi_link_maintain_cfg presets for WMI_LINK_MAINTAIN_CFG_WRITE_CMD */
+enum wmi_link_maintain_cfg_type {
+ /* AP/PCP default normal (non-FST) configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP = 0x00,
+ /* AP/PCP default FST configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP = 0x01,
+ /* STA default normal (non-FST) configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA = 0x02,
+ /* STA default FST configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA = 0x03,
+ /* custom configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM = 0x04,
+ /* number of defined configuration types */
+ WMI_LINK_MAINTAIN_CFG_TYPES_NUM = 0x05,
+};
+
+/* Response status codes for WMI_LINK_MAINTAIN_CFG_WRITE/READ commands */
+enum wmi_link_maintain_cfg_response_status {
+ /* WMI_LINK_MAINTAIN_CFG_WRITE/READ command successfully accomplished
+ */
+ WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_OK = 0x00,
+ /* ERROR due to bad argument in WMI_LINK_MAINTAIN_CFG_WRITE/READ
+ * command request
+ */
+ WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_BAD_ARGUMENT = 0x01,
+};
+
+/* Link Loss and Keep Alive configuration */
+struct wmi_link_maintain_cfg {
+ /* link_loss_enable_detectors_vec */
+ __le32 link_loss_enable_detectors_vec;
+ /* detectors check period usec */
+ __le32 check_link_loss_period_usec;
+ /* max allowed tx ageing */
+ __le32 tx_ageing_threshold_usec;
+ /* keep alive period for high SNR */
+ __le32 keep_alive_period_usec_high_snr;
+ /* keep alive period for low SNR */
+ __le32 keep_alive_period_usec_low_snr;
+ /* lower snr limit for keep alive period update */
+ __le32 keep_alive_snr_threshold_low_db;
+ /* upper snr limit for keep alive period update */
+ __le32 keep_alive_snr_threshold_high_db;
+ /* num of successive bad bcons causing link-loss */
+ __le32 bad_beacons_num_threshold;
+ /* SNR limit for bad_beacons_detector */
+ __le32 bad_beacons_snr_threshold_db;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */
+struct wmi_link_maintain_cfg_write_cmd {
+ /* enum wmi_link_maintain_cfg_type_e - type of requested default
+ * configuration to be applied
+ */
+ __le32 cfg_type;
+ /* requested connection ID or WMI_LINK_MAINTAIN_CFG_CID_BROADCAST */
+ __le32 cid;
+ /* custom configuration settings to be applied (relevant only if
+ * cfg_type==WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM)
+ */
+ struct wmi_link_maintain_cfg lm_cfg;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_READ_CMDID */
+struct wmi_link_maintain_cfg_read_cmd {
+ /* connection ID which configuration settings are requested */
+ __le32 cid;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
+struct wmi_link_maintain_cfg_write_done_event {
+ /* requested connection ID */
+ __le32 cid;
+ /* wmi_link_maintain_cfg_response_status_e - write status */
+ __le32 status;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID */
+struct wmi_link_maintain_cfg_read_done_event {
+ /* requested connection ID */
+ __le32 cid;
+ /* wmi_link_maintain_cfg_response_status_e - read status */
+ __le32 status;
+ /* Retrieved configuration settings */
+ struct wmi_link_maintain_cfg lm_cfg;
+} __packed;
+
+enum wmi_traffic_deferral_status {
+ WMI_TRAFFIC_DEFERRAL_APPROVED = 0x0,
+ WMI_TRAFFIC_DEFERRAL_REJECTED = 0x1,
+};
+
+/* WMI_TRAFFIC_DEFERRAL_EVENTID */
+struct wmi_traffic_deferral_event {
+ /* enum wmi_traffic_deferral_status_e */
+ u8 status;
+} __packed;
+
+enum wmi_traffic_resume_status {
+ WMI_TRAFFIC_RESUME_SUCCESS = 0x0,
+ WMI_TRAFFIC_RESUME_FAILED = 0x1,
+};
+
+/* WMI_TRAFFIC_RESUME_EVENTID */
+struct wmi_traffic_resume_event {
+ /* enum wmi_traffic_resume_status_e */
+ u8 status;
+} __packed;
+
+/* Power Save command completion status codes */
+enum wmi_ps_cfg_cmd_status {
+ WMI_PS_CFG_CMD_STATUS_SUCCESS = 0x00,
+ WMI_PS_CFG_CMD_STATUS_BAD_PARAM = 0x01,
+ /* other error */
+ WMI_PS_CFG_CMD_STATUS_ERROR = 0x02,
+};
+
+/* Device Power Save Profiles */
+enum wmi_ps_profile_type {
+ WMI_PS_PROFILE_TYPE_DEFAULT = 0x00,
+ WMI_PS_PROFILE_TYPE_PS_DISABLED = 0x01,
+ WMI_PS_PROFILE_TYPE_MAX_PS = 0x02,
+ WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS = 0x03,
+};
+
+/* WMI_PS_DEV_PROFILE_CFG_CMDID
+ *
+ * Power save profile to be used by the device
+ *
+ * Returned event:
+ * - WMI_PS_DEV_PROFILE_CFG_EVENTID
+ */
+struct wmi_ps_dev_profile_cfg_cmd {
+ /* wmi_ps_profile_type_e */
+ u8 ps_profile;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PS_DEV_PROFILE_CFG_EVENTID */
+struct wmi_ps_dev_profile_cfg_event {
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+enum wmi_ps_level {
+ WMI_PS_LEVEL_DEEP_SLEEP = 0x00,
+ WMI_PS_LEVEL_SHALLOW_SLEEP = 0x01,
+ /* awake = all PS mechanisms are disabled */
+ WMI_PS_LEVEL_AWAKE = 0x02,
+};
+
+enum wmi_ps_deep_sleep_clk_level {
+ /* 33k */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC = 0x00,
+ /* 10k */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_OSC = 0x01,
+ /* @RTC Low latency */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC_LT = 0x02,
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_XTAL = 0x03,
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_SYSCLK = 0x04,
+ /* Not Applicable */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_N_A = 0xFF,
+};
+
+/* Response by the FW to a D3 entry request */
+enum wmi_ps_d3_resp_policy {
+ WMI_PS_D3_RESP_POLICY_DEFAULT = 0x00,
+ /* debug -D3 req is always denied */
+ WMI_PS_D3_RESP_POLICY_DENIED = 0x01,
+ /* debug -D3 req is always approved */
+ WMI_PS_D3_RESP_POLICY_APPROVED = 0x02,
+};
+
+/* Device common power save configurations */
+struct wmi_ps_dev_cfg {
+ /* lowest level of PS allowed while unassociated, enum wmi_ps_level_e
+ */
+ u8 ps_unassoc_min_level;
+ /* lowest deep sleep clock level while nonassoc, enum
+ * wmi_ps_deep_sleep_clk_level_e
+ */
+ u8 ps_unassoc_deep_sleep_min_level;
+ /* lowest level of PS allowed while associated, enum wmi_ps_level_e */
+ u8 ps_assoc_min_level;
+ /* lowest deep sleep clock level while assoc, enum
+ * wmi_ps_deep_sleep_clk_level_e
+ */
+ u8 ps_assoc_deep_sleep_min_level;
+ /* enum wmi_ps_deep_sleep_clk_level_e */
+ u8 ps_assoc_low_latency_ds_min_level;
+ /* enum wmi_ps_d3_resp_policy_e */
+ u8 ps_D3_response_policy;
+ /* BOOL */
+ u8 ps_D3_pm_pme_enabled;
+ /* BOOL */
+ u8 ps_halp_enable;
+ u8 ps_deep_sleep_enter_thresh_msec;
+ /* BOOL */
+ u8 ps_voltage_scaling_en;
+} __packed;
+
+/* WMI_PS_DEV_CFG_CMDID
+ *
+ * Configure common Power Save parameters of the device and all MIDs.
+ *
+ * Returned event:
+ * - WMI_PS_DEV_CFG_EVENTID
+ */
+struct wmi_ps_dev_cfg_cmd {
+ /* Device Power Save configuration to be applied */
+ struct wmi_ps_dev_cfg ps_dev_cfg;
+ /* alignment to 32b */
+ u8 reserved[2];
+} __packed;
+
+/* WMI_PS_DEV_CFG_EVENTID */
+struct wmi_ps_dev_cfg_event {
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+/* WMI_PS_DEV_CFG_READ_CMDID
+ *
+ * request to retrieve device Power Save configuration
+ * (WMI_PS_DEV_CFG_CMD params)
+ *
+ * Returned event:
+ * - WMI_PS_DEV_CFG_READ_EVENTID
+ */
+struct wmi_ps_dev_cfg_read_cmd {
+ __le32 reserved;
+} __packed;
+
+/* WMI_PS_DEV_CFG_READ_EVENTID */
+struct wmi_ps_dev_cfg_read_event {
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+ /* Retrieved device Power Save configuration (WMI_PS_DEV_CFG_CMD
+ * params)
+ */
+ struct wmi_ps_dev_cfg dev_ps_cfg;
+ /* alignment to 32b */
+ u8 reserved[2];
+} __packed;
+
+/* Per Mac Power Save configurations */
+struct wmi_ps_mid_cfg {
+ /* Low power RX in BTI is enabled, BOOL */
+ u8 beacon_lprx_enable;
+ /* Sync to sector ID enabled, BOOL */
+ u8 beacon_sync_to_sectorId_enable;
+ /* Low power RX in DTI is enabled, BOOL */
+ u8 frame_exchange_lprx_enable;
+ /* Sleep Cycle while in scheduled PS, 1-31 */
+ u8 scheduled_sleep_cycle_pow2;
+ /* Stay Awake for k BIs every (sleep_cycle - k) BIs, 1-31 */
+ u8 scheduled_num_of_awake_bis;
+ u8 am_to_traffic_load_thresh_mbp;
+ u8 traffic_to_am_load_thresh_mbps;
+ u8 traffic_to_am_num_of_no_traffic_bis;
+ /* BOOL */
+ u8 continuous_traffic_psm;
+ __le16 no_traffic_to_min_usec;
+ __le16 no_traffic_to_max_usec;
+ __le16 snoozing_sleep_interval_milisec;
+ u8 max_no_data_awake_events;
+ /* Trigger WEB after k failed beacons */
+ u8 num_of_failed_beacons_rx_to_trigger_web;
+ /* Trigger BF after k failed beacons */
+ u8 num_of_failed_beacons_rx_to_trigger_bf;
+ /* Trigger SOB after k successful beacons */
+ u8 num_of_successful_beacons_rx_to_trigger_sob;
+} __packed;
+
+/* WMI_PS_MID_CFG_CMDID
+ *
+ * Configure Power Save parameters of a specific MID.
+ * These parameters are relevant for the specific BSS this MID belongs to.
+ *
+ * Returned event:
+ * - WMI_PS_MID_CFG_EVENTID
+ */
+struct wmi_ps_mid_cfg_cmd {
+ /* MAC ID */
+ u8 mid;
+ /* mid PS configuration to be applied */
+ struct wmi_ps_mid_cfg ps_mid_cfg;
+} __packed;
+
+/* WMI_PS_MID_CFG_EVENTID */
+struct wmi_ps_mid_cfg_event {
+ /* MAC ID */
+ u8 mid;
+ /* alignment to 32b */
+ u8 reserved[3];
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+/* WMI_PS_MID_CFG_READ_CMDID
+ *
+ * request to retrieve Power Save configuration of mid
+ * (WMI_PS_MID_CFG_CMD params)
+ *
+ * Returned event:
+ * - WMI_PS_MID_CFG_READ_EVENTID
+ */
+struct wmi_ps_mid_cfg_read_cmd {
+ /* MAC ID */
+ u8 mid;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PS_MID_CFG_READ_EVENTID */
+struct wmi_ps_mid_cfg_read_event {
+ /* MAC ID */
+ u8 mid;
+ /* Retrieved MID Power Save configuration(WMI_PS_MID_CFG_CMD params) */
+ struct wmi_ps_mid_cfg mid_ps_cfg;
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+#define WMI_AOA_MAX_DATA_SIZE (128)
+
+enum wmi_aoa_meas_status {
+ WMI_AOA_MEAS_SUCCESS = 0x00,
+ WMI_AOA_MEAS_PEER_INCAPABLE = 0x01,
+ WMI_AOA_MEAS_FAILURE = 0x02,
+};
+
+/* WMI_AOA_MEAS_EVENTID */
+struct wmi_aoa_meas_event {
+ u8 mac_addr[WMI_MAC_LEN];
+ /* channels IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
+ u8 channel;
+ /* enum wmi_aoa_meas_type */
+ u8 aoa_meas_type;
+ /* Measurements are from RFs, defined by the mask */
+ __le32 meas_rf_mask;
+ /* enum wmi_aoa_meas_status */
+ u8 meas_status;
+ u8 reserved;
+ /* Length of meas_data in bytes */
+ __le16 length;
+ u8 meas_data[WMI_AOA_MAX_DATA_SIZE];
+} __packed;
+
+/* WMI_TOF_GET_CAPABILITIES_EVENTID */
+struct wmi_tof_get_capabilities_event {
+ u8 ftm_capability;
+ /* maximum supported number of destinations to start TOF */
+ u8 max_num_of_dest;
+ /* maximum supported number of measurements per burst */
+ u8 max_num_of_meas_per_burst;
+ u8 reserved;
+ /* maximum supported multi-burst sessions */
+ __le16 max_multi_bursts_sessions;
+ /* maximum supported FTM burst duration, wmi_tof_burst_duration_e */
+ __le16 max_ftm_burst_duration;
+ /* AOA supported types */
+ __le32 aoa_supported_types;
+} __packed;
+
+enum wmi_tof_session_end_status {
+ WMI_TOF_SESSION_END_NO_ERROR = 0x00,
+ WMI_TOF_SESSION_END_FAIL = 0x01,
+ WMI_TOF_SESSION_END_PARAMS_ERROR = 0x02,
+ WMI_TOF_SESSION_END_ABORTED = 0x03,
+};
+
+/* WMI_TOF_SESSION_END_EVENTID */
+struct wmi_tof_session_end_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* wmi_tof_session_end_status_e */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* Responder FTM Results */
+struct wmi_responder_ftm_res {
+ u8 t1[6];
+ u8 t2[6];
+ u8 t3[6];
+ u8 t4[6];
+ __le16 tod_err;
+ __le16 toa_err;
+ __le16 tod_err_initiator;
+ __le16 toa_err_initiator;
+} __packed;
+
+enum wmi_tof_ftm_per_dest_res_status {
+ WMI_PER_DEST_RES_NO_ERROR = 0x00,
+ WMI_PER_DEST_RES_TX_RX_FAIL = 0x01,
+ WMI_PER_DEST_RES_PARAM_DONT_MATCH = 0x02,
+};
+
+enum wmi_tof_ftm_per_dest_res_flags {
+ WMI_PER_DEST_RES_REQ_START = 0x01,
+ WMI_PER_DEST_RES_BURST_REPORT_END = 0x02,
+ WMI_PER_DEST_RES_REQ_END = 0x04,
+ WMI_PER_DEST_RES_PARAM_UPDATE = 0x08,
+};
+
+/* WMI_TOF_FTM_PER_DEST_RES_EVENTID */
+struct wmi_tof_ftm_per_dest_res_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* destination MAC address */
+ u8 dst_mac[WMI_MAC_LEN];
+ /* wmi_tof_ftm_per_dest_res_flags_e */
+ u8 flags;
+ /* wmi_tof_ftm_per_dest_res_status_e */
+ u8 status;
+ /* responder ASAP */
+ u8 responder_asap;
+ /* responder number of FTM per burst */
+ u8 responder_num_ftm_per_burst;
+ /* responder number of FTM burst exponent */
+ u8 responder_num_ftm_bursts_exp;
+ /* responder burst duration, wmi_tof_burst_duration_e */
+ u8 responder_burst_duration;
+ /* responder burst period, indicates the interval between two consecutive
+ * burst instances, in units of 100 ms
+ */
+ __le16 responder_burst_period;
+ /* receive burst counter */
+ __le16 bursts_cnt;
+ /* tsf of responder start burst */
+ __le32 tsf_sync;
+ /* actual received ftm per burst */
+ u8 actual_ftm_per_burst;
+ u8 reserved0[7];
+ struct wmi_responder_ftm_res responder_ftm_res[0];
+} __packed;
+
+enum wmi_tof_channel_info_type {
+ WMI_TOF_CHANNEL_INFO_AOA = 0x00,
+ WMI_TOF_CHANNEL_INFO_LCI = 0x01,
+ WMI_TOF_CHANNEL_INFO_LCR = 0x02,
+ WMI_TOF_CHANNEL_INFO_VENDOR_SPECIFIC = 0x03,
+ WMI_TOF_CHANNEL_INFO_CIR = 0x04,
+ WMI_TOF_CHANNEL_INFO_RSSI = 0x05,
+ WMI_TOF_CHANNEL_INFO_SNR = 0x06,
+ WMI_TOF_CHANNEL_INFO_DEBUG = 0x07,
+};
+
+/* WMI_TOF_CHANNEL_INFO_EVENTID */
+struct wmi_tof_channel_info_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* destination MAC address */
+ u8 dst_mac[WMI_MAC_LEN];
+ /* wmi_tof_channel_info_type_e */
+ u8 type;
+ /* data report length */
+ u8 len;
+ /* data report payload */
+ u8 report[0];
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/cnss/cnss_common.c b/drivers/net/wireless/cnss/cnss_common.c
index f63e958b1205..7805882aa6fe 100644
--- a/drivers/net/wireless/cnss/cnss_common.c
+++ b/drivers/net/wireless/cnss/cnss_common.c
@@ -24,6 +24,31 @@
#include "cnss_common.h"
#include <net/cfg80211.h>
+#define AR6320_REV1_VERSION 0x5000000
+#define AR6320_REV1_1_VERSION 0x5000001
+#define AR6320_REV1_3_VERSION 0x5000003
+#define AR6320_REV2_1_VERSION 0x5010000
+#define AR6320_REV3_VERSION 0x5020000
+#define AR6320_REV3_2_VERSION 0x5030000
+#define AR900B_DEV_VERSION 0x1000000
+#define QCA9377_REV1_1_VERSION 0x5020001
+
+static struct cnss_fw_files FW_FILES_QCA6174_FW_1_1 = {
+ "qwlan11.bin", "bdwlan11.bin", "otp11.bin", "utf11.bin",
+ "utfbd11.bin", "epping11.bin", "evicted11.bin"};
+static struct cnss_fw_files FW_FILES_QCA6174_FW_2_0 = {
+ "qwlan20.bin", "bdwlan20.bin", "otp20.bin", "utf20.bin",
+ "utfbd20.bin", "epping20.bin", "evicted20.bin"};
+static struct cnss_fw_files FW_FILES_QCA6174_FW_1_3 = {
+ "qwlan13.bin", "bdwlan13.bin", "otp13.bin", "utf13.bin",
+ "utfbd13.bin", "epping13.bin", "evicted13.bin"};
+static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
+ "qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
+ "utfbd30.bin", "epping30.bin", "evicted30.bin"};
+static struct cnss_fw_files FW_FILES_DEFAULT = {
+ "qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin",
+ "utfbd.bin", "epping.bin", "evicted.bin"};
+
enum cnss_dev_bus_type {
CNSS_BUS_NONE = -1,
CNSS_BUS_PCI,
@@ -424,3 +449,49 @@ int cnss_power_down(struct device *dev)
return ret;
}
EXPORT_SYMBOL(cnss_power_down);
+
+void cnss_get_qca9377_fw_files(struct cnss_fw_files *pfw_files,
+ u32 size, u32 tufello_dual_fw)
+{
+ if (tufello_dual_fw)
+ memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files));
+ else
+ memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files));
+}
+EXPORT_SYMBOL(cnss_get_qca9377_fw_files);
+
+int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
+ u32 target_type, u32 target_version)
+{
+ if (!pfw_files)
+ return -ENODEV;
+
+ switch (target_version) {
+ case AR6320_REV1_VERSION:
+ case AR6320_REV1_1_VERSION:
+ memcpy(pfw_files, &FW_FILES_QCA6174_FW_1_1, sizeof(*pfw_files));
+ break;
+ case AR6320_REV1_3_VERSION:
+ memcpy(pfw_files, &FW_FILES_QCA6174_FW_1_3, sizeof(*pfw_files));
+ break;
+ case AR6320_REV2_1_VERSION:
+ memcpy(pfw_files, &FW_FILES_QCA6174_FW_2_0, sizeof(*pfw_files));
+ break;
+ case AR6320_REV3_VERSION:
+ case AR6320_REV3_2_VERSION:
+ memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files));
+ break;
+ default:
+ memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files));
+ pr_err("%s default version 0x%X 0x%X", __func__,
+ target_type, target_version);
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_fw_files_for_target);
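Callers hand in the reported target version and get the matching file
set back; an illustrative use (the image_file field name is an
assumption about struct cnss_fw_files):

	struct cnss_fw_files files;

	if (!cnss_get_fw_files_for_target(&files, 0, AR6320_REV3_VERSION))
		pr_debug("FW image: %s\n", files.image_file);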
+
+const char *cnss_wlan_get_evicted_data_file(void)
+{
+ return FW_FILES_QCA6174_FW_3_0.evicted_data;
+}
diff --git a/drivers/net/wireless/cnss/cnss_common.h b/drivers/net/wireless/cnss/cnss_common.h
index 0d299f3b6208..07ef9844b6db 100644
--- a/drivers/net/wireless/cnss/cnss_common.h
+++ b/drivers/net/wireless/cnss/cnss_common.h
@@ -41,4 +41,5 @@ int cnss_sdio_power_up(struct device *dev);
int cnss_sdio_power_down(struct device *dev);
int cnss_pcie_power_up(struct device *dev);
int cnss_pcie_power_down(struct device *dev);
+const char *cnss_wlan_get_evicted_data_file(void);
#endif /* _NET_CNSS_COMMON_H_ */
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index 1e3c3829c1c7..1e56d445c6e1 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -78,29 +78,6 @@
#define QCA6174_FW_3_0 (0x30)
#define QCA6174_FW_3_2 (0x32)
#define BEELINER_FW (0x00)
-#define AR6320_REV1_VERSION 0x5000000
-#define AR6320_REV1_1_VERSION 0x5000001
-#define AR6320_REV1_3_VERSION 0x5000003
-#define AR6320_REV2_1_VERSION 0x5010000
-#define AR6320_REV3_VERSION 0x5020000
-#define AR6320_REV3_2_VERSION 0x5030000
-#define AR900B_DEV_VERSION 0x1000000
-
-static struct cnss_fw_files FW_FILES_QCA6174_FW_1_1 = {
-"qwlan11.bin", "bdwlan11.bin", "otp11.bin", "utf11.bin",
-"utfbd11.bin", "epping11.bin", "evicted11.bin"};
-static struct cnss_fw_files FW_FILES_QCA6174_FW_2_0 = {
-"qwlan20.bin", "bdwlan20.bin", "otp20.bin", "utf20.bin",
-"utfbd20.bin", "epping20.bin", "evicted20.bin"};
-static struct cnss_fw_files FW_FILES_QCA6174_FW_1_3 = {
-"qwlan13.bin", "bdwlan13.bin", "otp13.bin", "utf13.bin",
-"utfbd13.bin", "epping13.bin", "evicted13.bin"};
-static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
-"qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
-"utfbd30.bin", "epping30.bin", "evicted30.bin"};
-static struct cnss_fw_files FW_FILES_DEFAULT = {
-"qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin",
-"utfbd.bin", "epping.bin", "evicted.bin"};
#define QCA6180_VENDOR_ID (0x168C)
#define QCA6180_DEVICE_ID (0x0041)
@@ -1093,37 +1070,6 @@ int cnss_get_fw_files(struct cnss_fw_files *pfw_files)
}
EXPORT_SYMBOL(cnss_get_fw_files);
-int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
- u32 target_type, u32 target_version)
-{
- if (!pfw_files)
- return -ENODEV;
-
- switch (target_version) {
- case AR6320_REV1_VERSION:
- case AR6320_REV1_1_VERSION:
- memcpy(pfw_files, &FW_FILES_QCA6174_FW_1_1, sizeof(*pfw_files));
- break;
- case AR6320_REV1_3_VERSION:
- memcpy(pfw_files, &FW_FILES_QCA6174_FW_1_3, sizeof(*pfw_files));
- break;
- case AR6320_REV2_1_VERSION:
- memcpy(pfw_files, &FW_FILES_QCA6174_FW_2_0, sizeof(*pfw_files));
- break;
- case AR6320_REV3_VERSION:
- case AR6320_REV3_2_VERSION:
- memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files));
- break;
- default:
- memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files));
- pr_err("%s version mismatch 0x%X 0x%X",
- __func__, target_type, target_version);
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(cnss_get_fw_files_for_target);
-
#ifdef CONFIG_CNSS_SECURE_FW
static void cnss_wlan_fw_mem_alloc(struct pci_dev *pdev)
{
@@ -1458,7 +1404,6 @@ static int cnss_wlan_is_codeswap_supported(u16 revision)
static int cnss_smmu_init(struct device *dev)
{
struct dma_iommu_mapping *mapping;
- int disable_htw = 1;
int atomic_ctx = 1;
int ret;
@@ -1472,15 +1417,6 @@ static int cnss_smmu_init(struct device *dev)
}
ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- pr_err("%s: set disable_htw attribute failed, err = %d\n",
- __func__, ret);
- goto set_attr_fail;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (ret) {
@@ -2058,7 +1994,7 @@ static void cnss_wlan_memory_expansion(void)
{
struct device *dev;
const struct firmware *fw_entry;
- const char *filename = FW_FILES_QCA6174_FW_3_0.evicted_data;
+ const char *filename;
u_int32_t fw_entry_size, size_left, dma_size_left, length;
char *fw_temp;
char *fw_data;
@@ -2067,6 +2003,7 @@ static void cnss_wlan_memory_expansion(void)
u_int32_t total_length = 0;
struct pci_dev *pdev;
+ filename = cnss_wlan_get_evicted_data_file();
pdev = penv->pdev;
dev = &pdev->dev;
cnss_seg_info = penv->cnss_seg_info;
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 87f4b641201c..14af8ca66d1c 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -622,6 +622,8 @@ struct msm_pcie_dev_t {
uint32_t cpl_timeout;
uint32_t current_bdf;
short current_short_bdf;
+ uint32_t perst_delay_us_min;
+ uint32_t perst_delay_us_max;
uint32_t tlp_rd_size;
bool linkdown_panic;
bool ep_wakeirq;
@@ -1758,7 +1760,8 @@ static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
bool check_sw_stts,
- bool check_ep)
+ bool check_ep,
+ void __iomem *ep_conf)
{
u32 val;
@@ -1785,7 +1788,7 @@ static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
}
if (check_ep) {
- val = readl_relaxed(dev->conf);
+ val = readl_relaxed(ep_conf);
PCIE_DBG(dev,
"PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
dev->rc_idx, val);
@@ -1814,6 +1817,10 @@ static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
cfg = dev->dm_core;
shadow = dev->rc_shadow;
} else {
+ if (!msm_pcie_confirm_linkup(dev, false, true,
+ dev->pcidev_table[i].conf_base))
+ continue;
+
shadow = dev->ep_shadow[i];
PCIE_DBG(dev,
"PCIe Device: %02x:%02x.%01x\n",
@@ -1973,6 +1980,10 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
dev->cpl_timeout);
PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
dev->current_bdf);
+ PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
+ dev->perst_delay_us_min);
+ PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
+ dev->perst_delay_us_max);
PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
dev->tlp_rd_size);
PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
@@ -4544,8 +4555,7 @@ int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
dev->rc_idx);
gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
- usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
- PERST_PROPAGATION_DELAY_US_MAX);
+ usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
/* set max tlp read size */
msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
@@ -4561,11 +4571,11 @@ int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
} while ((!(val & XMLH_LINK_UP) ||
- !msm_pcie_confirm_linkup(dev, false, false))
+ !msm_pcie_confirm_linkup(dev, false, false, NULL))
&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));
if ((val & XMLH_LINK_UP) &&
- msm_pcie_confirm_linkup(dev, false, false)) {
+ msm_pcie_confirm_linkup(dev, false, false, NULL)) {
PCIE_DBG(dev, "Link is up after %d checkings\n",
link_check_count);
PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
@@ -6073,6 +6083,34 @@ static int msm_pcie_probe(struct platform_device *pdev)
PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
+ msm_pcie_dev[rc_idx].perst_delay_us_min =
+ PERST_PROPAGATION_DELAY_US_MIN;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,perst-delay-us-min",
+ &msm_pcie_dev[rc_idx].perst_delay_us_min);
+ if (ret)
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
+ rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
+ else
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: perst-delay-us-min: %dus.\n",
+ rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
+
+ msm_pcie_dev[rc_idx].perst_delay_us_max =
+ PERST_PROPAGATION_DELAY_US_MAX;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,perst-delay-us-max",
+ &msm_pcie_dev[rc_idx].perst_delay_us_max);
+ if (ret)
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
+ rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
+ else
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: perst-delay-us-max: %dus.\n",
+ rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
+
msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
ret = of_property_read_u32(pdev->dev.of_node,
"qcom,tlp-rd-size",
@@ -6451,7 +6489,8 @@ static int msm_pcie_pm_suspend(struct pci_dev *dev,
}
if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
- && msm_pcie_confirm_linkup(pcie_dev, true, true)) {
+ && msm_pcie_confirm_linkup(pcie_dev, true, true,
+ pcie_dev->conf)) {
ret = pci_save_state(dev);
pcie_dev->saved_state = pci_store_saved_state(dev);
}
@@ -6960,7 +6999,7 @@ int msm_pcie_recover_config(struct pci_dev *dev)
return -ENODEV;
}
- if (msm_pcie_confirm_linkup(pcie_dev, true, true)) {
+ if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
PCIE_DBG(pcie_dev,
"Recover config space of RC%d and its EP\n",
pcie_dev->rc_idx);
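Threading ep_conf through msm_pcie_confirm_linkup() lets msm_pcie_cfg_recover() validate each enumerated endpoint against its own mapped config space instead of always reading dev->conf. The endpoint half of the check reduces to one config read; a sketch, assuming the driver's usual all-ones link-down sentinel:

/*
 * Sketch of the endpoint presence test: dword 0 of config space holds
 * the vendor/device ID; an all-ones readback (assumed sentinel) means
 * the device has dropped off the bus.
 */
static bool msm_pcie_ep_present(void __iomem *ep_conf)
{
	return readl_relaxed(ep_conf) != 0xffffffff;
}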
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index af1e5a70d585..352defe6204b 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -170,7 +170,12 @@ static void gsi_handle_glob_err(uint32_t err)
gsi_ctx->per.notify_cb(&per_notify);
break;
case GSI_ERR_TYPE_CHAN:
- BUG_ON(log->virt_idx >= GSI_MAX_CHAN);
+ if (log->virt_idx >= gsi_ctx->max_ch) {
+ GSIERR("Unexpected ch %d\n", log->virt_idx);
+ WARN_ON(1);
+ return;
+ }
+
ch = &gsi_ctx->chan[log->virt_idx];
chan_notify.chan_user_data = ch->props.chan_user_data;
chan_notify.err_desc = err & 0xFFFF;
@@ -213,7 +218,12 @@ static void gsi_handle_glob_err(uint32_t err)
WARN_ON(1);
break;
case GSI_ERR_TYPE_EVT:
- BUG_ON(log->virt_idx >= GSI_MAX_EVT_RING);
+ if (log->virt_idx >= gsi_ctx->max_ev) {
+ GSIERR("Unexpected ev %d\n", log->virt_idx);
+ WARN_ON(1);
+ return;
+ }
+
ev = &gsi_ctx->evtr[log->virt_idx];
evt_notify.user_data = ev->props.user_data;
evt_notify.err_desc = err & 0xFFFF;
@@ -257,6 +267,9 @@ static void gsi_handle_glob_ee(int ee)
if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
err = gsi_readl(gsi_ctx->base +
GSI_EE_n_ERROR_LOG_OFFS(ee));
+ if (gsi_ctx->per.ver >= GSI_VER_1_2)
+ gsi_writel(0, gsi_ctx->base +
+ GSI_EE_n_ERROR_LOG_OFFS(ee));
gsi_writel(clr, gsi_ctx->base +
GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
gsi_handle_glob_err(err);
@@ -311,7 +324,12 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
uint64_t rp;
ch_id = evt->chid;
- BUG_ON(ch_id >= GSI_MAX_CHAN);
+ if (ch_id >= gsi_ctx->max_ch) {
+ GSIERR("Unexpected ch %d\n", ch_id);
+ WARN_ON(1);
+ return;
+ }
+
ch_ctx = &gsi_ctx->chan[ch_id];
BUG_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI);
rp = evt->xfer_ptr;
@@ -567,6 +585,75 @@ static irqreturn_t gsi_isr(int irq, void *ctxt)
return IRQ_HANDLED;
}
+static uint32_t gsi_get_max_channels(enum gsi_ver ver)
+{
+ uint32_t reg;
+
+ switch (ver) {
+ case GSI_VER_1_0:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
+ GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_2:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_3:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ reg = (reg &
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+ break;
+ default:
+ GSIERR("bad gsi version %d\n", ver);
+ WARN_ON(1);
+ reg = 0;
+ }
+
+ GSIDBG("max channels %d\n", reg);
+
+ return reg;
+}
+
+static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
+{
+ uint32_t reg;
+
+ switch (ver) {
+ case GSI_VER_1_0:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
+ GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_2:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_3:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ reg = (reg &
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+ break;
+ default:
+ GSIERR("bad gsi version %d\n", ver);
+ WARN_ON(1);
+ reg = 0;
+ }
+
+ GSIDBG("max event rings %d\n", reg);
+
+ return reg;
+}
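Both helpers pull a field out of an EE HW-parameter register with the usual mask-and-shift idiom. A worked example on an assumed v1.2 readback, chosen to line up with the 14-bit/10-bit inter-EE IRQ masks tightened in gsi_reg.h below:

/* Assumed readback: reg = 0x00040e0a on GSI v1.2
 *   GSI_EV_CH_NUM =  reg        & 0xff = 0x0a -> 10 event rings
 *   GSI_CH_NUM    = (reg >> 8)  & 0xff = 0x0e -> 14 channels
 *   NUM_EES       = (reg >> 16) & 0x1f = 0x04 -> 4 execution environments
 */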
int gsi_complete_clk_grant(unsigned long dev_hdl)
{
unsigned long flags;
@@ -611,6 +698,11 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
return -GSI_STATUS_INVALID_PARAMS;
}
+ if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
+ GSIERR("bad params gsi_ver=%d\n", props->ver);
+ return -GSI_STATUS_INVALID_PARAMS;
+ }
+
if (!props->notify_cb) {
GSIERR("notify callback must be provided\n");
return -GSI_STATUS_INVALID_PARAMS;
@@ -668,8 +760,25 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
mutex_init(&gsi_ctx->mlock);
atomic_set(&gsi_ctx->num_chan, 0);
atomic_set(&gsi_ctx->num_evt_ring, 0);
- /* only support 16 un-reserved + 7 reserved event virtual IDs */
- gsi_ctx->evt_bmap = ~0x7E03FF;
+ gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
+ if (gsi_ctx->max_ch == 0) {
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ GSIERR("failed to get max channels\n");
+ return -GSI_STATUS_ERROR;
+ }
+ gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
+ if (gsi_ctx->max_ev == 0) {
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ GSIERR("failed to get max event rings\n");
+ return -GSI_STATUS_ERROR;
+ }
+
+	/* mark rings at/above max_ev, plus the reserved MHI window, as used */
+ gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
+ gsi_ctx->evt_bmap |= ((1 << (GSI_MHI_ER_END + 1)) - 1) ^
+ ((1 << GSI_MHI_ER_START) - 1);
/*
* enable all interrupts but GSI_BREAK_POINT.
@@ -693,6 +802,10 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
else
GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");
+ if (gsi_ctx->per.ver >= GSI_VER_1_2)
+ gsi_writel(0, gsi_ctx->base +
+ GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
+
*dev_hdl = (uintptr_t)gsi_ctx;
return GSI_STATUS_SUCCESS;
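The new evt_bmap first marks every ring at or above the hardware maximum as taken, then re-reserves the MHI window. A worked example with assumed values (GSI_MHI_ER_START/END are not shown in this diff), say max_ev = 16 and MHI rings 10..11:

/* Assumed: max_ev = 16, GSI_MHI_ER_START = 10, GSI_MHI_ER_END = 11
 *   ~((1 << 16) - 1)                  = ...ffff0000  rings >= 16 unusable
 *   ((1 << 12) - 1) ^ ((1 << 10) - 1) = 0x0c00       rings 10-11 reserved
 *   evt_bmap                          = ...ffff0c00
 * leaving rings 0-9 and 12-15 for the event-ring allocator.
 */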
@@ -1059,7 +1172,7 @@ int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1093,7 +1206,7 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1160,7 +1273,7 @@ int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1194,7 +1307,7 @@ int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1255,7 +1368,7 @@ int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1291,7 +1404,7 @@ int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1382,7 +1495,7 @@ static int gsi_validate_channel_props(struct gsi_chan_props *props)
{
uint64_t ra;
- if (props->ch_id >= GSI_MAX_CHAN) {
+ if (props->ch_id >= gsi_ctx->max_ch) {
GSIERR("ch_id %u invalid\n", props->ch_id);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1573,7 +1686,7 @@ int gsi_write_channel_scratch(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1610,7 +1723,7 @@ int gsi_query_channel_db_addr(unsigned long chan_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1642,7 +1755,7 @@ int gsi_start_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1694,7 +1807,7 @@ int gsi_stop_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1763,7 +1876,7 @@ int gsi_stop_db_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1832,7 +1945,7 @@ int gsi_reset_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1898,7 +2011,7 @@ int gsi_dealloc_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2021,7 +2134,7 @@ int gsi_query_channel_info(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !info) {
+ if (chan_hdl >= gsi_ctx->max_ch || !info) {
GSIERR("bad params chan_hdl=%lu info=%p\n", chan_hdl, info);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2091,7 +2204,7 @@ int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !is_empty) {
+ if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
GSIERR("bad params chan_hdl=%lu is_empty=%p\n",
chan_hdl, is_empty);
return -GSI_STATUS_INVALID_PARAMS;
@@ -2155,7 +2268,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !num_xfers || !xfer) {
+ if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) {
GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%p\n",
chan_hdl, num_xfers, xfer);
return -GSI_STATUS_INVALID_PARAMS;
@@ -2242,7 +2355,7 @@ int gsi_start_xfer(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2278,7 +2391,7 @@ int gsi_poll_channel(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !notify) {
+ if (chan_hdl >= gsi_ctx->max_ch || !notify) {
GSIERR("bad params chan_hdl=%lu notify=%p\n", chan_hdl, notify);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2327,7 +2440,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2390,7 +2503,7 @@ int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2426,7 +2539,7 @@ int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2471,9 +2584,9 @@ static void gsi_configure_ieps(void *base)
gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
- gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_OFFS);
- gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_OFFS);
- gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_OFFS);
+ gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
+ gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
+ gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
@@ -2502,9 +2615,9 @@ static void gsi_configure_bck_prs_matrix(void *base)
gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
gsi_writel(0x00000000,
gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
- gsi_writel(0x00ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
+ gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
- gsi_writel(0xfdffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
+ gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
@@ -2551,15 +2664,35 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
}
/* Enable the MCS and set to x2 clocks */
- value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
- GSI_GSI_CFG_GSI_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
- GSI_GSI_CFG_MCS_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
- GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
- ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
- GSI_GSI_CFG_UC_IS_MCS_BMSK));
- gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ if (gsi_ctx->per.ver >= GSI_VER_1_2) {
+ value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
+ gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);
+
+ value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+ GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+ ((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+ GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+ ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+ GSI_GSI_CFG_UC_IS_MCS_BMSK) |
+ ((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
+ GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
+ ((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
+ GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
+ gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ } else {
+ value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+ GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+ GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+ ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+ GSI_GSI_CFG_UC_IS_MCS_BMSK));
+ gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ }
iounmap(gsi_base);
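On GSI 1.2 and later the MCS enable lives in its own register, so GSI_GSI_CFG keeps its MCS bit clear on that path. Plugging in the shift definitions from gsi_reg.h gives the net register values each branch writes:

/* Derived from the BMSK/SHFT definitions in this patch:
 *   GSI >= 1.2: GSI_GSI_MCS_CFG = 0x1 (MCS enable)
 *               GSI_GSI_CFG     = 0x5 (GSI enable | double MCS clock)
 *   older:      GSI_GSI_CFG     = 0x7 (GSI enable | MCS enable | double clock)
 */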
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 1d438ffb8b76..0b94ed2d3a92 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -19,8 +19,8 @@
#include <linux/spinlock.h>
#include <linux/msm_gsi.h>
-#define GSI_MAX_CHAN 31
-#define GSI_MAX_EVT_RING 23
+#define GSI_CHAN_MAX 31
+#define GSI_EVT_RING_MAX 23
#define GSI_NO_EVT_ERINDEX 31
#define gsi_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
@@ -130,8 +130,8 @@ struct gsi_ctx {
struct device *dev;
struct gsi_per_props per;
bool per_registered;
- struct gsi_chan_ctx chan[GSI_MAX_CHAN];
- struct gsi_evt_ctx evtr[GSI_MAX_EVT_RING];
+ struct gsi_chan_ctx chan[GSI_CHAN_MAX];
+ struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
struct mutex mlock;
spinlock_t slock;
unsigned long evt_bmap;
@@ -141,6 +141,8 @@ struct gsi_ctx {
struct gsi_ee_scratch scratch;
int num_ch_dp_stats;
struct workqueue_struct *dp_stat_wq;
+ u32 max_ch;
+ u32 max_ev;
};
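Note chan[] and evtr[] stay statically sized at the compile-time maxima while max_ch/max_ev carry the counts the hardware actually reports; every handle check converted in gsi.c compares against the runtime values. That only protects the arrays if the reported counts never exceed the static sizes; a sketch of the implied invariant:

/* Sketch of the invariant the converted bounds checks rely on. */
static inline bool gsi_limits_sane(const struct gsi_ctx *ctx)
{
	return ctx->max_ch <= GSI_CHAN_MAX &&
	       ctx->max_ev <= GSI_EVT_RING_MAX;
}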
enum gsi_re_type {
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 2ab8b79acc6d..5eb9084292a4 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -71,7 +71,7 @@ static ssize_t gsi_dump_evt(struct file *file,
TDBG("arg1=%u arg2=%u\n", arg1, arg2);
- if (arg1 >= GSI_MAX_EVT_RING) {
+ if (arg1 >= gsi_ctx->max_ev) {
TERR("invalid evt ring id %u\n", arg1);
return -EFAULT;
}
@@ -184,7 +184,7 @@ static ssize_t gsi_dump_ch(struct file *file,
TDBG("arg1=%u arg2=%u\n", arg1, arg2);
- if (arg1 >= GSI_MAX_CHAN) {
+ if (arg1 >= gsi_ctx->max_ch) {
TERR("invalid chan id %u\n", arg1);
return -EFAULT;
}
@@ -271,9 +271,30 @@ static ssize_t gsi_dump_ee(struct file *file,
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
TERR("EE%2d STATUS 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
+ if (gsi_ctx->per.ver == GSI_VER_1_0) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
+ } else if (gsi_ctx->per.ver == GSI_VER_1_2) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+ } else if (gsi_ctx->per.ver == GSI_VER_1_3) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val);
+ } else {
+ WARN_ON(1);
+ }
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_SW_VERSION_OFFS(gsi_ctx->per.ee));
TERR("EE%2d SW_VERSION 0x%x\n", gsi_ctx->per.ee, val);
@@ -329,7 +350,7 @@ static ssize_t gsi_dump_map(struct file *file,
int i;
TERR("EVT bitmap 0x%lx\n", gsi_ctx->evt_bmap);
- for (i = 0; i < GSI_MAX_CHAN; i++) {
+ for (i = 0; i < gsi_ctx->max_ch; i++) {
ctx = &gsi_ctx->chan[i];
if (ctx->allocated) {
@@ -402,8 +423,8 @@ static ssize_t gsi_dump_stats(struct file *file,
if (ch_id == -1) {
min = 0;
- max = GSI_MAX_CHAN;
- } else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ max = gsi_ctx->max_ch;
+ } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
} else {
@@ -464,7 +485,7 @@ static ssize_t gsi_enable_dp_stats(struct file *file,
if (kstrtos32(dbg_buff + 1, 0, &ch_id))
goto error;
- if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
}
@@ -540,7 +561,7 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
/* get */
if (kstrtou32(dbg_buff, 0, &ch_id))
goto error;
- if (ch_id >= GSI_MAX_CHAN)
+ if (ch_id >= gsi_ctx->max_ch)
goto error;
PRT_STAT("ch %d: max_re_expected=%d\n", ch_id,
gsi_ctx->chan[ch_id].props.max_re_expected);
@@ -553,7 +574,7 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
TDBG("ch_id=%u max_elem=%u\n", ch_id, max_elem);
- if (ch_id >= GSI_MAX_CHAN) {
+ if (ch_id >= gsi_ctx->max_ch) {
TERR("invalid chan id %u\n", ch_id);
goto error;
}
@@ -572,7 +593,7 @@ static void gsi_wq_print_dp_stats(struct work_struct *work)
{
int ch_id;
- for (ch_id = 0; ch_id < GSI_MAX_CHAN; ch_id++) {
+ for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
if (gsi_ctx->chan[ch_id].print_dp_stats)
gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]);
}
@@ -618,7 +639,7 @@ static void gsi_wq_update_dp_stats(struct work_struct *work)
{
int ch_id;
- for (ch_id = 0; ch_id < GSI_MAX_CHAN; ch_id++) {
+ for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
if (gsi_ctx->chan[ch_id].allocated &&
gsi_ctx->chan[ch_id].props.prot != GSI_CHAN_PROT_GPI &&
gsi_ctx->chan[ch_id].enable_dp_stats)
@@ -649,8 +670,8 @@ static ssize_t gsi_rst_stats(struct file *file,
if (ch_id == -1) {
min = 0;
- max = GSI_MAX_CHAN;
- } else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ max = gsi_ctx->max_ch;
+ } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
} else {
@@ -691,7 +712,7 @@ static ssize_t gsi_print_dp_stats(struct file *file,
if (kstrtos32(dbg_buff + 1, 0, &ch_id))
goto error;
- if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
}
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index 36a74105b490..fa1e84896f73 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,10 @@
#define GSI_GSI_CFG_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000000)
#define GSI_GSI_CFG_RMSK 0xf
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
#define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8
#define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3
#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
@@ -26,6 +30,11 @@
#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1
#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0
+#define GSI_GSI_MCS_CFG_OFFS \
+ (GSI_GSI_REG_BASE_OFFS + 0x0000B000)
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0
+
#define GSI_GSI_MANAGER_MCS_CODE_VER_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000008)
#define GSI_GSI_MANAGER_MCS_CODE_VER_RMSK 0xffffffff
@@ -99,8 +108,20 @@
#define GSI_GSI_CGC_CTRL_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000060)
#define GSI_GSI_CGC_CTRL_RMSK 0x3f
-#define GSI_GSI_CGC_CTRL_REGION_6_DEBUG_CNTRS_EN_BMSK 0x20
-#define GSI_GSI_CGC_CTRL_REGION_6_DEBUG_CNTRS_EN_SHFT 0x5
+#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_BMSK 0x800
+#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_SHFT 0xb
+#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_BMSK 0x400
+#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_SHFT 0xa
+#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_BMSK 0x200
+#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_SHFT 0x9
+#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_BMSK 0x100
+#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_SHFT 0x8
+#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_BMSK 0x80
+#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_SHFT 0x7
+#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_BMSK 0x40
+#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_SHFT 0x6
+#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_BMSK 0x20
+#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_SHFT 0x5
#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_BMSK 0x10
#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_SHFT 0x4
#define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_BMSK 0x8
@@ -619,23 +640,23 @@
#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff
#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000430)
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000434)
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000438)
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0
#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x0000043c)
@@ -701,7 +722,9 @@
#define GSI_GSI_DEBUG_BUSY_REG_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00001010)
-#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0x7f
+#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0xff
+#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80
+#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7
#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40
#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6
#define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20
@@ -1345,22 +1368,150 @@
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0xffffffff
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
-#define GSI_EE_n_GSI_HW_PARAM_OFFS(n) \
+/* v1.0 */
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_MAXn 3
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+
+/* v1.2 */
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_MAXn 2
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_MAXn 2
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \
+ 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \
+ 0x40000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0
+
+/* v1.3 */
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f03c + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \
+ 0x80000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \
+ 0x40000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \
(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
-#define GSI_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff
-#define GSI_EE_n_GSI_HW_PARAM_MAXn 3
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
-#define GSI_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
-#define GSI_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
-#define GSI_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
-#define GSI_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
-#define GSI_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
-#define GSI_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
-#define GSI_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
-#define GSI_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
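Decoding HW_PARAM_2 with these masks, for an assumed readback of 0x0a71:

/* Assumed: val = 0x0a71
 *   GSI_NUM_EV_PER_EE = (val & 0x1f00) >> 8 = 10 event rings
 *   GSI_NUM_CH_PER_EE = (val & 0x00f8) >> 3 = 14 channels
 *   GSI_IRAM_SIZE     =  val & 0x0007       = 1  -> TWO_KB
 */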
#define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \
(GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n))
@@ -1662,7 +1813,7 @@
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_MAXn 3
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
- 0xffffffff
+ 0x00003fff
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) \
@@ -1670,7 +1821,7 @@
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_MAXn 3
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
- 0xffffffff
+ 0x000003ff
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 05ce3969a5c7..75b193def36e 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -26,7 +26,8 @@
#define IPA_API_DISPATCH_RETURN(api, p...) \
do { \
if (!ipa_api_ctrl) { \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
ret = -EPERM; \
} \
else { \
@@ -44,7 +45,8 @@
#define IPA_API_DISPATCH(api, p...) \
do { \
if (!ipa_api_ctrl) \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
else { \
if (ipa_api_ctrl->api) { \
ipa_api_ctrl->api(p); \
@@ -59,7 +61,8 @@
#define IPA_API_DISPATCH_RETURN_PTR(api, p...) \
do { \
if (!ipa_api_ctrl) { \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
ret = NULL; \
} \
else { \
@@ -77,7 +80,8 @@
#define IPA_API_DISPATCH_RETURN_BOOL(api, p...) \
do { \
if (!ipa_api_ctrl) { \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
ret = false; \
} \
else { \
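All four dispatch macros share one shape: fail with a flavor-appropriate value when no HW-specific API table is registered, otherwise forward to it. A usage sketch for the _RETURN flavor (ipa_foo is illustrative; the real wrappers in ipa_api.c follow this pattern):

/* Illustrative wrapper built on IPA_API_DISPATCH_RETURN. */
int ipa_foo(u32 param)
{
	int ret;

	IPA_API_DISPATCH_RETURN(ipa_foo, param);

	return ret;
}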
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 838b78c1934d..d18308344431 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -2034,7 +2034,7 @@ static void ipa_usb_debugfs_init(void)
ipa3_usb_ctx->dent = debugfs_create_dir("ipa_usb", 0);
if (IS_ERR(ipa3_usb_ctx->dent)) {
- IPA_USB_ERR("fail to create folder in debug_fs.\n");
+ pr_err("fail to create folder in debug_fs.\n");
return;
}
@@ -2043,7 +2043,7 @@ static void ipa_usb_debugfs_init(void)
&ipa3_ipa_usb_ops);
if (!ipa3_usb_ctx->dfile_state_info ||
IS_ERR(ipa3_usb_ctx->dfile_state_info)) {
- IPA_USB_ERR("failed to create file for state_info\n");
+ pr_err("failed to create file for state_info\n");
goto fail;
}
@@ -2644,11 +2644,11 @@ static int __init ipa3_usb_init(void)
unsigned long flags;
int res;
- IPA_USB_DBG("entry\n");
+ pr_debug("entry\n");
ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
if (ipa3_usb_ctx == NULL) {
- IPA_USB_ERR("failed to allocate memory\n");
- IPA_USB_ERR(":ipa_usb init failed\n");
+ pr_err("failed to allocate memory\n");
+ pr_err(":ipa_usb init failed\n");
return -EFAULT;
}
memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context));
@@ -2680,19 +2680,19 @@ static int __init ipa3_usb_init(void)
ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq");
if (!ipa3_usb_ctx->wq) {
- IPA_USB_ERR("failed to create workqueue\n");
+ pr_err("failed to create workqueue\n");
res = -EFAULT;
goto ipa_usb_workqueue_fail;
}
ipa_usb_debugfs_init();
- IPA_USB_INFO("exit: IPA_USB init success!\n");
+ pr_info("exit: IPA_USB init success!\n");
return 0;
ipa_usb_workqueue_fail:
- IPA_USB_ERR(":init failed (%d)\n", -res);
+ pr_err(":init failed (%d)\n", -res);
kfree(ipa3_usb_ctx);
return res;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 9cb0b1f3c379..804c89dc9533 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -207,7 +207,6 @@ struct platform_device *ipa_pdev;
static struct {
bool present;
bool arm_smmu;
- bool disable_htw;
bool fast_map;
bool s1_bypass;
u32 ipa_base;
@@ -4313,9 +4312,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
- smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
- "qcom,smmu-disable-htw");
-
/* Get IPA HW Version */
result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
&ipa_drv_res->ipa_hw_type);
@@ -4502,7 +4498,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4519,17 +4514,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
}
cb->valid = true;
- if (smmu_info.disable_htw) {
- ret = iommu_domain_set_attr(cb->iommu,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- IPAERR("couldn't disable coherent HTW\n");
- cb->valid = false;
- return -EIO;
- }
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_S1_BYPASS,
@@ -4589,7 +4573,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int ret;
int fast = 1;
@@ -4628,18 +4611,6 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- }
-
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
@@ -4694,7 +4665,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
int result;
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4731,18 +4701,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- IPADBG("SMMU disable HTW\n");
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 0eab77d27760..50c387ec785d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -1420,6 +1420,7 @@ static ssize_t ipa_read_nat4(struct file *file,
u16 enable, tbl_entry, flag;
u32 no_entrys = 0;
+ mutex_lock(&ipa_ctx->nat_mem.lock);
value = ipa_ctx->nat_mem.public_ip_addr;
pr_err(
"Table IP Address:%d.%d.%d.%d\n",
@@ -1573,6 +1574,7 @@ static ssize_t ipa_read_nat4(struct file *file,
}
}
pr_err("Current No. Nat Entries: %d\n", no_entrys);
+ mutex_unlock(&ipa_ctx->nat_mem.lock);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 695c8bc4cbc0..3c2a6d4620ba 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -3152,23 +3152,23 @@ static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
} else if (in->client ==
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
- if (in->napi_enabled) {
+ sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size;
+ if (nr_cpu_ids > 1) {
sys->repl_hdlr =
- ipa_replenish_rx_cache_recycle;
- sys->rx_pool_sz =
- IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ ipa_fast_replenish_rx_cache;
+ sys->repl_trig_thresh =
+ sys->rx_pool_sz / 8;
} else {
- if (nr_cpu_ids > 1) {
- sys->repl_hdlr =
- ipa_fast_replenish_rx_cache;
- sys->repl_trig_thresh =
- sys->rx_pool_sz / 8;
- } else {
+ sys->repl_hdlr =
+ ipa_replenish_rx_cache;
+ }
+ if (in->napi_enabled) {
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ if (in->recycle_enabled) {
sys->repl_hdlr =
- ipa_replenish_rx_cache;
+ ipa_replenish_rx_cache_recycle;
}
- sys->rx_pool_sz =
- ipa_ctx->wan_rx_ring_size;
}
sys->ep->wakelock_client =
IPA_WAKELOCK_REF_CLIENT_WAN_RX;
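The reordered WAN_CONS block now always starts from the plain ring size and the SMP-appropriate replenish handler, and only then lets NAPI override the pool size and, when recycling was explicitly requested, the handler too. Condensed (a paraphrase of the hunk above, not new behavior):

/* Selection order for IPA_CLIENT_APPS_WAN_CONS after this patch:
 *   pool = wan_rx_ring_size
 *   repl = SMP ? fast_replenish (trigger at pool/8) : plain replenish
 *   if NAPI: pool = IPA_WAN_NAPI_CONS_RX_POOL_SZ
 *            if recycle_enabled: repl = replenish_recycle
 */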
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 581a5f9d8a2e..fec4d5484d28 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -41,8 +41,6 @@
#define MTU_BYTE 1500
#define IPA_MAX_NUM_PIPES 0x14
-#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
#define IPA_SYS_DESC_FIFO_SZ 0x2000
#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
#define IPA_LAN_RX_HEADER_LENGTH (2)
@@ -53,6 +51,8 @@
#define IPA_UC_FINISH_MAX 6
#define IPA_UC_WAIT_MIN_SLEEP 1000
#define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
+#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
#define IPA_MAX_STATUS_STAT_NUM 30
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index 137a43a1217b..3f20941155a5 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -493,6 +493,8 @@ static int qmi_init_modem_send_sync_msg(void)
resp_desc.ei_array = ipa_init_modem_driver_resp_msg_data_v01_ei;
pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_REQ_TIMEOUT_MS);
@@ -538,7 +540,8 @@ int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01;
resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
resp_desc.ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
req,
sizeof(struct ipa_install_fltr_rule_req_msg_v01),
@@ -574,7 +577,8 @@ int qmi_enable_force_clear_datapath_send(
resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
resp_desc.ei_array =
ipa_enable_force_clear_datapath_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -618,7 +622,8 @@ int qmi_disable_force_clear_datapath_send(
resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
resp_desc.ei_array =
ipa_disable_force_clear_datapath_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -688,7 +693,8 @@ int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
resp_desc.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -1089,7 +1095,8 @@ int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
resp_desc.ei_array = ipa_get_data_stats_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1118,7 +1125,8 @@ int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
resp_desc.ei_array = ipa_get_apn_data_stats_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1150,7 +1158,8 @@ int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
resp_desc.ei_array = ipa_set_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
&resp_desc, &resp, sizeof(resp),
@@ -1184,7 +1193,8 @@ int ipa_qmi_stop_data_qouta(void)
resp_desc.ei_array = ipa_stop_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_STATS_REQ_TIMEOUT_MS);
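Each send path now refuses to call qmi_send_req_wait() on a NULL ipa_q6_clnt, returning -ETIMEDOUT as if the service never answered. The repeated guard could be folded into one helper; a hedged sketch (ipa_qmi_send_checked is hypothetical, the patch open-codes the check at every call site):

/* Hypothetical consolidation of the NULL-client guard. */
static int ipa_qmi_send_checked(struct msg_desc *req_desc, void *req,
				unsigned int req_len,
				struct msg_desc *resp_desc, void *resp,
				unsigned int resp_len, unsigned long timeout)
{
	if (unlikely(!ipa_q6_clnt))
		return -ETIMEDOUT;
	return qmi_send_req_wait(ipa_q6_clnt, req_desc, req, req_len,
				 resp_desc, resp, resp_len, timeout);
}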
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
index 08ed47f3cacf..d14f8da15595 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -150,10 +150,16 @@ int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
{
int ret;
+ if (!ipa_ctx) {
+ IPAERR("IPA ctx is null\n");
+ return -ENXIO;
+ }
+
ret = ipa2_uc_state_check();
if (ret) {
ipa_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
ipa_ctx->uc_ntn_ctx.priv = user_data;
+ return 0;
}
return -EEXIST;
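The added return 0 fixes a real fall-through: a successfully deferred callback used to return -EEXIST anyway. Callers can now trust the return code; a usage sketch (my_ready_cb is illustrative):

/* Illustrative caller:
 *   0       -> callback stored, fires when the uC comes up
 *   -EEXIST -> uC already ready, run the ready path directly
 *   -ENXIO  -> IPA context not initialized yet
 */
static void my_ready_cb(void *user_data)
{
	pr_info("IPA uC ready\n");
}

static int example_register(void)
{
	int ret = ipa2_register_ipa_ready_cb(my_ready_cb, NULL);

	if (ret == -EEXIST) {
		my_ready_cb(NULL);
		return 0;
	}
	return ret;
}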
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 1be9a6745531..96003d7a16a0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -34,6 +34,8 @@
#include <linux/rmnet_ipa_fd_ioctl.h>
#include <linux/ipa.h>
#include <uapi/linux/net_map.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <net/rmnet_config.h>
#include "ipa_trace.h"
@@ -1231,6 +1233,81 @@ static void apps_ipa_packet_receive_notify(void *priv,
}
+static int handle_ingress_format(struct net_device *dev,
+ struct rmnet_ioctl_extended_s *in)
+{
+ int ret = 0;
+ struct rmnet_phys_ep_conf_s *ep_cfg;
+
+ IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
+ if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_DL;
+
+ if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
+ IPAWANERR("get AGG size %d count %d\n",
+ in->u.ingress_format.agg_size,
+ in->u.ingress_format.agg_count);
+
+ ret = ipa_disable_apps_wan_cons_deaggr(
+ in->u.ingress_format.agg_size,
+ in->u.ingress_format.agg_count);
+
+ if (!ret) {
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit =
+ in->u.ingress_format.agg_size;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
+ in->u.ingress_format.agg_count;
+
+ if (ipa_rmnet_res.ipa_napi_enable) {
+ ipa_to_apps_ep_cfg.recycle_enabled = true;
+ ep_cfg = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(dev->rx_handler_data);
+ ep_cfg->recycle = ipa_recycle_wan_skb;
+ pr_info("Wan Recycle Enabled\n");
+ }
+ }
+ }
+
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
+
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding =
+ true;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
+ ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
+
+ ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
+ ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify;
+ ipa_to_apps_ep_cfg.priv = dev;
+
+ ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
+ if (ipa_to_apps_ep_cfg.napi_enabled)
+ ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
+ else
+ ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+
+ mutex_lock(&ipa_to_apps_pipe_handle_guard);
+ if (atomic_read(&is_ssr)) {
+ IPAWANDBG("In SSR sequence/recovery\n");
+ mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+ return -EFAULT;
+ }
+ ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
+ mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+
+ if (ret)
+ IPAWANERR("failed to configure ingress\n");
+
+ return ret;
+}
+
/**
* ipa_wwan_ioctl() - I/O control for wwan network driver.
*
@@ -1531,83 +1608,7 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
break;
case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
- IPAWANDBG("get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
- if ((extend_ioctl_data.u.data) &
- RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
- ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.
- cs_offload_en =
- IPA_ENABLE_CS_OFFLOAD_DL;
-
- if ((extend_ioctl_data.u.data) &
- RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
- IPAWANERR("get AGG size %d count %d\n",
- extend_ioctl_data.u.
- ingress_format.agg_size,
- extend_ioctl_data.u.
- ingress_format.agg_count);
- if (!ipa_disable_apps_wan_cons_deaggr(
- extend_ioctl_data.u.
- ingress_format.agg_size,
- extend_ioctl_data.
- u.ingress_format.agg_count)) {
- ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.
- aggr_byte_limit = extend_ioctl_data.
- u.ingress_format.agg_size;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.
- aggr_pkt_limit = extend_ioctl_data.
- u.ingress_format.agg_count;
- }
- }
-
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
- hdr_ofst_metadata_valid = 1;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.
- hdr.hdr_ofst_metadata = 1;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
- hdr_ofst_pkt_size_valid = 1;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
- hdr_ofst_pkt_size = 2;
-
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
- hdr_total_len_or_pad_valid = true;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
- hdr_total_len_or_pad = 0;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
- hdr_payload_len_inc_padding = true;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
- hdr_total_len_or_pad_offset = 0;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
- hdr_little_endian = 0;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.
- metadata_mask = 0xFF000000;
-
- ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
- ipa_to_apps_ep_cfg.notify =
- apps_ipa_packet_receive_notify;
- ipa_to_apps_ep_cfg.priv = dev;
-
- ipa_to_apps_ep_cfg.napi_enabled =
- ipa_rmnet_res.ipa_napi_enable;
- if (ipa_to_apps_ep_cfg.napi_enabled)
- ipa_to_apps_ep_cfg.desc_fifo_sz =
- IPA_WAN_CONS_DESC_FIFO_SZ;
- else
- ipa_to_apps_ep_cfg.desc_fifo_sz =
- IPA_SYS_DESC_FIFO_SZ;
-
- mutex_lock(&ipa_to_apps_pipe_handle_guard);
- if (atomic_read(&is_ssr)) {
- IPAWANDBG("In SSR sequence/recovery\n");
- mutex_unlock(&ipa_to_apps_pipe_handle_guard);
- rc = -EFAULT;
- break;
- }
- rc = ipa2_setup_sys_pipe(
- &ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
- mutex_unlock(&ipa_to_apps_pipe_handle_guard);
- if (rc)
- IPAWANERR("failed to configure ingress\n");
+ rc = handle_ingress_format(dev, &extend_ioctl_data);
break;
case RMNET_IOCTL_SET_XLAT_DEV_INFO:
wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 1df2bc6b902c..ab62dbcddd22 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -38,6 +38,16 @@
#include <linux/hash.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/smem.h>
+#include <soc/qcom/scm.h>
+
+#ifdef CONFIG_ARM64
+
+/* Outer caches unsupported on ARM64 platforms */
+#define outer_flush_range(x, y)
+#define __cpuc_flush_dcache_area __flush_dcache_area
+
+#endif
+
#define IPA_SUBSYSTEM_NAME "ipa_fws"
#include "ipa_i.h"
#include "../ipa_rm_i.h"
@@ -62,7 +72,7 @@
#define IPA_AGGR_MAX_STR_LENGTH (10)
-#define CLEANUP_TAG_PROCESS_TIMEOUT 150
+#define CLEANUP_TAG_PROCESS_TIMEOUT 500
#define IPA_AGGR_STR_IN_BYTES(str) \
(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
@@ -199,6 +209,21 @@ struct ipa3_ioc_nat_alloc_mem32 {
};
#endif
+#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
+#define TZ_MEM_PROTECT_REGION_ID 0x10
+
+struct tz_smmu_ipa_protect_region_iovec_s {
+ u64 input_addr;
+ u64 output_addr;
+ u64 size;
+ u32 attr;
+} __packed;
+
+struct tz_smmu_ipa_protect_region_s {
+ phys_addr_t iovec_buf;
+ u32 size_bytes;
+} __packed;
+
static void ipa3_start_tag_process(struct work_struct *work);
static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
@@ -226,7 +251,6 @@ struct platform_device *ipa3_pdev;
static struct {
bool present;
bool arm_smmu;
- bool disable_htw;
bool fast_map;
bool s1_bypass;
bool use_64_bit_dma_mask;
@@ -2213,7 +2237,7 @@ static int ipa3_q6_set_ex_path_to_apps(void)
}
}
- /* Will wait 150msecs for IPA tag process completion */
+ /* Will wait 500msecs for IPA tag process completion */
retval = ipa3_tag_process(desc, num_descs,
msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
if (retval) {
@@ -3766,6 +3790,32 @@ static int ipa3_gsi_pre_fw_load_init(void)
return 0;
}
+static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
+{
+ enum gsi_ver gsi_ver;
+
+ switch (ipa_hw_type) {
+ case IPA_HW_v3_0:
+ case IPA_HW_v3_1:
+ gsi_ver = GSI_VER_1_0;
+ break;
+ case IPA_HW_v3_5:
+ gsi_ver = GSI_VER_1_2;
+ break;
+ case IPA_HW_v3_5_1:
+ gsi_ver = GSI_VER_1_3;
+ break;
+ default:
+ IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
+ WARN_ON(1);
+ gsi_ver = GSI_VER_ERR;
+ }
+
+ IPADBG("GSI version %d\n", gsi_ver);
+
+ return gsi_ver;
+}
+
/**
* ipa3_post_init() - Initialize the IPA Driver (Part II).
* This part contains all initialization which requires interaction with
@@ -3795,6 +3845,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
memset(&gsi_props, 0, sizeof(gsi_props));
+ gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
gsi_props.ee = resource_p->ee;
gsi_props.intr = GSI_INTR_IRQ;
gsi_props.irq = resource_p->transport_irq;
@@ -4036,6 +4087,53 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
return count;
}
+static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx)
+{
+ int i, size, ret, resp;
+ struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
+ struct tz_smmu_ipa_protect_region_s cmd_buf;
+
+ if (ipa3_ctx && ipa3_ctx->ipa_tz_unlock_reg_num > 0) {
+ size = ipa3_ctx->ipa_tz_unlock_reg_num *
+ sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
+ ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
+ if (ipa_tz_unlock_vec == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
+ ipa_tz_unlock_vec[i].input_addr =
+ ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
+ (ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
+ 0xFFF);
+ ipa_tz_unlock_vec[i].output_addr =
+ ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
+ (ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
+ 0xFFF);
+ ipa_tz_unlock_vec[i].size =
+ ipa3_ctx->ipa_tz_unlock_reg[i].size;
+ ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
+ }
+
+ /* pass physical address of command buffer */
+ cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
+ cmd_buf.size_bytes = size;
+
+ /* flush cache to DDR */
+ __cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
+ outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);
+
+ ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID, &cmd_buf,
+ sizeof(cmd_buf), &resp, sizeof(resp));
+ if (ret) {
+ IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
+ kfree(ipa_tz_unlock_vec);
+ return -EFAULT;
+ }
+ kfree(ipa_tz_unlock_vec);
+ }
+ return 0;
+}
+
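A note on the address math in ipa3_tz_unlock_reg(): reg_addr ^ (reg_addr & 0xFFF) clears the low 12 bits, i.e. it rounds the register address down to a 4 KB page boundary before the region is handed to TZ. A minimal userspace sketch (not kernel code) demonstrating the identity:

#include <assert.h>
#include <stdint.h>

static uint64_t align_down_4k(uint64_t addr)
{
	/* identical to addr & ~0xFFFULL */
	return addr ^ (addr & 0xFFF);
}

int main(void)
{
	assert(align_down_4k(0x01234ABCULL) == 0x01234000ULL);
	assert(align_down_4k(0x01234ABCULL) == (0x01234ABCULL & ~0xFFFULL));
	return 0;
}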
/**
* ipa3_pre_init() - Initialize the IPA Driver.
* This part contains all initialization which doesn't require IPA HW, such
@@ -4120,6 +4218,27 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
+ if (resource_p->ipa_tz_unlock_reg) {
+ ipa3_ctx->ipa_tz_unlock_reg_num =
+ resource_p->ipa_tz_unlock_reg_num;
+ ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
+ ipa3_ctx->ipa_tz_unlock_reg_num,
+ sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
+ GFP_KERNEL);
+ if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
+ result = -ENOMEM;
+ goto fail_tz_unlock_reg;
+ }
+ for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
+ ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
+ resource_p->ipa_tz_unlock_reg[i].reg_addr;
+ ipa3_ctx->ipa_tz_unlock_reg[i].size =
+ resource_p->ipa_tz_unlock_reg[i].size;
+ }
+ }
+
+ /* unlock registers for uc */
+ ipa3_tz_unlock_reg(ipa3_ctx);
/* default aggregation parameters */
ipa3_ctx->aggregation_type = IPA_MBIM_16;
@@ -4568,6 +4687,8 @@ fail_init_mem_partition:
fail_bind:
kfree(ipa3_ctx->ctrl);
fail_mem_ctrl:
+ kfree(ipa3_ctx->ipa_tz_unlock_reg);
+fail_tz_unlock_reg:
ipc_log_context_destroy(ipa3_ctx->logbuf);
fail_logbuf:
kfree(ipa3_ctx);
@@ -4579,8 +4700,10 @@ fail_mem_ctx:
static int get_ipa_dts_configuration(struct platform_device *pdev,
struct ipa3_plat_drv_res *ipa_drv_res)
{
- int result;
+ int i, result, pos;
struct resource *resource;
+ u32 *ipa_tz_unlock_reg;
+ int elem_num;
/* initialize ipa3_res */
ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
@@ -4595,9 +4718,8 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
ipa_drv_res->apply_rg10_wa = false;
ipa_drv_res->gsi_ch20_wa = false;
-
- smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
- "qcom,smmu-disable-htw");
+ ipa_drv_res->ipa_tz_unlock_reg_num = 0;
+ ipa_drv_res->ipa_tz_unlock_reg = NULL;
/* Get IPA HW Version */
result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
@@ -4808,13 +4930,52 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->apply_rg10_wa
? "Needed" : "Not needed");
+ elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
+ "qcom,ipa-tz-unlock-reg", sizeof(u32));
+
+ if (elem_num > 0 && elem_num % 2 == 0) {
+ ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
+
+ ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
+ if (ipa_tz_unlock_reg == NULL)
+ return -ENOMEM;
+
+ ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
+ ipa_drv_res->ipa_tz_unlock_reg_num,
+ sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
+ GFP_KERNEL);
+ if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
+ kfree(ipa_tz_unlock_reg);
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
+ elem_num)) {
+ IPAERR("failed to read register addresses\n");
+ kfree(ipa_tz_unlock_reg);
+ kfree(ipa_drv_res->ipa_tz_unlock_reg);
+ return -EFAULT;
+ }
+
+ pos = 0;
+ for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
+ ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
+ ipa_tz_unlock_reg[pos++];
+ ipa_drv_res->ipa_tz_unlock_reg[i].size =
+ ipa_tz_unlock_reg[pos++];
+ IPADBG("tz unlock reg %d: addr 0x%pa size %d\n", i,
+ &ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
+ ipa_drv_res->ipa_tz_unlock_reg[i].size);
+ }
+ kfree(ipa_tz_unlock_reg);
+ }
return 0;
}
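The parsing above expects qcom,ipa-tz-unlock-reg as a flat array with an even number of u32 cells, consumed as (address, size) pairs. A hypothetical device-tree fragment, with made-up addresses and a node label assumed to exist in the board file:

&ipa_hw {
	/* two regions to unlock: (addr, size) pairs, four u32 cells */
	qcom,ipa-tz-unlock-reg = <0x01e40000 0x1000
				  0x01e60000 0x2000>;
};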
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4834,17 +4995,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
}
cb->valid = true;
- if (smmu_info.disable_htw) {
- ret = iommu_domain_set_attr(cb->iommu,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- IPAERR("couldn't disable coherent HTW\n");
- cb->valid = false;
- return -EIO;
- }
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_S1_BYPASS,
@@ -4917,7 +5067,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int bypass = 1;
int fast = 1;
@@ -4963,18 +5112,6 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- }
-
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
@@ -5029,7 +5166,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
int result;
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -5077,17 +5213,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- IPADBG("SMMU disable HTW\n");
- }
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 3915f652d87b..25e5e3b74f26 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1478,6 +1478,7 @@ static ssize_t ipa3_read_nat4(struct file *file,
u16 enable, tbl_entry, flag;
u32 no_entrys = 0;
+ mutex_lock(&ipa3_ctx->nat_mem.lock);
value = ipa3_ctx->nat_mem.public_ip_addr;
pr_err(
"Table IP Address:%d.%d.%d.%d\n",
@@ -1631,6 +1632,7 @@ static ssize_t ipa3_read_nat4(struct file *file,
}
}
pr_err("Current No. Nat Entries: %d\n", no_entrys);
+ mutex_unlock(&ipa3_ctx->nat_mem.lock);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 9e346f12a108..94e8bba1fe01 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -3180,22 +3180,20 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
sys->free_rx_wrapper = ipa3_free_rx_wrapper;
- if (in->napi_enabled) {
+ sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size;
+ if (nr_cpu_ids > 1) {
sys->repl_hdlr =
- ipa3_replenish_rx_cache_recycle;
- sys->rx_pool_sz =
- IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ ipa3_fast_replenish_rx_cache;
} else {
- if (nr_cpu_ids > 1) {
- sys->repl_hdlr =
- ipa3_fast_replenish_rx_cache;
- } else {
- sys->repl_hdlr =
- ipa3_replenish_rx_cache;
- }
- sys->rx_pool_sz =
- ipa3_ctx->wan_rx_ring_size;
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache;
}
+ if (in->napi_enabled)
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ if (in->napi_enabled && in->recycle_enabled)
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache_recycle;
in->ipa_ep_cfg.aggr.aggr_sw_eof_active
= true;
if (ipa3_ctx->
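Distilled, the rewritten WAN_CONS branch above now reads: the pool size defaults to the WAN ring size with a plain or fast replenisher keyed on SMP, NAPI then overrides the pool size, and the recycling replenisher is chosen only when both NAPI and recycle are enabled. A compilable sketch of just the handler decision (names hypothetical):

#include <stdbool.h>

enum repl_hdlr { REPL_PLAIN, REPL_FAST, REPL_RECYCLE };

/* hypothetical distillation of the handler selection above */
static enum repl_hdlr pick_wan_repl(unsigned int nr_cpus,
				    bool napi_enabled, bool recycle_enabled)
{
	enum repl_hdlr h = (nr_cpus > 1) ? REPL_FAST : REPL_PLAIN;

	if (napi_enabled && recycle_enabled)
		h = REPL_RECYCLE;
	return h;
}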
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index aff39fc18f67..df413c991a53 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -136,7 +136,7 @@ static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
IPAERR("failed to calculate HW FLT rule size\n");
return -EPERM;
}
- IPADBG("pipe %d rule_id (handle) %u hw_len %d priority %u\n",
+ IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n",
pipe_idx, entry->rule_id, entry->hw_len, entry->prio);
if (entry->rule.hashable)
@@ -1372,18 +1372,18 @@ void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
mutex_lock(&ipa3_ctx->lock);
tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
- tbl->sticky_rear = true;
rule.action = IPA_PASS_TO_EXCEPTION;
- __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, false,
+ __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
&ep->dflt_flt4_rule_hdl);
ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+ tbl->sticky_rear = true;
tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
- tbl->sticky_rear = true;
rule.action = IPA_PASS_TO_EXCEPTION;
- __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, false,
+ __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
&ep->dflt_flt6_rule_hdl);
ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+ tbl->sticky_rear = true;
mutex_unlock(&ipa3_ctx->lock);
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 4309fbc3154f..8e85822d9719 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -43,8 +43,6 @@
#define MTU_BYTE 1500
#define IPA3_MAX_NUM_PIPES 31
-#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
#define IPA_SYS_DESC_FIFO_SZ 0x800
#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
#define IPA_LAN_RX_HEADER_LENGTH (2)
@@ -55,6 +53,8 @@
#define IPA_UC_FINISH_MAX 6
#define IPA_UC_WAIT_MIN_SLEEP 1000
#define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
+#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
#define IPA_MAX_STATUS_STAT_NUM 30
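Worked out against the surrounding defines: IPA_WAN_CONS_DESC_FIFO_SZ becomes 0x800 * 3 = 0x1800 (6144) bytes, replacing the fixed 0x5E80 (24192), and the NAPI Rx pool becomes three times the generic Rx pool size instead of a hard-coded 3000 entries.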
@@ -481,7 +481,7 @@ struct ipa_gsi_ep_mem_info {
struct ipa3_status_stats {
struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
- int curr;
+ unsigned int curr;
};
/**
@@ -1012,6 +1012,11 @@ struct ipa3_ready_cb_info {
void *user_data;
};
+struct ipa_tz_unlock_reg_info {
+ u64 reg_addr;
+ u32 size;
+};
+
/**
* struct ipa3_context - IPA context
* @class: pointer to the struct class
@@ -1228,6 +1233,8 @@ struct ipa3_context {
struct list_head ipa_ready_cb_list;
struct completion init_completion_obj;
struct ipa3_smp2p_info smp2p_info;
+ u32 ipa_tz_unlock_reg_num;
+ struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
};
/**
@@ -1266,6 +1273,8 @@ struct ipa3_plat_drv_res {
bool apply_rg10_wa;
bool gsi_ch20_wa;
bool tethered_flow_control;
+ u32 ipa_tz_unlock_reg_num;
+ struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
};
/**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index bf8a5ade04bd..a6b075583162 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -582,6 +582,8 @@ static int ipa3_qmi_init_modem_send_sync_msg(void)
resp_desc.ei_array = ipa3_init_modem_driver_resp_msg_data_v01_ei;
pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_REQ_TIMEOUT_MS);
@@ -623,6 +625,8 @@ int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
resp_desc.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei;
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
req,
sizeof(struct ipa_install_fltr_rule_req_msg_v01),
@@ -703,6 +707,8 @@ int ipa3_qmi_enable_force_clear_datapath_send(
resp_desc.ei_array =
ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei;
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -746,7 +752,8 @@ int ipa3_qmi_disable_force_clear_datapath_send(
resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
resp_desc.ei_array =
ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -803,6 +810,8 @@ int ipa3_qmi_filter_notify_send(
resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
resp_desc.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei;
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -1213,6 +1222,8 @@ int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1242,6 +1253,8 @@ int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1273,7 +1286,8 @@ int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
resp_desc.ei_array = ipa3_set_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
&resp_desc, &resp, sizeof(resp),
@@ -1307,7 +1321,8 @@ int ipa3_qmi_stop_data_qouta(void)
resp_desc.ei_array = ipa3_stop_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_STATS_REQ_TIMEOUT_MS);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index b06e33a8258a..6c7bf500e760 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -289,7 +289,7 @@ static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
return -EPERM;
}
- IPADBG("RT rule id (handle) %d hw_len %u priority %u\n",
+ IPADBG_LOW("RT rule id (handle) %d hw_len %u priority %u\n",
entry->id, entry->hw_len, entry->prio);
if (entry->rule.hashable)
@@ -642,23 +642,23 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
goto fail_imm_cmd_construct;
}
- IPADBG("Hashable HEAD\n");
+ IPADBG_LOW("Hashable HEAD\n");
IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
- IPADBG("Non-Hashable HEAD\n");
+ IPADBG_LOW("Non-Hashable HEAD\n");
IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
if (alloc_params.hash_bdy.size) {
- IPADBG("Hashable BODY\n");
+ IPADBG_LOW("Hashable BODY\n");
IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
alloc_params.hash_bdy.phys_base,
alloc_params.hash_bdy.size);
}
if (alloc_params.nhash_bdy.size) {
- IPADBG("Non-Hashable BODY\n");
+ IPADBG_LOW("Non-Hashable BODY\n");
IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
alloc_params.nhash_bdy.phys_base,
alloc_params.nhash_bdy.size);
@@ -1675,7 +1675,7 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type,
u8 *rule_addr;
int rule_idx;
- IPADBG("tbl_idx=%d ip_type=%d hashable=%d entry=0x%p num_entry=0x%p\n",
+ IPADBG_LOW("tbl_idx=%d ip_t=%d hashable=%d entry=0x%p num_entry=0x%p\n",
tbl_idx, ip_type, hashable, entry, num_entry);
if (ip_type == IPA_IP_v4 && tbl_idx >= IPA_MEM_PART(v4_rt_num_index)) {
@@ -1716,7 +1716,7 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type,
IPA_MEM_PART(v6_rt_nhash_ofst);
}
- IPADBG("hdr_base_ofst=0x%llx\n", hdr_base_ofst);
+ IPADBG_LOW("hdr_base_ofst=0x%llx\n", hdr_base_ofst);
res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
tbl_idx, &tbl_addr, &is_sys);
@@ -1724,7 +1724,7 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type,
IPAERR("failed to read table address from header structure\n");
goto bail;
}
- IPADBG("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n",
+ IPADBG_LOW("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n",
tbl_idx, tbl_addr, is_sys);
if (!tbl_addr) {
IPAERR("invalid rt tbl addr\n");
@@ -1760,7 +1760,7 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type,
rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
}
- IPADBG("First rule addr 0x%p\n", rule_addr);
+ IPADBG_LOW("First rule addr 0x%p\n", rule_addr);
if (!rule_addr) {
/* Modem table in system memory or empty table */
@@ -1776,7 +1776,7 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type,
goto bail;
}
- IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
+ IPADBG_LOW("rule_size=%d\n", entry[rule_idx].rule_size);
if (!entry[rule_idx].rule_size)
break;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index e355d9db3777..67b3cb301f1f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -115,7 +115,7 @@ static u64 ipa_fltrt_create_tbl_addr(bool is_sys, u64 addr)
static void ipa_fltrt_parse_tbl_addr(u64 hwaddr, u64 *addr, bool *is_sys)
{
- IPAHAL_DBG("Parsing hwaddr 0x%llx\n", hwaddr);
+ IPAHAL_DBG_LOW("Parsing hwaddr 0x%llx\n", hwaddr);
*is_sys = !(hwaddr & 0x1);
hwaddr &= (~0ULL - 1);
@@ -254,7 +254,7 @@ static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params,
}
rule_hdr->u.hdr.en_rule = en_rule;
- IPAHAL_DBG("en_rule 0x%x\n", en_rule);
+ IPAHAL_DBG_LOW("en_rule 0x%x\n", en_rule);
ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
if (*hw_len == 0) {
@@ -327,12 +327,12 @@ static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
}
rule_hdr->u.hdr.en_rule = en_rule;
- IPAHAL_DBG("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+ IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
en_rule,
rule_hdr->u.hdr.action,
rule_hdr->u.hdr.rt_tbl_idx,
rule_hdr->u.hdr.retain_hdr);
- IPAHAL_DBG("priority=%d, rule_id=%d\n",
+ IPAHAL_DBG_LOW("priority=%d, rule_id=%d\n",
rule_hdr->u.hdr.priority,
rule_hdr->u.hdr.rule_id);
@@ -1152,25 +1152,25 @@ static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
* OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
*/
if (attrib->attrib_mask == 0) {
- IPAHAL_DBG("building default rule\n");
+ IPAHAL_DBG_LOW("building default rule\n");
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[0]);
extra_wrd_i = ipa_write_8(0, extra_wrd_i); /* offset */
rest_wrd_i = ipa_write_32(0, rest_wrd_i); /* mask */
rest_wrd_i = ipa_write_32(0, rest_wrd_i); /* val */
}
- IPAHAL_DBG("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
- IPAHAL_DBG("extra_word_2 0x%llx\n",
+ IPAHAL_DBG_LOW("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
+ IPAHAL_DBG_LOW("extra_word_2 0x%llx\n",
*(u64 *)(extra_wrd_start + IPA3_0_HW_TBL_WIDTH));
extra_wrd_i = ipa_pad_to_64(extra_wrd_i);
sz = extra_wrd_i - extra_wrd_start;
- IPAHAL_DBG("extra words params sz %d\n", sz);
+ IPAHAL_DBG_LOW("extra words params sz %d\n", sz);
*buf = ipa_fltrt_copy_mem(extra_wrd_start, *buf, sz);
rest_wrd_i = ipa_pad_to_64(rest_wrd_i);
sz = rest_wrd_i - rest_wrd_start;
- IPAHAL_DBG("non extra words params sz %d\n", sz);
+ IPAHAL_DBG_LOW("non extra words params sz %d\n", sz);
*buf = ipa_fltrt_copy_mem(rest_wrd_start, *buf, sz);
fail_err_check:
@@ -1208,7 +1208,7 @@ static int ipa_fltrt_calc_extra_wrd_bytes(
if (attrib->ihl_offset_eq_16_present)
num++;
- IPAHAL_DBG("extra bytes number %d\n", num);
+ IPAHAL_DBG_LOW("extra bytes number %d\n", num);
return num;
}
@@ -2024,7 +2024,7 @@ static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz,
eq_bitmap = atrb->rule_eq_bitmap;
- IPAHAL_DBG("eq_bitmap=0x%x\n", eq_bitmap);
+ IPAHAL_DBG_LOW("eq_bitmap=0x%x\n", eq_bitmap);
if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ))
atrb->tos_eq_present = true;
@@ -2080,7 +2080,7 @@ static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz,
extra = &dummy_extra_wrd;
rest = addr + hdr_sz;
}
- IPAHAL_DBG("addr=0x%p extra=0x%p rest=0x%p\n", addr, extra, rest);
+ IPAHAL_DBG_LOW("addr=0x%p extra=0x%p rest=0x%p\n", addr, extra, rest);
if (atrb->tos_eq_present)
atrb->tos_eq = *extra++;
@@ -2182,13 +2182,13 @@ static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz,
rest += 4;
}
- IPAHAL_DBG("before rule alignment rest=0x%p\n", rest);
+ IPAHAL_DBG_LOW("before rule alignment rest=0x%p\n", rest);
rest = (u8 *)(((unsigned long)rest + IPA3_0_HW_RULE_START_ALIGNMENT) &
~IPA3_0_HW_RULE_START_ALIGNMENT);
- IPAHAL_DBG("after rule alignment rest=0x%p\n", rest);
+ IPAHAL_DBG_LOW("after rule alignment rest=0x%p\n", rest);
*rule_size = rest - addr;
- IPAHAL_DBG("rule_size=0x%x\n", *rule_size);
+ IPAHAL_DBG_LOW("rule_size=0x%x\n", *rule_size);
return 0;
}
@@ -2198,12 +2198,12 @@ static int ipa_rt_parse_hw_rule(u8 *addr, struct ipahal_rt_rule_entry *rule)
struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
struct ipa_ipfltri_rule_eq *atrb;
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)addr;
atrb = &rule->eq_attrib;
- IPAHAL_DBG("read hdr 0x%llx\n", rule_hdr->u.word);
+ IPAHAL_DBG_LOW("read hdr 0x%llx\n", rule_hdr->u.word);
if (rule_hdr->u.word == 0) {
 	/* table terminator - empty table */
@@ -2235,7 +2235,7 @@ static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule)
struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
struct ipa_ipfltri_rule_eq *atrb;
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)addr;
atrb = &rule->rule.eq_attrib;
@@ -2775,11 +2775,11 @@ static int ipa_fltrt_alloc_lcl_bdy(
/* The HAL allocates larger sizes than the given effective ones
* for alignments and border indications
*/
- IPAHAL_DBG("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n",
+ IPAHAL_DBG_LOW("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n",
params->total_sz_lcl_hash_tbls,
params->total_sz_lcl_nhash_tbls);
- IPAHAL_DBG("lcl tbl bdy count: hash=%u nhash=%u\n",
+ IPAHAL_DBG_LOW("lcl tbl bdy count: hash=%u nhash=%u\n",
params->num_lcl_hash_tbls,
params->num_lcl_nhash_tbls);
@@ -2798,7 +2798,7 @@ static int ipa_fltrt_alloc_lcl_bdy(
params->nhash_bdy.size += obj->blk_sz_alignment;
params->nhash_bdy.size &= ~(obj->blk_sz_alignment);
- IPAHAL_DBG("nhash lcl tbl bdy total h/w size = %u\n",
+ IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n",
params->nhash_bdy.size);
params->nhash_bdy.base = dma_alloc_coherent(
@@ -2829,7 +2829,7 @@ static int ipa_fltrt_alloc_lcl_bdy(
params->hash_bdy.size += obj->blk_sz_alignment;
params->hash_bdy.size &= ~(obj->blk_sz_alignment);
- IPAHAL_DBG("hash lcl tbl bdy total h/w size = %u\n",
+ IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n",
params->hash_bdy.size);
params->hash_bdy.base = dma_alloc_coherent(
@@ -2862,7 +2862,7 @@ hash_bdy_fail:
int ipahal_fltrt_allocate_hw_tbl_imgs(
struct ipahal_fltrt_alloc_imgs_params *params)
{
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
/* Input validation */
if (!params) {
@@ -2904,7 +2904,7 @@ int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)
{
struct ipahal_fltrt_obj *obj;
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
if (!tbl_mem) {
IPAHAL_ERR("Input err\n");
@@ -2958,7 +2958,7 @@ int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
u64 hwaddr;
u8 *hdr;
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
@@ -2991,7 +2991,7 @@ int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
u64 hwaddr;
u8 *hdr;
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
@@ -3023,7 +3023,7 @@ int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
u8 *tmp = NULL;
int rc;
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
if (!params || !hw_len) {
IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
@@ -3081,7 +3081,7 @@ int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
u8 *tmp = NULL;
int rc;
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
if (!params || !hw_len) {
IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
@@ -3138,7 +3138,7 @@ int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
const struct ipa_rule_attrib *attrib,
struct ipa_ipfltri_rule_eq *eq_atrb)
{
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
if (ipt >= IPA_IP_MAX) {
IPAHAL_ERR("Input err: Invalid ip type %d\n", ipt);
@@ -3165,7 +3165,7 @@ int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
int ipahal_rt_parse_hw_rule(u8 *rule_addr,
struct ipahal_rt_rule_entry *rule)
{
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
if (!rule_addr || !rule) {
IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
@@ -3186,7 +3186,7 @@ int ipahal_rt_parse_hw_rule(u8 *rule_addr,
int ipahal_flt_parse_hw_rule(u8 *rule_addr,
struct ipahal_flt_rule_entry *rule)
{
- IPAHAL_DBG("Entry\n");
+ IPAHAL_DBG_LOW("Entry\n");
if (!rule_addr || !rule) {
IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 2cd08d77df6e..f134852e046e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -34,6 +34,8 @@
#include <linux/rmnet_ipa_fd_ioctl.h>
#include <linux/ipa.h>
#include <uapi/linux/net_map.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <net/rmnet_config.h>
#include "ipa_trace.h"
@@ -1241,6 +1243,85 @@ static void apps_ipa_packet_receive_notify(void *priv,
IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
}
+static int handle3_ingress_format(struct net_device *dev,
+ struct rmnet_ioctl_extended_s *in)
+{
+ int ret = 0;
+ struct ipa_sys_connect_params *ipa_wan_ep_cfg;
+ struct rmnet_phys_ep_conf_s *ep_cfg;
+
+ IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
+ ipa_wan_ep_cfg = &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg;
+ if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+ ipa_wan_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_DL;
+
+ if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
+ IPAWANERR("get AGG size %d count %d\n",
+ in->u.ingress_format.agg_size,
+ in->u.ingress_format.agg_count);
+
+ ret = ipa_disable_apps_wan_cons_deaggr(
+ in->u.ingress_format.agg_size,
+ in->u.ingress_format.agg_count);
+
+ if (!ret) {
+ ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
+ in->u.ingress_format.agg_size;
+ ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
+ in->u.ingress_format.agg_count;
+
+ if (ipa_wan_ep_cfg->napi_enabled) {
+ ipa_wan_ep_cfg->recycle_enabled = true;
+ ep_cfg = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(dev->rx_handler_data);
+ ep_cfg->recycle = ipa_recycle_wan_skb;
+ pr_info("Wan Recycle Enabled\n");
+ }
+ }
+ }
+
+ ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4;
+ ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+ ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
+ ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+ ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
+
+ ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+ ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
+ ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = true;
+ ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+ ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
+ ipa_wan_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
+
+ ipa_wan_ep_cfg->client = IPA_CLIENT_APPS_WAN_CONS;
+ ipa_wan_ep_cfg->notify = apps_ipa_packet_receive_notify;
+ ipa_wan_ep_cfg->priv = dev;
+
+ ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable;
+ if (ipa_wan_ep_cfg->napi_enabled)
+ ipa_wan_ep_cfg->desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
+ else
+ ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+
+ mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+
+ if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+ IPAWANDBG("In SSR sequence/recovery\n");
+ mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ return -EFAULT;
+ }
+ ret = ipa3_setup_sys_pipe(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
+ &rmnet_ipa3_ctx->ipa3_to_apps_hdl);
+
+ mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+
+ if (ret)
+ IPAWANERR("failed to configure ingress\n");
+
+ return ret;
+}
+
/**
* ipa3_wwan_ioctl() - I/O control for wwan network driver.
*
@@ -1556,91 +1637,7 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
break;
case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
- IPAWANDBG("get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
- if ((extend_ioctl_data.u.data) &
- RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
- ipa_ep_cfg.cfg.cs_offload_en =
- IPA_ENABLE_CS_OFFLOAD_DL;
-
- if ((extend_ioctl_data.u.data) &
- RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
- IPAWANERR("get AGG size %d count %d\n",
- extend_ioctl_data.u.
- ingress_format.agg_size,
- extend_ioctl_data.u.
- ingress_format.agg_count);
- if (!ipa_disable_apps_wan_cons_deaggr(
- extend_ioctl_data.u.
- ingress_format.agg_size,
- extend_ioctl_data.
- u.ingress_format.agg_count)) {
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
- ipa_ep_cfg.aggr.aggr_byte_limit =
- extend_ioctl_data.u.ingress_format.
- agg_size;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
- ipa_ep_cfg.aggr.aggr_pkt_limit =
- extend_ioctl_data.u.ingress_format.
- agg_count;
- }
- }
-
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
- hdr_len = 4;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
- hdr_ofst_metadata_valid = 1;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
- hdr.hdr_ofst_metadata = 1;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
- hdr_ofst_pkt_size_valid = 1;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
- hdr_ofst_pkt_size = 2;
-
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
- hdr_total_len_or_pad_valid = true;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
- hdr_total_len_or_pad = 0;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
- hdr_payload_len_inc_padding = true;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
- hdr_total_len_or_pad_offset = 0;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
- hdr_little_endian = 0;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
- metadata_mask.metadata_mask = 0xFF000000;
-
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.client =
- IPA_CLIENT_APPS_WAN_CONS;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.notify =
- apps_ipa_packet_receive_notify;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.priv = dev;
-
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled =
- ipa3_rmnet_res.ipa_napi_enable;
- if (rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled)
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
- desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
- else
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
- desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
-
- mutex_lock(
- &rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
- if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
- IPAWANDBG("In SSR sequence/recovery\n");
- mutex_unlock(&rmnet_ipa3_ctx->
- ipa_to_apps_pipe_handle_guard);
- rc = -EFAULT;
- break;
- }
- rc = ipa3_setup_sys_pipe(
- &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
- &rmnet_ipa3_ctx->ipa3_to_apps_hdl);
- mutex_unlock(&rmnet_ipa3_ctx->
- ipa_to_apps_pipe_handle_guard);
- if (rc)
- IPAWANERR("failed to configure ingress\n");
+ rc = handle3_ingress_format(dev, &extend_ioctl_data);
break;
case RMNET_IOCTL_SET_XLAT_DEV_INFO:
wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
@@ -1972,6 +1969,12 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
"qcom,ipa-advertise-sg-support");
pr_info("IPA SG support = %s\n",
ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
+
+ ipa_rmnet_drv_res->ipa_napi_enable =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-napi-enable");
+ pr_info("IPA Napi Enable = %s\n",
+ ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
return 0;
}
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 6d826590cabc..45fedfa72bda 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -569,7 +569,6 @@ err_disable_vregs:
static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
{
- int disable_htw = 1;
int atomic_ctx = 1;
int rc;
int bypass_enable = 1;
@@ -587,17 +586,6 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
dev_info(ctx->dev, "IOMMU mapping created: %p\n", ctx->mapping);
rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (rc) {
- /* This error can be ignored and not considered fatal,
- * but let the users know this happened
- */
- dev_err(ctx->dev, "Warning: disable coherent HTW failed (%d)\n",
- rc);
- }
-
- rc = iommu_domain_set_attr(ctx->mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (rc) {
diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c
index 0bbda4eb4116..78e685f789cd 100644
--- a/drivers/platform/msm/qpnp-revid.c
+++ b/drivers/platform/msm/qpnp-revid.c
@@ -27,6 +27,7 @@
#define REVID_SUBTYPE 0x5
#define REVID_STATUS1 0x8
#define REVID_SPARE_0 0x60
+#define REVID_FAB_ID 0xf2
#define QPNP_REVID_DEV_NAME "qcom,qpnp-revid"
@@ -154,7 +155,7 @@ static size_t build_pmic_string(char *buf, size_t n, int sid,
static int qpnp_revid_probe(struct platform_device *pdev)
{
u8 rev1, rev2, rev3, rev4, pmic_type, pmic_subtype, pmic_status;
- u8 option1, option2, option3, option4, spare0;
+	u8 option1, option2, option3, option4, spare0;
+	int fab_id;
unsigned int base;
int rc;
char pmic_string[PMIC_STRING_MAXLENGTH] = {'\0'};
@@ -199,6 +200,11 @@ static int qpnp_revid_probe(struct platform_device *pdev)
pmic_subtype = PMI8937_PERIPHERAL_SUBTYPE;
}
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,fab-id-valid"))
+ fab_id = qpnp_read_byte(regmap, base + REVID_FAB_ID);
+ else
+ fab_id = -EINVAL;
+
revid_chip = devm_kzalloc(&pdev->dev, sizeof(struct revid_chip),
GFP_KERNEL);
if (!revid_chip)
@@ -211,6 +217,7 @@ static int qpnp_revid_probe(struct platform_device *pdev)
revid_chip->data.rev4 = rev4;
revid_chip->data.pmic_subtype = pmic_subtype;
revid_chip->data.pmic_type = pmic_type;
+ revid_chip->data.fab_id = fab_id;
if (pmic_subtype < ARRAY_SIZE(pmic_names))
revid_chip->data.pmic_name = pmic_names[pmic_subtype];
diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c
index 3aef2060ab52..c94536398dac 100644
--- a/drivers/platform/msm/sps/bam.c
+++ b/drivers/platform/msm/sps/bam.c
@@ -1162,7 +1162,7 @@ void bam_output_register_content(void *base, u32 ee)
print_bam_test_bus_reg(base, 0);
- print_bam_selected_reg(dev->base, BAM_MAX_EES);
+ print_bam_selected_reg(base, BAM_MAX_EES);
num_pipes = bam_read_reg_field(base, NUM_PIPES, 0,
BAM_NUM_PIPES);
@@ -1174,11 +1174,11 @@ void bam_output_register_content(void *base, u32 ee)
if (!enhd_pipe || !pipe_attr)
for (i = 0; i < num_pipes; i++)
- print_bam_pipe_selected_reg(dev->base, i);
+ print_bam_pipe_selected_reg(base, i);
else {
for (i = 0; i < num_pipes; i++) {
if (pipe_attr & (1UL << i))
- print_bam_pipe_selected_reg(dev->base, i);
+ print_bam_pipe_selected_reg(base, i);
}
}
}
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 545a1e684b25..8af1eb66c699 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -269,6 +269,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(pd_active),
POWER_SUPPLY_ATTR(charger_temp),
POWER_SUPPLY_ATTR(charger_temp_max),
+ POWER_SUPPLY_ATTR(parallel_disable),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/qcom-charger/Makefile b/drivers/power/qcom-charger/Makefile
index aae6084c3c10..0126d2d0a18e 100644
--- a/drivers/power/qcom-charger/Makefile
+++ b/drivers/power/qcom-charger/Makefile
@@ -6,6 +6,6 @@ obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o
obj-$(CONFIG_MSM_BCL_CTL) += msm_bcl.o
obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
obj-$(CONFIG_BATTERY_BCL) += battery_current_limit.o
-obj-$(CONFIG_QPNP_SMB2) += qpnp-smb2.o smb-lib.o pmic-voter.o
-obj-$(CONFIG_SMB138X_CHARGER) += smb138x-charger.o smb-lib.o pmic-voter.o
+obj-$(CONFIG_QPNP_SMB2) += qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o
+obj-$(CONFIG_SMB138X_CHARGER) += smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o
obj-$(CONFIG_QPNP_QNOVO) += qpnp-qnovo.o
diff --git a/drivers/power/qcom-charger/battery_current_limit.c b/drivers/power/qcom-charger/battery_current_limit.c
index 2bda5ce1a8c4..951d0544efa0 100644
--- a/drivers/power/qcom-charger/battery_current_limit.c
+++ b/drivers/power/qcom-charger/battery_current_limit.c
@@ -174,6 +174,9 @@ struct bcl_context {
struct qpnp_adc_tm_btm_param btm_vph_adc_param;
/* Low temp min freq limit requested by thermal */
uint32_t thermal_freq_limit;
+ /* state of charge notifier */
+ struct notifier_block psy_nb;
+ struct work_struct soc_mitig_work;
/* BCL Peripheral monitor parameters */
struct bcl_threshold ibat_high_thresh;
@@ -204,8 +207,6 @@ static DEFINE_MUTEX(bcl_hotplug_mutex);
static bool bcl_hotplug_enabled;
static uint32_t battery_soc_val = 100;
static uint32_t soc_low_threshold;
-static struct power_supply *bcl_psy;
-static struct power_supply_desc bcl_psy_des;
static const char bcl_psy_name[] = "bcl";
static void bcl_handle_hotplug(struct work_struct *work)
@@ -277,22 +278,34 @@ static void update_cpu_freq(void)
trace_bcl_sw_mitigation_event("End Frequency Mitigation");
}
-static void power_supply_callback(struct power_supply *psy)
+static void soc_mitigate(struct work_struct *work)
{
- static struct power_supply *bms_psy;
+ if (bcl_hotplug_enabled)
+ queue_work(gbcl->bcl_hotplug_wq, &bcl_hotplug_work);
+ update_cpu_freq();
+}
+
+static int power_supply_callback(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct power_supply *psy = data;
+ static struct power_supply *batt_psy;
union power_supply_propval ret = {0,};
int battery_percentage;
enum bcl_threshold_state prev_soc_state;
if (gbcl->bcl_mode != BCL_DEVICE_ENABLED) {
pr_debug("BCL is not enabled\n");
- return;
+ return NOTIFY_OK;
}
- if (!bms_psy)
- bms_psy = power_supply_get_by_name("bms");
- if (bms_psy) {
- battery_percentage = power_supply_get_property(bms_psy,
+ if (strcmp(psy->desc->name, "battery"))
+ return NOTIFY_OK;
+
+ if (!batt_psy)
+ batt_psy = power_supply_get_by_name("battery");
+ if (batt_psy) {
+ battery_percentage = power_supply_get_property(batt_psy,
POWER_SUPPLY_PROP_CAPACITY, &ret);
battery_percentage = ret.intval;
battery_soc_val = battery_percentage;
@@ -302,15 +315,14 @@ static void power_supply_callback(struct power_supply *psy)
bcl_soc_state = (battery_soc_val <= soc_low_threshold) ?
BCL_LOW_THRESHOLD : BCL_HIGH_THRESHOLD;
if (bcl_soc_state == prev_soc_state)
- return;
+ return NOTIFY_OK;
trace_bcl_sw_mitigation_event(
(bcl_soc_state == BCL_LOW_THRESHOLD)
? "trigger SoC mitigation"
: "clear SoC mitigation");
- if (bcl_hotplug_enabled)
- queue_work(gbcl->bcl_hotplug_wq, &bcl_hotplug_work);
- update_cpu_freq();
+ schedule_work(&gbcl->soc_mitig_work);
}
+ return NOTIFY_OK;
}
static int bcl_get_battery_voltage(int *vbatt_mv)
@@ -624,7 +636,6 @@ static void bcl_periph_vbat_notify(enum bcl_trip_type type, int trip_temp,
static void bcl_periph_mode_set(enum bcl_device_mode mode)
{
int ret = 0;
- struct power_supply_config bcl_psy_cfg = {};
if (mode == BCL_DEVICE_ENABLED) {
/*
@@ -632,15 +643,11 @@ static void bcl_periph_mode_set(enum bcl_device_mode mode)
* power state changes. Make sure we read the current SoC
* and mitigate.
*/
- power_supply_callback(bcl_psy);
- bcl_psy_cfg.num_supplicants = 0;
- bcl_psy_cfg.drv_data = gbcl;
-
- bcl_psy = power_supply_register(gbcl->dev, &bcl_psy_des,
- &bcl_psy_cfg);
- if (IS_ERR(bcl_psy)) {
- pr_err("Unable to register bcl_psy rc = %ld\n",
- PTR_ERR(bcl_psy));
+ power_supply_callback(&gbcl->psy_nb, 1, gbcl);
+ ret = power_supply_reg_notifier(&gbcl->psy_nb);
+ if (ret < 0) {
+ pr_err("Unable to register soc notifier rc = %d\n",
+ ret);
return;
}
ret = msm_bcl_set_threshold(BCL_PARAM_CURRENT, BCL_HIGH_TRIP,
@@ -678,7 +685,7 @@ static void bcl_periph_mode_set(enum bcl_device_mode mode)
}
gbcl->btm_mode = BCL_VPH_MONITOR_MODE;
} else {
- power_supply_unregister(bcl_psy);
+ power_supply_unreg_notifier(&gbcl->psy_nb);
ret = msm_bcl_disable();
if (ret) {
pr_err("Error disabling BCL\n");
@@ -1627,19 +1634,6 @@ btm_probe_exit:
return ret;
}
-static int bcl_battery_get_property(struct power_supply *psy,
- enum power_supply_property prop,
- union power_supply_propval *val)
-{
- return 0;
-}
-static int bcl_battery_set_property(struct power_supply *psy,
- enum power_supply_property prop,
- const union power_supply_propval *val)
-{
- return 0;
-}
-
static uint32_t get_mask_from_core_handle(struct platform_device *pdev,
const char *key)
{
@@ -1725,12 +1719,8 @@ static int bcl_probe(struct platform_device *pdev)
pr_err("Cannot create bcl sysfs\n");
return ret;
}
- bcl_psy_des.name = bcl_psy_name;
- bcl_psy_des.type = POWER_SUPPLY_TYPE_BMS;
- bcl_psy_des.get_property = bcl_battery_get_property;
- bcl_psy_des.set_property = bcl_battery_set_property;
- bcl_psy_des.num_properties = 0;
- bcl_psy_des.external_power_changed = power_supply_callback;
+ INIT_WORK(&bcl->soc_mitig_work, soc_mitigate);
+ bcl->psy_nb.notifier_call = power_supply_callback;
bcl->bcl_hotplug_wq = alloc_workqueue("bcl_hotplug_wq", WQ_HIGHPRI, 0);
if (!bcl->bcl_hotplug_wq) {
pr_err("Workqueue alloc failed\n");
@@ -1773,6 +1763,7 @@ static int bcl_remove(struct platform_device *pdev)
int cpu;
/* De-register KTM handle */
+ power_supply_unreg_notifier(&gbcl->psy_nb);
if (gbcl->hotplug_handle)
devmgr_unregister_mitigation_client(&pdev->dev,
gbcl->hotplug_handle);
diff --git a/drivers/power/qcom-charger/fg-core.h b/drivers/power/qcom-charger/fg-core.h
index 08ec7334737e..adc640c7afe1 100644
--- a/drivers/power/qcom-charger/fg-core.h
+++ b/drivers/power/qcom-charger/fg-core.h
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
@@ -38,6 +39,7 @@
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
+/* Awake votable reasons */
#define SRAM_READ "fg_sram_read"
#define SRAM_WRITE "fg_sram_write"
#define PROFILE_LOAD "fg_profile_load"
@@ -54,6 +56,13 @@
CHARS_PER_ITEM) + 1) \
#define FG_SRAM_ADDRESS_MAX 255
+#define PROFILE_LEN 224
+#define PROFILE_COMP_LEN 148
+#define BUCKET_COUNT 8
+#define BUCKET_SOC_PCT (256 / BUCKET_COUNT)
+
+#define KI_COEFF_MAX 62200
+#define KI_COEFF_SOC_LEVELS 3
/* Debug flag definitions */
enum fg_debug_flag {
@@ -64,6 +73,7 @@ enum fg_debug_flag {
FG_SRAM_READ = BIT(4), /* Show SRAM reads */
FG_BUS_WRITE = BIT(5), /* Show REGMAP writes */
FG_BUS_READ = BIT(6), /* Show REGMAP reads */
+ FG_CAP_LEARN = BIT(7), /* Show capacity learning */
};
/* SRAM access */
@@ -114,10 +124,16 @@ enum fg_sram_param_id {
FG_SRAM_VOLTAGE_PRED,
FG_SRAM_OCV,
FG_SRAM_RSLOW,
+ FG_SRAM_ALG_FLAGS,
+ FG_SRAM_CC_SOC,
+ FG_SRAM_CC_SOC_SW,
+ FG_SRAM_ACT_BATT_CAP,
/* Entries below here are configurable during initialization */
FG_SRAM_CUTOFF_VOLT,
FG_SRAM_EMPTY_VOLT,
FG_SRAM_VBATT_LOW,
+ FG_SRAM_FLOAT_VOLT,
+ FG_SRAM_VBATT_FULL,
FG_SRAM_ESR_TIMER_DISCHG_MAX,
FG_SRAM_ESR_TIMER_DISCHG_INIT,
FG_SRAM_ESR_TIMER_CHG_MAX,
@@ -126,6 +142,8 @@ enum fg_sram_param_id {
FG_SRAM_CHG_TERM_CURR,
FG_SRAM_DELTA_SOC_THR,
FG_SRAM_RECHARGE_SOC_THR,
+ FG_SRAM_KI_COEFF_MED_DISCHG,
+ FG_SRAM_KI_COEFF_HI_DISCHG,
FG_SRAM_MAX,
};
@@ -143,8 +161,27 @@ struct fg_sram_param {
int val);
};
+enum fg_alg_flag_id {
+ ALG_FLAG_SOC_LT_OTG_MIN = 0,
+ ALG_FLAG_SOC_LT_RECHARGE,
+ ALG_FLAG_IBATT_LT_ITERM,
+ ALG_FLAG_IBATT_GT_HPM,
+ ALG_FLAG_IBATT_GT_UPM,
+ ALG_FLAG_VBATT_LT_RECHARGE,
+ ALG_FLAG_VBATT_GT_VFLOAT,
+ ALG_FLAG_MAX,
+};
+
+struct fg_alg_flag {
+ char *name;
+ u8 bit;
+ bool invalid;
+};
+
/* DT parameters for FG device */
struct fg_dt_props {
+ bool force_load_profile;
+ bool hold_soc_while_full;
int cutoff_volt_mv;
int empty_volt_mv;
int vbatt_low_thr_mv;
@@ -157,6 +194,18 @@ struct fg_dt_props {
int esr_timer_charging;
int esr_timer_awake;
int esr_timer_asleep;
+ int cl_start_soc;
+ int cl_max_temp;
+ int cl_min_temp;
+ int cl_max_cap_inc;
+ int cl_max_cap_dec;
+ int cl_max_cap_limit;
+ int cl_min_cap_limit;
+ int jeita_hyst_temp;
+ int batt_temp_delta;
+ int ki_coeff_soc[KI_COEFF_SOC_LEVELS];
+ int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
+ int ki_coeff_hi_dischg[KI_COEFF_SOC_LEVELS];
};
/* parameters from battery profile */
@@ -164,47 +213,82 @@ struct fg_batt_props {
const char *batt_type_str;
char *batt_profile;
int float_volt_uv;
+ int vbatt_full_mv;
int fastchg_curr_ma;
int batt_id_kohm;
};
+struct fg_cyc_ctr_data {
+ bool en;
+ bool started[BUCKET_COUNT];
+ u16 count[BUCKET_COUNT];
+ u8 last_soc[BUCKET_COUNT];
+ int id;
+ struct mutex lock;
+};
+
+struct fg_cap_learning {
+ bool active;
+ int init_cc_soc_sw;
+ int64_t nom_cap_uah;
+ int64_t init_cc_uah;
+ int64_t final_cc_uah;
+ int64_t learned_cc_uah;
+ struct mutex lock;
+};
+
struct fg_irq_info {
const char *name;
const irq_handler_t handler;
- int irq;
bool wakeable;
+ int irq;
};
struct fg_chip {
struct device *dev;
struct pmic_revid_data *pmic_rev_id;
struct regmap *regmap;
- struct dentry *dentry;
+ struct dentry *dfs_root;
struct power_supply *fg_psy;
struct power_supply *batt_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
struct iio_channel *batt_id_chan;
struct fg_memif *sram;
struct fg_irq_info *irqs;
struct votable *awake_votable;
struct fg_sram_param *sp;
+ struct fg_alg_flag *alg_flags;
int *debug_mask;
- char *batt_profile;
+ char batt_profile[PROFILE_LEN];
struct fg_dt_props dt;
struct fg_batt_props bp;
+ struct fg_cyc_ctr_data cyc_ctr;
struct notifier_block nb;
+ struct fg_cap_learning cl;
struct mutex bus_lock;
struct mutex sram_rw_lock;
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
- int nom_cap_uah;
- bool batt_id_avail;
+ int batt_id;
+ int status;
+ int charge_done;
+ int last_soc;
+ int last_batt_temp;
+ int health;
+ bool profile_available;
bool profile_loaded;
bool battery_missing;
+ bool fg_restarting;
+ bool charge_full;
+ bool recharge_soc_adjusted;
+ bool ki_coeff_dischg_en;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
+ struct work_struct cycle_count_work;
};
/* Debugfs data structures are below */
@@ -249,7 +333,9 @@ extern int fg_read(struct fg_chip *chip, int addr, u8 *val, int len);
extern int fg_write(struct fg_chip *chip, int addr, u8 *val, int len);
extern int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val);
extern int fg_ima_init(struct fg_chip *chip);
-extern int fg_sram_debugfs_create(struct fg_chip *chip);
+extern int fg_debugfs_create(struct fg_chip *chip);
extern void fill_string(char *str, size_t str_len, u8 *buf, int buf_len);
extern int64_t twos_compliment_extend(int64_t val, int s_bit_pos);
+extern s64 fg_float_decode(u16 val);
+extern bool is_input_present(struct fg_chip *chip);
#endif
diff --git a/drivers/power/qcom-charger/fg-reg.h b/drivers/power/qcom-charger/fg-reg.h
index 9d5874340a8e..431e28a7eb1f 100644
--- a/drivers/power/qcom-charger/fg-reg.h
+++ b/drivers/power/qcom-charger/fg-reg.h
@@ -126,6 +126,7 @@
/* BATT_INFO_BATT_TEMP_CFG */
#define JEITA_TEMP_HYST_MASK GENMASK(5, 4)
+#define JEITA_TEMP_HYST_SHIFT 4
#define JEITA_TEMP_NO_HYST 0x0
#define JEITA_TEMP_HYST_1C 0x1
#define JEITA_TEMP_HYST_2C 0x2
diff --git a/drivers/power/qcom-charger/fg-util.c b/drivers/power/qcom-charger/fg-util.c
index 5f133d6f39c1..bbdbe48896d7 100644
--- a/drivers/power/qcom-charger/fg-util.c
+++ b/drivers/power/qcom-charger/fg-util.c
@@ -29,6 +29,63 @@ static struct fg_dbgfs dbgfs_data = {
},
};
+static bool is_usb_present(struct fg_chip *chip)
+{
+ union power_supply_propval pval = {0, };
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ else
+ return false;
+
+ return pval.intval != 0;
+}
+
+static bool is_dc_present(struct fg_chip *chip)
+{
+ union power_supply_propval pval = {0, };
+
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (chip->dc_psy)
+ power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ else
+ return false;
+
+ return pval.intval != 0;
+}
+
+bool is_input_present(struct fg_chip *chip)
+{
+ return is_usb_present(chip) || is_dc_present(chip);
+}
+
+#define EXPONENT_SHIFT 11
+#define EXPONENT_OFFSET -9
+#define MANTISSA_SIGN_BIT 10
+#define MICRO_UNIT 1000000
+s64 fg_float_decode(u16 val)
+{
+ s8 exponent;
+ s32 mantissa;
+
+ /* mantissa bits are shifted out during sign extension */
+ exponent = ((s16)val >> EXPONENT_SHIFT) + EXPONENT_OFFSET;
+ /* exponent bits are shifted out during sign extension */
+ mantissa = sign_extend32(val, MANTISSA_SIGN_BIT) * MICRO_UNIT;
+
+ if (exponent < 0)
+ return (s64)mantissa >> -exponent;
+
+ return (s64)mantissa << exponent;
+}
+
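The format decoded above is a 16-bit custom float: bits 15:11 carry the exponent field (offset by -9) and bits 10:0 a signed mantissa, with results in micro-units. A standalone userspace sketch with one worked value; sign_extend32() is re-implemented here because it is a kernel helper:

#include <stdint.h>
#include <stdio.h>

#define EXPONENT_SHIFT    11
#define EXPONENT_OFFSET   -9
#define MANTISSA_SIGN_BIT 10
#define MICRO_UNIT        1000000

/* userspace stand-in for the kernel's sign_extend32() */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

static int64_t float_decode(uint16_t val)
{
	int8_t exponent = ((int16_t)val >> EXPONENT_SHIFT) + EXPONENT_OFFSET;
	int32_t mantissa = sign_extend32(val, MANTISSA_SIGN_BIT) * MICRO_UNIT;

	if (exponent < 0)
		return (int64_t)mantissa >> -exponent;
	return (int64_t)mantissa << exponent;
}

int main(void)
{
	/* 0x49F4: exponent field 9 (9 - 9 = 0), mantissa 0x1F4 = 500,
	 * so the decoded value is 500 * 1000000 = 500000000 micro-units.
	 */
	printf("%lld\n", (long long)float_decode(0x49F4));
	return 0;
}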
void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
{
int pos = 0;
@@ -63,6 +120,9 @@ int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
if (!chip)
return -ENXIO;
+ if (chip->battery_missing)
+ return -ENODATA;
+
if (!fg_sram_address_valid(address, len))
return -EFAULT;
@@ -75,6 +135,7 @@ int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
* This interrupt need to be enabled only when it is
* required. It will be kept disabled other times.
*/
+ reinit_completion(&chip->soc_update);
enable_irq(chip->irqs[SOC_UPDATE_IRQ].irq);
atomic_access = true;
} else {
@@ -127,6 +188,9 @@ int fg_sram_read(struct fg_chip *chip, u16 address, u8 offset,
if (!chip)
return -ENXIO;
+ if (chip->battery_missing)
+ return -ENODATA;
+
if (!fg_sram_address_valid(address, len))
return -EFAULT;
@@ -591,27 +655,23 @@ static const struct file_operations fg_sram_dfs_reg_fops = {
* fg_debugfs_create: adds new fg_sram debugfs entry
* @return zero on success
*/
-int fg_sram_debugfs_create(struct fg_chip *chip)
+static int fg_sram_debugfs_create(struct fg_chip *chip)
{
- struct dentry *root;
+ struct dentry *dfs_sram;
struct dentry *file;
mode_t dfs_mode = S_IRUSR | S_IWUSR;
pr_debug("Creating FG_SRAM debugfs file-system\n");
- root = debugfs_create_dir("fg_sram", NULL);
- if (IS_ERR_OR_NULL(root)) {
- pr_err("Error creating top level directory err:%ld",
- (long)root);
- if (PTR_ERR(root) == -ENODEV)
- pr_err("debugfs is not enabled in the kernel");
- return -ENODEV;
+ dfs_sram = debugfs_create_dir("sram", chip->dfs_root);
+ if (!dfs_sram) {
+ pr_err("error creating fg sram dfs rc=%ld\n",
+ (long)dfs_sram);
+ return -ENOMEM;
}
- if (!root)
- return -ENOENT;
-
dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
- file = debugfs_create_blob("help", S_IRUGO, root, &dbgfs_data.help_msg);
+ file = debugfs_create_blob("help", S_IRUGO, dfs_sram,
+ &dbgfs_data.help_msg);
if (!file) {
pr_err("error creating help entry\n");
goto err_remove_fs;
@@ -619,30 +679,106 @@ int fg_sram_debugfs_create(struct fg_chip *chip)
dbgfs_data.chip = chip;
- file = debugfs_create_u32("count", dfs_mode, root, &(dbgfs_data.cnt));
+ file = debugfs_create_u32("count", dfs_mode, dfs_sram,
+ &(dbgfs_data.cnt));
if (!file) {
pr_err("error creating 'count' entry\n");
goto err_remove_fs;
}
- file = debugfs_create_x32("address", dfs_mode,
- root, &(dbgfs_data.addr));
+ file = debugfs_create_x32("address", dfs_mode, dfs_sram,
+ &(dbgfs_data.addr));
if (!file) {
pr_err("error creating 'address' entry\n");
goto err_remove_fs;
}
- file = debugfs_create_file("data", dfs_mode, root, &dbgfs_data,
- &fg_sram_dfs_reg_fops);
+ file = debugfs_create_file("data", dfs_mode, dfs_sram, &dbgfs_data,
+ &fg_sram_dfs_reg_fops);
if (!file) {
pr_err("error creating 'data' entry\n");
goto err_remove_fs;
}
- chip->dentry = root;
return 0;
err_remove_fs:
- debugfs_remove_recursive(root);
+ debugfs_remove_recursive(dfs_sram);
+ return -ENOMEM;
+}
+
+static int fg_alg_flags_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t fg_alg_flags_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct fg_chip *chip = file->private_data;
+ char buf[512];
+ u8 alg_flags = 0;
+ int rc, i, len;
+
+ rc = fg_sram_read(chip, chip->sp[FG_SRAM_ALG_FLAGS].addr_word,
+ chip->sp[FG_SRAM_ALG_FLAGS].addr_byte, &alg_flags, 1,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to read algorithm flags rc=%d\n", rc);
+ return -EFAULT;
+ }
+
+ len = 0;
+ for (i = 0; i < ALG_FLAG_MAX; ++i) {
+ if (len > ARRAY_SIZE(buf) - 1)
+ return -EFAULT;
+ if (chip->alg_flags[i].invalid)
+ continue;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%s = %d\n", chip->alg_flags[i].name,
+ (bool)(alg_flags & chip->alg_flags[i].bit));
+ }
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fg_alg_flags_fops = {
+ .open = fg_alg_flags_open,
+ .read = fg_alg_flags_read,
+};
+
+int fg_debugfs_create(struct fg_chip *chip)
+{
+ int rc;
+
+ pr_debug("Creating debugfs file-system\n");
+ chip->dfs_root = debugfs_create_dir("fg", NULL);
+ if (IS_ERR_OR_NULL(chip->dfs_root)) {
+ if (PTR_ERR(chip->dfs_root) == -ENODEV)
+ pr_err("debugfs is not enabled in the kernel\n");
+ else
+ pr_err("error creating fg dfs root rc=%ld\n",
+ (long)chip->dfs_root);
+ return -ENODEV;
+ }
+
+ rc = fg_sram_debugfs_create(chip);
+ if (rc < 0) {
+ pr_err("failed to create sram dfs rc=%d\n", rc);
+ goto err_remove_fs;
+ }
+
+ if (!debugfs_create_file("alg_flags", S_IRUSR, chip->dfs_root, chip,
+ &fg_alg_flags_fops)) {
+ pr_err("failed to create alg_flags file\n");
+ goto err_remove_fs;
+ }
+
+ return 0;
+
+err_remove_fs:
+ debugfs_remove_recursive(chip->dfs_root);
return -ENOMEM;
}
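+
+/*
+ * Usage sketch (paths inferred from the directory/file names above; the
+ * example address and count are hypothetical): with debugfs mounted at
+ * /sys/kernel/debug, the SRAM window is driven as:
+ *
+ *   echo 0x5e > /sys/kernel/debug/fg/sram/address
+ *   echo 4 > /sys/kernel/debug/fg/sram/count
+ *   cat /sys/kernel/debug/fg/sram/data
+ *
+ * and the decoded algorithm flags are read from
+ * /sys/kernel/debug/fg/alg_flags.
+ */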
diff --git a/drivers/power/qcom-charger/qpnp-fg-gen3.c b/drivers/power/qcom-charger/qpnp-fg-gen3.c
index dda94bb3f932..30408218b7e7 100644
--- a/drivers/power/qcom-charger/qpnp-fg-gen3.c
+++ b/drivers/power/qcom-charger/qpnp-fg-gen3.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <linux/of_batterydata.h>
#include <linux/platform_device.h>
-#include <linux/power_supply.h>
#include <linux/iio/consumer.h>
#include <linux/qpnp/qpnp-revid.h>
#include "fg-core.h"
@@ -35,6 +34,14 @@
#define CUTOFF_VOLT_OFFSET 0
#define SYS_TERM_CURR_WORD 6
#define SYS_TERM_CURR_OFFSET 0
+#define VBATT_FULL_WORD 7
+#define VBATT_FULL_OFFSET 0
+#define KI_COEFF_MED_DISCHG_WORD 9
+#define KI_COEFF_MED_DISCHG_OFFSET 3
+#define KI_COEFF_HI_DISCHG_WORD 10
+#define KI_COEFF_HI_DISCHG_OFFSET 0
+#define KI_COEFF_LOW_DISCHG_WORD 10
+#define KI_COEFF_LOW_DISCHG_OFFSET 2
#define DELTA_SOC_THR_WORD 12
#define DELTA_SOC_THR_OFFSET 3
#define RECHARGE_SOC_THR_WORD 14
@@ -57,24 +64,42 @@
#define PROFILE_LOAD_OFFSET 0
#define NOM_CAP_WORD 58
#define NOM_CAP_OFFSET 0
+#define CYCLE_COUNT_WORD 75
+#define CYCLE_COUNT_OFFSET 0
#define PROFILE_INTEGRITY_WORD 79
#define PROFILE_INTEGRITY_OFFSET 3
#define BATT_SOC_WORD 91
#define BATT_SOC_OFFSET 0
+#define FULL_SOC_WORD 93
+#define FULL_SOC_OFFSET 2
#define MONOTONIC_SOC_WORD 94
#define MONOTONIC_SOC_OFFSET 2
+#define CC_SOC_WORD 95
+#define CC_SOC_OFFSET 0
+#define CC_SOC_SW_WORD 96
+#define CC_SOC_SW_OFFSET 0
#define VOLTAGE_PRED_WORD 97
#define VOLTAGE_PRED_OFFSET 0
#define OCV_WORD 97
#define OCV_OFFSET 2
#define RSLOW_WORD 101
#define RSLOW_OFFSET 0
+#define ACT_BATT_CAP_WORD 117
+#define ACT_BATT_CAP_OFFSET 0
#define LAST_BATT_SOC_WORD 119
#define LAST_BATT_SOC_OFFSET 0
#define LAST_MONOTONIC_SOC_WORD 119
#define LAST_MONOTONIC_SOC_OFFSET 2
+#define ALG_FLAGS_WORD 120
+#define ALG_FLAGS_OFFSET 1
/* v2 SRAM address and offset in ascending order */
+#define KI_COEFF_LOW_DISCHG_v2_WORD 9
+#define KI_COEFF_LOW_DISCHG_v2_OFFSET 3
+#define KI_COEFF_MED_DISCHG_v2_WORD 10
+#define KI_COEFF_MED_DISCHG_v2_OFFSET 0
+#define KI_COEFF_HI_DISCHG_v2_WORD 10
+#define KI_COEFF_HI_DISCHG_v2_OFFSET 1
#define DELTA_SOC_THR_v2_WORD 13
#define DELTA_SOC_THR_v2_OFFSET 0
#define RECHARGE_SOC_THR_v2_WORD 14
@@ -82,18 +107,24 @@
#define CHG_TERM_CURR_v2_WORD 15
#define CHG_TERM_CURR_v2_OFFSET 1
#define EMPTY_VOLT_v2_WORD 15
-#define EMPTY_VOLT_v2_OFFSET 2
+#define EMPTY_VOLT_v2_OFFSET 3
#define VBATT_LOW_v2_WORD 16
#define VBATT_LOW_v2_OFFSET 0
+#define FLOAT_VOLT_v2_WORD 16
+#define FLOAT_VOLT_v2_OFFSET 2
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int val);
static int fg_decode_value_16b(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val);
static int fg_decode_default(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val);
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int value);
static void fg_encode_voltage(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf);
+ enum fg_sram_param_id id, int val_mv, u8 *buf);
static void fg_encode_current(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf);
+ enum fg_sram_param_id id, int val_ma, u8 *buf);
static void fg_encode_default(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val, u8 *buf);
@@ -114,11 +145,19 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
fg_decode_default),
PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 244141,
- 1000, 0, NULL, fg_decode_value_16b),
+ 1000, 0, NULL, fg_decode_voltage_15b),
PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 244141, 1000, 0, NULL,
- fg_decode_value_16b),
+ fg_decode_voltage_15b),
PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 244141, 1000, 0, NULL,
fg_decode_value_16b),
+ PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
+ fg_decode_default),
+ PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(ACT_BATT_CAP, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET, 2, 1, 1, 0,
+ NULL, fg_decode_default),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -126,11 +165,13 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
-2500, fg_encode_voltage, NULL),
PARAM(VBATT_LOW, VBATT_LOW_WORD, VBATT_LOW_OFFSET, 1, 100000, 390625,
-2500, fg_encode_voltage, NULL),
+ PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000000,
+ 244141, 0, fg_encode_voltage, NULL),
PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_WORD, CHG_TERM_CURR_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
- PARAM(DELTA_SOC_THR, DELTA_SOC_THR_WORD, DELTA_SOC_THR_OFFSET, 1, 256,
+ PARAM(DELTA_SOC_THR, DELTA_SOC_THR_WORD, DELTA_SOC_THR_OFFSET, 1, 2048,
100, 0, fg_encode_default, NULL),
PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_WORD, RECHARGE_SOC_THR_OFFSET,
1, 256, 100, 0, fg_encode_default, NULL),
@@ -144,30 +185,48 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+ PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD,
+ KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
+ PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD,
+ KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
};
static struct fg_sram_param pmicobalt_v2_sram_params[] = {
PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
fg_decode_default),
PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 244141,
- 1000, 0, NULL, fg_decode_value_16b),
+ 1000, 0, NULL, fg_decode_voltage_15b),
PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 244141, 1000, 0, NULL,
- fg_decode_value_16b),
+ fg_decode_voltage_15b),
PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 244141, 1000, 0, NULL,
fg_decode_value_16b),
+ PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
+ fg_decode_default),
+ PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(ACT_BATT_CAP, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET, 2, 1, 1, 0,
+ NULL, fg_decode_default),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
- PARAM(EMPTY_VOLT, EMPTY_VOLT_v2_WORD, EMPTY_VOLT_v2_OFFSET, 1, 100000,
- 390625, -2000, fg_encode_voltage, NULL),
- PARAM(VBATT_LOW, VBATT_LOW_v2_WORD, VBATT_LOW_v2_OFFSET, 1, 100000,
- 390625, -2000, fg_encode_voltage, NULL),
+ PARAM(EMPTY_VOLT, EMPTY_VOLT_v2_WORD, EMPTY_VOLT_v2_OFFSET, 1, 1000,
+ 15625, -2000, fg_encode_voltage, NULL),
+ PARAM(VBATT_LOW, VBATT_LOW_v2_WORD, VBATT_LOW_v2_OFFSET, 1, 1000,
+ 15625, -2000, fg_encode_voltage, NULL),
+ PARAM(FLOAT_VOLT, FLOAT_VOLT_v2_WORD, FLOAT_VOLT_v2_OFFSET, 1, 1000,
+ 15625, -2000, fg_encode_voltage, NULL),
+ PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000000,
+ 244141, 0, fg_encode_voltage, NULL),
PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_v2_WORD, CHG_TERM_CURR_v2_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
PARAM(DELTA_SOC_THR, DELTA_SOC_THR_v2_WORD, DELTA_SOC_THR_v2_OFFSET, 1,
- 256, 100, 0, fg_encode_default, NULL),
+ 2048, 100, 0, fg_encode_default, NULL),
PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_v2_WORD,
RECHARGE_SOC_THR_v2_OFFSET, 1, 256, 100, 0, fg_encode_default,
NULL),
@@ -181,6 +240,73 @@ static struct fg_sram_param pmicobalt_v2_sram_params[] = {
ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+ PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_v2_WORD,
+ KI_COEFF_MED_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
+ PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_v2_WORD,
+ KI_COEFF_HI_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
+};
+
+static struct fg_alg_flag pmicobalt_v1_alg_flags[] = {
+ [ALG_FLAG_SOC_LT_OTG_MIN] = {
+ .name = "SOC_LT_OTG_MIN",
+ .bit = BIT(0),
+ },
+ [ALG_FLAG_SOC_LT_RECHARGE] = {
+ .name = "SOC_LT_RECHARGE",
+ .bit = BIT(1),
+ },
+ [ALG_FLAG_IBATT_LT_ITERM] = {
+ .name = "IBATT_LT_ITERM",
+ .bit = BIT(2),
+ },
+ [ALG_FLAG_IBATT_GT_HPM] = {
+ .name = "IBATT_GT_HPM",
+ .bit = BIT(3),
+ },
+ [ALG_FLAG_IBATT_GT_UPM] = {
+ .name = "IBATT_GT_UPM",
+ .bit = BIT(4),
+ },
+ [ALG_FLAG_VBATT_LT_RECHARGE] = {
+ .name = "VBATT_LT_RECHARGE",
+ .bit = BIT(5),
+ },
+ [ALG_FLAG_VBATT_GT_VFLOAT] = {
+ .invalid = true,
+ },
+};
+
+static struct fg_alg_flag pmicobalt_v2_alg_flags[] = {
+ [ALG_FLAG_SOC_LT_OTG_MIN] = {
+ .name = "SOC_LT_OTG_MIN",
+ .bit = BIT(0),
+ },
+ [ALG_FLAG_SOC_LT_RECHARGE] = {
+ .name = "SOC_LT_RECHARGE",
+ .bit = BIT(1),
+ },
+ [ALG_FLAG_IBATT_LT_ITERM] = {
+ .name = "IBATT_LT_ITERM",
+ .bit = BIT(2),
+ },
+ [ALG_FLAG_IBATT_GT_HPM] = {
+ .name = "IBATT_GT_HPM",
+ .bit = BIT(4),
+ },
+ [ALG_FLAG_IBATT_GT_UPM] = {
+ .name = "IBATT_GT_UPM",
+ .bit = BIT(5),
+ },
+ [ALG_FLAG_VBATT_LT_RECHARGE] = {
+ .name = "VBATT_LT_RECHARGE",
+ .bit = BIT(6),
+ },
+ [ALG_FLAG_VBATT_GT_VFLOAT] = {
+ .name = "VBATT_GT_VFLOAT",
+ .bit = BIT(7),
+ },
};
static int fg_gen3_debug_mask;
@@ -193,173 +319,36 @@ module_param_named(
sram_update_period_ms, fg_sram_update_period_ms, int, S_IRUSR | S_IWUSR
);
-/* Other functions HERE */
-
-static int fg_awake_cb(struct votable *votable, void *data, int awake,
- const char *client)
-{
- struct fg_chip *chip = data;
-
- if (awake)
- pm_stay_awake(chip->dev);
- else
- pm_relax(chip->dev);
-
- pr_debug("client: %s awake: %d\n", client, awake);
- return 0;
-}
-
-static bool is_charger_available(struct fg_chip *chip)
-{
- if (!chip->batt_psy)
- chip->batt_psy = power_supply_get_by_name("battery");
+static bool fg_sram_dump;
+module_param_named(
+ sram_dump, fg_sram_dump, bool, S_IRUSR | S_IWUSR
+);
- if (!chip->batt_psy)
- return false;
+static int fg_restart;
- return true;
-}
+/* All getters HERE */
-static void status_change_work(struct work_struct *work)
+#define VOLTAGE_15BIT_MASK GENMASK(14, 0)
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int value)
{
- struct fg_chip *chip = container_of(work,
- struct fg_chip, status_change_work);
- union power_supply_propval prop = {0, };
-
- if (!is_charger_available(chip)) {
- fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
- return;
- }
-
- power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
- &prop);
- switch (prop.intval) {
- case POWER_SUPPLY_STATUS_CHARGING:
- fg_dbg(chip, FG_POWER_SUPPLY, "Charging\n");
- break;
- case POWER_SUPPLY_STATUS_DISCHARGING:
- fg_dbg(chip, FG_POWER_SUPPLY, "Discharging\n");
- break;
- case POWER_SUPPLY_STATUS_FULL:
- fg_dbg(chip, FG_POWER_SUPPLY, "Full\n");
- break;
- default:
- break;
- }
+ value &= VOLTAGE_15BIT_MASK;
+ sp[id].value = div_u64((u64)value * sp[id].numrtr, sp[id].denmtr);
+ pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+ sp[id].value);
+ return sp[id].value;
}
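+/*
+ * Illustrative decode (raw value assumed, not from the patch): with the
+ * VOLTAGE_PRED scaling of 244141/1000, a raw reading of 0x4000 masks to
+ * 16384 and decodes to 16384 * 244141 / 1000 = 4000006, i.e. ~4.0 V in uV.
+ * The 15-bit LSB is thus ~244.141 uV (8 V full scale / 32768 codes).
+ */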
-#define PROFILE_LEN 224
-#define PROFILE_COMP_LEN 32
-#define SOC_READY_WAIT_MS 2000
-static void profile_load_work(struct work_struct *work)
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int value)
{
- struct fg_chip *chip = container_of(work,
- struct fg_chip,
- profile_load_work.work);
- int rc;
- u8 buf[PROFILE_COMP_LEN], val;
- bool tried_again = false, profiles_same = false;
-
- if (!chip->batt_id_avail) {
- pr_err("batt_id not available\n");
- return;
- }
-
- rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
- PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
- if (rc < 0) {
- pr_err("failed to read profile integrity rc=%d\n", rc);
- return;
- }
-
- vote(chip->awake_votable, PROFILE_LOAD, true, 0);
- if (val == 0x01) {
- fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
- rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
- buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
- if (rc < 0) {
- pr_err("Error in reading battery profile, rc:%d\n", rc);
- goto out;
- }
- profiles_same = memcmp(chip->batt_profile, buf,
- PROFILE_COMP_LEN) == 0;
- if (profiles_same) {
- fg_dbg(chip, FG_STATUS, "Battery profile is same\n");
- goto done;
- }
- fg_dbg(chip, FG_STATUS, "profiles are different?\n");
- }
-
- fg_dbg(chip, FG_STATUS, "profile loading started\n");
- rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
- if (rc < 0) {
- pr_err("Error in writing to %04x, rc=%d\n",
- BATT_SOC_RESTART(chip), rc);
- goto out;
- }
-
- /* load battery profile */
- rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
- chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
- if (rc < 0) {
- pr_err("Error in writing battery profile, rc:%d\n", rc);
- goto out;
- }
-
- rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT,
- RESTART_GO_BIT);
- if (rc < 0) {
- pr_err("Error in writing to %04x, rc=%d\n",
- BATT_SOC_RESTART(chip), rc);
- goto out;
- }
-
-wait:
- rc = wait_for_completion_interruptible_timeout(&chip->soc_ready,
- msecs_to_jiffies(SOC_READY_WAIT_MS));
-
- /* If we were interrupted wait again one more time. */
- if (rc == -ERESTARTSYS && !tried_again) {
- tried_again = true;
- goto wait;
- } else if (rc <= 0) {
- pr_err("wait for soc_ready timed out rc=%d\n", rc);
- goto out;
- }
-
- fg_dbg(chip, FG_STATUS, "SOC is ready\n");
-
- /* Set the profile integrity bit */
- val = 0x1;
- rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
- PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
- if (rc < 0) {
- pr_err("failed to write profile integrity rc=%d\n", rc);
- goto out;
- }
-
- fg_dbg(chip, FG_STATUS, "profile loaded successfully");
-done:
- rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
- FG_IMA_DEFAULT);
- if (rc < 0) {
- pr_err("Error in reading %04x[%d] rc=%d\n", NOM_CAP_WORD,
- NOM_CAP_OFFSET, rc);
- goto out;
- }
-
- chip->nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
- chip->profile_loaded = true;
-out:
- vote(chip->awake_votable, PROFILE_LOAD, false, 0);
- rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
- if (rc < 0)
- pr_err("Error in writing to %04x, rc=%d\n",
- BATT_SOC_RESTART(chip), rc);
+ sp[id].value = div_s64((s64)value * sp[id].numrtr, sp[id].denmtr);
+ sp[id].value = sign_extend32(sp[id].value, 31);
+ pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+ sp[id].value);
+ return sp[id].value;
}
-/* All getters HERE */
-
static int fg_decode_value_16b(struct fg_sram_param *sp,
enum fg_sram_param_id id, int value)
{
@@ -369,10 +358,11 @@ static int fg_decode_value_16b(struct fg_sram_param *sp,
return sp[id].value;
}
-static int fg_decode_default(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int value)
+static int fg_decode_default(struct fg_sram_param *sp, enum fg_sram_param_id id,
+ int value)
{
- return value;
+ sp[id].value = value;
+ return sp[id].value;
}
static int fg_decode(struct fg_sram_param *sp, enum fg_sram_param_id id,
@@ -387,14 +377,14 @@ static int fg_decode(struct fg_sram_param *sp, enum fg_sram_param_id id,
}
static void fg_encode_voltage(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf)
+ enum fg_sram_param_id id, int val_mv, u8 *buf)
{
int i, mask = 0xff;
int64_t temp;
- val += sp[id].offset;
- temp = (int64_t)div_u64((u64)val * sp[id].numrtr, sp[id].denmtr);
- pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+ val_mv += sp[id].offset;
+ temp = (int64_t)div_u64((u64)val_mv * sp[id].numrtr, sp[id].denmtr);
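+ /*
+ * e.g. (illustrative): VBATT_LOW on v2 uses numrtr=1000, denmtr=15625,
+ * offset=-2000, so val_mv=3100 encodes as (3100-2000)*1000/15625 = 70.
+ */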
+ pr_debug("temp: %llx id: %d, val_mv: %d, buf: [ ", temp, id, val_mv);
for (i = 0; i < sp[id].len; i++) {
buf[i] = temp & mask;
temp >>= 8;
@@ -404,15 +394,15 @@ static void fg_encode_voltage(struct fg_sram_param *sp,
}
static void fg_encode_current(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf)
+ enum fg_sram_param_id id, int val_ma, u8 *buf)
{
int i, mask = 0xff;
int64_t temp;
s64 current_ma;
- current_ma = -val;
+ current_ma = val_ma;
temp = (int64_t)div_s64(current_ma * sp[id].numrtr, sp[id].denmtr);
- pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+ pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val_ma);
for (i = 0; i < sp[id].len; i++) {
buf[i] = temp & mask;
temp >>= 8;
@@ -463,6 +453,9 @@ static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
if (id < 0 || id > FG_SRAM_MAX || chip->sp[id].len > sizeof(buf))
return -EINVAL;
+ if (chip->battery_missing)
+ return -ENODATA;
+
rc = fg_sram_read(chip, chip->sp[id].addr_word, chip->sp[id].addr_byte,
buf, chip->sp[id].len, FG_IMA_DEFAULT);
if (rc < 0) {
@@ -478,6 +471,35 @@ static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
return 0;
}
+#define CC_SOC_30BIT GENMASK(29, 0)
+static int fg_get_cc_soc(struct fg_chip *chip, int *val)
+{
+ int rc, cc_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC, &cc_soc);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_s64(cc_soc * chip->cl.nom_cap_uah, CC_SOC_30BIT);
+ return 0;
+}
+
+static int fg_get_cc_soc_sw(struct fg_chip *chip, int *val)
+{
+ int rc, cc_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_s64(cc_soc * chip->cl.learned_cc_uah, CC_SOC_30BIT);
+ return 0;
+}
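+
+/*
+ * Worked example (numbers hypothetical): CC_SOC_* is a 30-bit fraction of
+ * a full charge, so a raw cc_soc_sw of CC_SOC_30BIT / 2 with a learned
+ * capacity of 3000000 uAh yields *val = 1500000 uAh, i.e. half the
+ * learned capacity.
+ */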
+
#define BATT_TEMP_NUMR 1
#define BATT_TEMP_DENR 1
static int fg_get_battery_temp(struct fg_chip *chip, int *val)
@@ -628,7 +650,6 @@ static int fg_get_msoc_raw(struct fg_chip *chip, int *val)
}
fg_dbg(chip, FG_POWER_SUPPLY, "raw: 0x%02x\n", cap[0]);
-
*val = cap[0];
return 0;
}
@@ -639,6 +660,11 @@ static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
{
int rc, msoc;
+ if (chip->charge_full) {
+ *val = FULL_CAPACITY;
+ return 0;
+ }
+
rc = fg_get_msoc_raw(chip, &msoc);
if (rc < 0)
return rc;
@@ -678,7 +704,6 @@ static int fg_get_batt_id(struct fg_chip *chip, int *val)
return rc;
}
- chip->batt_id_avail = true;
fg_dbg(chip, FG_STATUS, "batt_id: %d\n", batt_id);
*val = batt_id;
@@ -698,13 +723,14 @@ static int fg_get_batt_profile(struct fg_chip *chip)
return rc;
}
+ batt_id /= 1000;
+ chip->batt_id = batt_id;
batt_node = of_find_node_by_name(node, "qcom,battery-data");
if (!batt_node) {
pr_err("Batterydata not available\n");
return -ENXIO;
}
- batt_id /= 1000;
profile_node = of_batterydata_get_best_profile(batt_node, batt_id,
NULL);
if (IS_ERR(profile_node))
@@ -736,6 +762,13 @@ static int fg_get_batt_profile(struct fg_chip *chip)
chip->bp.fastchg_curr_ma = -EINVAL;
}
+ rc = of_property_read_u32(profile_node, "qcom,fg-cc-cv-threshold-mv",
+ &chip->bp.vbatt_full_mv);
+ if (rc < 0) {
+ pr_err("battery cc_cv threshold unavailable, rc:%d\n", rc);
+ chip->bp.vbatt_full_mv = -EINVAL;
+ }
+
data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
if (!data) {
pr_err("No profile data available\n");
@@ -747,6 +780,7 @@ static int fg_get_batt_profile(struct fg_chip *chip)
return -EINVAL;
}
+ chip->profile_available = true;
memcpy(chip->batt_profile, data, len);
return 0;
}
@@ -757,6 +791,27 @@ static inline void get_temp_setpoint(int threshold, u8 *val)
*val = DIV_ROUND_CLOSEST((threshold + 30) * 10, 5);
}
+static inline void get_batt_temp_delta(int delta, u8 *val)
+{
+ switch (delta) {
+ case 2:
+ *val = BTEMP_DELTA_2K;
+ break;
+ case 4:
+ *val = BTEMP_DELTA_4K;
+ break;
+ case 6:
+ *val = BTEMP_DELTA_6K;
+ break;
+ case 10:
+ *val = BTEMP_DELTA_10K;
+ break;
+ default:
+ *val = BTEMP_DELTA_2K;
+ break;
+ }
+}
+
static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
int flags)
{
@@ -796,6 +851,911 @@ static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
return 0;
}
+/* Other functions HERE */
+
+static int fg_awake_cb(struct votable *votable, void *data, int awake,
+ const char *client)
+{
+ struct fg_chip *chip = data;
+
+ if (awake)
+ pm_stay_awake(chip->dev);
+ else
+ pm_relax(chip->dev);
+
+ pr_debug("client: %s awake: %d\n", client, awake);
+ return 0;
+}
+
+static bool is_charger_available(struct fg_chip *chip)
+{
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name("battery");
+
+ if (!chip->batt_psy)
+ return false;
+
+ return true;
+}
+
+static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int rc;
+
+ if (chip->battery_missing || !chip->cl.learned_cc_uah)
+ return -EPERM;
+
+ cc_mah = div64_s64(chip->cl.learned_cc_uah, 1000);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_ACT_BATT_CAP].addr_word,
+ chip->sp[FG_SRAM_ACT_BATT_CAP].addr_byte, (u8 *)&cc_mah,
+ chip->sp[FG_SRAM_ACT_BATT_CAP].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing act_batt_cap, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_CAP_LEARN, "learned capacity %llduah/%dmah stored\n",
+ chip->cl.learned_cc_uah, cc_mah);
+ return 0;
+}
+
+#define CAPACITY_DELTA_DECIPCT 500
+static int fg_load_learned_cap_from_sram(struct fg_chip *chip)
+{
+ int rc, act_cap_mah;
+ int64_t delta_cc_uah, pct_nom_cap_uah;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
+ if (rc < 0) {
+ pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->cl.learned_cc_uah = act_cap_mah * 1000;
+ if (chip->cl.learned_cc_uah == 0)
+ chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+
+ if (chip->cl.learned_cc_uah != chip->cl.nom_cap_uah) {
+ delta_cc_uah = abs(chip->cl.learned_cc_uah -
+ chip->cl.nom_cap_uah);
+ pct_nom_cap_uah = div64_s64((int64_t)chip->cl.nom_cap_uah *
+ CAPACITY_DELTA_DECIPCT, 1000);
+ /*
+ * If the learned capacity is out of range by 50% from the
+ * nominal capacity, then overwrite the learned capacity with
+ * the nominal capacity.
+ */
+ if (chip->cl.nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+ fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah: %lld is higher than expected\n",
+ chip->cl.learned_cc_uah);
+ fg_dbg(chip, FG_CAP_LEARN, "Capping it to nominal:%lld\n",
+ chip->cl.nom_cap_uah);
+ chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+ rc = fg_save_learned_cap_to_sram(chip);
+ if (rc < 0)
+ pr_err("Error in saving learned_cc_uah, rc=%d\n",
+ rc);
+ }
+ }
+
+ fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah:%lld nom_cap_uah: %lld\n",
+ chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+ return 0;
+}
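+
+/*
+ * Example bound (numbers assumed): with nom_cap_uah = 3000000 and
+ * CAPACITY_DELTA_DECIPCT = 500 (i.e. 50%), a learned value outside
+ * 1500000..4500000 uAh is discarded and reset to the nominal capacity.
+ */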
+
+static bool is_temp_valid_cap_learning(struct fg_chip *chip)
+{
+ int rc, batt_temp;
+
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (rc < 0) {
+ pr_err("Error in getting batt_temp\n");
+ return false;
+ }
+
+ if (batt_temp > chip->dt.cl_max_temp ||
+ batt_temp < chip->dt.cl_min_temp) {
+ fg_dbg(chip, FG_CAP_LEARN, "batt temp %d out of range [%d %d]\n",
+ batt_temp, chip->dt.cl_min_temp, chip->dt.cl_max_temp);
+ return false;
+ }
+
+ return true;
+}
+
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+ int64_t max_inc_val, min_dec_val, old_cap;
+ int rc;
+
+ max_inc_val = chip->cl.learned_cc_uah
+ * (1000 + chip->dt.cl_max_cap_inc);
+ do_div(max_inc_val, 1000);
+
+ min_dec_val = chip->cl.learned_cc_uah
+ * (1000 - chip->dt.cl_max_cap_dec);
+ do_div(min_dec_val, 1000);
+
+ old_cap = chip->cl.learned_cc_uah;
+ if (chip->cl.final_cc_uah > max_inc_val)
+ chip->cl.learned_cc_uah = max_inc_val;
+ else if (chip->cl.final_cc_uah < min_dec_val)
+ chip->cl.learned_cc_uah = min_dec_val;
+ else
+ chip->cl.learned_cc_uah =
+ chip->cl.final_cc_uah;
+
+ if (chip->dt.cl_max_cap_limit) {
+ max_inc_val = (int64_t)chip->cl.nom_cap_uah * (1000 +
+ chip->dt.cl_max_cap_limit);
+ do_div(max_inc_val, 1000);
+ if (chip->cl.final_cc_uah > max_inc_val) {
+ fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes above max limit %lld\n",
+ chip->cl.final_cc_uah, max_inc_val);
+ chip->cl.learned_cc_uah = max_inc_val;
+ }
+ }
+
+ if (chip->dt.cl_min_cap_limit) {
+ min_dec_val = (int64_t)chip->cl.nom_cap_uah * (1000 -
+ chip->dt.cl_min_cap_limit);
+ do_div(min_dec_val, 1000);
+ if (chip->cl.final_cc_uah < min_dec_val) {
+ fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes below min limit %lld\n",
+ chip->cl.final_cc_uah, min_dec_val);
+ chip->cl.learned_cc_uah = min_dec_val;
+ }
+ }
+
+ rc = fg_save_learned_cap_to_sram(chip);
+ if (rc < 0)
+ pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+
+ fg_dbg(chip, FG_CAP_LEARN, "final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+ chip->cl.final_cc_uah, old_cap, chip->cl.learned_cc_uah);
+}
+
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+ int rc, cc_soc_sw, cc_soc_delta_pct;
+ int64_t delta_cc_uah;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ cc_soc_delta_pct = DIV_ROUND_CLOSEST(
+ abs(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
+ CC_SOC_30BIT);
+ delta_cc_uah = div64_s64(chip->cl.learned_cc_uah * cc_soc_delta_pct,
+ 100);
+ chip->cl.final_cc_uah = chip->cl.init_cc_uah + delta_cc_uah;
+ fg_dbg(chip, FG_CAP_LEARN, "Current cc_soc=%d cc_soc_delta_pct=%d total_cc_uah=%lld\n",
+ cc_soc_sw, cc_soc_delta_pct, chip->cl.final_cc_uah);
+ return 0;
+}
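+
+/*
+ * Illustrative run (numbers assumed): learning from init_cc_soc_sw at 30%
+ * of CC_SOC_30BIT to 100% gives cc_soc_delta_pct = 70; with learned_cc_uah
+ * = 3000000 that adds 2100000 uAh to init_cc_uah to form final_cc_uah.
+ */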
+
+#define FULL_SOC_RAW 255
+static int fg_cap_learning_begin(struct fg_chip *chip, int batt_soc)
+{
+ int rc, cc_soc_sw;
+
+ if (DIV_ROUND_CLOSEST(batt_soc * 100, FULL_SOC_RAW) >
+ chip->dt.cl_start_soc) {
+ fg_dbg(chip, FG_CAP_LEARN, "Battery SOC %d is high!, not starting\n",
+ batt_soc);
+ return -EINVAL;
+ }
+
+ chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc,
+ FULL_SOC_RAW);
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->cl.init_cc_soc_sw = cc_soc_sw;
+ chip->cl.active = true;
+ fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
+ batt_soc, chip->cl.init_cc_soc_sw);
+ return 0;
+}
+
+static int fg_cap_learning_done(struct fg_chip *chip)
+{
+ int rc, cc_soc_sw;
+
+ rc = fg_cap_learning_process_full_data(chip);
+ if (rc < 0) {
+ pr_err("Error in processing cap learning full data, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ /* Write a FULL value to cc_soc_sw */
+ cc_soc_sw = CC_SOC_30BIT;
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+ chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+ chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+ goto out;
+ }
+
+ fg_cap_learning_post_process(chip);
+out:
+ return rc;
+}
+
+static void fg_cap_learning_update(struct fg_chip *chip)
+{
+ int rc, batt_soc;
+
+ mutex_lock(&chip->cl.lock);
+
+ if (!is_temp_valid_cap_learning(chip) || !chip->cl.learned_cc_uah ||
+ chip->battery_missing) {
+ fg_dbg(chip, FG_CAP_LEARN, "Aborting cap_learning %lld\n",
+ chip->cl.learned_cc_uah);
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ goto out;
+ }
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+ if (rc < 0) {
+ pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+ goto out;
+ }
+
+ /* We need only the most significant byte here */
+ batt_soc = (u32)batt_soc >> 24;
+
+ fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
+ chip->status, chip->cl.active, batt_soc);
+
+ /* Initialize the starting point of learning capacity */
+ if (!chip->cl.active) {
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ rc = fg_cap_learning_begin(chip, batt_soc);
+ chip->cl.active = (rc == 0);
+ }
+
+ } else {
+ if (chip->charge_done) {
+ rc = fg_cap_learning_done(chip);
+ if (rc < 0)
+ pr_err("Error in completing capacity learning, rc=%d\n",
+ rc);
+
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
+ fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
+ batt_soc);
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ }
+ }
+
+out:
+ mutex_unlock(&chip->cl.lock);
+}
+
+#define KI_COEFF_MED_DISCHG_DEFAULT 1500
+#define KI_COEFF_HI_DISCHG_DEFAULT 2200
+static int fg_adjust_ki_coeff_dischg(struct fg_chip *chip)
+{
+ int rc, i, msoc;
+ int ki_coeff_med = KI_COEFF_MED_DISCHG_DEFAULT;
+ int ki_coeff_hi = KI_COEFF_HI_DISCHG_DEFAULT;
+ u8 val;
+
+ if (!chip->ki_coeff_dischg_en)
+ return 0;
+
+ rc = fg_get_prop_capacity(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ for (i = KI_COEFF_SOC_LEVELS - 1; i >= 0; i--) {
+ if (msoc < chip->dt.ki_coeff_soc[i]) {
+ ki_coeff_med = chip->dt.ki_coeff_med_dischg[i];
+ ki_coeff_hi = chip->dt.ki_coeff_hi_dischg[i];
+ }
+ }
+ }
+
+ fg_encode(chip->sp, FG_SRAM_KI_COEFF_MED_DISCHG, ki_coeff_med, &val);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_word,
+ chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_byte, &val,
+ chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing ki_coeff_med, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_encode(chip->sp, FG_SRAM_KI_COEFF_HI_DISCHG, ki_coeff_hi, &val);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_word,
+ chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_byte, &val,
+ chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing ki_coeff_hi, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Wrote ki_coeff_med %d ki_coeff_hi %d\n",
+ ki_coeff_med, ki_coeff_hi);
+ return 0;
+}
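+
+/*
+ * Band selection example (DT values hypothetical): with
+ * qcom,ki-coeff-soc-dischg = <15 30 45>, an msoc of 20 matches the i=2 and
+ * i=1 bands in the loop above; the last assignment (lowest matching index,
+ * here i=1) wins, so the i=1 med/hi coefficients are written.
+ */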
+
+static int fg_charge_full_update(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0, };
+ int rc, msoc, bsoc, recharge_soc;
+ u8 full_soc[2] = {0xFF, 0xFF};
+
+ if (!chip->dt.hold_soc_while_full)
+ return 0;
+
+ if (!is_charger_available(chip))
+ return 0;
+
+ rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+ &prop);
+ if (rc < 0) {
+ pr_err("Error in getting battery health, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->health = prop.intval;
+ recharge_soc = chip->dt.recharge_soc_thr;
+ recharge_soc = DIV_ROUND_CLOSEST(recharge_soc * FULL_SOC_RAW,
+ FULL_CAPACITY);
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &bsoc);
+ if (rc < 0) {
+ pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* We need 2 most significant bytes here */
+ bsoc = (u32)bsoc >> 16;
+ rc = fg_get_prop_capacity(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_STATUS, "msoc: %d health: %d status: %d\n", msoc,
+ chip->health, chip->status);
+ if (chip->charge_done) {
+ if (msoc >= 99 && chip->health == POWER_SUPPLY_HEALTH_GOOD)
+ chip->charge_full = true;
+ else
+ fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
+ msoc);
+ } else if ((bsoc >> 8) <= recharge_soc) {
+ fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d\n",
+ bsoc >> 8, recharge_soc);
+ chip->charge_full = false;
+ }
+
+ if (!chip->charge_full)
+ return 0;
+
+ /*
+ * During JEITA conditions, charge_full can happen early. FULL_SOC
+ * and MONOTONIC_SOC need to be updated to reflect that. Write
+ * battery SOC to FULL_SOC and write a full value to MONOTONIC_SOC.
+ */
+ rc = fg_sram_write(chip, FULL_SOC_WORD, FULL_SOC_OFFSET, (u8 *)&bsoc, 2,
+ FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("failed to write full_soc rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET,
+ full_soc, 2, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("failed to write monotonic_soc rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Set charge_full to true @ soc %d\n", msoc);
+ return 0;
+}
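+
+/*
+ * Raw-unit example (threshold assumed): with recharge_soc_thr = 95, the
+ * comparison above runs in FULL_SOC_RAW units: 95 * 255 / 100 ~= 242, so
+ * charge_full is cleared once the battery SOC top byte drops to 242 or
+ * less.
+ */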
+
+static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
+{
+ u8 buf[4];
+ int rc;
+
+ fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, buf);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
+ chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte, buf,
+ chip->sp[FG_SRAM_RECHARGE_SOC_THR].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing recharge_soc_thr, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int fg_adjust_recharge_soc(struct fg_chip *chip)
+{
+ int rc, msoc, recharge_soc, new_recharge_soc = 0;
+
+ recharge_soc = chip->dt.recharge_soc_thr;
+ /*
+ * If the input is present and charging had been terminated, adjust
+ * the recharge SOC threshold based on the monotonic SOC at which
+ * the charge termination had happened.
+ */
+ if (is_input_present(chip) && !chip->recharge_soc_adjusted
+ && chip->charge_done) {
+ /* Get raw monotonic SOC for calculation */
+ rc = fg_get_msoc_raw(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting msoc, rc=%d\n", rc);
+ return rc;
+ }
+
+ msoc = DIV_ROUND_CLOSEST(msoc * FULL_CAPACITY, FULL_SOC_RAW);
+ /* Adjust the recharge_soc threshold */
+ new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);
+ } else if (chip->recharge_soc_adjusted && (!is_input_present(chip)
+ || chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+ /* Restore the default value */
+ new_recharge_soc = recharge_soc;
+ }
+
+ if (new_recharge_soc > 0 && new_recharge_soc < FULL_CAPACITY) {
+ rc = fg_set_recharge_soc(chip, new_recharge_soc);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->recharge_soc_adjusted = (new_recharge_soc !=
+ recharge_soc);
+ fg_dbg(chip, FG_STATUS, "resume soc set to %d\n",
+ new_recharge_soc);
+ }
+
+ return 0;
+}
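+
+/*
+ * Worked example (numbers assumed): recharge_soc_thr = 95 and charge
+ * termination at msoc = 97 give new_recharge_soc = 97 - (100 - 95) = 92,
+ * so recharging is keyed off the SOC at which termination actually
+ * happened.
+ */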
+
+static void status_change_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip, status_change_work);
+ union power_supply_propval prop = {0, };
+ int rc;
+
+ if (!is_charger_available(chip)) {
+ fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
+ goto out;
+ }
+
+ rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+ &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charging status, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->status = prop.intval;
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charge_done, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->charge_done = prop.intval;
+ fg_dbg(chip, FG_POWER_SUPPLY, "curr_status:%d charge_done: %d\n",
+ chip->status, chip->charge_done);
+
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+
+ fg_cap_learning_update(chip);
+
+ rc = fg_charge_full_update(chip);
+ if (rc < 0)
+ pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+ rc = fg_adjust_recharge_soc(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting recharge_soc, rc=%d\n", rc);
+
+ rc = fg_adjust_ki_coeff_dischg(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+out:
+ pm_relax(chip->dev);
+}
+
+static void restore_cycle_counter(struct fg_chip *chip)
+{
+ int rc = 0, i;
+ u8 data[2];
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ rc = fg_sram_read(chip, CYCLE_COUNT_WORD + (i / 2),
+ CYCLE_COUNT_OFFSET + (i % 2) * 2, data, 2,
+ FG_IMA_DEFAULT);
+ if (rc < 0)
+ pr_err("failed to read bucket %d rc=%d\n", i, rc);
+ else
+ chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
+ }
+ mutex_unlock(&chip->cyc_ctr.lock);
+}
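+
+/*
+ * SRAM layout note (illustrative): each bucket is a 16-bit counter packed
+ * two per word, so bucket 3 lives at CYCLE_COUNT_WORD + 1, byte offset 2,
+ * which is exactly what the (i / 2) and (i % 2) * 2 arithmetic above
+ * computes.
+ */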
+
+static void clear_cycle_counter(struct fg_chip *chip)
+{
+ int rc = 0, i;
+
+ if (!chip->cyc_ctr.en)
+ return;
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ memset(chip->cyc_ctr.count, 0, sizeof(chip->cyc_ctr.count));
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ chip->cyc_ctr.started[i] = false;
+ chip->cyc_ctr.last_soc[i] = 0;
+ }
+ rc = fg_sram_write(chip, CYCLE_COUNT_WORD, CYCLE_COUNT_OFFSET,
+ (u8 *)&chip->cyc_ctr.count,
+ sizeof(chip->cyc_ctr.count),
+ FG_IMA_DEFAULT);
+ if (rc < 0)
+ pr_err("failed to clear cycle counter rc=%d\n", rc);
+
+ mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
+{
+ int rc = 0;
+ u16 cyc_count;
+ u8 data[2];
+
+ if (bucket < 0 || (bucket > BUCKET_COUNT - 1))
+ return 0;
+
+ cyc_count = chip->cyc_ctr.count[bucket];
+ cyc_count++;
+ data[0] = cyc_count & 0xFF;
+ data[1] = cyc_count >> 8;
+
+ rc = fg_sram_write(chip, CYCLE_COUNT_WORD + (bucket / 2),
+ CYCLE_COUNT_OFFSET + (bucket % 2) * 2, data, 2,
+ FG_IMA_DEFAULT);
+ if (rc < 0)
+ pr_err("failed to write BATT_CYCLE[%d] rc=%d\n",
+ bucket, rc);
+ else
+ chip->cyc_ctr.count[bucket] = cyc_count;
+ return rc;
+}
+
+static void cycle_count_work(struct work_struct *work)
+{
+ int rc = 0, bucket, i, batt_soc;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ cycle_count_work);
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+ if (rc < 0) {
+ pr_err("Failed to read battery soc rc: %d\n", rc);
+ goto out;
+ }
+
+ /* We need only the most significant byte here */
+ batt_soc = (u32)batt_soc >> 24;
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ /* Find out which bucket the SOC falls in */
+ bucket = batt_soc / BUCKET_SOC_PCT;
+ pr_debug("batt_soc: %d bucket: %d\n", batt_soc, bucket);
+
+ /*
+ * If counting was started for the previous bucket, store
+ * its counter now that counting for the current bucket is
+ * starting.
+ */
+ if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
+ !chip->cyc_ctr.started[bucket]) {
+ rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
+ if (rc < 0) {
+ pr_err("Error in storing cycle_ctr rc: %d\n",
+ rc);
+ goto out;
+ } else {
+ chip->cyc_ctr.started[bucket - 1] = false;
+ chip->cyc_ctr.last_soc[bucket - 1] = 0;
+ }
+ }
+ if (!chip->cyc_ctr.started[bucket]) {
+ chip->cyc_ctr.started[bucket] = true;
+ chip->cyc_ctr.last_soc[bucket] = batt_soc;
+ }
+ } else {
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ if (chip->cyc_ctr.started[i] &&
+ batt_soc > chip->cyc_ctr.last_soc[i]) {
+ rc = fg_inc_store_cycle_ctr(chip, i);
+ if (rc < 0)
+ pr_err("Error in storing cycle_ctr rc: %d\n",
+ rc);
+ chip->cyc_ctr.last_soc[i] = 0;
+ }
+ chip->cyc_ctr.started[i] = false;
+ }
+ }
+out:
+ mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_get_cycle_count(struct fg_chip *chip)
+{
+ int count;
+
+ if (!chip->cyc_ctr.en)
+ return 0;
+
+ if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
+ return -EINVAL;
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
+ mutex_unlock(&chip->cyc_ctr.lock);
+ return count;
+}
+
+static void dump_sram(u8 *buf, int len)
+{
+ int i;
+ char str[16];
+
+ for (i = 0; i < len; i += 4) {
+ str[0] = '\0';
+ fill_string(str, sizeof(str), buf + i, 4);
+ pr_info("%03d %s\n", PROFILE_LOAD_WORD + (i / 4), str);
+ }
+}
+
+static bool is_profile_load_required(struct fg_chip *chip)
+{
+ u8 buf[PROFILE_COMP_LEN], val;
+ bool profiles_same = false;
+ int rc;
+
+ rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+ PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to read profile integrity rc=%d\n", rc);
+ return false;
+ }
+
+ /* Check if integrity bit is set */
+ if (val == 0x01) {
+ fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
+ rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+ buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading battery profile, rc:%d\n", rc);
+ return false;
+ }
+ profiles_same = memcmp(chip->batt_profile, buf,
+ PROFILE_COMP_LEN) == 0;
+ if (profiles_same) {
+ fg_dbg(chip, FG_STATUS, "Battery profile is same, not loading it\n");
+ return false;
+ }
+
+ if (!chip->dt.force_load_profile) {
+ pr_warn("Profiles doesn't match, skipping loading it since force_load_profile is disabled\n");
+ if (fg_sram_dump) {
+ pr_info("FG: loaded profile:\n");
+ dump_sram(buf, PROFILE_COMP_LEN);
+ pr_info("FG: available profile:\n");
+ dump_sram(chip->batt_profile, PROFILE_LEN);
+ }
+ return false;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Profiles are different, loading the correct one\n");
+ } else {
+ fg_dbg(chip, FG_STATUS, "Profile integrity bit is not set\n");
+ if (fg_sram_dump) {
+ pr_info("FG: profile to be loaded:\n");
+ dump_sram(chip->batt_profile, PROFILE_LEN);
+ }
+ }
+ return true;
+}
+
+#define SOC_READY_WAIT_MS 2000
+static int __fg_restart(struct fg_chip *chip)
+{
+ int rc, msoc;
+ bool tried_again = false;
+
+ rc = fg_get_prop_capacity(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->last_soc = msoc;
+ chip->fg_restarting = true;
+ reinit_completion(&chip->soc_ready);
+ rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT,
+ RESTART_GO_BIT);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_SOC_RESTART(chip), rc);
+ goto out;
+ }
+
+wait:
+ rc = wait_for_completion_interruptible_timeout(&chip->soc_ready,
+ msecs_to_jiffies(SOC_READY_WAIT_MS));
+
+ /* If we were interrupted wait again one more time. */
+ if (rc == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (rc <= 0) {
+ pr_err("wait for soc_ready timed out rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_SOC_RESTART(chip), rc);
+ goto out;
+ }
+out:
+ chip->fg_restarting = false;
+ return rc;
+}
+
+static void profile_load_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ profile_load_work.work);
+ u8 buf[2], val;
+ int rc;
+
+ vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+ if (!is_profile_load_required(chip))
+ goto done;
+
+ clear_cycle_counter(chip);
+ mutex_lock(&chip->cl.lock);
+ chip->cl.learned_cc_uah = 0;
+ chip->cl.active = false;
+ mutex_unlock(&chip->cl.lock);
+
+ fg_dbg(chip, FG_STATUS, "profile loading started\n");
+ rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_SOC_RESTART(chip), rc);
+ goto out;
+ }
+
+ /* load battery profile */
+ rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+ chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("Error in writing battery profile, rc:%d\n", rc);
+ goto out;
+ }
+
+ rc = __fg_restart(chip);
+ if (rc < 0) {
+ pr_err("Error in restarting FG, rc=%d\n", rc);
+ goto out;
+ }
+
+ fg_dbg(chip, FG_STATUS, "SOC is ready\n");
+
+ /* Set the profile integrity bit */
+ val = 0x1;
+ rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+ PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to write profile integrity rc=%d\n", rc);
+ goto out;
+ }
+
+done:
+ rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading %04x[%d] rc=%d\n", NOM_CAP_WORD,
+ NOM_CAP_OFFSET, rc);
+ } else {
+ chip->cl.nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
+ rc = fg_load_learned_cap_from_sram(chip);
+ if (rc < 0)
+ pr_err("Error in loading capacity learning data, rc:%d\n",
+ rc);
+ }
+
+ chip->profile_loaded = true;
+ fg_dbg(chip, FG_STATUS, "profile loaded successfully");
+out:
+ vote(chip->awake_votable, PROFILE_LOAD, false, 0);
+}
+
+static int fg_restart_sysfs(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("Unable to set fg_restart: %d\n", rc);
+ return rc;
+ }
+
+ if (fg_restart != 1) {
+ pr_err("Bad value %d\n", fg_restart);
+ return -EINVAL;
+ }
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+
+ chip = power_supply_get_drvdata(bms_psy);
+ rc = __fg_restart(chip);
+ if (rc < 0) {
+ pr_err("Error in restarting FG, rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_info("FG restart done\n");
+ return rc;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+ .set = fg_restart_sysfs,
+ .get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
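+
+/*
+ * Usage sketch (module name assumed): writing 1 to the parameter triggers
+ * a fuel gauge restart from user space, e.g.:
+ *
+ *   echo 1 > /sys/module/qpnp_fg_gen3/parameters/restart
+ */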
+
/* PSY CALLBACKS STAY HERE */
static int fg_psy_get_property(struct power_supply *psy,
@@ -807,7 +1767,10 @@ static int fg_psy_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CAPACITY:
- rc = fg_get_prop_capacity(chip, &pval->intval);
+ if (chip->fg_restarting)
+ pval->intval = chip->last_soc;
+ else
+ rc = fg_get_prop_capacity(chip, &pval->intval);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
rc = fg_get_battery_voltage(chip, &pval->intval);
@@ -825,7 +1788,7 @@ static int fg_psy_get_property(struct power_supply *psy,
rc = fg_get_sram_prop(chip, FG_SRAM_OCV, &pval->intval);
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- pval->intval = chip->nom_cap_uah;
+ pval->intval = chip->cl.nom_cap_uah;
break;
case POWER_SUPPLY_PROP_RESISTANCE_ID:
rc = fg_get_batt_id(chip, &pval->intval);
@@ -835,6 +1798,23 @@ static int fg_psy_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
pval->intval = chip->bp.float_volt_uv;
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ pval->intval = fg_get_cycle_count(chip);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ pval->intval = chip->cyc_ctr.id;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+ rc = fg_get_cc_soc(chip, &pval->intval);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ pval->intval = chip->cl.init_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ pval->intval = chip->cl.learned_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ rc = fg_get_cc_soc_sw(chip, &pval->intval);
break;
default:
break;
@@ -847,7 +1827,18 @@ static int fg_psy_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *pval)
{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+
switch (psp) {
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ if ((pval->intval > 0) && (pval->intval <= BUCKET_COUNT)) {
+ chip->cyc_ctr.id = pval->intval;
+ } else {
+ pr_err("rejecting invalid cycle_count_id = %d\n",
+ pval->intval);
+ return -EINVAL;
+ }
+ break;
default:
break;
}
@@ -859,6 +1850,8 @@ static int fg_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
switch (psp) {
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ return 1;
default:
break;
}
@@ -881,8 +1874,14 @@ static int fg_notifier_cb(struct notifier_block *nb,
return NOTIFY_OK;
if ((strcmp(psy->desc->name, "battery") == 0)
- || (strcmp(psy->desc->name, "usb") == 0))
+ || (strcmp(psy->desc->name, "usb") == 0)) {
+ /*
+ * We cannot vote for awake votable here as that takes
+ * a mutex lock and this is executed in an atomic context.
+ */
+ pm_stay_awake(chip->dev);
schedule_work(&chip->status_change_work);
+ }
return NOTIFY_OK;
}
@@ -898,6 +1897,12 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_BATTERY_TYPE,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
};
static const struct power_supply_desc fg_psy_desc = {
@@ -936,6 +1941,32 @@ static int fg_hw_init(struct fg_chip *chip)
return rc;
}
+ /* This SRAM register is only present in v2.0 */
+ if (chip->pmic_rev_id->rev4 == PMICOBALT_V2P0_REV4 &&
+ chip->bp.float_volt_uv > 0) {
+ fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
+ chip->bp.float_volt_uv / 1000, buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
+ chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, buf,
+ chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing float_volt, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (chip->bp.vbatt_full_mv > 0) {
+ fg_encode(chip->sp, FG_SRAM_VBATT_FULL, chip->bp.vbatt_full_mv,
+ buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_VBATT_FULL].addr_word,
+ chip->sp[FG_SRAM_VBATT_FULL].addr_byte, buf,
+ chip->sp[FG_SRAM_VBATT_FULL].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing vbatt_full, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma,
buf);
rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word,
@@ -984,16 +2015,9 @@ static int fg_hw_init(struct fg_chip *chip)
}
if (chip->dt.recharge_soc_thr > 0 && chip->dt.recharge_soc_thr < 100) {
- fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR,
- chip->dt.recharge_soc_thr, buf);
- rc = fg_sram_write(chip,
- chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
- chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte,
- buf, chip->sp[FG_SRAM_RECHARGE_SOC_THR].len,
- FG_IMA_DEFAULT);
+ rc = fg_set_recharge_soc(chip, chip->dt.recharge_soc_thr);
if (rc < 0) {
- pr_err("Error in writing recharge_soc_thr, rc=%d\n",
- rc);
+ pr_err("Error in setting recharge_soc, rc=%d\n", rc);
return rc;
}
}
@@ -1054,36 +2078,35 @@ static int fg_hw_init(struct fg_chip *chip)
}
}
- return 0;
-}
-
-static int fg_memif_init(struct fg_chip *chip)
-{
- return fg_ima_init(chip);
-}
+ if (chip->cyc_ctr.en)
+ restore_cycle_counter(chip);
-static int fg_batt_profile_init(struct fg_chip *chip)
-{
- int rc;
-
- if (!chip->batt_profile) {
- chip->batt_profile = devm_kcalloc(chip->dev, PROFILE_LEN,
- sizeof(*chip->batt_profile),
- GFP_KERNEL);
- if (!chip->batt_profile)
- return -ENOMEM;
+ if (chip->dt.jeita_hyst_temp >= 0) {
+ val = chip->dt.jeita_hyst_temp << JEITA_TEMP_HYST_SHIFT;
+ rc = fg_masked_write(chip, BATT_INFO_BATT_TEMP_CFG(chip),
+ JEITA_TEMP_HYST_MASK, val);
+ if (rc < 0) {
+ pr_err("Error in writing batt_temp_cfg, rc=%d\n", rc);
+ return rc;
+ }
}
- rc = fg_get_batt_profile(chip);
+ get_batt_temp_delta(chip->dt.batt_temp_delta, &val);
+ rc = fg_masked_write(chip, BATT_INFO_BATT_TMPR_INTR(chip),
+ CHANGE_THOLD_MASK, val);
if (rc < 0) {
- pr_err("Error in getting battery profile, rc:%d\n", rc);
+ pr_err("Error in writing batt_temp_delta, rc=%d\n", rc);
return rc;
}
- schedule_delayed_work(&chip->profile_load_work, msecs_to_jiffies(0));
return 0;
}
+static int fg_memif_init(struct fg_chip *chip)
+{
+ return fg_ima_init(chip);
+}
+
/* INTERRUPT HANDLERS STAY HERE */
static irqreturn_t fg_vbatt_low_irq_handler(int irq, void *data)
@@ -1111,15 +2134,16 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
chip->battery_missing = (status & BT_MISS_BIT);
if (chip->battery_missing) {
- chip->batt_id_avail = false;
+ chip->profile_available = false;
chip->profile_loaded = false;
+ clear_cycle_counter(chip);
} else {
- rc = fg_batt_profile_init(chip);
+ rc = fg_get_batt_profile(chip);
if (rc < 0) {
- pr_err("Error in initializing battery profile, rc=%d\n",
- rc);
+ pr_err("Error in getting battery profile, rc:%d\n", rc);
return IRQ_HANDLED;
}
+ schedule_delayed_work(&chip->profile_load_work, 0);
}
return IRQ_HANDLED;
@@ -1128,8 +2152,33 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
+ union power_supply_propval prop = {0, };
+ int rc, batt_temp;
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (rc < 0) {
+ pr_err("Error in getting batt_temp\n");
+ return IRQ_HANDLED;
+ }
+
+ if (!is_charger_available(chip)) {
+ chip->last_batt_temp = batt_temp;
+ return IRQ_HANDLED;
+ }
+
+ power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+ &prop);
+ chip->health = prop.intval;
+
+ if (abs(chip->last_batt_temp - batt_temp) > 30)
+ pr_warn("Battery temperature last:%d current: %d\n",
+ chip->last_batt_temp, batt_temp);
+
+ if (chip->last_batt_temp != batt_temp) {
+ chip->last_batt_temp = batt_temp;
+ power_supply_changed(chip->batt_psy);
+ }
return IRQ_HANDLED;
}
@@ -1154,8 +2203,27 @@ static irqreturn_t fg_soc_update_irq_handler(int irq, void *data)
static irqreturn_t fg_delta_soc_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
+ int rc;
+
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+
+ if (is_charger_available(chip))
+ power_supply_changed(chip->batt_psy);
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+
+ if (chip->cl.active)
+ fg_cap_learning_update(chip);
+
+ rc = fg_charge_full_update(chip);
+ if (rc < 0)
+ pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+ rc = fg_adjust_ki_coeff_dischg(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
return IRQ_HANDLED;
}
@@ -1163,6 +2231,9 @@ static irqreturn_t fg_empty_soc_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
+ if (is_charger_available(chip))
+ power_supply_changed(chip->batt_psy);
+
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
return IRQ_HANDLED;
}
@@ -1186,39 +2257,79 @@ static irqreturn_t fg_dummy_irq_handler(int irq, void *data)
static struct fg_irq_info fg_irqs[FG_IRQ_MAX] = {
/* BATT_SOC irqs */
[MSOC_FULL_IRQ] = {
- "msoc-full", fg_soc_irq_handler, true },
+ .name = "msoc-full",
+ .handler = fg_soc_irq_handler,
+ },
[MSOC_HIGH_IRQ] = {
- "msoc-high", fg_soc_irq_handler, true },
+ .name = "msoc-high",
+ .handler = fg_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_EMPTY_IRQ] = {
- "msoc-empty", fg_empty_soc_irq_handler, true },
+ .name = "msoc-empty",
+ .handler = fg_empty_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_LOW_IRQ] = {
- "msoc-low", fg_soc_irq_handler },
+ .name = "msoc-low",
+ .handler = fg_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_DELTA_IRQ] = {
- "msoc-delta", fg_delta_soc_irq_handler, true },
+ .name = "msoc-delta",
+ .handler = fg_delta_soc_irq_handler,
+ .wakeable = true,
+ },
[BSOC_DELTA_IRQ] = {
- "bsoc-delta", fg_delta_soc_irq_handler, true },
+ .name = "bsoc-delta",
+ .handler = fg_dummy_irq_handler,
+ },
[SOC_READY_IRQ] = {
- "soc-ready", fg_first_est_irq_handler, true },
+ .name = "soc-ready",
+ .handler = fg_first_est_irq_handler,
+ .wakeable = true,
+ },
[SOC_UPDATE_IRQ] = {
- "soc-update", fg_soc_update_irq_handler },
+ .name = "soc-update",
+ .handler = fg_soc_update_irq_handler,
+ },
/* BATT_INFO irqs */
[BATT_TEMP_DELTA_IRQ] = {
- "batt-temp-delta", fg_delta_batt_temp_irq_handler },
+ .name = "batt-temp-delta",
+ .handler = fg_delta_batt_temp_irq_handler,
+ .wakeable = true,
+ },
[BATT_MISSING_IRQ] = {
- "batt-missing", fg_batt_missing_irq_handler, true },
+ .name = "batt-missing",
+ .handler = fg_batt_missing_irq_handler,
+ .wakeable = true,
+ },
[ESR_DELTA_IRQ] = {
- "esr-delta", fg_dummy_irq_handler },
+ .name = "esr-delta",
+ .handler = fg_dummy_irq_handler,
+ },
[VBATT_LOW_IRQ] = {
- "vbatt-low", fg_vbatt_low_irq_handler, true },
+ .name = "vbatt-low",
+ .handler = fg_vbatt_low_irq_handler,
+ .wakeable = true,
+ },
[VBATT_PRED_DELTA_IRQ] = {
- "vbatt-pred-delta", fg_dummy_irq_handler },
+ .name = "vbatt-pred-delta",
+ .handler = fg_dummy_irq_handler,
+ },
/* MEM_IF irqs */
[DMA_GRANT_IRQ] = {
- "dma-grant", fg_dummy_irq_handler },
+ .name = "dma-grant",
+ .handler = fg_dummy_irq_handler,
+ },
[MEM_XCP_IRQ] = {
- "mem-xcp", fg_dummy_irq_handler },
+ .name = "mem-xcp",
+ .handler = fg_dummy_irq_handler,
+ },
[IMA_RDY_IRQ] = {
- "ima-rdy", fg_dummy_irq_handler },
+ .name = "ima-rdy",
+ .handler = fg_dummy_irq_handler,
+ },
};
static int fg_get_irq_index_byname(const char *name)
@@ -1273,22 +2384,98 @@ static int fg_register_interrupts(struct fg_chip *chip)
return 0;
}
+static int fg_parse_ki_coefficients(struct fg_chip *chip)
+{
+ struct device_node *node = chip->dev->of_node;
+ int rc, i;
+
+ rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-soc-dischg",
+ sizeof(u32));
+ if (rc != KI_COEFF_SOC_LEVELS)
+ return 0;
+
+ rc = of_property_read_u32_array(node, "qcom,ki-coeff-soc-dischg",
+ chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
+ if (rc < 0) {
+ pr_err("Error in reading ki-coeff-soc-dischg, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-med-dischg",
+ sizeof(u32));
+ if (rc != KI_COEFF_SOC_LEVELS)
+ return 0;
+
+ rc = of_property_read_u32_array(node, "qcom,ki-coeff-med-dischg",
+ chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS);
+ if (rc < 0) {
+ pr_err("Error in reading ki-coeff-med-dischg, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-hi-dischg",
+ sizeof(u32));
+ if (rc != KI_COEFF_SOC_LEVELS)
+ return 0;
+
+ rc = of_property_read_u32_array(node, "qcom,ki-coeff-hi-dischg",
+ chip->dt.ki_coeff_hi_dischg, KI_COEFF_SOC_LEVELS);
+ if (rc < 0) {
+ pr_err("Error in reading ki-coeff-hi-dischg, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < KI_COEFF_SOC_LEVELS; i++) {
+ if (chip->dt.ki_coeff_soc[i] < 0 ||
+ chip->dt.ki_coeff_soc[i] > FULL_CAPACITY) {
+ pr_err("Error in ki_coeff_soc_dischg values\n");
+ return -EINVAL;
+ }
+
+ if (chip->dt.ki_coeff_med_dischg[i] < 0 ||
+ chip->dt.ki_coeff_med_dischg[i] > KI_COEFF_MAX) {
+ pr_err("Error in ki_coeff_med_dischg values\n");
+ return -EINVAL;
+ }
+
+		if (chip->dt.ki_coeff_hi_dischg[i] < 0 ||
+			chip->dt.ki_coeff_hi_dischg[i] > KI_COEFF_MAX) {
+			pr_err("Error in ki_coeff_hi_dischg values\n");
+			return -EINVAL;
+		}
+ }
+ chip->ki_coeff_dischg_en = true;
+ return 0;
+}
+
#define DEFAULT_CUTOFF_VOLT_MV 3200
#define DEFAULT_EMPTY_VOLT_MV 3100
#define DEFAULT_CHG_TERM_CURR_MA 100
-#define DEFAULT_SYS_TERM_CURR_MA 125
+#define DEFAULT_SYS_TERM_CURR_MA -125
#define DEFAULT_DELTA_SOC_THR 1
#define DEFAULT_RECHARGE_SOC_THR 95
#define DEFAULT_BATT_TEMP_COLD 0
#define DEFAULT_BATT_TEMP_COOL 5
#define DEFAULT_BATT_TEMP_WARM 45
#define DEFAULT_BATT_TEMP_HOT 50
+#define DEFAULT_CL_START_SOC 15
+#define DEFAULT_CL_MIN_TEMP_DECIDEGC 150
+#define DEFAULT_CL_MAX_TEMP_DECIDEGC 450
+#define DEFAULT_CL_MAX_INC_DECIPERC 5
+#define DEFAULT_CL_MAX_DEC_DECIPERC 100
+#define DEFAULT_CL_MIN_LIM_DECIPERC 0
+#define DEFAULT_CL_MAX_LIM_DECIPERC 0
+#define BTEMP_DELTA_LOW 2
+#define BTEMP_DELTA_HIGH 10
static int fg_parse_dt(struct fg_chip *chip)
{
struct device_node *child, *revid_node, *node = chip->dev->of_node;
u32 base, temp;
u8 subtype;
- int rc, len;
+ int rc;
if (!node) {
dev_err(chip->dev, "device tree node missing\n");
@@ -1318,12 +2505,15 @@ static int fg_parse_dt(struct fg_chip *chip)
switch (chip->pmic_rev_id->pmic_subtype) {
case PMICOBALT_SUBTYPE:
- if (chip->pmic_rev_id->rev4 < PMICOBALT_V2P0_REV4)
+ if (chip->pmic_rev_id->rev4 < PMICOBALT_V2P0_REV4) {
chip->sp = pmicobalt_v1_sram_params;
- else if (chip->pmic_rev_id->rev4 == PMICOBALT_V2P0_REV4)
+ chip->alg_flags = pmicobalt_v1_alg_flags;
+ } else if (chip->pmic_rev_id->rev4 == PMICOBALT_V2P0_REV4) {
chip->sp = pmicobalt_v2_sram_params;
- else
+ chip->alg_flags = pmicobalt_v2_alg_flags;
+ } else {
return -EINVAL;
+ }
break;
default:
return -EINVAL;
@@ -1376,6 +2566,11 @@ static int fg_parse_dt(struct fg_chip *chip)
}
}
+ rc = fg_get_batt_profile(chip);
+ if (rc < 0)
+ pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n",
+ chip->batt_id, rc);
+
/* Read all the optional properties below */
rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage", &temp);
if (rc < 0)
@@ -1429,15 +2624,14 @@ static int fg_parse_dt(struct fg_chip *chip)
chip->dt.jeita_thresholds[JEITA_COOL] = DEFAULT_BATT_TEMP_COOL;
chip->dt.jeita_thresholds[JEITA_WARM] = DEFAULT_BATT_TEMP_WARM;
chip->dt.jeita_thresholds[JEITA_HOT] = DEFAULT_BATT_TEMP_HOT;
- if (of_find_property(node, "qcom,fg-jeita-thresholds", &len)) {
- if (len == NUM_JEITA_LEVELS) {
- rc = of_property_read_u32_array(node,
- "qcom,fg-jeita-thresholds",
- chip->dt.jeita_thresholds, len);
- if (rc < 0)
- pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
- rc);
- }
+ if (of_property_count_elems_of_size(node, "qcom,fg-jeita-thresholds",
+ sizeof(u32)) == NUM_JEITA_LEVELS) {
+ rc = of_property_read_u32_array(node,
+ "qcom,fg-jeita-thresholds",
+ chip->dt.jeita_thresholds, NUM_JEITA_LEVELS);
+ if (rc < 0)
+ pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
+ rc);
}
rc = of_property_read_u32(node, "qcom,fg-esr-timer-charging", &temp);
@@ -1458,6 +2652,73 @@ static int fg_parse_dt(struct fg_chip *chip)
else
chip->dt.esr_timer_asleep = temp;
+ chip->cyc_ctr.en = of_property_read_bool(node, "qcom,cycle-counter-en");
+ if (chip->cyc_ctr.en)
+ chip->cyc_ctr.id = 1;
+
+ chip->dt.force_load_profile = of_property_read_bool(node,
+ "qcom,fg-force-load-profile");
+
+ rc = of_property_read_u32(node, "qcom,cl-start-capacity", &temp);
+ if (rc < 0)
+ chip->dt.cl_start_soc = DEFAULT_CL_START_SOC;
+ else
+ chip->dt.cl_start_soc = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp);
+ if (rc < 0)
+ chip->dt.cl_min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC;
+ else
+ chip->dt.cl_min_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-temp", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_temp = DEFAULT_CL_MAX_TEMP_DECIDEGC;
+ else
+ chip->dt.cl_max_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-increment", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_inc = DEFAULT_CL_MAX_INC_DECIPERC;
+ else
+ chip->dt.cl_max_cap_inc = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-decrement", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_dec = DEFAULT_CL_MAX_DEC_DECIPERC;
+ else
+ chip->dt.cl_max_cap_dec = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-min-limit", &temp);
+ if (rc < 0)
+ chip->dt.cl_min_cap_limit = DEFAULT_CL_MIN_LIM_DECIPERC;
+ else
+ chip->dt.cl_min_cap_limit = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-limit", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_limit = DEFAULT_CL_MAX_LIM_DECIPERC;
+ else
+ chip->dt.cl_max_cap_limit = temp;
+
+ rc = of_property_read_u32(node, "qcom,fg-jeita-hyst-temp", &temp);
+ if (rc < 0)
+ chip->dt.jeita_hyst_temp = -EINVAL;
+ else
+ chip->dt.jeita_hyst_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,fg-batt-temp-delta", &temp);
+	if (rc < 0 || temp <= BTEMP_DELTA_LOW || temp > BTEMP_DELTA_HIGH)
+		chip->dt.batt_temp_delta = -EINVAL;
+	else
+		chip->dt.batt_temp_delta = temp;
+
+ chip->dt.hold_soc_while_full = of_property_read_bool(node,
+ "qcom,hold-soc-while-full");
+
+ rc = fg_parse_ki_coefficients(chip);
+ if (rc < 0)
+ pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
return 0;
}
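
The read-or-default sequence above repeats once per optional property. As a
side note, a hypothetical helper (not part of this patch; the macro name is
made up here) could collapse the pattern, assuming only the
of_property_read_u32() semantics already used above:

	/* Hypothetical: read "prop" into dest, falling back to def on error */
	#define FG_READ_U32_OR_DEFAULT(node, prop, dest, def)		\
	do {								\
		u32 __val;						\
		if (of_property_read_u32(node, prop, &__val))		\
			(dest) = (def);					\
		else							\
			(dest) = __val;					\
	} while (0)

	/*
	 * Usage sketch:
	 * FG_READ_U32_OR_DEFAULT(node, "qcom,cl-start-capacity",
	 *			  chip->dt.cl_start_soc, DEFAULT_CL_START_SOC);
	 */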
@@ -1465,7 +2726,7 @@ static int fg_parse_dt(struct fg_chip *chip)
static void fg_cleanup(struct fg_chip *chip)
{
power_supply_unreg_notifier(&chip->nb);
- debugfs_remove_recursive(chip->dentry);
+ debugfs_remove_recursive(chip->dfs_root);
if (chip->awake_votable)
destroy_votable(chip->awake_votable);
@@ -1510,10 +2771,13 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->bus_lock);
mutex_init(&chip->sram_rw_lock);
+ mutex_init(&chip->cyc_ctr.lock);
+ mutex_init(&chip->cl.lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
INIT_WORK(&chip->status_change_work, status_change_work);
+ INIT_WORK(&chip->cycle_count_work, cycle_count_work);
rc = fg_memif_init(chip);
if (rc < 0) {
@@ -1562,17 +2826,15 @@ static int fg_gen3_probe(struct platform_device *pdev)
if (fg_irqs[SOC_UPDATE_IRQ].irq)
disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
- rc = fg_sram_debugfs_create(chip);
+ rc = fg_debugfs_create(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n",
rc);
goto exit;
}
- rc = fg_batt_profile_init(chip);
- if (rc < 0)
- dev_warn(chip->dev, "Error in initializing battery profile, rc:%d\n",
- rc);
+ if (chip->profile_available)
+ schedule_delayed_work(&chip->profile_load_work, 0);
device_init_wakeup(chip->dev, true);
pr_debug("FG GEN3 driver successfully probed\n");
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index 7caa9548308a..8aaeb095db3c 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -18,11 +18,13 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
#include "smb-reg.h"
#include "smb-lib.h"
+#include "storm-watch.h"
#include "pmic-voter.h"
#define SMB2_DEFAULT_WPWR_UW 8000000
@@ -56,6 +58,13 @@ static struct smb_params v1_params = {
.max_u = 4800000,
.step_u = 25000,
},
+ .otg_cl = {
+ .name = "usb otg current limit",
+ .reg = OTG_CURRENT_LIMIT_CFG_REG,
+ .min_u = 250000,
+ .max_u = 2000000,
+ .step_u = 250000,
+ },
.dc_icl = {
.name = "dc input current limit",
.reg = DCIN_CURRENT_LIMIT_CFG_REG,
@@ -197,14 +206,16 @@ static struct smb_params v1_params = {
#define STEP_CHARGING_MAX_STEPS 5
struct smb_dt_props {
- bool suspend_input;
+ bool no_battery;
int fcc_ua;
int usb_icl_ua;
+ int otg_cl_ua;
int dc_icl_ua;
int fv_uv;
int wipower_max_uw;
u32 step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
s32 step_cc_delta[STEP_CHARGING_MAX_STEPS];
+ struct device_node *revid_dev_node;
};
struct smb2 {
@@ -223,6 +234,7 @@ module_param_named(
pl_master_percent, __pl_master_percent, int, S_IRUSR | S_IWUSR
);
+#define MICRO_1P5A 1500000
static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -256,8 +268,8 @@ static int smb2_parse_dt(struct smb2 *chip)
if (rc < 0)
chg->step_chg_enabled = false;
- chip->dt.suspend_input = of_property_read_bool(node,
- "qcom,suspend-input");
+ chip->dt.no_battery = of_property_read_bool(node,
+ "qcom,batteryless-platform");
rc = of_property_read_u32(node,
"qcom,fcc-max-ua", &chip->dt.fcc_ua);
@@ -275,6 +287,11 @@ static int smb2_parse_dt(struct smb2 *chip)
chip->dt.usb_icl_ua = -EINVAL;
rc = of_property_read_u32(node,
+ "qcom,otg-cl-ua", &chip->dt.otg_cl_ua);
+ if (rc < 0)
+ chip->dt.otg_cl_ua = MICRO_1P5A;
+
+ rc = of_property_read_u32(node,
"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
if (rc < 0)
chip->dt.dc_icl_ua = -EINVAL;
@@ -325,6 +342,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_PD_ACTIVE,
POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_PARALLEL_DISABLE,
};
static int smb2_usb_get_prop(struct power_supply *psy,
@@ -387,6 +405,10 @@ static int smb2_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
rc = smblib_get_prop_usb_current_now(chg, val);
break;
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ val->intval = get_client_vote(chg->pl_disable_votable,
+ USER_VOTER);
+ break;
default:
pr_err("get prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -431,6 +453,9 @@ static int smb2_usb_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_PD_ACTIVE:
rc = smblib_set_prop_pd_active(chg, val);
break;
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
+ break;
default:
pr_err("set prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -445,6 +470,7 @@ static int smb2_usb_prop_is_writeable(struct power_supply *psy,
{
switch (psp) {
case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
return 1;
default:
break;
@@ -596,14 +622,21 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
+ POWER_SUPPLY_PROP_CHARGE_DONE,
};
static int smb2_batt_get_prop(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- int rc;
struct smb_charger *chg = power_supply_get_drvdata(psy);
+ int rc = 0;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -635,15 +668,37 @@ static int smb2_batt_get_prop(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
rc = smblib_get_prop_input_current_limited(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+ val->intval = chg->step_chg_enabled;
+ break;
+ case POWER_SUPPLY_PROP_STEP_CHARGING_STEP:
+ rc = smblib_get_prop_step_chg_step(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ rc = smblib_get_prop_batt_voltage_now(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ rc = smblib_get_prop_batt_current_now(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ rc = smblib_get_prop_batt_temp(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_DONE:
+ rc = smblib_get_prop_batt_charge_done(chg, val);
break;
default:
pr_err("batt power supply prop %d not supported\n", psp);
return -EINVAL;
}
+
if (rc < 0) {
pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
return -ENODATA;
}
+
return 0;
}
@@ -929,6 +984,9 @@ static int smb2_init_hw(struct smb2 *chip)
struct smb_charger *chg = &chip->chg;
int rc;
+ if (chip->dt.no_battery)
+ chg->fake_capacity = 50;
+
if (chip->dt.fcc_ua < 0)
smblib_get_charge_param(chg, &chg->param.fcc, &chip->dt.fcc_ua);
@@ -943,15 +1001,19 @@ static int smb2_init_hw(struct smb2 *chip)
smblib_get_charge_param(chg, &chg->param.dc_icl,
&chip->dt.dc_icl_ua);
+ chg->otg_cl_ua = chip->dt.otg_cl_ua;
+
/* votes must be cast before configuring software control */
vote(chg->pl_disable_votable,
- USBIN_ICL_VOTER, true, 0);
+ PL_INDIRECT_VOTER, true, 0);
vote(chg->pl_disable_votable,
CHG_STATE_VOTER, true, 0);
+ vote(chg->pl_disable_votable,
+ PARALLEL_PSY_VOTER, true, 0);
vote(chg->usb_suspend_votable,
- DEFAULT_VOTER, chip->dt.suspend_input, 0);
+ DEFAULT_VOTER, chip->dt.no_battery, 0);
vote(chg->dc_suspend_votable,
- DEFAULT_VOTER, chip->dt.suspend_input, 0);
+ DEFAULT_VOTER, chip->dt.no_battery, 0);
vote(chg->fcc_max_votable,
DEFAULT_VOTER, true, chip->dt.fcc_ua);
vote(chg->fv_votable,
@@ -961,14 +1023,10 @@ static int smb2_init_hw(struct smb2 *chip)
vote(chg->dc_icl_votable,
DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
- /*
- * Configure charge enable for software control; active high, and end
- * the charge cycle while the battery is OV.
- */
+ /* Configure charge enable for software control; active high */
rc = smblib_masked_write(chg, CHGR_CFG2_REG,
CHG_EN_POLARITY_BIT |
- CHG_EN_SRC_BIT |
- BAT_OV_ECC_BIT, BAT_OV_ECC_BIT);
+ CHG_EN_SRC_BIT, 0);
if (rc < 0) {
dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
return rc;
@@ -1064,6 +1122,43 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+static int smb2_setup_wa_flags(struct smb2 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ struct pmic_revid_data *pmic_rev_id;
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property\n");
+ return -EINVAL;
+ }
+
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR_OR_NULL(pmic_rev_id)) {
+ /*
+		 * The revid peripheral must be registered; a failure
+		 * here only indicates that the revid module has not
+		 * probed yet, so defer our own probe.
+ */
+ return -EPROBE_DEFER;
+ }
+
+ switch (pmic_rev_id->pmic_subtype) {
+ case PMICOBALT_SUBTYPE:
+ if (pmic_rev_id->rev4 == PMICOBALT_V1P1_REV4) /* PMI rev 1.1 */
+ chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
+ break;
+ default:
+ pr_err("PMIC subtype %d not supported\n",
+ pmic_rev_id->pmic_subtype);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/****************************
* DETERMINE INITIAL STATUS *
****************************/
@@ -1088,60 +1183,181 @@ static int smb2_determine_initial_status(struct smb2 *chip)
**************************/
struct smb2_irq_info {
- const char *name;
- const irq_handler_t handler;
- const bool wake;
- int irq;
+ const char *name;
+ const irq_handler_t handler;
+ const bool wake;
+ const struct storm_watch storm_data;
+ int irq;
};
static struct smb2_irq_info smb2_irqs[] = {
/* CHARGER IRQs */
- { "chg-error", smblib_handle_debug },
- { "chg-state-change", smblib_handle_chg_state_change, true },
- { "step-chg-state-change", smblib_handle_step_chg_state_change,
- true },
- { "step-chg-soc-update-fail", smblib_handle_step_chg_soc_update_fail,
- true },
- { "step-chg-soc-update-request",
- smblib_handle_step_chg_soc_update_request, true },
+ {
+ .name = "chg-error",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "chg-state-change",
+ .handler = smblib_handle_chg_state_change,
+ .wake = true,
+ },
+ {
+ .name = "step-chg-state-change",
+ .handler = smblib_handle_step_chg_state_change,
+ .wake = true,
+ },
+ {
+ .name = "step-chg-soc-update-fail",
+ .handler = smblib_handle_step_chg_soc_update_fail,
+ .wake = true,
+ },
+ {
+ .name = "step-chg-soc-update-request",
+ .handler = smblib_handle_step_chg_soc_update_request,
+ .wake = true,
+ },
/* OTG IRQs */
- { "otg-fail", smblib_handle_debug },
- { "otg-overcurrent", smblib_handle_debug },
- { "otg-oc-dis-sw-sts", smblib_handle_debug },
- { "testmode-change-detect", smblib_handle_debug },
+ {
+ .name = "otg-fail",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "otg-overcurrent",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "otg-oc-dis-sw-sts",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "testmode-change-detect",
+ .handler = smblib_handle_debug,
+ },
/* BATTERY IRQs */
- { "bat-temp", smblib_handle_batt_temp_changed },
- { "bat-ocp", smblib_handle_batt_psy_changed },
- { "bat-ov", smblib_handle_batt_psy_changed },
- { "bat-low", smblib_handle_batt_psy_changed },
- { "bat-therm-or-id-missing", smblib_handle_batt_psy_changed },
- { "bat-terminal-missing", smblib_handle_batt_psy_changed },
+ {
+ .name = "bat-temp",
+ .handler = smblib_handle_batt_temp_changed,
+ },
+ {
+ .name = "bat-ocp",
+ .handler = smblib_handle_batt_psy_changed,
+ },
+ {
+ .name = "bat-ov",
+ .handler = smblib_handle_batt_psy_changed,
+ },
+ {
+ .name = "bat-low",
+ .handler = smblib_handle_batt_psy_changed,
+ },
+ {
+ .name = "bat-therm-or-id-missing",
+ .handler = smblib_handle_batt_psy_changed,
+ },
+ {
+ .name = "bat-terminal-missing",
+ .handler = smblib_handle_batt_psy_changed,
+ },
/* USB INPUT IRQs */
- { "usbin-collapse", smblib_handle_debug },
- { "usbin-lt-3p6v", smblib_handle_debug },
- { "usbin-uv", smblib_handle_debug },
- { "usbin-ov", smblib_handle_debug },
- { "usbin-plugin", smblib_handle_usb_plugin, true },
- { "usbin-src-change", smblib_handle_usb_source_change, true },
- { "usbin-icl-change", smblib_handle_icl_change, true },
- { "type-c-change", smblib_handle_usb_typec_change, true },
+ {
+ .name = "usbin-collapse",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "usbin-lt-3p6v",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "usbin-uv",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "usbin-ov",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "usbin-plugin",
+ .handler = smblib_handle_usb_plugin,
+ .wake = true,
+ },
+ {
+ .name = "usbin-src-change",
+ .handler = smblib_handle_usb_source_change,
+ .wake = true,
+ },
+ {
+ .name = "usbin-icl-change",
+ .handler = smblib_handle_icl_change,
+ .wake = true,
+ },
+ {
+ .name = "type-c-change",
+ .handler = smblib_handle_usb_typec_change,
+ .wake = true,
+ },
/* DC INPUT IRQs */
- { "dcin-collapse", smblib_handle_debug },
- { "dcin-lt-3p6v", smblib_handle_debug },
- { "dcin-uv", smblib_handle_debug },
- { "dcin-ov", smblib_handle_debug },
- { "dcin-plugin", smblib_handle_debug },
- { "div2-en-dg", smblib_handle_debug },
- { "dcin-icl-change", smblib_handle_debug },
+ {
+ .name = "dcin-collapse",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "dcin-lt-3p6v",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "dcin-uv",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "dcin-ov",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "dcin-plugin",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "div2-en-dg",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "dcin-icl-change",
+ .handler = smblib_handle_debug,
+ },
/* MISCELLANEOUS IRQs */
- { "wdog-snarl", NULL },
- { "wdog-bark", NULL },
- { "aicl-fail", smblib_handle_debug },
- { "aicl-done", smblib_handle_debug },
- { "high-duty-cycle", smblib_handle_high_duty_cycle, true },
- { "input-current-limiting", smblib_handle_debug },
- { "temperature-change", smblib_handle_debug },
- { "switcher-power-ok", smblib_handle_debug },
+ {
+ .name = "wdog-snarl",
+ .handler = NULL,
+ },
+ {
+ .name = "wdog-bark",
+ .handler = NULL,
+ },
+ {
+ .name = "aicl-fail",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "aicl-done",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "high-duty-cycle",
+ .handler = smblib_handle_high_duty_cycle,
+ .wake = true,
+ },
+ {
+ .name = "input-current-limiting",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "temperature-change",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "switcher-power-ok",
+ .handler = smblib_handle_debug,
+ },
};
static int smb2_get_irq_index_byname(const char *irq_name)
@@ -1184,6 +1400,7 @@ static int smb2_request_interrupt(struct smb2 *chip,
irq_data->parent_data = chip;
irq_data->name = irq_name;
+ irq_data->storm_data = smb2_irqs[irq_index].storm_data;
rc = devm_request_threaded_irq(chg->dev, irq, NULL,
smb2_irqs[irq_index].handler,
@@ -1249,6 +1466,13 @@ static int smb2_probe(struct platform_device *pdev)
return -EINVAL;
}
+ rc = smb2_setup_wa_flags(chip);
+ if (rc < 0) {
+ if (rc != -EPROBE_DEFER)
+ pr_err("Couldn't setup wa flags rc=%d\n", rc);
+ return rc;
+ }
+
rc = smblib_init(chg);
if (rc < 0) {
pr_err("Smblib_init failed rc=%d\n", rc);
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 21b330127369..ce76260be6f6 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -12,12 +12,14 @@
#include <linux/device.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include <linux/iio/consumer.h>
#include <linux/power_supply.h>
#include <linux/regulator/driver.h>
#include <linux/irq.h>
#include "smb-lib.h"
#include "smb-reg.h"
+#include "storm-watch.h"
#include "pmic-voter.h"
#define smblib_dbg(chg, reason, fmt, ...) \
@@ -102,7 +104,8 @@ static int smblib_get_step_charging_adjustment(struct smb_charger *chg,
return rc;
}
- step_state = (stat & STEP_CHARGING_STATUS_MASK) >> 3;
+ step_state = (stat & STEP_CHARGING_STATUS_MASK) >>
+ STEP_CHARGING_STATUS_SHIFT;
rc = smblib_get_charge_param(chg, &chg->param.step_cc_delta[step_state],
cc_offset);
@@ -519,7 +522,7 @@ static int smblib_fcc_max_vote_callback(struct votable *votable, void *data,
{
struct smb_charger *chg = data;
- return vote(chg->fcc_votable, FCC_MAX_RESULT, true, fcc_ua);
+ return vote(chg->fcc_votable, FCC_MAX_RESULT_VOTER, true, fcc_ua);
}
static int smblib_fcc_vote_callback(struct votable *votable, void *data,
@@ -640,6 +643,31 @@ suspend:
return rc;
}
+#define MICRO_250MA 250000
+static int smblib_otg_cl_config(struct smb_charger *chg, int otg_cl_ua)
+{
+ int rc = 0;
+
+ rc = smblib_set_charge_param(chg, &chg->param.otg_cl, otg_cl_ua);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set otg current limit rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* configure PFM/PWM mode for OTG regulator */
+ rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG3_REG,
+ ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT,
+			otg_cl_ua > MICRO_250MA ?
+				ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT : 0);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't write DC_ENG_SSUPPLY_CFG3_REG rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
int icl_ua, const char *client)
{
@@ -729,18 +757,51 @@ static int smblib_chg_disable_vote_callback(struct votable *votable, void *data,
return 0;
}
+
+static int smblib_pl_enable_indirect_vote_callback(struct votable *votable,
+ void *data, int chg_enable, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, !chg_enable, 0);
+
+ return 0;
+}
+
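
Because pl_enable_votable_indirect is created with VOTE_SET_ANY (see
smblib_create_votables further down), this callback folds an "any client
wants parallel charging" election into a single PL_INDIRECT_VOTER vote on
pl_disable_votable. An illustration of the intended flow, using only voters
introduced in this patch:

	/* USB input current deemed strong enough: ask for parallel charging */
	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);

	/* USB input back at 5 V: this client no longer asks for parallel */
	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);

	/*
	 * The effective result is true while any client votes true; the
	 * callback above then casts the inverse on pl_disable_votable under
	 * PL_INDIRECT_VOTER, so parallel charging still depends on the
	 * remaining disable voters (CHG_STATE_VOTER, PARALLEL_PSY_VOTER, ...).
	 */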
/*****************
* OTG REGULATOR *
*****************/
+#define OTG_SOFT_START_DELAY_MS 20
int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
+ u8 stat;
int rc = 0;
- rc = regmap_write(chg->regmap, CMD_OTG_REG, OTG_EN_BIT);
- if (rc < 0)
+ rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+ if (rc < 0) {
dev_err(chg->dev, "Couldn't enable OTG regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ msleep(OTG_SOFT_START_DELAY_MS);
+ rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read OTG_STATUS_REG rc=%d\n", rc);
+ return rc;
+ }
+ if (stat & BOOST_SOFTSTART_DONE_BIT)
+ smblib_otg_cl_config(chg, chg->otg_cl_ua);
return rc;
}
@@ -750,9 +811,22 @@ int smblib_vbus_regulator_disable(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int rc = 0;
- rc = regmap_write(chg->regmap, CMD_OTG_REG, 0);
- if (rc < 0)
+ rc = smblib_write(chg, CMD_OTG_REG, 0);
+ if (rc < 0) {
dev_err(chg->dev, "Couldn't disable OTG regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ smblib_otg_cl_config(chg, MICRO_250MA);
+
+ rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
return rc;
}
@@ -779,13 +853,24 @@ int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev)
int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
+ u8 stat;
int rc = 0;
+ /*
+	 * VCONN_EN_ORIENTATION overrides which CC pin is used for Vconn;
+	 * it must be set to the reverse polarity of CC_OUT.
+ */
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ return rc;
+ }
+	stat = (stat & CC_ORIENTATION_BIT) ? 0 : VCONN_EN_ORIENTATION_BIT;
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- VCONN_EN_VALUE_BIT, VCONN_EN_VALUE_BIT);
+ VCONN_EN_VALUE_BIT | VCONN_EN_ORIENTATION_BIT,
+ VCONN_EN_VALUE_BIT | stat);
if (rc < 0)
- dev_err(chg->dev, "Couldn't enable vconn regulator rc=%d\n",
- rc);
+ dev_err(chg->dev, "Couldn't enable vconn setting rc=%d\n", rc);
return rc;
}
@@ -870,25 +955,28 @@ int smblib_get_prop_batt_capacity(struct smb_charger *chg,
int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc;
- u8 stat;
union power_supply_propval pval = {0, };
+ bool usb_online, dc_online;
+ u8 stat;
+ int rc;
- smblib_get_prop_input_suspend(chg, &pval);
- if (pval.intval) {
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ rc = smblib_get_prop_usb_online(chg, &pval);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't get usb online property rc=%d\n",
+ rc);
return rc;
}
+ usb_online = (bool)pval.intval;
- rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ rc = smblib_get_prop_dc_online(chg, &pval);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ dev_err(chg->dev, "Couldn't get dc online property rc=%d\n",
rc);
return rc;
}
+ dc_online = (bool)pval.intval;
- if (!(stat & (USE_USBIN_BIT | USE_DCIN_BIT)) ||
- !(stat & VALID_INPUT_POWER_SOURCE_BIT)) {
+ if (!usb_online && !dc_online) {
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
return rc;
}
@@ -899,16 +987,29 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
rc);
return rc;
}
- smblib_dbg(chg, PR_REGISTER, "BATTERY_CHARGER_STATUS_1 = 0x%02x\n",
- stat);
stat = stat & BATTERY_CHARGER_STATUS_MASK;
- if (stat >= COMPLETED_CHARGE)
- val->intval = POWER_SUPPLY_STATUS_FULL;
- else
+ switch (stat) {
+ case TRICKLE_CHARGE:
+ case PRE_CHARGE:
+ case FAST_CHARGE:
+ case FULLON_CHARGE:
+ case TAPER_CHARGE:
val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case TERMINATE_CHARGE:
+ case INHIBIT_CHARGE:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case DISABLE_CHARGE:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ }
- return rc;
+ return 0;
}
int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
@@ -923,8 +1024,6 @@ int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
rc);
return rc;
}
- smblib_dbg(chg, PR_REGISTER, "BATTERY_CHARGER_STATUS_1 = 0x%02x\n",
- stat);
switch (stat & BATTERY_CHARGER_STATUS_MASK) {
case TRICKLE_CHARGE:
@@ -1003,6 +1102,87 @@ int smblib_get_prop_input_current_limited(struct smb_charger *chg,
return 0;
}
+int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->bms_psy)
+ return -EINVAL;
+
+ rc = power_supply_get_property(chg->bms_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, val);
+ return rc;
+}
+
+int smblib_get_prop_batt_current_now(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->bms_psy)
+ return -EINVAL;
+
+ rc = power_supply_get_property(chg->bms_psy,
+ POWER_SUPPLY_PROP_CURRENT_NOW, val);
+ return rc;
+}
+
+int smblib_get_prop_batt_temp(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->bms_psy)
+ return -EINVAL;
+
+ rc = power_supply_get_property(chg->bms_psy,
+ POWER_SUPPLY_PROP_TEMP, val);
+ return rc;
+}
+
+int smblib_get_prop_step_chg_step(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ if (!chg->step_chg_enabled) {
+ val->intval = -1;
+ return 0;
+ }
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ val->intval = (stat & STEP_CHARGING_STATUS_MASK) >>
+ STEP_CHARGING_STATUS_SHIFT;
+
+ return rc;
+}
+
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ stat = stat & BATTERY_CHARGER_STATUS_MASK;
+ val->intval = (stat == TERMINATE_CHARGE);
+ return 0;
+}
+
/***********************
* BATTERY PSY SETTERS *
***********************/
@@ -1054,13 +1234,14 @@ int smblib_set_prop_system_temp_level(struct smb_charger *chg,
chg->system_temp_level = val->intval;
if (chg->system_temp_level == chg->thermal_levels)
- return vote(chg->chg_disable_votable, THERMAL_DAEMON, true, 0);
+ return vote(chg->chg_disable_votable,
+ THERMAL_DAEMON_VOTER, true, 0);
- vote(chg->chg_disable_votable, THERMAL_DAEMON, false, 0);
+ vote(chg->chg_disable_votable, THERMAL_DAEMON_VOTER, false, 0);
if (chg->system_temp_level == 0)
- return vote(chg->fcc_votable, THERMAL_DAEMON, false, 0);
+ return vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, false, 0);
- vote(chg->fcc_votable, THERMAL_DAEMON, true,
+ vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, true,
chg->thermal_mitigation[chg->system_temp_level]);
return 0;
}
@@ -1181,7 +1362,6 @@ int smblib_get_prop_usb_online(struct smb_charger *chg,
val->intval = (stat & USE_USBIN_BIT) &&
(stat & VALID_INPUT_POWER_SOURCE_BIT);
-
return rc;
}
@@ -1500,7 +1680,11 @@ int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
return rc;
}
- chg->voltage_min_uv = val->intval;
+ if (chg->mode == PARALLEL_MASTER)
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER,
+ min_uv > MICRO_5V, 0);
+
+ chg->voltage_min_uv = min_uv;
return rc;
}
@@ -1518,7 +1702,7 @@ int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
return rc;
}
- chg->voltage_max_uv = val->intval;
+ chg->voltage_max_uv = max_uv;
return rc;
}
@@ -1526,6 +1710,7 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc;
+ u8 stat;
if (!get_effective_result(chg->pd_allowed_votable)) {
dev_err(chg->dev, "PD is not allowed\n");
@@ -1543,11 +1728,61 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
vote(chg->pd_allowed_votable, PD_VOTER, val->intval, 0);
+ /*
+ * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2 line
+ * when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override) is set
+ * or when VCONN_EN_VALUE_BIT is set.
+ */
+ if (val->intval) {
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ stat &= CC_ORIENTATION_BIT;
+ rc = smblib_masked_write(chg,
+ TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ VCONN_EN_ORIENTATION_BIT,
+ stat ? 0 : VCONN_EN_ORIENTATION_BIT);
+ if (rc < 0)
+ dev_err(chg->dev,
+ "Couldn't enable vconn on CC line rc=%d\n", rc);
+ }
+
+ /* CC pin selection s/w override in PD session; h/w otherwise. */
+ rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+ TYPEC_SPARE_CFG_BIT,
+ val->intval ? TYPEC_SPARE_CFG_BIT : 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't change cc_out ctrl to %s rc=%d\n",
+ val->intval ? "SW" : "HW", rc);
+ return rc;
+ }
+
chg->pd_active = (bool)val->intval;
smblib_update_usb_type(chg);
return rc;
}
+/************************
+ * PARALLEL PSY GETTERS *
+ ************************/
+
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+ union power_supply_propval *pval)
+{
+ if (IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+ chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i");
+
+ if (IS_ERR(chg->iio.batt_i_chan))
+ return PTR_ERR(chg->iio.batt_i_chan);
+
+ return iio_read_channel_processed(chg->iio.batt_i_chan, &pval->intval);
+}
+
/**********************
* INTERRUPT HANDLERS *
**********************/
@@ -1558,45 +1793,57 @@ irqreturn_t smblib_handle_debug(int irq, void *data)
struct smb_charger *chg = irq_data->parent_data;
smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
return IRQ_HANDLED;
}
-irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
+static void smblib_pl_handle_chg_state_change(struct smb_charger *chg, u8 stat)
{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
- union power_supply_propval pval = {0, };
- int rc;
-
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+ bool pl_enabled;
if (chg->mode != PARALLEL_MASTER)
- return IRQ_HANDLED;
+ return;
- rc = smblib_get_prop_batt_charge_type(chg, &pval);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't get batt charge type rc=%d\n", rc);
- return IRQ_HANDLED;
+ pl_enabled = !get_effective_result_locked(chg->pl_disable_votable);
+ switch (stat) {
+ case FAST_CHARGE:
+ case FULLON_CHARGE:
+ vote(chg->pl_disable_votable, CHG_STATE_VOTER, false, 0);
+ break;
+ case TAPER_CHARGE:
+ if (pl_enabled) {
+ cancel_delayed_work_sync(&chg->pl_taper_work);
+ schedule_delayed_work(&chg->pl_taper_work, 0);
+ }
+ break;
+ case TERMINATE_CHARGE:
+ case INHIBIT_CHARGE:
+ case DISABLE_CHARGE:
+ vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0);
+ break;
+ default:
+ break;
}
+}
- if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_FAST)
- vote(chg->pl_disable_votable, CHG_STATE_VOTER, false, 0);
+irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ u8 stat;
+ int rc;
- if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER
- && !get_effective_result_locked(chg->pl_disable_votable)) {
- cancel_delayed_work_sync(&chg->pl_taper_work);
- schedule_delayed_work(&chg->pl_taper_work, 0);
- }
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
- rc = smblib_get_prop_batt_status(chg, &pval);
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't get batt status type rc=%d\n", rc);
+ dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
return IRQ_HANDLED;
}
- if (pval.intval == POWER_SUPPLY_STATUS_FULL)
- vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0);
+ stat = stat & BATTERY_CHARGER_STATUS_MASK;
+ smblib_pl_handle_chg_state_change(chg, stat);
+ power_supply_changed(chg->batt_psy);
return IRQ_HANDLED;
}
@@ -1666,7 +1913,7 @@ irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data)
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- smblib_handle_debug(irq, data);
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
power_supply_changed(chg->batt_psy);
return IRQ_HANDLED;
}
@@ -1676,7 +1923,7 @@ irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data)
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- smblib_handle_debug(irq, data);
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
power_supply_changed(chg->usb_psy);
return IRQ_HANDLED;
}
@@ -1729,54 +1976,29 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
}
skip_dpdm_float:
+ power_supply_changed(chg->usb_psy);
smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s %s\n",
irq_data->name, chg->vbus_present ? "attached" : "detached");
return IRQ_HANDLED;
}
-#define MICRO_5P5V 5500000
-#define USB_WEAK_INPUT_MA 1500000
-static bool is_icl_pl_ready(struct smb_charger *chg)
+#define USB_WEAK_INPUT_MA 1400000
+irqreturn_t smblib_handle_icl_change(int irq, void *data)
{
- union power_supply_propval pval = {0, };
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
int icl_ma;
int rc;
- rc = smblib_get_prop_usb_voltage_now(chg, &pval);
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &icl_ma);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't get prop usb voltage rc=%d\n", rc);
- return false;
- }
-
- if (pval.intval <= MICRO_5P5V) {
- rc = smblib_get_charge_param(chg,
- &chg->param.icl_stat, &icl_ma);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't get ICL status rc=%d\n",
- rc);
- return false;
- }
-
- if (icl_ma < USB_WEAK_INPUT_MA)
- return false;
+ dev_err(chg->dev, "Couldn't get ICL status rc=%d\n", rc);
+ return IRQ_HANDLED;
}
- /*
- * Always enable parallel charging when USB INPUT is higher than 5V
- * regardless of the AICL results. Assume chargers above 5V are strong
- */
-
- return true;
-}
-
-irqreturn_t smblib_handle_icl_change(int irq, void *data)
-{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
-
if (chg->mode == PARALLEL_MASTER)
- vote(chg->pl_disable_votable, USBIN_ICL_VOTER,
- !is_icl_pl_ready(chg), 0);
+ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
+ icl_ma >= USB_WEAK_INPUT_MA, 0);
smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
@@ -1813,12 +2035,27 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
if (!rising)
return;
+ if (chg->mode == PARALLEL_MASTER)
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
+
/* the APSD done handler will set the USB supply type */
apsd_result = smblib_get_apsd_result(chg);
smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
apsd_result->name);
}
+static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
+ bool rising, bool qc_charger)
+{
+ if (rising && !qc_charger) {
+ vote(chg->pd_allowed_votable, DEFAULT_VOTER, true, 0);
+ power_supply_changed(chg->usb_psy);
+ }
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: smblib_handle_hvdcp_check_timeout %s\n",
+ rising ? "rising" : "falling");
+}
+
/* triggers when HVDCP is detected */
static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
bool rising)
@@ -1850,8 +2087,9 @@ static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
vote(chg->pd_allowed_votable, DEFAULT_VOTER, true, 0);
break;
case DCP_CHARGER_BIT:
- schedule_delayed_work(&chg->hvdcp_detect_work,
- msecs_to_jiffies(HVDCP_DET_MS));
+ if (chg->wa_flags & QC_CHARGER_DETECTION_WA_BIT)
+ schedule_delayed_work(&chg->hvdcp_detect_work,
+ msecs_to_jiffies(HVDCP_DET_MS));
break;
default:
break;
@@ -1885,6 +2123,10 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
smblib_handle_hvdcp_detect_done(chg,
(bool)(stat & QC_CHARGER_BIT));
+ smblib_handle_hvdcp_check_timeout(chg,
+ (bool)(stat & HVDCP_CHECK_TIMEOUT_BIT),
+ (bool)(stat & QC_CHARGER_BIT));
+
smblib_handle_hvdcp_3p0_auth_done(chg,
(bool)(stat & QC_AUTH_DONE_STATUS_BIT));
@@ -1938,8 +2180,9 @@ static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
!rising || sink_attached, 0);
if (!rising || sink_attached) {
- /* icl votes to disable parallel charging */
- vote(chg->pl_disable_votable, USBIN_ICL_VOTER, true, 0);
+ /* reset both usbin current and voltage votes */
+ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
/* reset taper_end voter here */
vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0);
}
@@ -1964,11 +2207,6 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
}
smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat);
- if (stat & TYPEC_VBUS_ERROR_STATUS_BIT) {
- dev_err(chg->dev, "IRQ: vbus-error rising\n");
- return IRQ_HANDLED;
- }
-
smblib_handle_typec_cc(chg,
(bool)(stat & CC_ATTACHED_BIT));
smblib_handle_typec_debounce_done(chg,
@@ -1977,6 +2215,10 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
power_supply_changed(chg->usb_psy);
+ if (stat & TYPEC_VBUS_ERROR_STATUS_BIT)
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s vbus-error\n",
+ irq_data->name);
+
return IRQ_HANDLED;
}
@@ -2037,8 +2279,7 @@ static void smblib_pl_detect_work(struct work_struct *work)
struct smb_charger *chg = container_of(work, struct smb_charger,
pl_detect_work);
- if (!get_effective_result_locked(chg->pl_disable_votable))
- rerun_election(chg->pl_disable_votable);
+ vote(chg->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
}
#define MINIMUM_PARALLEL_FCC_UA 500000
@@ -2063,7 +2304,7 @@ static void smblib_pl_taper_work(struct work_struct *work)
}
if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
- vote(chg->awake_votable, PL_VOTER, true, 0);
+ vote(chg->awake_votable, PL_TAPER_WORK_RUNNING_VOTER, true, 0);
/* Reduce the taper percent by 25 percent */
chg->pl.taper_percent = chg->pl.taper_percent
* TAPER_RESIDUAL_PERCENT / 100;
@@ -2077,7 +2318,7 @@ static void smblib_pl_taper_work(struct work_struct *work)
* Master back to Fast Charge, get out of this round of taper reduction
*/
done:
- vote(chg->awake_votable, PL_VOTER, false, 0);
+ vote(chg->awake_votable, PL_TAPER_WORK_RUNNING_VOTER, false, 0);
}
static void clear_hdc_work(struct work_struct *work)
@@ -2179,6 +2420,15 @@ static int smblib_create_votables(struct smb_charger *chg)
return rc;
}
+ chg->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
+ VOTE_SET_ANY,
+ smblib_pl_enable_indirect_vote_callback,
+ chg);
+ if (IS_ERR(chg->pl_enable_votable_indirect)) {
+ rc = PTR_ERR(chg->pl_enable_votable_indirect);
+ return rc;
+ }
+
return rc;
}
@@ -2204,6 +2454,10 @@ static void smblib_destroy_votables(struct smb_charger *chg)
destroy_votable(chg->awake_votable);
if (chg->pl_disable_votable)
destroy_votable(chg->pl_disable_votable);
+ if (chg->chg_disable_votable)
+ destroy_votable(chg->chg_disable_votable);
+ if (chg->pl_enable_votable_indirect)
+ destroy_votable(chg->pl_enable_votable_indirect);
}
static void smblib_iio_deinit(struct smb_charger *chg)
@@ -2216,6 +2470,8 @@ static void smblib_iio_deinit(struct smb_charger *chg)
iio_channel_release(chg->iio.usbin_i_chan);
if (!IS_ERR_OR_NULL(chg->iio.usbin_v_chan))
iio_channel_release(chg->iio.usbin_v_chan);
+ if (!IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+ iio_channel_release(chg->iio.batt_i_chan);
}
int smblib_init(struct smb_charger *chg)
@@ -2240,9 +2496,6 @@ int smblib_init(struct smb_charger *chg)
return rc;
}
- chg->bms_psy = power_supply_get_by_name("bms");
- chg->pl.psy = power_supply_get_by_name("parallel");
-
rc = smblib_register_notifier(chg);
if (rc < 0) {
dev_err(chg->dev,
@@ -2250,6 +2503,12 @@ int smblib_init(struct smb_charger *chg)
return rc;
}
+ chg->bms_psy = power_supply_get_by_name("bms");
+ chg->pl.psy = power_supply_get_by_name("parallel");
+ if (chg->pl.psy)
+ vote(chg->pl_disable_votable, PARALLEL_PSY_VOTER,
+ false, 0);
+
break;
case PARALLEL_SLAVE:
break;
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index aeb1eb2c454f..00975e6c1285 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -16,6 +16,7 @@
#include <linux/irqreturn.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/consumer.h>
+#include "storm-watch.h"
enum print_reason {
PR_INTERRUPT = BIT(0),
@@ -23,16 +24,19 @@ enum print_reason {
PR_MISC = BIT(2),
};
-#define DEFAULT_VOTER "DEFAULT_VOTER"
-#define USER_VOTER "USER_VOTER"
-#define PD_VOTER "PD_VOTER"
-#define PL_VOTER "PL_VOTER"
-#define USBIN_ICL_VOTER "USBIN_ICL_VOTER"
-#define CHG_STATE_VOTER "CHG_STATE_VOTER"
-#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER"
-#define TAPER_END_VOTER "TAPER_END_VOTER"
-#define FCC_MAX_RESULT "FCC_MAX_RESULT"
-#define THERMAL_DAEMON "THERMAL_DAEMON"
+#define DEFAULT_VOTER "DEFAULT_VOTER"
+#define USER_VOTER "USER_VOTER"
+#define PD_VOTER "PD_VOTER"
+#define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER"
+#define PARALLEL_PSY_VOTER "PARALLEL_PSY_VOTER"
+#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
+#define USBIN_I_VOTER "USBIN_I_VOTER"
+#define USBIN_V_VOTER "USBIN_V_VOTER"
+#define CHG_STATE_VOTER "CHG_STATE_VOTER"
+#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER"
+#define TAPER_END_VOTER "TAPER_END_VOTER"
+#define FCC_MAX_RESULT_VOTER "FCC_MAX_RESULT_VOTER"
+#define THERMAL_DAEMON_VOTER "THERMAL_DAEMON_VOTER"
enum smb_mode {
PARALLEL_MASTER = 0,
@@ -40,14 +44,19 @@ enum smb_mode {
NUM_MODES,
};
+enum {
+ QC_CHARGER_DETECTION_WA_BIT = BIT(0),
+};
+
struct smb_regulator {
struct regulator_dev *rdev;
struct regulator_desc rdesc;
};
struct smb_irq_data {
- void *parent_data;
- const char *name;
+ void *parent_data;
+ const char *name;
+ struct storm_watch storm_data;
};
struct smb_chg_param {
@@ -68,6 +77,7 @@ struct smb_params {
struct smb_chg_param fv;
struct smb_chg_param usb_icl;
struct smb_chg_param icl_stat;
+ struct smb_chg_param otg_cl;
struct smb_chg_param dc_icl;
struct smb_chg_param dc_icl_pt_lv;
struct smb_chg_param dc_icl_pt_hv;
@@ -93,6 +103,7 @@ struct smb_iio {
struct iio_channel *temp_max_chan;
struct iio_channel *usbin_i_chan;
struct iio_channel *usbin_v_chan;
+ struct iio_channel *batt_i_chan;
};
struct smb_charger {
@@ -137,6 +148,7 @@ struct smb_charger {
struct votable *awake_votable;
struct votable *pl_disable_votable;
struct votable *chg_disable_votable;
+ struct votable *pl_enable_votable_indirect;
/* work */
struct work_struct bms_update_work;
@@ -157,10 +169,16 @@ struct smb_charger {
int thermal_levels;
int *thermal_mitigation;
+ int otg_cl_ua;
+
int fake_capacity;
bool step_chg_enabled;
bool is_hdc;
+ bool chg_done;
+
+ /* workaround flag */
+ u32 wa_flags;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -216,12 +234,22 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_get_prop_batt_health(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_system_temp_level(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_input_current_limited(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_batt_current_now(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_batt_temp(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_step_chg_step(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_set_prop_input_suspend(struct smb_charger *chg,
const union power_supply_propval *val);
@@ -276,6 +304,9 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
int smblib_set_prop_pd_active(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+ union power_supply_propval *val);
+
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
#endif /* __SMB2_CHARGER_H */
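
storm-watch.h itself does not appear in this diff. Based only on the usage
visible here (a storm_watch embedded by value in smb_irq_data, and the
is_storming() helper documented below), a plausible minimal declaration
might look like the following; the field names are assumptions:

	/* Sketch of storm-watch.h, inferred from usage in this patch */
	#include <linux/ktime.h>
	#include <linux/mutex.h>

	struct storm_watch {
		bool		enabled;
		int		storm_count;	/* events seen inside the window */
		int		max_storm_count;	/* count that flags a storm */
		int		storm_period_ms;	/* window between events */
		ktime_t		last_kt;	/* timestamp of the previous event */
		struct mutex	storm_lock;
	};

	bool is_storming(struct storm_watch *data);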
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index 0d5222ec08f8..8a49a8fb38ba 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -32,6 +32,7 @@
#define BATTERY_CHARGER_STATUS_1_REG (CHGR_BASE + 0x06)
#define BVR_INITIAL_RAMP_BIT BIT(7)
#define CC_SOFT_TERMINATE_BIT BIT(6)
+#define STEP_CHARGING_STATUS_SHIFT 3
#define STEP_CHARGING_STATUS_MASK GENMASK(5, 3)
#define BATTERY_CHARGER_STATUS_MASK GENMASK(2, 0)
enum {
@@ -40,8 +41,9 @@ enum {
FAST_CHARGE,
FULLON_CHARGE,
TAPER_CHARGE,
- COMPLETED_CHARGE,
+ TERMINATE_CHARGE,
INHIBIT_CHARGE,
+ DISABLE_CHARGE,
};
#define BATTERY_CHARGER_STATUS_2_REG (CHGR_BASE + 0x07)
@@ -364,6 +366,9 @@ enum {
#define OTG_EN_SRC_CFG_BIT BIT(1)
#define CONCURRENT_MODE_CFG_BIT BIT(0)
+#define OTG_ENG_OTG_CFG_REG (OTG_BASE + 0xC0)
+#define ENG_BUCKBOOST_HALT1_8_MODE_BIT BIT(0)
+
/* BATIF Peripheral Registers */
/* BATIF Interrupt Bits */
#define BAT_7_RT_STS_BIT BIT(7)
@@ -425,7 +430,7 @@ enum {
#define APSD_STATUS_REG (USBIN_BASE + 0x07)
#define APSD_STATUS_7_BIT BIT(7)
-#define APSD_STATUS_6_BIT BIT(6)
+#define HVDCP_CHECK_TIMEOUT_BIT BIT(6)
#define SLOW_PLUGIN_TIMEOUT_BIT BIT(5)
#define ENUMERATION_DONE_BIT BIT(4)
#define VADP_CHANGE_DONE_AFTER_AUTH_BIT BIT(3)
@@ -585,6 +590,7 @@ enum {
#define FORCE_FLOAT_SDP_CFG_BIT BIT(0)
#define TAPER_TIMER_SEL_CFG_REG (USBIN_BASE + 0x64)
+#define TYPEC_SPARE_CFG_BIT BIT(7)
#define TAPER_TIMER_SEL_MASK GENMASK(1, 0)
#define USBIN_LOAD_CFG_REG (USBIN_BASE + 0x65)
@@ -606,6 +612,8 @@ enum {
#define TYPEC_VBUS_ASSERT_INT_EN_BIT BIT(0)
#define TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG (USBIN_BASE + 0x68)
+#define EXIT_SNK_BASED_ON_CC BIT(7)
+#define VCONN_EN_ORIENTATION_BIT BIT(6)
#define TYPEC_VCONN_OVERCURR_INT_EN_BIT BIT(5)
#define VCONN_EN_SRC_BIT BIT(4)
#define VCONN_EN_VALUE_BIT BIT(3)
@@ -761,6 +769,13 @@ enum {
ZIN_ICL_HV_MAX_MV = 11000,
};
+#define DC_ENG_SSUPPLY_CFG3_REG (DCIN_BASE + 0xC2)
+#define ENG_SSUPPLY_HI_CAP_BIT BIT(6)
+#define ENG_SSUPPLY_HI_RES_BIT BIT(5)
+#define ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT BIT(3)
+#define ENG_SSUPPLY_CFG_SYSOV_TH_4P8_BIT BIT(2)
+#define ENG_SSUPPLY_5V_OV_OPT_BIT BIT(0)
+
/* MISC Peripheral Registers */
#define REVISION1_REG (MISC_BASE + 0x00)
#define DIG_MINOR_MASK GENMASK(7, 0)
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index 5b4e7bcccdce..e8ec2f49f7eb 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -25,6 +25,7 @@
#include <linux/qpnp/qpnp-revid.h>
#include "smb-reg.h"
#include "smb-lib.h"
+#include "storm-watch.h"
#include "pmic-voter.h"
#define SMB138X_DEFAULT_FCC_UA 1000000
@@ -47,8 +48,8 @@ static struct smb_params v1_params = {
.name = "fast charge current",
.reg = FAST_CHARGE_CURRENT_CFG_REG,
.min_u = 0,
- .max_u = 5000000,
- .step_u = 50000,
+ .max_u = 4500000,
+ .step_u = 25000,
},
.fv = {
.name = "float voltage",
@@ -394,6 +395,7 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_INPUT_SUSPEND,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
};
@@ -430,6 +432,9 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
rc = smblib_get_charge_param(chg, &chg->param.fcc,
&val->intval);
break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ rc = smblib_get_prop_slave_current_now(chg, val);
+ break;
case POWER_SUPPLY_PROP_CHARGER_TEMP:
rc = smblib_get_prop_charger_temp(chg, val);
break;
@@ -748,55 +753,170 @@ static int smb138x_determine_initial_status(struct smb138x *chip)
**************************/
struct smb138x_irq_info {
- const char *name;
- const irq_handler_t handler;
+ const char *name;
+ const irq_handler_t handler;
+ const struct storm_watch storm_data;
};
static const struct smb138x_irq_info smb138x_irqs[] = {
/* CHARGER IRQs */
- { "chg-error", smblib_handle_debug },
- { "chg-state-change", smblib_handle_debug },
- { "step-chg-state-change", smblib_handle_debug },
- { "step-chg-soc-update-fail", smblib_handle_debug },
- { "step-chg-soc-update-request", smblib_handle_debug },
+ {
+ .name = "chg-error",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "chg-state-change",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "step-chg-state-change",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "step-chg-soc-update-fail",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "step-chg-soc-update-request",
+ .handler = smblib_handle_debug,
+ },
/* OTG IRQs */
- { "otg-fail", smblib_handle_debug },
- { "otg-overcurrent", smblib_handle_debug },
- { "otg-oc-dis-sw-sts", smblib_handle_debug },
- { "testmode-change-detect", smblib_handle_debug },
+ {
+ .name = "otg-fail",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "otg-overcurrent",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "otg-oc-dis-sw-sts",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "testmode-change-detect",
+ .handler = smblib_handle_debug,
+ },
/* BATTERY IRQs */
- { "bat-temp", smblib_handle_batt_psy_changed },
- { "bat-ocp", smblib_handle_batt_psy_changed },
- { "bat-ov", smblib_handle_batt_psy_changed },
- { "bat-low", smblib_handle_batt_psy_changed },
- { "bat-therm-or-id-missing", smblib_handle_batt_psy_changed },
- { "bat-terminal-missing", smblib_handle_batt_psy_changed },
+ {
+ .name = "bat-temp",
+ .handler = smblib_handle_batt_psy_changed,
+ },
+ {
+ .name = "bat-ocp",
+ .handler = smblib_handle_batt_psy_changed,
+ },
+ {
+ .name = "bat-ov",
+ .handler = smblib_handle_batt_psy_changed,
+ },
+ {
+ .name = "bat-low",
+ .handler = smblib_handle_batt_psy_changed,
+ },
+ {
+ .name = "bat-therm-or-id-missing",
+ .handler = smblib_handle_batt_psy_changed,
+ },
+ {
+ .name = "bat-terminal-missing",
+ .handler = smblib_handle_batt_psy_changed,
+ },
/* USB INPUT IRQs */
- { "usbin-collapse", smblib_handle_debug },
- { "usbin-lt-3p6v", smblib_handle_debug },
- { "usbin-uv", smblib_handle_debug },
- { "usbin-ov", smblib_handle_debug },
- { "usbin-plugin", smblib_handle_usb_plugin },
- { "usbin-src-change", smblib_handle_usb_source_change },
- { "usbin-icl-change", smblib_handle_debug },
- { "type-c-change", smblib_handle_usb_typec_change },
+ {
+ .name = "usbin-collapse",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "usbin-lt-3p6v",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "usbin-uv",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "usbin-ov",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "usbin-plugin",
+ .handler = smblib_handle_usb_plugin,
+ },
+ {
+ .name = "usbin-src-change",
+ .handler = smblib_handle_usb_source_change,
+ },
+ {
+ .name = "usbin-icl-change",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "type-c-change",
+ .handler = smblib_handle_usb_typec_change,
+ },
/* DC INPUT IRQs */
- { "dcin-collapse", smblib_handle_debug },
- { "dcin-lt-3p6v", smblib_handle_debug },
- { "dcin-uv", smblib_handle_debug },
- { "dcin-ov", smblib_handle_debug },
- { "dcin-plugin", smblib_handle_debug },
- { "div2-en-dg", smblib_handle_debug },
- { "dcin-icl-change", smblib_handle_debug },
+ {
+ .name = "dcin-collapse",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "dcin-lt-3p6v",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "dcin-uv",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "dcin-ov",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "dcin-plugin",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "div2-en-dg",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "dcin-icl-change",
+ .handler = smblib_handle_debug,
+ },
/* MISCELLANEOUS IRQs */
- { "wdog-snarl", smblib_handle_debug },
- { "wdog-bark", smblib_handle_debug },
- { "aicl-fail", smblib_handle_debug },
- { "aicl-done", smblib_handle_debug },
- { "high-duty-cycle", smblib_handle_debug },
- { "input-current-limiting", smblib_handle_debug },
- { "temperature-change", smblib_handle_debug },
- { "switcher-power-ok", smblib_handle_debug },
+ {
+ .name = "wdog-snarl",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "wdog-bark",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "aicl-fail",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "aicl-done",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "high-duty-cycle",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "input-current-limiting",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "temperature-change",
+ .handler = smblib_handle_debug,
+ },
+ {
+ .name = "switcher-power-ok",
+ .handler = smblib_handle_debug,
+ },
};
static int smb138x_get_irq_index_byname(const char *irq_name)
@@ -837,6 +957,7 @@ static int smb138x_request_interrupt(struct smb138x *chip,
irq_data->parent_data = chip;
irq_data->name = irq_name;
+ irq_data->storm_data = smb138x_irqs[irq_index].storm_data;
rc = devm_request_threaded_irq(chg->dev, irq, NULL,
smb138x_irqs[irq_index].handler,
@@ -1008,6 +1129,15 @@ static int smb138x_slave_probe(struct smb138x *chip)
return rc;
}
+ /* enable parallel current sensing */
+ rc = smblib_masked_write(chg, CFG_REG,
+ VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable parallel current sensing rc=%d\n",
+ rc);
+ return rc;
+ }
+
/* keep at the end of probe, ready to serve before notifying others */
rc = smb138x_init_parallel_psy(chip);
if (rc < 0) {
diff --git a/drivers/power/qcom-charger/storm-watch.c b/drivers/power/qcom-charger/storm-watch.c
new file mode 100644
index 000000000000..90fec12bd742
--- /dev/null
+++ b/drivers/power/qcom-charger/storm-watch.c
@@ -0,0 +1,57 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "storm-watch.h"
+
+/**
+ * is_storming() - check whether an event is storming
+ *
+ * @data: Data for tracking an event storm
+ *
+ * Return: true if a storm has been detected, false otherwise.
+ */
+bool is_storming(struct storm_watch *data)
+{
+ ktime_t curr_kt, delta_kt;
+ bool is_storming = false;
+
+ if (!data)
+ return false;
+
+ if (!data->enabled)
+ return false;
+
+ /* max storm count must be greater than 0 */
+ if (data->max_storm_count <= 0)
+ return false;
+
+ /* the period threshold must be greater than 0ms */
+ if (data->storm_period_ms <= 0)
+ return false;
+
+ curr_kt = ktime_get_boottime();
+ delta_kt = ktime_sub(curr_kt, data->last_kt);
+
+ if (ktime_to_ms(delta_kt) < data->storm_period_ms)
+ data->storm_count++;
+ else
+ data->storm_count = 0;
+
+ if (data->storm_count > data->max_storm_count) {
+ is_storming = true;
+ data->storm_count = 0;
+ }
+
+ data->last_kt = curr_kt;
+ return is_storming;
+}
diff --git a/drivers/power/qcom-charger/storm-watch.h b/drivers/power/qcom-charger/storm-watch.h
new file mode 100644
index 000000000000..44b9d64d8a87
--- /dev/null
+++ b/drivers/power/qcom-charger/storm-watch.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STORM_WATCH_H
+#define __STORM_WATCH_H
+#include <linux/ktime.h>
+
+/**
+ * struct storm_watch - data used to track an event storm
+ *
+ * @enabled: true if storm detection is enabled for this event
+ * @storm_period_ms: The maximum time interval between two events. If this limit
+ * is exceeded then the event chain will be broken and removed
+ * from consideration for a storm.
+ * @max_storm_count: The number of chained events required to trigger a storm.
+ * @storm_count: The current number of chained events.
+ * @last_kt: Kernel time of the last event seen.
+ */
+struct storm_watch {
+ bool enabled;
+ int storm_period_ms;
+ int max_storm_count;
+ int storm_count;
+ ktime_t last_kt;
+};
+
+bool is_storming(struct storm_watch *data);
+#endif
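Taken together, storm-watch counts events whose inter-arrival time stays under storm_period_ms and reports a storm once the chain grows past max_storm_count, resetting the chain afterwards. A minimal userspace sketch of the same algorithm, with ktime_get_boottime() replaced by an explicit millisecond timestamp so it can run standalone:

#include <stdbool.h>
#include <stdio.h>

struct storm_watch {
	bool enabled;
	int storm_period_ms;
	int max_storm_count;
	int storm_count;
	long long last_ms;	/* stands in for last_kt */
};

static bool is_storming(struct storm_watch *w, long long now_ms)
{
	bool storming = false;

	if (!w || !w->enabled || w->max_storm_count <= 0 ||
	    w->storm_period_ms <= 0)
		return false;

	/* chain the event if it arrived within the period, else reset */
	if (now_ms - w->last_ms < w->storm_period_ms)
		w->storm_count++;
	else
		w->storm_count = 0;

	if (w->storm_count > w->max_storm_count) {
		storming = true;
		w->storm_count = 0;
	}

	w->last_ms = now_ms;
	return storming;
}

int main(void)
{
	struct storm_watch w = { true, 100, 3, 0, 0 };
	long long t[] = { 0, 50, 100, 150, 200, 1000, 1050 };
	int i;

	for (i = 0; i < 7; i++)
		printf("t=%lldms storming=%d\n", t[i], is_storming(&w, t[i]));
	return 0;
}

Fed the timestamps above, it reports a storm only at t=150 ms, the fourth event to land inside a 100 ms window.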
diff --git a/drivers/power/qcom/msm-core.c b/drivers/power/qcom/msm-core.c
index e990425bd63a..727a768e63eb 100644
--- a/drivers/power/qcom/msm-core.c
+++ b/drivers/power/qcom/msm-core.c
@@ -240,10 +240,10 @@ void trigger_cpu_pwr_stats_calc(void)
if (cpu_node->sensor_id < 0)
continue;
- if (cpu_node->temp == prev_temp[cpu])
+ if (cpu_node->temp == prev_temp[cpu]) {
sensor_get_temp(cpu_node->sensor_id, &temp);
-
- cpu_node->temp = temp / scaling_factor;
+ cpu_node->temp = temp / scaling_factor;
+ }
prev_temp[cpu] = cpu_node->temp;
@@ -373,7 +373,7 @@ static int update_userspace_power(struct sched_params __user *argp)
{
int i;
int ret;
- int cpu;
+ int cpu = -1;
struct cpu_activity_info *node;
struct cpu_static_info *sp, *clear_sp;
int cpumask, cluster, mpidr;
@@ -396,7 +396,7 @@ static int update_userspace_power(struct sched_params __user *argp)
}
}
- if (cpu >= num_possible_cpus())
+ if ((cpu < 0) || (cpu >= num_possible_cpus()))
return -EINVAL;
node = &activity[cpu];
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 75a0de0c532b..2f109013f723 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -36,6 +36,7 @@
#define EMERGENCY_DLOAD_MAGIC1 0x322A4F99
#define EMERGENCY_DLOAD_MAGIC2 0xC67E4350
#define EMERGENCY_DLOAD_MAGIC3 0x77777777
+#define EMMC_DLOAD_TYPE 0x2
#define SCM_IO_DISABLE_PMIC_ARBITER 1
#define SCM_IO_DEASSERT_PS_HOLD 2
@@ -46,12 +47,20 @@
static int restart_mode;
-void *restart_reason;
+static void *restart_reason, *dload_type_addr;
static bool scm_pmic_arbiter_disable_supported;
static bool scm_deassert_ps_hold_supported;
/* Download mode master kill-switch */
static void __iomem *msm_ps_hold;
static phys_addr_t tcsr_boot_misc_detect;
+static void scm_disable_sdi(void);
+
+/* The value can only be changed once at runtime: there is no TZ API
+ * to re-enable the registers, so SDI cannot be re-enabled once it
+ * has been bypassed.
+ */
+static int download_mode = 1;
+static struct kobject dload_kobj;
#ifdef CONFIG_QCOM_DLOAD_MODE
#define EDL_MODE_PROP "qcom,msm-imem-emergency_download_mode"
@@ -64,9 +73,23 @@ static void *emergency_dload_mode_addr;
static bool scm_dload_supported;
static int dload_set(const char *val, struct kernel_param *kp);
-static int download_mode = 1;
+/* interface for exporting attributes */
+struct reset_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count);
+};
+#define to_reset_attr(_attr) \
+ container_of(_attr, struct reset_attribute, attr)
+#define RESET_ATTR(_name, _mode, _show, _store) \
+ static struct reset_attribute reset_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
module_param_call(download_mode, dload_set, param_get_int,
&download_mode, 0644);
+
static int panic_prep_restart(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -170,7 +193,10 @@ static int dload_set(const char *val, struct kernel_param *kp)
return 0;
}
#else
-#define set_dload_mode(x) do {} while (0)
+static void set_dload_mode(int on)
+{
+ return;
+}
static void enable_emergency_dload_mode(void)
{
@@ -183,6 +209,26 @@ static bool get_dload_mode(void)
}
#endif
+static void scm_disable_sdi(void)
+{
+ int ret;
+ struct scm_desc desc = {
+ .args[0] = 1,
+ .args[1] = 0,
+ .arginfo = SCM_ARGS(2),
+ };
+
+ /* Needed to bypass debug image on some chips */
+ if (!is_scm_armv8())
+ ret = scm_call_atomic2(SCM_SVC_BOOT,
+ SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
+ else
+ ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+ SCM_WDOG_DEBUG_BOOT_PART), &desc);
+ if (ret)
+ pr_err("Failed to disable secure wdog debug: %d\n", ret);
+}
+
void msm_set_restart_mode(int mode)
{
restart_mode = mode;
@@ -320,13 +366,6 @@ static void deassert_ps_hold(void)
static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
{
- int ret;
- struct scm_desc desc = {
- .args[0] = 1,
- .args[1] = 0,
- .arginfo = SCM_ARGS(2),
- };
-
pr_notice("Going down for restart now\n");
msm_restart_prepare(cmd);
@@ -341,16 +380,7 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
msm_trigger_wdog_bite();
#endif
- /* Needed to bypass debug image on some chips */
- if (!is_scm_armv8())
- ret = scm_call_atomic2(SCM_SVC_BOOT,
- SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
- else
- ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
- SCM_WDOG_DEBUG_BOOT_PART), &desc);
- if (ret)
- pr_err("Failed to disable secure wdog debug: %d\n", ret);
-
+ scm_disable_sdi();
halt_spmi_pmic_arbiter();
deassert_ps_hold();
@@ -359,27 +389,11 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
static void do_msm_poweroff(void)
{
- int ret;
- struct scm_desc desc = {
- .args[0] = 1,
- .args[1] = 0,
- .arginfo = SCM_ARGS(2),
- };
-
pr_notice("Powering off the SoC\n");
-#ifdef CONFIG_QCOM_DLOAD_MODE
+
set_dload_mode(0);
-#endif
+ scm_disable_sdi();
qpnp_pon_system_pwr_off(PON_POWER_OFF_SHUTDOWN);
- /* Needed to bypass debug image on some chips */
- if (!is_scm_armv8())
- ret = scm_call_atomic2(SCM_SVC_BOOT,
- SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
- else
- ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
- SCM_WDOG_DEBUG_BOOT_PART), &desc);
- if (ret)
- pr_err("Failed to disable wdog debug: %d\n", ret);
halt_spmi_pmic_arbiter();
deassert_ps_hold();
@@ -389,6 +403,84 @@ static void do_msm_poweroff(void)
return;
}
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct reset_attribute *reset_attr = to_reset_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (reset_attr->show)
+ ret = reset_attr->show(kobj, attr, buf);
+
+ return ret;
+}
+
+static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct reset_attribute *reset_attr = to_reset_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (reset_attr->store)
+ ret = reset_attr->store(kobj, attr, buf, count);
+
+ return ret;
+}
+
+static const struct sysfs_ops reset_sysfs_ops = {
+ .show = attr_show,
+ .store = attr_store,
+};
+
+static struct kobj_type reset_ktype = {
+ .sysfs_ops = &reset_sysfs_ops,
+};
+
+static ssize_t show_emmc_dload(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ uint32_t read_val, show_val;
+
+ read_val = __raw_readl(dload_type_addr);
+ if (read_val == EMMC_DLOAD_TYPE)
+ show_val = 1;
+ else
+ show_val = 0;
+
+ return snprintf(buf, sizeof(show_val), "%u\n", show_val);
+}
+
+static ssize_t store_emmc_dload(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t enabled;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &enabled);
+ if (ret < 0)
+ return ret;
+
+ if (!((enabled == 0) || (enabled == 1)))
+ return -EINVAL;
+
+ if (enabled == 1)
+ __raw_writel(EMMC_DLOAD_TYPE, dload_type_addr);
+ else
+ __raw_writel(0, dload_type_addr);
+
+ return count;
+}
+RESET_ATTR(emmc_dload, 0644, show_emmc_dload, store_emmc_dload);
+
+static struct attribute *reset_attrs[] = {
+ &reset_attr_emmc_dload.attr,
+ NULL
+};
+
+static struct attribute_group reset_attr_group = {
+ .attrs = reset_attrs,
+};
+
static int msm_restart_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -419,6 +511,33 @@ static int msm_restart_probe(struct platform_device *pdev)
pr_err("unable to map imem EDLOAD mode offset\n");
}
+ np = of_find_compatible_node(NULL, NULL,
+ "qcom,msm-imem-dload-type");
+ if (!np) {
+ pr_err("unable to find DT imem dload-type node\n");
+ goto skip_sysfs_create;
+ } else {
+ dload_type_addr = of_iomap(np, 0);
+ if (!dload_type_addr) {
+ pr_err("unable to map imem dload-type offset\n");
+ goto skip_sysfs_create;
+ }
+ }
+
+ ret = kobject_init_and_add(&dload_kobj, &reset_ktype,
+ kernel_kobj, "%s", "dload");
+ if (ret) {
+ pr_err("%s:Error in creation kobject_add\n", __func__);
+ kobject_put(&dload_kobj);
+ goto skip_sysfs_create;
+ }
+
+ ret = sysfs_create_group(&dload_kobj, &reset_attr_group);
+ if (ret) {
+ pr_err("%s:Error in creation sysfs_create_group\n", __func__);
+ kobject_del(&dload_kobj);
+ }
+skip_sysfs_create:
#endif
np = of_find_compatible_node(NULL, NULL,
"qcom,msm-imem-restart_reason");
@@ -454,6 +573,8 @@ static int msm_restart_probe(struct platform_device *pdev)
download_mode = scm_is_secure_device();
set_dload_mode(download_mode);
+ if (!download_mode)
+ scm_disable_sdi();
return 0;
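With the probe changes above, the new control is registered on kernel_kobj under the name "dload", so it should surface as /sys/kernel/dload/emmc_dload: writing 1 stores EMMC_DLOAD_TYPE in the imem dload-type word, writing 0 clears it, and reading reports which of the two is set. A hedged userspace sketch of driving it; the sysfs path is inferred from the kobject and attribute names above, not stated in the patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	ssize_t n;
	int fd = open("/sys/kernel/dload/emmc_dload", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* request eMMC download mode on the next reset */
	if (write(fd, "1", 1) != 1)
		perror("write");

	/* read back: "1" if EMMC_DLOAD_TYPE is set, "0" otherwise */
	n = pread(fd, buf, sizeof(buf) - 1, 0);
	if (n > 0) {
		buf[n] = '\0';
		printf("emmc_dload=%s", buf);
	}

	close(fd);
	return 0;
}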
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index ac71f2c75472..6d0c1fbe566b 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -1879,7 +1879,7 @@ static int qpnp_parse_dt_config(struct platform_device *pdev,
int rc, enable, lut_entry_size, list_size, i;
const char *lable;
const __be32 *prop;
- u64 size;
+ u32 size;
struct device_node *node;
int found_pwm_subnode = 0;
int found_lpg_subnode = 0;
@@ -1968,11 +1968,18 @@ static int qpnp_parse_dt_config(struct platform_device *pdev,
return rc;
prop = of_get_address_by_name(pdev->dev.of_node, QPNP_LPG_LUT_BASE,
- &size, 0);
+ 0, 0);
if (!prop) {
chip->flags |= QPNP_PWM_LUT_NOT_SUPPORTED;
} else {
lpg_config->lut_base_addr = be32_to_cpu(*prop);
+ rc = of_property_read_u32(of_node, "qcom,lpg-lut-size", &size);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error reading qcom,lpg-lut-size, rc=%d\n",
+ rc);
+ return rc;
+ }
+
/*
* Each entry of LUT is of 2 bytes for generic LUT and of 1 byte
* for KPDBL/GLED LUT.
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 27a5deb1213e..80a9f0ee288b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4223,7 +4223,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
debugfs_create_file("consumers", 0444, rdev->debugfs, rdev,
&reg_consumers_fops);
- reg = regulator_get(NULL, rdev->desc->name);
+ reg = regulator_get(NULL, rdev_get_name(rdev));
if (IS_ERR(reg) || reg == NULL) {
pr_err("Error-Bad Function Input\n");
goto error;
diff --git a/drivers/regulator/cpr3-mmss-regulator.c b/drivers/regulator/cpr3-mmss-regulator.c
index b0439871c41a..59cbe7460750 100644
--- a/drivers/regulator/cpr3-mmss-regulator.c
+++ b/drivers/regulator/cpr3-mmss-regulator.c
@@ -217,6 +217,14 @@ msmcobalt_v1_rev0_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
};
static const int msmcobalt_v2_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
+ 516000,
+ 628000,
+ 752000,
+ 924000,
+};
+
+static const int
+msmcobalt_v2_rev0_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
616000,
740000,
828000,
@@ -759,7 +767,10 @@ static int cpr3_msm8996_mmss_calculate_open_loop_voltages(
goto done;
}
- if (vreg->thread->ctrl->soc_revision == MSMCOBALT_V2_SOC_ID)
+ if (vreg->thread->ctrl->soc_revision == MSMCOBALT_V2_SOC_ID
+ && fuse->cpr_fusing_rev == 0)
+ ref_volt = msmcobalt_v2_rev0_mmss_fuse_ref_volt;
+ else if (vreg->thread->ctrl->soc_revision == MSMCOBALT_V2_SOC_ID)
ref_volt = msmcobalt_v2_mmss_fuse_ref_volt;
else if (vreg->thread->ctrl->soc_revision == MSMCOBALT_V1_SOC_ID
&& fuse->cpr_fusing_rev == 0)
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index cd02debc37aa..0df2b80ceca5 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -1264,6 +1264,8 @@ static void cprh_controller_program_sdelta(
mb();
}
+static int cprh_regulator_aging_adjust(struct cpr3_controller *ctrl);
+
/**
* cpr3_regulator_init_cprh() - performs hardware initialization at the
* controller and thread level required for CPRh operation.
@@ -1290,6 +1292,16 @@ static int cpr3_regulator_init_cprh(struct cpr3_controller *ctrl)
return -EINVAL;
}
+ rc = cprh_regulator_aging_adjust(ctrl);
+ if (rc && rc != -ETIMEDOUT) {
+ /*
+ * A timeout (-ETIMEDOUT) means the aging sensors were not
+ * available; that case is tolerated, while any other aging
+ * measurement failure aborts initialization.
+ */
+ cpr3_err(ctrl, "CPR aging adjustment failed, rc=%d\n", rc);
+ return rc;
+ }
+
cprh_controller_program_sdelta(ctrl);
rc = cpr3_regulator_init_cprh_corners(&ctrl->thread[0].vreg[0]);
@@ -3346,7 +3358,7 @@ static int cpr3_regulator_measure_aging(struct cpr3_controller *ctrl,
u32 mask, reg, result, quot_min, quot_max, sel_min, sel_max;
u32 quot_min_scaled, quot_max_scaled;
u32 gcnt, gcnt_ref, gcnt0_restore, gcnt1_restore, irq_restore;
- u32 cont_dly_restore, up_down_dly_restore = 0;
+ u32 ro_mask_restore, cont_dly_restore, up_down_dly_restore = 0;
int quot_delta, quot_delta_scaled, quot_delta_scaled_sum;
int *quot_delta_results;
int rc, rc2, i, aging_measurement_count, filtered_count;
@@ -3379,7 +3391,8 @@ static int cpr3_regulator_measure_aging(struct cpr3_controller *ctrl,
/* Switch from HW to SW closed-loop if necessary */
if (ctrl->supports_hw_closed_loop) {
- if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4 ||
+ ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
@@ -3397,6 +3410,10 @@ static int cpr3_regulator_measure_aging(struct cpr3_controller *ctrl,
cpr3_write(ctrl, CPR3_REG_GCNT(0), gcnt);
cpr3_write(ctrl, CPR3_REG_GCNT(1), gcnt);
+ /* Unmask all RO's */
+ ro_mask_restore = cpr3_read(ctrl, CPR3_REG_RO_MASK(0));
+ cpr3_write(ctrl, CPR3_REG_RO_MASK(0), 0);
+
/*
* Mask all sensors except for the one to measure and bypass all
* sensors in collapsible domains.
@@ -3535,6 +3552,8 @@ cleanup:
cpr3_write(ctrl, CPR3_REG_IRQ_EN, irq_restore);
+ cpr3_write(ctrl, CPR3_REG_RO_MASK(0), ro_mask_restore);
+
cpr3_write(ctrl, CPR3_REG_GCNT(0), gcnt0_restore);
cpr3_write(ctrl, CPR3_REG_GCNT(1), gcnt1_restore);
@@ -3565,7 +3584,8 @@ cleanup:
CPR3_IRQ_UP | CPR3_IRQ_DOWN | CPR3_IRQ_MID);
if (ctrl->supports_hw_closed_loop) {
- if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4 ||
+ ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
ctrl->use_hw_closed_loop
@@ -3671,14 +3691,16 @@ static void cpr3_regulator_readjust_volt_and_quot(struct cpr3_regulator *vreg,
static void cpr3_regulator_set_aging_ref_adjustment(
struct cpr3_controller *ctrl, int ref_adjust_volt)
{
+ struct cpr3_regulator *vreg;
int i, j;
for (i = 0; i < ctrl->thread_count; i++) {
for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
- cpr3_regulator_readjust_volt_and_quot(
- &ctrl->thread[i].vreg[j],
- ctrl->aging_ref_adjust_volt,
- ref_adjust_volt);
+ vreg = &ctrl->thread[i].vreg[j];
+ cpr3_regulator_readjust_volt_and_quot(vreg,
+ ctrl->aging_ref_adjust_volt, ref_adjust_volt);
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH)
+ cprh_adjust_voltages_for_apm(vreg);
}
}
@@ -3867,6 +3889,126 @@ cleanup:
}
/**
+ * cprh_regulator_aging_adjust() - adjust the target quotients and open-loop
+ * voltages for CPRh regulators based on the output of CPR aging
+ * sensors
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_regulator_aging_adjust(struct cpr3_controller *ctrl)
+{
+ int i, j, id, rc, rc2, aging_volt, init_volt;
+ int max_aging_volt = 0;
+ u32 reg;
+
+ if (!ctrl->aging_required || !ctrl->cpr_enabled)
+ return 0;
+
+ if (!ctrl->vdd_regulator) {
+ cpr3_err(ctrl, "vdd-supply regulator missing\n");
+ return -ENODEV;
+ }
+
+ init_volt = regulator_get_voltage(ctrl->vdd_regulator);
+ if (init_volt < 0) {
+ cpr3_err(ctrl, "could not get vdd-supply voltage, rc=%d\n",
+ init_volt);
+ return init_volt;
+ }
+
+ if (init_volt > ctrl->aging_ref_volt) {
+ cpr3_info(ctrl, "unable to perform CPR aging measurement as vdd=%d uV > aging voltage=%d uV\n",
+ init_volt, ctrl->aging_ref_volt);
+ return 0;
+ }
+
+ /* Verify that none of the aging sensors are currently masked. */
+ for (i = 0; i < ctrl->aging_sensor_count; i++) {
+ id = ctrl->aging_sensor[i].sensor_id;
+ reg = cpr3_read(ctrl, CPR3_REG_SENSOR_MASK_READ(id));
+ if (reg & BIT(id % 32)) {
+ cpr3_info(ctrl, "unable to perform CPR aging measurement as CPR sensor %d is masked\n",
+ id);
+ return 0;
+ }
+ }
+
+ rc = regulator_set_voltage(ctrl->vdd_regulator, ctrl->aging_ref_volt,
+ INT_MAX);
+ if (rc) {
+ cpr3_err(ctrl, "unable to set vdd-supply to aging voltage=%d uV, rc=%d\n",
+ ctrl->aging_ref_volt, rc);
+ return rc;
+ }
+
+ if (ctrl->aging_vdd_mode) {
+ rc = regulator_set_mode(ctrl->vdd_regulator,
+ ctrl->aging_vdd_mode);
+ if (rc) {
+ cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+ ctrl->aging_vdd_mode, rc);
+ goto cleanup;
+ }
+ }
+
+ /* Perform aging measurement on all aging sensors */
+ for (i = 0; i < ctrl->aging_sensor_count; i++) {
+ for (j = 0; j < CPR3_AGING_RETRY_COUNT; j++) {
+ rc = cpr3_regulator_measure_aging(ctrl,
+ &ctrl->aging_sensor[i]);
+ if (!rc)
+ break;
+ }
+
+ if (!rc) {
+ aging_volt =
+ cpr3_voltage_adjustment(
+ ctrl->aging_sensor[i].ro_scale,
+ ctrl->aging_sensor[i].measured_quot_diff
+ - ctrl->aging_sensor[i].init_quot_diff);
+ max_aging_volt = max(max_aging_volt, aging_volt);
+ } else {
+ cpr3_err(ctrl, "CPR aging measurement failed after %d tries, rc=%d\n",
+ j, rc);
+ ctrl->aging_failed = true;
+ ctrl->aging_required = false;
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ /* Adjust the CPR target quotients according to the aging measurement */
+ if (!rc) {
+ cpr3_regulator_set_aging_ref_adjustment(ctrl, max_aging_volt);
+
+ cpr3_info(ctrl, "aging measurement successful; aging reference adjustment voltage=%d uV\n",
+ ctrl->aging_ref_adjust_volt);
+ ctrl->aging_succeeded = true;
+ ctrl->aging_required = false;
+ }
+
+ rc2 = regulator_set_voltage(ctrl->vdd_regulator, init_volt, INT_MAX);
+ if (rc2) {
+ cpr3_err(ctrl, "unable to reset vdd-supply to initial voltage=%d uV, rc=%d\n",
+ init_volt, rc2);
+ return rc2;
+ }
+
+ if (ctrl->aging_complete_vdd_mode) {
+ rc2 = regulator_set_mode(ctrl->vdd_regulator,
+ ctrl->aging_complete_vdd_mode);
+ if (rc2) {
+ cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+ ctrl->aging_complete_vdd_mode, rc2);
+ return rc2;
+ }
+ }
+
+ return rc;
+}
+
+/**
* cpr3_regulator_update_ctrl_state() - update the state of the CPR controller
* to reflect the corners used by all CPR3 regulators as well as
* the CPR operating mode and perform aging adjustments if needed
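cprh_regulator_aging_adjust() above turns the worst measured quotient drift into a voltage margin via cpr3_voltage_adjustment(ro_scale, measured_quot_diff - init_quot_diff), with ro_scale expressed in QUOT/V (see the "aging RO scale = %u QUOT/V" debug print added to cprh_kbss_init_aging further down). Assuming the conversion is a simple scale-and-round of the quotient delta — the exact rounding inside cpr3_voltage_adjustment() is not shown in this patch — the arithmetic looks like this:

#include <stdio.h>

/*
 * Presumed conversion: quotient delta (QUOT) divided by the RO scale
 * (QUOT/V) gives volts of aging drift; scale to microvolts and round
 * away from zero so the margin is never under-estimated.  This mirrors
 * what cpr3_voltage_adjustment() is expected to do, not a verbatim copy.
 */
static int aging_adjust_uv(int ro_scale, int quot_delta)
{
	long long uv = (long long)quot_delta * 1000000;

	return (int)((uv + (uv >= 0 ? ro_scale - 1 : 1 - ro_scale)) /
		     ro_scale);
}

int main(void)
{
	/* e.g. a measured-minus-fused quotient diff of 12 at 2400 QUOT/V */
	printf("%d uV\n", aging_adjust_uv(2400, 12));	/* prints 5000 uV */
	return 0;
}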
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 8897def3ef76..ac571271b0d5 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -875,6 +875,7 @@ int cpr4_parse_core_count_temp_voltage_adj(struct cpr3_regulator *vreg,
bool use_corner_band);
int cpr3_apm_init(struct cpr3_controller *ctrl);
int cpr3_mem_acc_init(struct cpr3_regulator *vreg);
+void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg);
#else
@@ -1047,6 +1048,10 @@ static inline int cpr3_mem_acc_init(struct cpr3_regulator *vreg)
return 0;
}
+static inline void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg)
+{
+}
+
#endif /* CONFIG_REGULATOR_CPR3 */
#endif /* __REGULATOR_CPR_REGULATOR_H__ */
diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c
index 51179f28fcf5..c377a65a6393 100644
--- a/drivers/regulator/cpr3-util.c
+++ b/drivers/regulator/cpr3-util.c
@@ -1202,6 +1202,23 @@ int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl)
if (rc)
return rc;
+ ctrl->vdd_regulator = devm_regulator_get(ctrl->dev, "vdd");
+ if (IS_ERR(ctrl->vdd_regulator)) {
+ rc = PTR_ERR(ctrl->vdd_regulator);
+ if (rc != -EPROBE_DEFER) {
+ /* vdd-supply is optional for CPRh controllers. */
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) {
+ cpr3_debug(ctrl, "unable to request vdd regulator, rc=%d\n",
+ rc);
+ ctrl->vdd_regulator = NULL;
+ return 0;
+ }
+ cpr3_err(ctrl, "unable to request vdd regulator, rc=%d\n",
+ rc);
+ }
+ return rc;
+ }
+
/*
* Regulator device handles are not necessary for CPRh controllers
* since communication with the regulators is completely managed
@@ -1210,15 +1227,6 @@ int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl)
if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH)
return rc;
- ctrl->vdd_regulator = devm_regulator_get(ctrl->dev, "vdd");
- if (IS_ERR(ctrl->vdd_regulator)) {
- rc = PTR_ERR(ctrl->vdd_regulator);
- if (rc != -EPROBE_DEFER)
- cpr3_err(ctrl, "unable request vdd regulator, rc=%d\n",
- rc);
- return rc;
- }
-
ctrl->system_regulator = devm_regulator_get_optional(ctrl->dev,
"system");
if (IS_ERR(ctrl->system_regulator)) {
@@ -2000,3 +2008,78 @@ done:
return rc;
}
+
+/**
+ * cprh_adjust_voltages_for_apm() - adjust per-corner floor and ceiling voltages
+ * so that they do not overlap the APM threshold voltage.
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * The memory array power mux (APM) must be configured for a specific supply
+ * based upon where the VDD voltage lies with respect to the APM threshold
+ * voltage. When using CPR hardware closed-loop, the voltage may vary anywhere
+ * between the floor and ceiling voltage without software notification.
+ * Therefore, it is required that the floor to ceiling range for every corner
+ * not intersect the APM threshold voltage. This function adjusts the floor to
+ * ceiling range for each corner which violates this requirement.
+ *
+ * The following algorithm is applied:
+ * if floor < threshold <= ceiling:
+ * if open_loop >= threshold, then floor = threshold - adj
+ * else ceiling = threshold - step
+ * where:
+ * adj = APM hysteresis voltage established to minimize the number of
+ * corners with artificially increased floor voltages
+ * step = voltage in microvolts of a single step of the VDD supply
+ *
+ * The open-loop voltage is also bounded by the new floor or ceiling value as
+ * needed.
+ *
+ * Return: none
+ */
+void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg)
+{
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ struct cpr3_corner *corner;
+ int i, adj, threshold, prev_ceiling, prev_floor, prev_open_loop;
+
+ if (!ctrl->apm_threshold_volt) {
+ /* APM not being used. */
+ return;
+ }
+
+ ctrl->apm_threshold_volt = CPR3_ROUND(ctrl->apm_threshold_volt,
+ ctrl->step_volt);
+ ctrl->apm_adj_volt = CPR3_ROUND(ctrl->apm_adj_volt, ctrl->step_volt);
+
+ threshold = ctrl->apm_threshold_volt;
+ adj = ctrl->apm_adj_volt;
+
+ for (i = 0; i < vreg->corner_count; i++) {
+ corner = &vreg->corner[i];
+
+ if (threshold <= corner->floor_volt
+ || threshold > corner->ceiling_volt)
+ continue;
+
+ prev_floor = corner->floor_volt;
+ prev_ceiling = corner->ceiling_volt;
+ prev_open_loop = corner->open_loop_volt;
+
+ if (corner->open_loop_volt >= threshold) {
+ corner->floor_volt = max(corner->floor_volt,
+ threshold - adj);
+ if (corner->open_loop_volt < corner->floor_volt)
+ corner->open_loop_volt = corner->floor_volt;
+ } else {
+ corner->ceiling_volt = threshold - ctrl->step_volt;
+ }
+
+ if (corner->floor_volt != prev_floor
+ || corner->ceiling_volt != prev_ceiling
+ || corner->open_loop_volt != prev_open_loop)
+ cpr3_debug(vreg, "APM threshold=%d, APM adj=%d changed corner %d voltages; prev: floor=%d, ceiling=%d, open-loop=%d; new: floor=%d, ceiling=%d, open-loop=%d\n",
+ threshold, adj, i, prev_floor, prev_ceiling,
+ prev_open_loop, corner->floor_volt,
+ corner->ceiling_volt, corner->open_loop_volt);
+ }
+}
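Since the function above only rewrites corners whose floor-to-ceiling band actually straddles the APM threshold, the three cases (open-loop at or above the threshold, open-loop below it, band not straddling) are easy to check in isolation. A standalone sketch with invented corner voltages; CPR3_ROUND and the driver structs are reduced to the bare minimum:

#include <stdio.h>

struct corner {
	int floor_uv, ceiling_uv, open_loop_uv;
};

static int max_int(int a, int b) { return a > b ? a : b; }

static void adjust_for_apm(struct corner *c, int threshold, int adj, int step)
{
	/* band does not straddle the threshold: nothing to do */
	if (threshold <= c->floor_uv || threshold > c->ceiling_uv)
		return;

	if (c->open_loop_uv >= threshold) {
		/* raise the floor to the bottom of the hysteresis window */
		c->floor_uv = max_int(c->floor_uv, threshold - adj);
		if (c->open_loop_uv < c->floor_uv)
			c->open_loop_uv = c->floor_uv;
	} else {
		/* keep the whole band below the threshold */
		c->ceiling_uv = threshold - step;
	}
}

int main(void)
{
	/* invented corners around an 850000 uV threshold */
	struct corner c[] = {
		{ 750000, 900000, 880000 },	/* open-loop above: floor rises */
		{ 750000, 900000, 800000 },	/* open-loop below: ceiling drops */
		{ 600000, 700000, 650000 },	/* untouched */
	};
	int i;

	for (i = 0; i < 3; i++) {
		adjust_for_apm(&c[i], 850000, 20000, 10000);
		printf("corner %d: floor=%d ceiling=%d open-loop=%d\n",
		       i, c[i].floor_uv, c[i].ceiling_uv, c[i].open_loop_uv);
	}
	return 0;
}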
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 284180b0e72f..953ea5f33f40 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -54,6 +54,8 @@
* @force_highest_corner: Flag indicating that all corners must operate
* at the voltage of the highest corner. This is
* applicable to MSMCOBALT only.
+ * @aging_init_quot_diff: Initial quotient difference between CPR aging
+ * min and max sensors measured at time of manufacturing
*
* This struct holds the values for all of the fuses read from memory.
*/
@@ -65,6 +67,7 @@ struct cprh_msmcobalt_kbss_fuses {
u64 speed_bin;
u64 cpr_fusing_rev;
u64 force_highest_corner;
+ u64 aging_init_quot_diff;
};
/*
@@ -192,6 +195,18 @@ msmcobalt_cpr_force_highest_corner_param[] = {
{},
};
+static const struct cpr3_fuse_param
+msmcobalt_kbss_aging_init_quot_diff_param[2][2] = {
+ [MSMCOBALT_KBSS_POWER_CLUSTER_ID] = {
+ {69, 6, 13},
+ {},
+ },
+ [MSMCOBALT_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ {71, 25, 32},
+ {},
+ },
+};
+
/*
* Open loop voltage fuse reference voltages in microvolts for MSMCOBALT v1
*/
@@ -225,6 +240,8 @@ msmcobalt_v2_kbss_fuse_ref_volt[2][MSMCOBALT_KBSS_FUSE_CORNERS] = {
#define MSMCOBALT_KBSS_FUSE_STEP_VOLT 10000
#define MSMCOBALT_KBSS_VOLTAGE_FUSE_SIZE 6
#define MSMCOBALT_KBSS_QUOT_OFFSET_SCALE 5
+#define MSMCOBALT_KBSS_AGING_INIT_QUOT_DIFF_SIZE 8
+#define MSMCOBALT_KBSS_AGING_INIT_QUOT_DIFF_SCALE 1
#define MSMCOBALT_KBSS_POWER_CPR_SENSOR_COUNT 6
#define MSMCOBALT_KBSS_PERFORMANCE_CPR_SENSOR_COUNT 9
@@ -242,6 +259,12 @@ msmcobalt_v2_kbss_fuse_ref_volt[2][MSMCOBALT_KBSS_FUSE_CORNERS] = {
#define MSMCOBALT_KBSS_PERFORMANCE_TEMP_SENSOR_ID_START 6
#define MSMCOBALT_KBSS_PERFORMANCE_TEMP_SENSOR_ID_END 11
+#define MSMCOBALT_KBSS_POWER_AGING_SENSOR_ID 0
+#define MSMCOBALT_KBSS_POWER_AGING_BYPASS_MASK0 0
+
+#define MSMCOBALT_KBSS_PERFORMANCE_AGING_SENSOR_ID 0
+#define MSMCOBALT_KBSS_PERFORMANCE_AGING_BYPASS_MASK0 0
+
/**
* cprh_msmcobalt_kbss_read_fuse_data() - load KBSS specific fuse parameter values
* @vreg: Pointer to the CPR3 regulator
@@ -321,6 +344,15 @@ static int cprh_msmcobalt_kbss_read_fuse_data(struct cpr3_regulator *vreg)
}
rc = cpr3_read_fuse_param(base,
+ msmcobalt_kbss_aging_init_quot_diff_param[id],
+ &fuse->aging_init_quot_diff);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_read_fuse_param(base,
msmcobalt_cpr_force_highest_corner_param,
&fuse->force_highest_corner);
if (rc) {
@@ -826,6 +858,7 @@ static int cprh_kbss_apm_crossover_as_corner(struct cpr3_regulator *vreg)
corner->floor_volt = ctrl->apm_crossover_volt;
corner->ceiling_volt = ctrl->apm_crossover_volt;
corner->open_loop_volt = ctrl->apm_crossover_volt;
+ corner->abs_ceiling_volt = ctrl->apm_crossover_volt;
corner->use_open_loop = true;
vreg->corner_count++;
@@ -833,79 +866,6 @@ static int cprh_kbss_apm_crossover_as_corner(struct cpr3_regulator *vreg)
}
/**
- * cprh_kbss_adjust_voltages_for_apm() - adjust per-corner floor and ceiling
- * voltages so that they do not overlap the APM threshold voltage.
- * @vreg: Pointer to the CPR3 regulator
- *
- * The KBSS memory array power mux (APM) must be configured for a specific
- * supply based upon where the VDD voltage lies with respect to the APM
- * threshold voltage. When using CPR hardware closed-loop, the voltage may vary
- * anywhere between the floor and ceiling voltage without software notification.
- * Therefore, it is required that the floor to ceiling range for every corner
- * not intersect the APM threshold voltage. This function adjusts the floor to
- * ceiling range for each corner which violates this requirement.
- *
- * The following algorithm is applied in the case that
- * floor < threshold <= ceiling:
- * if open_loop >= threshold, then floor = threshold - adj
- * else ceiling = threshold - step
- * where adj = APM hysteresis voltage established to minimize number
- * of corners with artificially increased floor voltages
- * and step = voltage in microvolts of a single step of the VDD supply
- *
- * The open-loop voltage is also bounded by the new floor or ceiling value as
- * needed.
- *
- * Return: 0 on success, errno on failure
- */
-static int cprh_kbss_adjust_voltages_for_apm(struct cpr3_regulator *vreg)
-{
- struct cpr3_controller *ctrl = vreg->thread->ctrl;
- struct cpr3_corner *corner;
- int i, adj, threshold, prev_ceiling, prev_floor, prev_open_loop;
-
- if (!ctrl->apm_threshold_volt) {
- /* APM not being used. */
- return 0;
- }
-
- ctrl->apm_threshold_volt = CPR3_ROUND(ctrl->apm_threshold_volt,
- ctrl->step_volt);
- ctrl->apm_adj_volt = CPR3_ROUND(ctrl->apm_adj_volt, ctrl->step_volt);
-
- threshold = ctrl->apm_threshold_volt;
- adj = ctrl->apm_adj_volt;
-
- for (i = 0; i < vreg->corner_count; i++) {
- corner = &vreg->corner[i];
-
- if (threshold <= corner->floor_volt
- || threshold > corner->ceiling_volt)
- continue;
-
- prev_floor = corner->floor_volt;
- prev_ceiling = corner->ceiling_volt;
- prev_open_loop = corner->open_loop_volt;
-
- if (corner->open_loop_volt >= threshold) {
- corner->floor_volt = max(corner->floor_volt,
- threshold - adj);
- if (corner->open_loop_volt < corner->floor_volt)
- corner->open_loop_volt = corner->floor_volt;
- } else {
- corner->ceiling_volt = threshold - ctrl->step_volt;
- }
-
- cpr3_debug(vreg, "APM threshold=%d, APM adj=%d changed corner %d voltages; prev: floor=%d, ceiling=%d, open-loop=%d; new: floor=%d, ceiling=%d, open-loop=%d\n",
- threshold, adj, i, prev_floor, prev_ceiling,
- prev_open_loop, corner->floor_volt,
- corner->ceiling_volt, corner->open_loop_volt);
- }
-
- return 0;
-}
-
-/**
* cprh_msmcobalt_kbss_set_no_interpolation_quotients() - use the fused target
* quotient values for lower frequencies.
* @vreg: Pointer to the CPR3 regulator
@@ -1235,12 +1195,7 @@ static int cprh_kbss_init_regulator(struct cpr3_regulator *vreg)
return rc;
}
- rc = cprh_kbss_adjust_voltages_for_apm(vreg);
- if (rc) {
- cpr3_err(vreg, "unable to adjust voltages for APM\n, rc=%d\n",
- rc);
- return rc;
- }
+ cprh_adjust_voltages_for_apm(vreg);
cpr3_open_loop_voltage_as_ceiling(vreg);
@@ -1299,6 +1254,80 @@ static int cprh_kbss_init_regulator(struct cpr3_regulator *vreg)
}
/**
+ * cprh_kbss_init_aging() - perform KBSS CPRh controller specific aging
+ * initializations
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cprh_kbss_init_aging(struct cpr3_controller *ctrl)
+{
+ struct cprh_msmcobalt_kbss_fuses *fuse = NULL;
+ struct cpr3_regulator *vreg = NULL;
+ u32 aging_ro_scale;
+ int i, j, rc;
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ if (ctrl->thread[i].vreg[j].aging_allowed) {
+ ctrl->aging_required = true;
+ vreg = &ctrl->thread[i].vreg[j];
+ fuse = vreg->platform_fuses;
+ break;
+ }
+ }
+ }
+
+ if (!ctrl->aging_required || !fuse || !vreg)
+ return 0;
+
+ rc = cpr3_parse_array_property(vreg, "qcom,cpr-aging-ro-scaling-factor",
+ 1, &aging_ro_scale);
+ if (rc)
+ return rc;
+
+ if (aging_ro_scale == 0) {
+ cpr3_err(ctrl, "aging RO scaling factor is invalid: %u\n",
+ aging_ro_scale);
+ return -EINVAL;
+ }
+
+ ctrl->aging_vdd_mode = REGULATOR_MODE_NORMAL;
+ ctrl->aging_complete_vdd_mode = REGULATOR_MODE_IDLE;
+
+ ctrl->aging_sensor_count = 1;
+ ctrl->aging_sensor = kzalloc(sizeof(*ctrl->aging_sensor), GFP_KERNEL);
+ if (!ctrl->aging_sensor)
+ return -ENOMEM;
+
+ if (ctrl->ctrl_id == MSMCOBALT_KBSS_POWER_CLUSTER_ID) {
+ ctrl->aging_sensor->sensor_id
+ = MSMCOBALT_KBSS_POWER_AGING_SENSOR_ID;
+ ctrl->aging_sensor->bypass_mask[0]
+ = MSMCOBALT_KBSS_POWER_AGING_BYPASS_MASK0;
+ } else {
+ ctrl->aging_sensor->sensor_id
+ = MSMCOBALT_KBSS_PERFORMANCE_AGING_SENSOR_ID;
+ ctrl->aging_sensor->bypass_mask[0]
+ = MSMCOBALT_KBSS_PERFORMANCE_AGING_BYPASS_MASK0;
+ }
+ ctrl->aging_sensor->ro_scale = aging_ro_scale;
+
+ ctrl->aging_sensor->init_quot_diff
+ = cpr3_convert_open_loop_voltage_fuse(0,
+ MSMCOBALT_KBSS_AGING_INIT_QUOT_DIFF_SCALE,
+ fuse->aging_init_quot_diff,
+ MSMCOBALT_KBSS_AGING_INIT_QUOT_DIFF_SIZE);
+
+ cpr3_debug(ctrl, "sensor %u aging init quotient diff = %d, aging RO scale = %u QUOT/V\n",
+ ctrl->aging_sensor->sensor_id,
+ ctrl->aging_sensor->init_quot_diff,
+ ctrl->aging_sensor->ro_scale);
+
+ return 0;
+}
+
+/**
* cprh_kbss_init_controller() - perform KBSS CPRh controller specific
* initializations
* @ctrl: Pointer to the CPR3 controller
@@ -1566,6 +1595,13 @@ static int cprh_kbss_regulator_probe(struct platform_device *pdev)
return rc;
}
+ rc = cprh_kbss_init_aging(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "failed to initialize aging configurations, rc=%d\n",
+ rc);
+ return rc;
+ }
+
platform_set_drvdata(pdev, ctrl);
rc = cprh_kbss_populate_opp_table(ctrl);
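The new aging fuse parameters appear to follow the usual CPR3 convention of {fuse row, start bit, end bit} triples — {69, 6, 13} is an 8-bit field, matching MSMCOBALT_KBSS_AGING_INIT_QUOT_DIFF_SIZE — and cpr3_convert_open_loop_voltage_fuse(0, 1, fuse, 8) presumably performs a sign-magnitude decode (top bit sign, 7-bit magnitude). Both readings are assumptions, not spelled out in this patch; under them, the decode amounts to:

#include <stdio.h>
#include <stdint.h>

/*
 * Sketch of decoding the 8-bit aging initial quotient difference
 * fuse.  Assumed: {69, 6, 13} selects bits 6..13 of fuse row 69, and
 * the field is sign-magnitude encoded.  Neither detail is confirmed
 * by this patch.
 */
static int decode_init_quot_diff(uint64_t row_val)
{
	unsigned int field = (unsigned int)(row_val >> 6) & 0xff; /* bits 6..13 */
	int magnitude = field & 0x7f;

	return (field & 0x80) ? -magnitude : magnitude;
}

int main(void)
{
	/* invented row value: 0x8c (sign = 1, magnitude 12) at bit 6 */
	uint64_t row = (uint64_t)0x8c << 6;

	printf("init_quot_diff = %d\n", decode_init_quot_diff(row)); /* -12 */
	return 0;
}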
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index d6e372fc7922..0547853c4f3a 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -1479,8 +1479,8 @@ void ufsdbg_add_debugfs(struct ufs_hba *hba)
char root_name[sizeof("ufshcd00")];
if (!hba) {
- dev_err(hba->dev, "%s: NULL hba, exiting", __func__);
- goto err_no_root;
+ pr_err("%s: NULL hba, exiting", __func__);
+ return;
}
snprintf(root_name, ARRAY_SIZE(root_name), "%s%d", UFSHCD,
diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c
index 8953722e8dad..e23dc3e8d9da 100644
--- a/drivers/scsi/ufs/ufs_test.c
+++ b/drivers/scsi/ufs/ufs_test.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -689,13 +689,13 @@ static void scenario_free_end_io_fn(struct request *rq, int err)
__blk_put_request(test_iosched->req_q, test_rq->rq);
spin_unlock_irqrestore(&test_iosched->lock, flags);
- test_iosched_free_test_req_data_buffer(test_rq);
- kfree(test_rq);
-
if (err)
pr_err("%s: request %d completed, err=%d", __func__,
test_rq->req_id, err);
+ test_iosched_free_test_req_data_buffer(test_rq);
+ kfree(test_rq);
+
check_test_completion(test_iosched);
}
@@ -984,14 +984,14 @@ static void long_test_free_end_io_fn(struct request *rq, int err)
return;
}
- test_iosched_free_test_req_data_buffer(test_rq);
- kfree(test_rq);
- utd->completed_req_count++;
-
if (err)
pr_err("%s: request %d completed, err=%d", __func__,
test_rq->req_id, err);
+ test_iosched_free_test_req_data_buffer(test_rq);
+ kfree(test_rq);
+ utd->completed_req_count++;
+
check_test_completion(test_iosched);
}
@@ -1007,7 +1007,7 @@ static void long_test_free_end_io_fn(struct request *rq, int err)
static int run_long_test(struct test_iosched *test_iosched)
{
int ret = 0;
- int direction, num_bios_per_request;
+ int direction, num_bios_per_request = 1;
static unsigned int inserted_requests;
u32 sector, seed, num_bios, seq_sector_delta;
struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
@@ -1028,14 +1028,12 @@ static int run_long_test(struct test_iosched *test_iosched)
/* Set test parameters */
switch (test_iosched->test_info.testcase) {
case UFS_TEST_LONG_RANDOM_READ:
- num_bios_per_request = 1;
utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
(LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
num_bios_per_request);
direction = READ;
break;
case UFS_TEST_LONG_RANDOM_WRITE:
- num_bios_per_request = 1;
utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
(LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
num_bios_per_request);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 9c27344165be..2bc74941abc8 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -571,6 +571,39 @@ config MSM_QDSP6_APRV3_GLINK
QDSP6. APR is used by audio driver to
configure QDSP6v2's ASM, ADM and AFE.
+config MSM_QDSP6_SSR
+ bool "Audio QDSP6 SSR support"
+ depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || \
+ MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+ help
+ Enable Subsystem Restart. Reset audio
+ clients when the ADSP subsystem is
+ restarted. Subsystem Restart for audio
+ is only used for processes on the ADSP
+ and signals audio drivers through APR.
+
+
+config MSM_QDSP6_PDR
+ bool "Audio QDSP6 PDR support"
+ depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || \
+ MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+ help
+ Enable Protection Domain Restart. Reset
+ audio clients when a process on the ADSP
+ is restarted. PDR for audio is only used
+ for processes on the ADSP and signals
+ audio drivers through APR.
+
+config MSM_QDSP6_NOTIFIER
+ bool "Audio QDSP6 PDR support"
+ depends on MSM_QDSP6_SSR || MSM_QDSP6_PDR
+ help
+ Enable notifier which decides whether
+ to use SSR or PDR and notifies all
+ audio clients of the event. Both SSR
+ and PDR are recovery methods when
+ there is a crash on ADSP. Audio drivers
+ are contacted by ADSP through APR.
config MSM_ADSP_LOADER
tristate "ADSP loader support"
@@ -716,14 +749,6 @@ config MSM_KERNEL_PROTECT_TEST
read-only. This test is FATAL whether it passes or fails!
Success is signaled by a stage-2 fault.
-config MSM_CORE_CTL_HELPER
- tristate "Core control helper functions for dynamically hotplug CPUs"
- help
- Provide helper functions for core control driver. Core control
- driver dynamically hotplugs CPUs from kernel based on current
- system load and state. It also supports limiting min and
- max online CPUs from userspace.
-
config QCOM_REMOTEQDSS
bool "Allow debug tools to enable events on other processors"
depends on QCOM_SCM && DEBUG_FS
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index d9134a558be6..434a114c000f 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) += memshare/
obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
obj-$(CONFIG_MSM_PIL) += peripheral-loader.o
-obj-$(CONFIG_MSM_CORE_CTL_HELPER) += core_ctl_helper.o
obj-$(CONFIG_MSM_PFE_WA) += pfe-wa.o
obj-$(CONFIG_ARCH_MSM8996) += msm_cpu_voltage.o
diff --git a/drivers/soc/qcom/common_log.c b/drivers/soc/qcom/common_log.c
index f4c69d624342..ecf89b2b3b37 100644
--- a/drivers/soc/qcom/common_log.c
+++ b/drivers/soc/qcom/common_log.c
@@ -20,7 +20,7 @@
#include <soc/qcom/memory_dump.h>
#define MISC_DUMP_DATA_LEN 4096
-#define PMIC_DUMP_DATA_LEN 4096
+#define PMIC_DUMP_DATA_LEN (64 * 1024)
#define VSENSE_DUMP_DATA_LEN 4096
#define RPM_DUMP_DATA_LEN (160 * 1024)
diff --git a/drivers/soc/qcom/core_ctl_helper.c b/drivers/soc/qcom/core_ctl_helper.c
deleted file mode 100644
index 88201412128e..000000000000
--- a/drivers/soc/qcom/core_ctl_helper.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/ktime.h>
-#include <linux/hrtimer.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <trace/events/power.h>
-#include <soc/qcom/core_ctl.h>
-
-void core_ctl_trace(int type, int cpu, int arg1, int arg2, int arg3)
-{
- switch (type) {
- case CORE_CTL_EVAL_NEED:
- trace_core_ctl_eval_need(cpu, arg1, arg2, arg3);
- break;
-
- case CORE_CTL_SET_BUSY:
- trace_core_ctl_set_busy(cpu, arg1, arg2, arg3);
- break;
- };
-}
-EXPORT_SYMBOL(core_ctl_trace);
-
-void core_ctl_block_hotplug(void)
-{
- get_online_cpus();
-}
-EXPORT_SYMBOL(core_ctl_block_hotplug);
-
-void core_ctl_unblock_hotplug(void)
-{
- put_online_cpus();
-}
-EXPORT_SYMBOL(core_ctl_unblock_hotplug);
-
-s64 core_ctl_get_time(void)
-{
- return ktime_to_ms(ktime_get());
-}
-EXPORT_SYMBOL(core_ctl_get_time);
-
-struct cpufreq_policy *core_ctl_get_policy(int cpu)
-{
- return cpufreq_cpu_get(cpu);
-}
-EXPORT_SYMBOL(core_ctl_get_policy);
-
-void core_ctl_put_policy(struct cpufreq_policy *policy)
-{
- cpufreq_cpu_put(policy);
-}
-EXPORT_SYMBOL(core_ctl_put_policy);
-
-struct device *core_ctl_find_cpu_device(unsigned cpu)
-{
- return get_cpu_device(cpu);
-}
-EXPORT_SYMBOL(core_ctl_find_cpu_device);
-
-int __ref core_ctl_online_core(unsigned int cpu)
-{
- int ret = -EINVAL;
- struct device *dev = get_cpu_device(cpu);
-
- if (dev) {
- lock_device_hotplug();
- ret = device_online(dev);
- unlock_device_hotplug();
- }
- return ret;
-}
-EXPORT_SYMBOL(core_ctl_online_core);
-
-int __ref core_ctl_offline_core(unsigned int cpu)
-{
- int ret = -EINVAL;
- struct device *dev = get_cpu_device(cpu);
-
- if (dev) {
- lock_device_hotplug();
- ret = device_offline(dev);
- unlock_device_hotplug();
- }
- return ret;
-}
-EXPORT_SYMBOL(core_ctl_offline_core);
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 5612075ba60c..1b1b14ec732e 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -2712,7 +2712,7 @@ int glink_close(void *handle)
{
struct glink_core_xprt_ctx *xprt_ctx = NULL;
struct channel_ctx *ctx = (struct channel_ctx *)handle;
- int ret;
+ int ret = 0;
unsigned long flags;
bool is_empty = false;
@@ -5380,7 +5380,7 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
size_t txd_len = 0;
size_t tx_len = 0;
uint32_t num_pkts = 0;
- int ret;
+ int ret = 0;
spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
while (txd_len < xprt_ctx->mtu &&
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index 4952e12ffe3c..a14d912b7536 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -769,7 +769,7 @@ static int glink_ssr_probe(struct platform_device *pdev)
struct device_node *phandle_node;
struct restart_notifier_block *nb;
struct subsys_info *ss_info;
- struct subsys_info_leaf *ss_info_leaf;
+ struct subsys_info_leaf *ss_info_leaf = NULL;
struct glink_link_info *link_info;
char *key;
const char *edge;
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 583856e6b3e1..f47d4a51fccd 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -33,7 +33,7 @@
#include <linux/dma-mapping.h>
#include <linux/qmi_encdec.h>
#include <linux/ipc_logging.h>
-#include <linux/msm-bus.h>
+#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/qpnp/qpnp-adc.h>
#include <soc/qcom/memory_dump.h>
@@ -41,6 +41,7 @@
#include <soc/qcom/msm_qmi_interface.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-locator.h>
#include <soc/qcom/service-notifier.h>
#include <soc/qcom/socinfo.h>
@@ -244,9 +245,10 @@ enum icnss_debug_quirks {
RECOVERY_DISABLE,
SSR_ONLY,
PDR_ONLY,
+ VBATT_DISABLE,
};
-#define ICNSS_QUIRKS_DEFAULT 0
+#define ICNSS_QUIRKS_DEFAULT BIT(VBATT_DISABLE)
unsigned long quirks = ICNSS_QUIRKS_DEFAULT;
module_param(quirks, ulong, 0600);
@@ -257,7 +259,12 @@ void *icnss_ipc_log_context;
void *icnss_ipc_log_long_context;
#endif
-#define ICNSS_EVENT_PENDING 2989
+#define ICNSS_EVENT_PENDING 2989
+
+#define ICNSS_EVENT_SYNC BIT(0)
+#define ICNSS_EVENT_UNINTERRUPTIBLE BIT(1)
+#define ICNSS_EVENT_SYNC_UNINTERRUPTIBLE (ICNSS_EVENT_UNINTERRUPTIBLE | \
+ ICNSS_EVENT_SYNC)
enum icnss_driver_event_type {
ICNSS_DRIVER_EVENT_SERVER_ARRIVE,
@@ -269,6 +276,10 @@ enum icnss_driver_event_type {
ICNSS_DRIVER_EVENT_MAX,
};
+struct icnss_event_pd_service_down_data {
+ bool crashed;
+};
+
struct icnss_driver_event {
struct list_head list;
enum icnss_driver_event_type type;
@@ -284,13 +295,13 @@ enum icnss_driver_state {
ICNSS_FW_READY,
ICNSS_DRIVER_PROBED,
ICNSS_FW_TEST_MODE,
- ICNSS_SUSPEND,
ICNSS_PM_SUSPEND,
ICNSS_PM_SUSPEND_NOIRQ,
ICNSS_SSR_ENABLED,
ICNSS_PDR_ENABLED,
ICNSS_PD_RESTART,
ICNSS_MSA0_ASSIGNED,
+ ICNSS_WLFW_EXISTS,
};
struct ce_irq_list {
@@ -351,6 +362,8 @@ struct icnss_stats {
uint32_t pm_suspend_noirq_err;
uint32_t pm_resume_noirq;
uint32_t pm_resume_noirq_err;
+ uint32_t pm_stay_awake;
+ uint32_t pm_relax;
uint32_t ind_register_req;
uint32_t ind_register_resp;
@@ -397,8 +410,6 @@ static struct icnss_priv {
size_t smmu_iova_len;
dma_addr_t smmu_iova_ipa_start;
size_t smmu_iova_ipa_len;
- struct msm_bus_scale_pdata *bus_scale_table;
- uint32_t bus_client;
struct qmi_handle *wlfw_clnt;
struct list_head event_list;
spinlock_t event_lock;
@@ -428,7 +439,6 @@ static struct icnss_priv {
struct notifier_block get_service_nb;
void *modem_notify_handler;
struct notifier_block modem_ssr_nb;
- struct wakeup_source ws;
uint32_t diag_reg_read_addr;
uint32_t diag_reg_read_mem_type;
uint32_t diag_reg_read_len;
@@ -437,6 +447,7 @@ static struct icnss_priv {
struct qpnp_adc_tm_chip *adc_tm_dev;
struct qpnp_vadc_chip *vadc_dev;
uint64_t vph_pwr;
+ atomic_t pm_count;
} *penv;
static void icnss_hw_write_reg(void *base, u32 offset, u32 val)
@@ -504,6 +515,35 @@ static int icnss_hw_poll_reg_field(void *base, u32 offset, u32 mask, u32 val,
return 0;
}
+static void icnss_pm_stay_awake(struct icnss_priv *priv)
+{
+ if (atomic_inc_return(&priv->pm_count) != 1)
+ return;
+
+ icnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
+ atomic_read(&priv->pm_count));
+
+ pm_stay_awake(&priv->pdev->dev);
+
+ priv->stats.pm_stay_awake++;
+}
+
+static void icnss_pm_relax(struct icnss_priv *priv)
+{
+ int r = atomic_dec_return(&priv->pm_count);
+
+ WARN_ON(r < 0);
+
+ if (r != 0)
+ return;
+
+ icnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
+ atomic_read(&priv->pm_count));
+
+ pm_relax(&priv->pdev->dev);
+ priv->stats.pm_relax++;
+}
+
static char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
{
switch (type) {
@@ -527,16 +567,16 @@ static char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
};
static int icnss_driver_event_post(enum icnss_driver_event_type type,
- bool sync, void *data)
+ u32 flags, void *data)
{
struct icnss_driver_event *event;
- unsigned long flags;
+ unsigned long irq_flags;
int gfp = GFP_KERNEL;
int ret = 0;
- icnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx\n",
- icnss_driver_event_to_str(type), type,
- sync ? "-sync" : "", penv->state);
+ icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
+ icnss_driver_event_to_str(type), type, current->comm,
+ flags, penv->state);
if (type >= ICNSS_DRIVER_EVENT_MAX) {
icnss_pr_err("Invalid Event type: %d, can't post", type);
@@ -550,39 +590,47 @@ static int icnss_driver_event_post(enum icnss_driver_event_type type,
if (event == NULL)
return -ENOMEM;
+ icnss_pm_stay_awake(penv);
+
event->type = type;
event->data = data;
init_completion(&event->complete);
event->ret = ICNSS_EVENT_PENDING;
- event->sync = sync;
+ event->sync = !!(flags & ICNSS_EVENT_SYNC);
- spin_lock_irqsave(&penv->event_lock, flags);
+ spin_lock_irqsave(&penv->event_lock, irq_flags);
list_add_tail(&event->list, &penv->event_list);
- spin_unlock_irqrestore(&penv->event_lock, flags);
+ spin_unlock_irqrestore(&penv->event_lock, irq_flags);
penv->stats.events[type].posted++;
queue_work(penv->event_wq, &penv->event_work);
- if (!sync)
- return ret;
+ if (!(flags & ICNSS_EVENT_SYNC))
+ goto out;
- ret = wait_for_completion_interruptible(&event->complete);
+ if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
+ wait_for_completion(&event->complete);
+ else
+ ret = wait_for_completion_interruptible(&event->complete);
icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
icnss_driver_event_to_str(type), type, penv->state, ret,
event->ret);
- spin_lock_irqsave(&penv->event_lock, flags);
+ spin_lock_irqsave(&penv->event_lock, irq_flags);
if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
event->sync = false;
- spin_unlock_irqrestore(&penv->event_lock, flags);
- return ret;
+ spin_unlock_irqrestore(&penv->event_lock, irq_flags);
+ ret = -EINTR;
+ goto out;
}
- spin_unlock_irqrestore(&penv->event_lock, flags);
+ spin_unlock_irqrestore(&penv->event_lock, irq_flags);
ret = event->ret;
kfree(event);
+out:
+ icnss_pm_relax(penv);
return ret;
}
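After this rework, a poster chooses between fire-and-forget (flags == 0), interruptible sync (ICNSS_EVENT_SYNC) and uninterruptible sync (ICNSS_EVENT_SYNC_UNINTERRUPTIBLE) delivery, and every post is bracketed by icnss_pm_stay_awake()/icnss_pm_relax() so the device cannot suspend with an event in flight. A reduced sketch of how the flag bits steer the wait; the flag values are copied from the defines above, everything else is scaffolding:

#include <stdio.h>

#define ICNSS_EVENT_SYNC		(1U << 0)
#define ICNSS_EVENT_UNINTERRUPTIBLE	(1U << 1)
#define ICNSS_EVENT_SYNC_UNINTERRUPTIBLE (ICNSS_EVENT_UNINTERRUPTIBLE | \
					  ICNSS_EVENT_SYNC)

static void describe_post(unsigned int flags)
{
	if (!(flags & ICNSS_EVENT_SYNC)) {
		printf("flags=0x%x: queue the event and return at once\n",
		       flags);
		return;
	}
	if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
		printf("flags=0x%x: wait_for_completion()\n", flags);
	else
		printf("flags=0x%x: wait_for_completion_interruptible()\n",
		       flags);
}

int main(void)
{
	describe_post(0);
	describe_post(ICNSS_EVENT_SYNC);
	describe_post(ICNSS_EVENT_SYNC_UNINTERRUPTIBLE);
	return 0;
}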
@@ -625,7 +673,7 @@ static int wlfw_vbatt_send_sync_msg(struct icnss_priv *priv,
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI vbatt request failed %d %d\n",
+ icnss_pr_err("QMI vbatt request rejected, result:%d error:%d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out;
@@ -755,6 +803,9 @@ static int icnss_init_vph_monitor(struct icnss_priv *priv)
{
int ret = 0;
+ if (test_bit(VBATT_DISABLE, &quirks))
+ goto out;
+
ret = icnss_get_phone_power(priv, &priv->vph_pwr);
if (ret)
goto out;
@@ -1640,7 +1691,7 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI MSA Mem info request failed %d %d\n",
+ icnss_pr_err("QMI MSA Mem info request rejected, result:%d error:%d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out;
@@ -1711,7 +1762,7 @@ static int wlfw_msa_ready_send_sync_msg(void)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI MSA ready request failed %d %d\n",
+ icnss_pr_err("QMI MSA ready request rejected: result:%d error:%d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out;
@@ -1770,7 +1821,7 @@ static int wlfw_ind_register_send_sync_msg(void)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI indication register request failed %d %d\n",
+ icnss_pr_err("QMI indication register request rejected, resut:%d error:%d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out;
@@ -1817,7 +1868,7 @@ static int wlfw_cap_send_sync_msg(void)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI capability request failed %d %d\n",
+ icnss_pr_err("QMI capability request rejected, result:%d error:%d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out;
@@ -1896,7 +1947,7 @@ static int wlfw_wlan_mode_send_sync_msg(enum wlfw_driver_mode_enum_v01 mode)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI mode request failed mode: %d, %d %d\n",
+ icnss_pr_err("QMI mode request rejected, mode:%d result:%d error:%d\n",
mode, resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out;
@@ -1946,7 +1997,7 @@ static int wlfw_wlan_cfg_send_sync_msg(struct wlfw_wlan_cfg_req_msg_v01 *data)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI config request failed %d %d\n",
+ icnss_pr_err("QMI config request rejected, result:%d error:%d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out;
@@ -1999,7 +2050,7 @@ static int wlfw_ini_send_sync_msg(bool enable_fw_log)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI INI request failed fw_log: %d, %d %d\n",
+ icnss_pr_err("QMI INI request rejected, fw_log:%d result:%d error:%d\n",
enable_fw_log, resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out;
@@ -2059,7 +2110,7 @@ static int wlfw_athdiag_read_send_sync_msg(struct icnss_priv *priv,
}
if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI athdiag read request failed %d %d\n",
+ icnss_pr_err("QMI athdiag read request rejected, result:%d error:%d\n",
resp->resp.result, resp->resp.error);
ret = resp->resp.result;
goto out;
@@ -2125,7 +2176,7 @@ static int wlfw_athdiag_write_send_sync_msg(struct icnss_priv *priv,
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI athdiag write request failed %d %d\n",
+ icnss_pr_err("QMI athdiag write request rejected, result:%d error:%d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out;
@@ -2183,7 +2234,7 @@ static void icnss_qmi_wlfw_clnt_ind(struct qmi_handle *handle,
switch (msg_id) {
case QMI_WLFW_FW_READY_IND_V01:
icnss_driver_event_post(ICNSS_DRIVER_EVENT_FW_READY_IND,
- false, NULL);
+ 0, NULL);
break;
case QMI_WLFW_MSA_READY_IND_V01:
icnss_pr_dbg("Received MSA Ready Indication msg_id 0x%x\n",
@@ -2208,6 +2259,8 @@ static int icnss_driver_event_server_arrive(void *data)
if (!penv)
return -ENODEV;
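+ /* Mark that the WLFW service has come up; PD-down handling keys off this */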
+ set_bit(ICNSS_WLFW_EXISTS, &penv->state);
+
penv->wlfw_clnt = qmi_handle_create(icnss_qmi_wlfw_clnt_notify, penv);
if (!penv->wlfw_clnt) {
icnss_pr_err("QMI client handle create failed\n");
@@ -2288,7 +2341,7 @@ static int icnss_driver_event_server_exit(void *data)
icnss_pr_info("QMI Service Disconnected: 0x%lx\n", penv->state);
- if (penv->adc_tm_dev)
+ if (!test_bit(VBATT_DISABLE, &quirks) && penv->adc_tm_dev)
qpnp_adc_tm_disable_chan_meas(penv->adc_tm_dev,
&penv->vph_monitor_params);
@@ -2307,6 +2360,8 @@ static int icnss_call_driver_probe(struct icnss_priv *priv)
if (!priv->ops || !priv->ops->probe)
return 0;
+ icnss_pr_dbg("Calling driver probe state: 0x%lx\n", priv->state);
+
icnss_hw_power_on(priv);
ret = priv->ops->probe(&priv->pdev->dev);
@@ -2335,6 +2390,8 @@ static int icnss_call_driver_reinit(struct icnss_priv *priv)
if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
goto out;
+ icnss_pr_dbg("Calling driver reinit state: 0x%lx\n", priv->state);
+
icnss_hw_power_on(priv);
ret = priv->ops->reinit(&priv->pdev->dev);
@@ -2365,8 +2422,6 @@ static int icnss_driver_event_fw_ready_ind(void *data)
if (!penv)
return -ENODEV;
- __pm_stay_awake(&penv->ws);
-
set_bit(ICNSS_FW_READY, &penv->state);
icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state);
@@ -2384,10 +2439,7 @@ static int icnss_driver_event_fw_ready_ind(void *data)
else
ret = icnss_call_driver_probe(penv);
- __pm_relax(&penv->ws);
-
out:
- __pm_relax(&penv->ws);
return ret;
}
@@ -2398,8 +2450,6 @@ static int icnss_driver_event_register_driver(void *data)
if (penv->ops)
return -EEXIST;
- __pm_stay_awake(&penv->ws);
-
penv->ops = data;
if (test_bit(SKIP_QMI, &quirks))
@@ -2425,21 +2475,16 @@ static int icnss_driver_event_register_driver(void *data)
set_bit(ICNSS_DRIVER_PROBED, &penv->state);
- __pm_relax(&penv->ws);
-
return 0;
power_off:
icnss_hw_power_off(penv);
out:
- __pm_relax(&penv->ws);
return ret;
}
static int icnss_driver_event_unregister_driver(void *data)
{
- __pm_stay_awake(&penv->ws);
-
if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state)) {
penv->ops = NULL;
goto out;
@@ -2455,40 +2500,73 @@ static int icnss_driver_event_unregister_driver(void *data)
icnss_hw_power_off(penv);
out:
- __pm_relax(&penv->ws);
return 0;
}
-static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
- void *data)
+static int icnss_call_driver_remove(struct icnss_priv *priv)
{
- int ret = 0;
+ icnss_pr_dbg("Calling driver remove state: 0x%lx\n", priv->state);
- if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
- icnss_pr_err("PD Down while recovery inprogress, state: 0x%lx\n",
- priv->state);
- ICNSS_ASSERT(0);
- goto out;
- }
+ clear_bit(ICNSS_FW_READY, &priv->state);
+
+ if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+ return 0;
+
+ if (!priv->ops || !priv->ops->remove)
+ return 0;
+
+ priv->ops->remove(&priv->pdev->dev);
+
+ clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
+
+ return 0;
+}
+
+static int icnss_call_driver_shutdown(struct icnss_priv *priv)
+{
+ icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
set_bit(ICNSS_PD_RESTART, &priv->state);
clear_bit(ICNSS_FW_READY, &priv->state);
+ if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+ return 0;
+
if (!priv->ops || !priv->ops->shutdown)
- goto out;
+ return 0;
- if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
+ priv->ops->shutdown(&priv->pdev->dev);
+
+ return 0;
+}
+
+static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
+ void *data)
+{
+ int ret = 0;
+ struct icnss_event_pd_service_down_data *event_data = data;
+
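+ /* Nothing to tear down if the WLFW service never existed */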
+ if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
+ return 0;
+
+ if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
+ icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
+ event_data->crashed, priv->state);
+ ICNSS_ASSERT(0);
goto out;
+ }
- priv->ops->shutdown(&priv->pdev->dev);
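+ /* A crash takes the shutdown path; a graceful exit gets a full remove */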
+ if (event_data->crashed)
+ icnss_call_driver_shutdown(priv);
+ else
+ icnss_call_driver_remove(priv);
out:
icnss_remove_msa_permissions(priv);
ret = icnss_hw_power_off(priv);
- icnss_pr_dbg("PD down completed: %d, state: 0x%lx\n",
- ret, priv->state);
+ kfree(data);
return ret;
}
@@ -2499,6 +2577,8 @@ static void icnss_driver_event_work(struct work_struct *work)
unsigned long flags;
int ret;
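+ /* Hold a single wakeup source across the whole event-drain loop */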
+ icnss_pm_stay_awake(penv);
+
spin_lock_irqsave(&penv->event_lock, flags);
while (!list_empty(&penv->event_list)) {
@@ -2529,7 +2609,8 @@ static void icnss_driver_event_work(struct work_struct *work)
ret = icnss_driver_event_unregister_driver(event->data);
break;
case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
- icnss_driver_event_pd_service_down(penv, event->data);
+ ret = icnss_driver_event_pd_service_down(penv,
+ event->data);
break;
default:
icnss_pr_err("Invalid Event type: %d", event->type);
@@ -2539,6 +2620,11 @@ static void icnss_driver_event_work(struct work_struct *work)
penv->stats.events[event->type].processed++;
+ icnss_pr_dbg("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
+ icnss_driver_event_to_str(event->type),
+ event->sync ? "-sync" : "", event->type, ret,
+ penv->state);
+
spin_lock_irqsave(&penv->event_lock, flags);
if (event->sync) {
event->ret = ret;
@@ -2552,6 +2638,8 @@ static void icnss_driver_event_work(struct work_struct *work)
spin_lock_irqsave(&penv->event_lock, flags);
}
spin_unlock_irqrestore(&penv->event_lock, flags);
+
+ icnss_pm_relax(penv);
}
static int icnss_qmi_wlfw_clnt_svc_event_notify(struct notifier_block *this,
@@ -2568,12 +2656,12 @@ static int icnss_qmi_wlfw_clnt_svc_event_notify(struct notifier_block *this,
switch (code) {
case QMI_SERVER_ARRIVE:
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_SERVER_ARRIVE,
- false, NULL);
+ 0, NULL);
break;
case QMI_SERVER_EXIT:
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_SERVER_EXIT,
- false, NULL);
+ 0, NULL);
break;
default:
icnss_pr_dbg("Invalid code: %ld", code);
@@ -2586,23 +2674,31 @@ static struct notifier_block wlfw_clnt_nb = {
.notifier_call = icnss_qmi_wlfw_clnt_svc_event_notify,
};
-static int icnss_modem_notifier_nb(struct notifier_block *this,
+static int icnss_modem_notifier_nb(struct notifier_block *nb,
unsigned long code,
- void *ss_handle)
+ void *data)
{
+ struct icnss_event_pd_service_down_data *event_data;
+ struct notif_data *notif = data;
+ struct icnss_priv *priv = container_of(nb, struct icnss_priv,
+ modem_ssr_nb);
+
icnss_pr_dbg("Modem-Notify: event %lu\n", code);
- if (code == SUBSYS_AFTER_POWERUP) {
- icnss_pr_dbg("Modem-Notify: Powerup\n");
- } else if (code == SUBSYS_BEFORE_SHUTDOWN) {
- icnss_pr_info("Modem-Notify: Before shutdown\n");
- icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
- true, NULL);
- } else if (code == SUBSYS_AFTER_SHUTDOWN) {
- icnss_pr_info("Modem-Notify: After Shutdown\n");
- } else {
- return NOTIFY_DONE;
- }
+ if (code != SUBSYS_BEFORE_SHUTDOWN)
+ return NOTIFY_OK;
+
+ icnss_pr_info("Modem went down, state: %lx\n", priv->state);
+
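+ /* Ownership of event_data passes to the PD_SERVICE_DOWN handler, which frees it */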
+ event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+
+ if (event_data == NULL)
+ return notifier_from_errno(-ENOMEM);
+
+ event_data->crashed = notif->crashed;
+
+ icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
+ ICNSS_EVENT_SYNC, event_data);
return NOTIFY_OK;
}
@@ -2661,14 +2757,23 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
{
struct icnss_priv *priv = container_of(nb, struct icnss_priv,
service_notifier_nb);
+ enum pd_subsys_state *state = data;
+ struct icnss_event_pd_service_down_data *event_data;
switch (notification) {
case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
- icnss_pr_info("Service down, state: 0x%lx\n", priv->state);
+ icnss_pr_info("Service down, data: 0x%p, state: 0x%lx\n", data,
+ priv->state);
+ event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+
+ if (event_data == NULL)
+ return notifier_from_errno(-ENOMEM);
+
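+ /* Anything other than an explicit SHUTDOWN is treated as a crash */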
+ if (state == NULL || *state != SHUTDOWN)
+ event_data->crashed = true;
+
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
- true, NULL);
- icnss_pr_dbg("Service down completed, state: 0x%lx\n",
- priv->state);
+ ICNSS_EVENT_SYNC, event_data);
break;
case SERVREG_NOTIF_SERVICE_STATE_UP_V01:
icnss_pr_dbg("Service up, state: 0x%lx\n", priv->state);
@@ -2814,8 +2919,6 @@ enable_pdr:
if (ret)
return ret;
- icnss_modem_ssr_unregister_notifier(priv);
-
return 0;
}
@@ -2842,7 +2945,7 @@ int icnss_register_driver(struct icnss_driver_ops *ops)
}
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
- true, ops);
+ ICNSS_EVENT_SYNC, ops);
if (ret == -ERESTARTSYS)
ret = 0;
@@ -2870,7 +2973,7 @@ int icnss_unregister_driver(struct icnss_driver_ops *ops)
}
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
- true, NULL);
+ ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
out:
return ret;
}
@@ -3198,6 +3301,12 @@ int icnss_wlan_disable(enum icnss_driver_mode mode)
}
EXPORT_SYMBOL(icnss_wlan_disable);
+bool icnss_is_qmi_disable(void)
+{
+ return test_bit(SKIP_QMI, &quirks) ? true : false;
+}
+EXPORT_SYMBOL(icnss_is_qmi_disable);
+
int icnss_get_ce_id(int irq)
{
int i;
@@ -3298,66 +3407,9 @@ unsigned int icnss_socinfo_get_serial_number(struct device *dev)
}
EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
-static int icnss_bw_vote(struct icnss_priv *priv, int index)
-{
- int ret = 0;
-
- icnss_pr_dbg("Vote %d for msm_bus, state 0x%lx\n",
- index, priv->state);
- ret = msm_bus_scale_client_update_request(priv->bus_client, index);
- if (ret)
- icnss_pr_err("Fail to vote %d: ret %d, state 0x%lx\n",
- index, ret, priv->state);
-
- return ret;
-}
-
-static int icnss_bw_init(struct icnss_priv *priv)
-{
- int ret = 0;
-
- priv->bus_scale_table = msm_bus_cl_get_pdata(priv->pdev);
- if (!priv->bus_scale_table) {
- icnss_pr_err("Missing entry for msm_bus scale table\n");
- return -EINVAL;
- }
-
- priv->bus_client = msm_bus_scale_register_client(priv->bus_scale_table);
- if (!priv->bus_client) {
- icnss_pr_err("Fail to register with bus_scale client\n");
- ret = -EINVAL;
- goto out;
- }
-
- ret = icnss_bw_vote(priv, 1);
- if (ret)
- goto out;
-
- return 0;
-
-out:
- msm_bus_cl_clear_pdata(priv->bus_scale_table);
- return ret;
-}
-
-static void icnss_bw_deinit(struct icnss_priv *priv)
-{
- if (!priv)
- return;
-
- if (priv->bus_client) {
- icnss_bw_vote(priv, 0);
- msm_bus_scale_unregister_client(priv->bus_client);
- }
-
- if (priv->bus_scale_table)
- msm_bus_cl_clear_pdata(priv->bus_scale_table);
-}
-
static int icnss_smmu_init(struct icnss_priv *priv)
{
struct dma_iommu_mapping *mapping;
- int disable_htw = 1;
int atomic_ctx = 1;
int s1_bypass = 1;
int ret = 0;
@@ -3374,15 +3426,6 @@ static int icnss_smmu_init(struct icnss_priv *priv)
}
ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret < 0) {
- icnss_pr_err("Set disable_htw attribute failed, err = %d\n",
- ret);
- goto set_attr_fail;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (ret < 0) {
@@ -3749,9 +3792,6 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
case ICNSS_FW_TEST_MODE:
seq_puts(s, "FW TEST MODE");
continue;
- case ICNSS_SUSPEND:
- seq_puts(s, "SUSPEND");
- continue;
case ICNSS_PM_SUSPEND:
seq_puts(s, "PM SUSPEND");
continue;
@@ -3770,6 +3810,9 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
case ICNSS_MSA0_ASSIGNED:
seq_puts(s, "MSA0 ASSIGNED");
continue;
+ case ICNSS_WLFW_EXISTS:
+ seq_puts(s, "WLAN FW EXISTS");
+ continue;
}
seq_printf(s, "UNKNOWN-%d", i);
@@ -3873,6 +3916,8 @@ static int icnss_stats_show(struct seq_file *s, void *data)
ICNSS_STATS_DUMP(s, priv, pm_suspend_noirq_err);
ICNSS_STATS_DUMP(s, priv, pm_resume_noirq);
ICNSS_STATS_DUMP(s, priv, pm_resume_noirq_err);
+ ICNSS_STATS_DUMP(s, priv, pm_stay_awake);
+ ICNSS_STATS_DUMP(s, priv, pm_relax);
icnss_stats_show_irqs(s, priv);
@@ -4315,22 +4360,16 @@ static int icnss_probe(struct platform_device *pdev)
priv->smmu_iova_len);
goto out;
}
-
- ret = icnss_bw_init(priv);
- if (ret)
- goto out_smmu_deinit;
}
spin_lock_init(&priv->event_lock);
spin_lock_init(&priv->on_off_lock);
- wakeup_source_init(&priv->ws, "icnss_ws");
-
priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
if (!priv->event_wq) {
icnss_pr_err("Workqueue creation failed\n");
ret = -EFAULT;
- goto out_bw_deinit;
+ goto out_smmu_deinit;
}
INIT_WORK(&priv->event_work, icnss_driver_event_work);
@@ -4358,8 +4397,6 @@ static int icnss_probe(struct platform_device *pdev)
out_destroy_wq:
destroy_workqueue(priv->event_wq);
-out_bw_deinit:
- icnss_bw_deinit(priv);
out_smmu_deinit:
icnss_smmu_deinit(priv);
out:
@@ -4385,10 +4422,6 @@ static int icnss_remove(struct platform_device *pdev)
if (penv->event_wq)
destroy_workqueue(penv->event_wq);
- icnss_bw_deinit(penv);
-
- wakeup_source_trash(&penv->ws);
-
icnss_hw_power_off(penv);
dev_set_drvdata(&pdev->dev, NULL);
@@ -4396,55 +4429,6 @@ static int icnss_remove(struct platform_device *pdev)
return 0;
}
-static int icnss_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- int ret = 0;
-
- if (!penv) {
- ret = -ENODEV;
- goto out;
- }
-
- icnss_pr_dbg("Driver suspending, state: 0x%lx\n",
- penv->state);
-
- if (!penv->ops || !penv->ops->suspend ||
- !test_bit(ICNSS_DRIVER_PROBED, &penv->state))
- goto out;
-
- ret = penv->ops->suspend(&pdev->dev, state);
-
-out:
- if (ret == 0)
- set_bit(ICNSS_SUSPEND, &penv->state);
- return ret;
-}
-
-static int icnss_resume(struct platform_device *pdev)
-{
- int ret = 0;
-
- if (!penv) {
- ret = -ENODEV;
- goto out;
- }
-
- icnss_pr_dbg("Driver resuming, state: 0x%lx\n",
- penv->state);
-
- if (!penv->ops || !penv->ops->resume ||
- !test_bit(ICNSS_DRIVER_PROBED, &penv->state))
- goto out;
-
- ret = penv->ops->resume(&pdev->dev);
-
-out:
- if (ret == 0)
- clear_bit(ICNSS_SUSPEND, &penv->state);
- return ret;
-}
-
#ifdef CONFIG_PM_SLEEP
static int icnss_pm_suspend(struct device *dev)
{
@@ -4580,8 +4564,6 @@ MODULE_DEVICE_TABLE(of, icnss_dt_match);
static struct platform_driver icnss_driver = {
.probe = icnss_probe,
.remove = icnss_remove,
- .suspend = icnss_suspend,
- .resume = icnss_resume,
.driver = {
.name = "icnss",
.pm = &icnss_pm_ops,
diff --git a/drivers/soc/qcom/jtag-fuse.c b/drivers/soc/qcom/jtag-fuse.c
index 0b05ce9a22bb..0f347723e378 100644
--- a/drivers/soc/qcom/jtag-fuse.c
+++ b/drivers/soc/qcom/jtag-fuse.c
@@ -152,8 +152,6 @@ static int jtag_fuse_probe(struct platform_device *pdev)
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- /* Store the driver data pointer for use in exported functions */
- fusedrvdata = drvdata;
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
@@ -174,6 +172,8 @@ static int jtag_fuse_probe(struct platform_device *pdev)
if (!drvdata->base)
return -ENOMEM;
+ /* Store the driver data pointer for use in exported functions */
+ fusedrvdata = drvdata;
dev_info(dev, "JTag Fuse initialized\n");
return 0;
}
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 3873a34c60fb..00ab1ce3b6b4 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -582,7 +582,8 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
mba_dp_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_dp_size,
&mba_dp_phys, GFP_KERNEL, &md->attrs_dma);
if (!mba_dp_virt) {
- dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+ dev_err(pil->dev, "%s MBA metadata buffer allocation %zx bytes failed\n",
+ __func__, drv->mba_dp_size);
ret = -ENOMEM;
goto err_invalid_fw;
}
@@ -595,8 +596,8 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
drv->mba_dp_virt = mba_dp_virt;
mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;
- dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa\n",
- &mba_dp_phys, &mba_dp_phys_end);
+ dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa size %zx\n",
+ &mba_dp_phys, &mba_dp_phys_end, drv->mba_dp_size);
/* Load the MBA image into memory */
count = fw->size;
@@ -666,7 +667,8 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
GFP_KERNEL, &attrs);
if (!mdata_virt) {
- dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+ dev_err(pil->dev, "%s MBA metadata buffer allocation %zx bytes failed\n",
+ __func__, size);
ret = -ENOMEM;
goto fail;
}
diff --git a/drivers/soc/qcom/qdsp6v2/Makefile b/drivers/soc/qcom/qdsp6v2/Makefile
index d78328191bfe..f3505bab1a34 100644
--- a/drivers/soc/qcom/qdsp6v2/Makefile
+++ b/drivers/soc/qcom/qdsp6v2/Makefile
@@ -4,3 +4,6 @@ obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o voice_svc.
obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o voice_svc.o
obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += msm_audio_ion.o
obj-$(CONFIG_MSM_ADSP_LOADER) += adsp-loader.o
+obj-$(CONFIG_MSM_QDSP6_SSR) += audio_ssr.o
+obj-$(CONFIG_MSM_QDSP6_PDR) += audio_pdr.o
+obj-$(CONFIG_MSM_QDSP6_NOTIFIER) += audio_notifier.o
diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c
index bbe686f4bc42..ee9b054dcc24 100644
--- a/drivers/soc/qcom/qdsp6v2/apr.c
+++ b/drivers/soc/qcom/qdsp6v2/apr.c
@@ -27,16 +27,15 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <soc/qcom/subsystem_restart.h>
-#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/scm.h>
#include <sound/apr_audio-v2.h>
#include <soc/qcom/smd.h>
#include <linux/qdsp6v2/apr.h>
#include <linux/qdsp6v2/apr_tal.h>
#include <linux/qdsp6v2/dsp_debug.h>
+#include <linux/qdsp6v2/audio_notifier.h>
#include <linux/ipc_logging.h>
-#define SCM_Q6_NMI_CMD 0x1
#define APR_PKT_IPC_LOG_PAGE_CNT 2
static struct apr_q6 q6;
@@ -45,9 +44,11 @@ static void *apr_pkt_ctx;
static wait_queue_head_t dsp_wait;
static wait_queue_head_t modem_wait;
static bool is_modem_up;
+static bool is_initial_boot;
/* Subsystem restart: QDSP6 data, functions */
static struct workqueue_struct *apr_reset_workqueue;
static void apr_reset_deregister(struct work_struct *work);
+static void dispatch_event(unsigned long code, uint16_t proc);
struct apr_reset_work {
void *handle;
struct work_struct work;
@@ -202,6 +203,20 @@ enum apr_subsys_state apr_cmpxchg_modem_state(enum apr_subsys_state prev,
return atomic_cmpxchg(&q6.modem_state, prev, new);
}
+static void apr_modem_down(unsigned long opcode)
+{
+ apr_set_modem_state(APR_SUBSYS_DOWN);
+ dispatch_event(opcode, APR_DEST_MODEM);
+}
+
+static void apr_modem_up(void)
+{
+ if (apr_cmpxchg_modem_state(APR_SUBSYS_DOWN, APR_SUBSYS_UP) ==
+ APR_SUBSYS_DOWN)
+ wake_up(&modem_wait);
+ is_modem_up = 1;
+}
+
enum apr_subsys_state apr_get_q6_state(void)
{
return atomic_read(&q6.q6_state);
@@ -224,6 +239,19 @@ enum apr_subsys_state apr_cmpxchg_q6_state(enum apr_subsys_state prev,
return atomic_cmpxchg(&q6.q6_state, prev, new);
}
+static void apr_adsp_down(unsigned long opcode)
+{
+ apr_set_q6_state(APR_SUBSYS_DOWN);
+ dispatch_event(opcode, APR_DEST_QDSP6);
+}
+
+static void apr_adsp_up(void)
+{
+ if (apr_cmpxchg_q6_state(APR_SUBSYS_DOWN, APR_SUBSYS_LOADED) ==
+ APR_SUBSYS_DOWN)
+ wake_up(&dsp_wait);
+}
+
int apr_wait_for_device_up(int dest_id)
{
int rc = -1;
@@ -276,6 +304,7 @@ int apr_send_pkt(void *handle, uint32_t *buf)
uint16_t dest_id;
uint16_t client_id;
uint16_t w_len;
+ int rc;
unsigned long flags;
if (!handle || !buf) {
@@ -317,14 +346,23 @@ int apr_send_pkt(void *handle, uint32_t *buf)
APR_PKT_INFO("Tx: dest_svc[%d], opcode[0x%X], size[%d]",
hdr->dest_svc, hdr->opcode, hdr->pkt_size);
- w_len = apr_tal_write(clnt->handle, buf,
+ rc = apr_tal_write(clnt->handle, buf,
(struct apr_pkt_priv *)&svc->pkt_owner,
hdr->pkt_size);
- if (w_len != hdr->pkt_size)
- pr_err("Unable to write APR pkt successfully: %d\n", w_len);
+ if (rc >= 0) {
+ w_len = rc;
+ if (w_len != hdr->pkt_size) {
+ pr_err("%s: Unable to write whole APR pkt successfully: %d\n",
+ __func__, rc);
+ rc = -EINVAL;
+ }
+ } else {
+ pr_err("%s: Write APR pkt failed with error %d\n",
+ __func__, rc);
+ }
spin_unlock_irqrestore(&svc->w_lock, flags);
- return w_len;
+ return rc;
}
int apr_pkt_config(void *handle, struct apr_pkt_cfg *cfg)
@@ -730,7 +768,7 @@ void apr_reset(void *handle)
}
/* Dispatch the Reset events to Modem and audio clients */
-void dispatch_event(unsigned long code, uint16_t proc)
+static void dispatch_event(unsigned long code, uint16_t proc)
{
struct apr_client *apr_client;
struct apr_client_data data;
@@ -783,128 +821,51 @@ void dispatch_event(unsigned long code, uint16_t proc)
}
}
-static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
- void *_cmd)
+static int apr_notifier_service_cb(struct notifier_block *this,
+ unsigned long opcode, void *data)
{
- static int boot_count = 2;
-
- if (boot_count) {
- boot_count--;
- return NOTIFY_OK;
- }
+ struct audio_notifier_cb_data *cb_data = data;
- switch (code) {
- case SUBSYS_BEFORE_SHUTDOWN:
- pr_debug("M-Notify: Shutdown started\n");
- apr_set_modem_state(APR_SUBSYS_DOWN);
- dispatch_event(code, APR_DEST_MODEM);
- break;
- case SUBSYS_AFTER_SHUTDOWN:
- pr_debug("M-Notify: Shutdown Completed\n");
- break;
- case SUBSYS_BEFORE_POWERUP:
- pr_debug("M-notify: Bootup started\n");
- break;
- case SUBSYS_AFTER_POWERUP:
- if (apr_cmpxchg_modem_state(APR_SUBSYS_DOWN, APR_SUBSYS_UP) ==
- APR_SUBSYS_DOWN)
- wake_up(&modem_wait);
- is_modem_up = 1;
- pr_debug("M-Notify: Bootup Completed\n");
- break;
- default:
- pr_err("M-Notify: General: %lu\n", code);
- break;
+ if (cb_data == NULL) {
+ pr_err("%s: Callback data is NULL!\n", __func__);
+ goto done;
}
- return NOTIFY_DONE;
-}
-
-static struct notifier_block mnb = {
- .notifier_call = modem_notifier_cb,
-};
-static bool powered_on;
+ pr_debug("%s: Service opcode 0x%lx, domain %d\n",
+ __func__, opcode, cb_data->domain);
-static int lpass_notifier_cb(struct notifier_block *this, unsigned long code,
- void *_cmd)
-{
- static int boot_count = 2;
- struct notif_data *data = (struct notif_data *)_cmd;
- struct scm_desc desc;
-
- if (boot_count) {
- boot_count--;
- return NOTIFY_OK;
- }
-
- switch (code) {
- case SUBSYS_BEFORE_SHUTDOWN:
- pr_debug("L-Notify: Shutdown started\n");
- apr_set_q6_state(APR_SUBSYS_DOWN);
- dispatch_event(code, APR_DEST_QDSP6);
- if (data && data->crashed) {
- /* Send NMI to QDSP6 via an SCM call. */
- if (!is_scm_armv8()) {
- scm_call_atomic1(SCM_SVC_UTIL,
- SCM_Q6_NMI_CMD, 0x1);
- } else {
- desc.args[0] = 0x1;
- desc.arginfo = SCM_ARGS(1);
- scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_UTIL,
- SCM_Q6_NMI_CMD), &desc);
- }
- /* The write should go through before q6 is shutdown */
- mb();
- pr_debug("L-Notify: Q6 NMI was sent.\n");
- }
- break;
- case SUBSYS_AFTER_SHUTDOWN:
- powered_on = false;
- pr_debug("L-Notify: Shutdown Completed\n");
- break;
- case SUBSYS_BEFORE_POWERUP:
- pr_debug("L-notify: Bootup started\n");
+ switch (opcode) {
+ case AUDIO_NOTIFIER_SERVICE_DOWN:
+ /*
+ * Use flag to ignore down notifications during
+ * initial boot. There is no benefit from error
+ * recovery notifications during initial boot
+ * up since everything is expected to be down.
+ */
+ if (is_initial_boot)
+ break;
+ if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+ apr_modem_down(opcode);
+ else
+ apr_adsp_down(opcode);
break;
- case SUBSYS_AFTER_POWERUP:
- if (apr_cmpxchg_q6_state(APR_SUBSYS_DOWN,
- APR_SUBSYS_LOADED) == APR_SUBSYS_DOWN)
- wake_up(&dsp_wait);
- powered_on = true;
- pr_debug("L-Notify: Bootup Completed\n");
+ case AUDIO_NOTIFIER_SERVICE_UP:
+ is_initial_boot = false;
+ if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+ apr_modem_up();
+ else
+ apr_adsp_up();
break;
default:
- pr_err("L-Notify: Generel: %lu\n", code);
break;
}
- return NOTIFY_DONE;
+done:
+ return NOTIFY_OK;
}
-static struct notifier_block lnb = {
+static struct notifier_block service_nb = {
+ .notifier_call = apr_notifier_service_cb,
.priority = 0,
- .notifier_call = lpass_notifier_cb,
-};
-
-static int panic_handler(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct scm_desc desc;
-
- if (powered_on) {
- /* Send NMI to QDSP6 via an SCM call. */
- if (!is_scm_armv8()) {
- scm_call_atomic1(SCM_SVC_UTIL, SCM_Q6_NMI_CMD, 0x1);
- } else {
- desc.args[0] = 0x1;
- desc.arginfo = SCM_ARGS(1);
- scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_UTIL,
- SCM_Q6_NMI_CMD), &desc);
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block panic_nb = {
- .notifier_call = panic_handler,
};
static int __init apr_init(void)
@@ -924,13 +885,18 @@ static int __init apr_init(void)
apr_reset_workqueue = create_singlethread_workqueue("apr_driver");
if (!apr_reset_workqueue)
return -ENOMEM;
- atomic_notifier_chain_register(&panic_notifier_list, &panic_nb);
apr_pkt_ctx = ipc_log_context_create(APR_PKT_IPC_LOG_PAGE_CNT,
"apr", 0);
if (!apr_pkt_ctx)
pr_err("%s: Unable to create ipc log context\n", __func__);
+ is_initial_boot = true;
+ subsys_notif_register("apr_adsp", AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &service_nb);
+ subsys_notif_register("apr_modem", AUDIO_NOTIFIER_MODEM_DOMAIN,
+ &service_nb);
+
return 0;
}
device_initcall(apr_init);
@@ -940,7 +906,7 @@ static int __init apr_late_init(void)
int ret = 0;
init_waitqueue_head(&dsp_wait);
init_waitqueue_head(&modem_wait);
- subsys_notif_register(&mnb, &lnb);
+
return ret;
}
late_initcall(apr_late_init);
diff --git a/drivers/soc/qcom/qdsp6v2/apr_v2.c b/drivers/soc/qcom/qdsp6v2/apr_v2.c
index 2d6ea825c811..037fb3327ef0 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_v2.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_v2.c
@@ -18,8 +18,7 @@
#include <linux/qdsp6v2/apr.h>
#include <linux/qdsp6v2/apr_tal.h>
#include <linux/qdsp6v2/dsp_debug.h>
-
-static const char *lpass_subsys_name = "adsp";
+#include <linux/qdsp6v2/audio_notifier.h>
enum apr_subsys_state apr_get_subsys_state(void)
{
@@ -32,11 +31,6 @@ void apr_set_subsys_state(void)
apr_set_modem_state(APR_SUBSYS_UP);
}
-const char *apr_get_lpass_subsys_name(void)
-{
- return lpass_subsys_name;
-}
-
uint16_t apr_get_data_src(struct apr_hdr *hdr)
{
if (hdr->src_domain == APR_DOMAIN_MODEM)
@@ -57,11 +51,15 @@ int apr_get_dest_id(char *dest)
return APR_DEST_MODEM;
}
-void subsys_notif_register(struct notifier_block *mod_notif,
- struct notifier_block *lp_notif)
+void subsys_notif_register(char *client_name, int domain,
+ struct notifier_block *nb)
{
- subsys_notif_register_notifier("modem", mod_notif);
- subsys_notif_register_notifier(apr_get_lpass_subsys_name(), lp_notif);
+ int ret;
+
+ ret = audio_notifier_register(client_name, domain, nb);
+ if (ret < 0)
+ pr_err("%s: Audio notifier register failed for domain %d ret = %d\n",
+ __func__, domain, ret);
}
uint16_t apr_get_reset_domain(uint16_t proc)
diff --git a/drivers/soc/qcom/qdsp6v2/apr_v3.c b/drivers/soc/qcom/qdsp6v2/apr_v3.c
index 6bb913edf3ec..2bfc518841c9 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_v3.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_v3.c
@@ -18,6 +18,7 @@
#include <linux/qdsp6v2/apr.h>
#include <linux/qdsp6v2/apr_tal.h>
#include <linux/qdsp6v2/dsp_debug.h>
+#include <linux/qdsp6v2/audio_notifier.h>
#define DEST_ID APR_DEST_MODEM
@@ -41,10 +42,21 @@ int apr_get_dest_id(char *dest)
return DEST_ID;
}
-void subsys_notif_register(struct notifier_block *mod_notif,
- struct notifier_block *lp_notif)
+void subsys_notif_register(char *client_name, int domain,
+ struct notifier_block *nb)
{
- subsys_notif_register_notifier("modem", mod_notif);
+ int ret;
+
+ if (domain != AUDIO_NOTIFIER_MODEM_DOMAIN) {
+ pr_debug("%s: Unused domain %d not registering with notifier\n",
+ __func__, domain);
+ return;
+ }
+
+ ret = audio_notifier_register(client_name, domain, nb);
+ if (ret < 0)
+ pr_err("%s: Audio notifier register failed for domain %d ret = %d\n",
+ __func__, domain, ret);
}
uint16_t apr_get_reset_domain(uint16_t proc)
diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
new file mode 100644
index 000000000000..47adc3bb3f40
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
@@ -0,0 +1,635 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/qdsp6v2/audio_pdr.h>
+#include <linux/qdsp6v2/audio_ssr.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/service-notifier.h>
+
+/*
+ * Audio states internal to the notifier. Clients use the states defined
+ * in audio_notifier.h for AUDIO_NOTIFIER_SERVICE_DOWN and UP.
+ */
+#define NO_SERVICE -2
+#define UNINIT_SERVICE -1
+
+/*
+ * Used for each client registered with audio notifier
+ */
+struct client_data {
+ struct list_head list;
+ /* Notifier block given by client */
+ struct notifier_block *nb;
+ char client_name[20];
+ int service;
+ int domain;
+};
+
+/*
+ * Used for each service and domain combination
+ * Tracks information specific to the underlying
+ * service.
+ */
+struct service_info {
+ const char name[20];
+ int domain_id;
+ int state;
+ void *handle;
+ /* Notifier block registered to service */
+ struct notifier_block *nb;
+ /* Used to determine when to register and deregister service */
+ int num_of_clients;
+ /* List of all clients registered to the service and domain */
+ struct srcu_notifier_head client_nb_list;
+};
+
+static int audio_notifer_ssr_adsp_cb(struct notifier_block *this,
+ unsigned long opcode, void *data);
+static int audio_notifer_ssr_modem_cb(struct notifier_block *this,
+ unsigned long opcode, void *data);
+static int audio_notifer_pdr_adsp_cb(struct notifier_block *this,
+ unsigned long opcode, void *data);
+
+static struct notifier_block notifier_ssr_adsp_nb = {
+ .notifier_call = audio_notifer_ssr_adsp_cb,
+ .priority = 0,
+};
+
+static struct notifier_block notifier_ssr_modem_nb = {
+ .notifier_call = audio_notifer_ssr_modem_cb,
+ .priority = 0,
+};
+
+static struct notifier_block notifier_pdr_adsp_nb = {
+ .notifier_call = audio_notifer_pdr_adsp_cb,
+ .priority = 0,
+};
+
+static struct service_info service_data[AUDIO_NOTIFIER_MAX_SERVICES]
+ [AUDIO_NOTIFIER_MAX_DOMAINS] = {
+
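+ /* Indexed [service][domain]: first row SSR, second PDR; columns ADSP, MODEM */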
+ {{
+ .name = "SSR_ADSP",
+ .domain_id = AUDIO_SSR_DOMAIN_ADSP,
+ .state = AUDIO_NOTIFIER_SERVICE_DOWN,
+ .nb = &notifier_ssr_adsp_nb
+ },
+ {
+ .name = "SSR_MODEM",
+ .domain_id = AUDIO_SSR_DOMAIN_MODEM,
+ .state = AUDIO_NOTIFIER_SERVICE_DOWN,
+ .nb = &notifier_ssr_modem_nb
+ } },
+
+ {{
+ .name = "PDR_ADSP",
+ .domain_id = AUDIO_PDR_DOMAIN_ADSP,
+ .state = UNINIT_SERVICE,
+ .nb = &notifier_pdr_adsp_nb
+ },
+ { /* PDR MODEM service not enabled */
+ .name = "INVALID",
+ .state = NO_SERVICE,
+ .nb = NULL
+ } }
+};
+
+/* Master list of all audio notifier clients */
+struct list_head client_list;
+struct mutex notifier_mutex;
+
+static int audio_notifer_get_default_service(int domain)
+{
+ int service = NO_SERVICE;
+
+ /* initial service to connect per domain */
+ switch (domain) {
+ case AUDIO_NOTIFIER_ADSP_DOMAIN:
+ service = AUDIO_NOTIFIER_PDR_SERVICE;
+ break;
+ case AUDIO_NOTIFIER_MODEM_DOMAIN:
+ service = AUDIO_NOTIFIER_SSR_SERVICE;
+ break;
+ }
+
+ return service;
+}
+
+static void audio_notifer_disable_service(int service)
+{
+ int i;
+
+ for (i = 0; i < AUDIO_NOTIFIER_MAX_DOMAINS; i++)
+ service_data[service][i].state = NO_SERVICE;
+}
+
+static bool audio_notifer_is_service_enabled(int service)
+{
+ int i;
+
+ for (i = 0; i < AUDIO_NOTIFIER_MAX_DOMAINS; i++)
+ if (service_data[service][i].state != NO_SERVICE)
+ return true;
+ return false;
+}
+
+static void audio_notifer_init_service(int service)
+{
+ int i;
+
+ for (i = 0; i < AUDIO_NOTIFIER_MAX_DOMAINS; i++) {
+ if (service_data[service][i].state == UNINIT_SERVICE)
+ service_data[service][i].state =
+ AUDIO_NOTIFIER_SERVICE_DOWN;
+ }
+}
+
+static int audio_notifer_reg_service(int service, int domain)
+{
+ void *handle;
+ int ret = 0;
+ int curr_state = AUDIO_NOTIFIER_SERVICE_DOWN;
+
+ switch (service) {
+ case AUDIO_NOTIFIER_SSR_SERVICE:
+ handle = audio_ssr_register(
+ service_data[service][domain].domain_id,
+ service_data[service][domain].nb);
+ break;
+ case AUDIO_NOTIFIER_PDR_SERVICE:
+ handle = audio_pdr_service_register(
+ service_data[service][domain].domain_id,
+ service_data[service][domain].nb, &curr_state);
+
+ if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UP_V01)
+ curr_state = AUDIO_NOTIFIER_SERVICE_UP;
+ else
+ curr_state = AUDIO_NOTIFIER_SERVICE_DOWN;
+ break;
+ default:
+ pr_err("%s: Invalid service %d\n",
+ __func__, service);
+ ret = -EINVAL;
+ goto done;
+ }
+ if (IS_ERR_OR_NULL(handle)) {
+ pr_err("%s: handle is incorrect for service %s\n",
+ __func__, service_data[service][domain].name);
+ ret = -EINVAL;
+ goto done;
+ }
+ service_data[service][domain].state = curr_state;
+ service_data[service][domain].handle = handle;
+
+ pr_info("%s: service %s is in use\n",
+ __func__, service_data[service][domain].name);
+ pr_debug("%s: service %s has current state %d, handle 0x%pK\n",
+ __func__, service_data[service][domain].name,
+ service_data[service][domain].state,
+ service_data[service][domain].handle);
+done:
+ return ret;
+}
+
+static int audio_notifer_dereg_service(int service, int domain)
+{
+ int ret;
+
+ switch (service) {
+ case AUDIO_NOTIFIER_SSR_SERVICE:
+ ret = audio_ssr_deregister(
+ service_data[service][domain].handle,
+ service_data[service][domain].nb);
+ break;
+ case AUDIO_NOTIFIER_PDR_SERVICE:
+ ret = audio_pdr_service_deregister(
+ service_data[service][domain].handle,
+ service_data[service][domain].nb);
+ break;
+ default:
+ pr_err("%s: Invalid service %d\n",
+ __func__, service);
+ ret = -EINVAL;
+ goto done;
+ }
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("%s: deregister failed for service %s, ret %d\n",
+ __func__, service_data[service][domain].name, ret);
+ goto done;
+ }
+
+ pr_debug("%s: service %s with handle 0x%pK deregistered\n",
+ __func__, service_data[service][domain].name,
+ service_data[service][domain].handle);
+
+ service_data[service][domain].state = AUDIO_NOTIFIER_SERVICE_DOWN;
+ service_data[service][domain].handle = NULL;
+done:
+ return ret;
+}
+
+static int audio_notifer_reg_client_service(struct client_data *client_data,
+ int service)
+{
+ int ret = 0;
+ int domain = client_data->domain;
+ struct audio_notifier_cb_data data;
+
+ switch (service) {
+ case AUDIO_NOTIFIER_SSR_SERVICE:
+ case AUDIO_NOTIFIER_PDR_SERVICE:
+ if (service_data[service][domain].num_of_clients == 0)
+ ret = audio_notifer_reg_service(service, domain);
+ break;
+ default:
+ pr_err("%s: Invalid service for client %s, service %d, domain %d\n",
+ __func__, client_data->client_name, service, domain);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("%s: service registration failed on service %s for client %s\n",
+ __func__, service_data[service][domain].name,
+ client_data->client_name);
+ goto done;
+ }
+
+ client_data->service = service;
+ srcu_notifier_chain_register(
+ &service_data[service][domain].client_nb_list,
+ client_data->nb);
+ service_data[service][domain].num_of_clients++;
+
+ pr_debug("%s: registered client %s on service %s, current state 0x%x\n",
+ __func__, client_data->client_name,
+ service_data[service][domain].name,
+ service_data[service][domain].state);
+
+ /*
+ * PDR registration returns current state
+ * Force callback of client with current state for PDR
+ */
+ if (client_data->service == AUDIO_NOTIFIER_PDR_SERVICE) {
+ data.service = service;
+ data.domain = domain;
+ (void)client_data->nb->notifier_call(client_data->nb,
+ service_data[service][domain].state, &data);
+ }
+done:
+ return ret;
+}
+
+static int audio_notifer_reg_client(struct client_data *client_data)
+{
+ int ret = 0;
+ int service;
+ int domain = client_data->domain;
+
+ service = audio_notifer_get_default_service(domain);
+ if (service < 0) {
+ pr_err("%s: service %d is incorrect\n", __func__, service);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Search through services to find a valid one to register client on. */
+ for (; service >= 0; service--) {
+ /* If a service is not initialized, wait for it to come up. */
+ if (service_data[service][domain].state == UNINIT_SERVICE)
+ goto done;
+ /* Skip unsupported service and domain combinations. */
+ if (service_data[service][domain].state < 0)
+ continue;
+ /* Only register clients who have not acquired a service. */
+ if (client_data->service != NO_SERVICE)
+ continue;
+
+ /*
+ * Only register clients, who have not acquired a service, on
+ * the best available service for their domain. Uninitialized
+ * services will try to register all of their clients after
+ * they initialize correctly or will disable their service and
+ * register clients on the next best available service.
+ */
+ pr_debug("%s: register client %s on service %s",
+ __func__, client_data->client_name,
+ service_data[service][domain].name);
+
+ ret = audio_notifer_reg_client_service(client_data, service);
+ if (IS_ERR_VALUE(ret))
+ pr_err("%s: client %s failed to register on service %s",
+ __func__, client_data->client_name,
+ service_data[service][domain].name);
+ }
+
+done:
+ return ret;
+}
+
+static int audio_notifer_dereg_client(struct client_data *client_data)
+{
+ int ret = 0;
+ int service = client_data->service;
+ int domain = client_data->domain;
+
+ switch (client_data->service) {
+ case AUDIO_NOTIFIER_SSR_SERVICE:
+ case AUDIO_NOTIFIER_PDR_SERVICE:
+ if (service_data[service][domain].num_of_clients == 1)
+ ret = audio_notifer_dereg_service(service, domain);
+ break;
+ case NO_SERVICE:
+ goto done;
+ default:
+ pr_err("%s: Invalid service for client %s, service %d\n",
+ __func__, client_data->client_name,
+ client_data->service);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("%s: deregister failed for client %s on service %s, ret %d\n",
+ __func__, client_data->client_name,
+ service_data[service][domain].name, ret);
+ goto done;
+ }
+
+ ret = srcu_notifier_chain_unregister(&service_data[service][domain].
+ client_nb_list, client_data->nb);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("%s: srcu_notifier_chain_unregister failed, ret %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ pr_debug("%s: deregistered client %s on service %s\n",
+ __func__, client_data->client_name,
+ service_data[service][domain].name);
+
+ client_data->service = NO_SERVICE;
+ if (service_data[service][domain].num_of_clients > 0)
+ service_data[service][domain].num_of_clients--;
+done:
+ return ret;
+}
+
+static void audio_notifer_reg_all_clients(void)
+{
+ struct list_head *ptr, *next;
+ struct client_data *client_data;
+ int ret;
+
+ list_for_each_safe(ptr, next, &client_list) {
+ client_data = list_entry(ptr,
+ struct client_data, list);
+ ret = audio_notifer_reg_client(client_data);
+ if (IS_ERR_VALUE(ret))
+ pr_err("%s: audio_notifer_reg_client failed for client %s, ret %d\n",
+ __func__, client_data->client_name,
+ ret);
+ }
+}
+
+static int audio_notifer_pdr_callback(struct notifier_block *this,
+ unsigned long opcode, void *data)
+{
+ pr_debug("%s: Audio PDR framework state 0x%lx\n",
+ __func__, opcode);
+ mutex_lock(&notifier_mutex);
+ if (opcode == AUDIO_PDR_FRAMEWORK_DOWN)
+ audio_notifer_disable_service(AUDIO_NOTIFIER_PDR_SERVICE);
+ else
+ audio_notifer_init_service(AUDIO_NOTIFIER_PDR_SERVICE);
+
+ audio_notifer_reg_all_clients();
+ mutex_unlock(&notifier_mutex);
+ return 0;
+}
+
+static struct notifier_block pdr_nb = {
+ .notifier_call = audio_notifer_pdr_callback,
+ .priority = 0,
+};
+
+static int audio_notifer_convert_opcode(unsigned long opcode,
+ unsigned long *notifier_opcode)
+{
+ int ret = 0;
+
+ switch (opcode) {
+ case SUBSYS_BEFORE_SHUTDOWN:
+ case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
+ *notifier_opcode = AUDIO_NOTIFIER_SERVICE_DOWN;
+ break;
+ case SUBSYS_AFTER_POWERUP:
+ case SERVREG_NOTIF_SERVICE_STATE_UP_V01:
+ *notifier_opcode = AUDIO_NOTIFIER_SERVICE_UP;
+ break;
+ default:
+ pr_debug("%s: Unused opcode 0x%lx\n", __func__, opcode);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int audio_notifer_service_cb(unsigned long opcode,
+ int service, int domain)
+{
+ int ret = 0;
+ unsigned long notifier_opcode;
+ struct audio_notifier_cb_data data;
+
+ if (audio_notifer_convert_opcode(opcode, &notifier_opcode) < 0)
+ goto done;
+
+ data.service = service;
+ data.domain = domain;
+
+ pr_debug("%s: service %s, opcode 0x%lx\n",
+ __func__, service_data[service][domain].name, notifier_opcode);
+
+ mutex_lock(&notifier_mutex);
+
+ service_data[service][domain].state = notifier_opcode;
+ ret = srcu_notifier_call_chain(&service_data[service][domain].
+ client_nb_list, notifier_opcode, &data);
+ if (IS_ERR_VALUE(ret))
+ pr_err("%s: srcu_notifier_call_chain returned %d, service %s, opcode 0x%lx\n",
+ __func__, ret, service_data[service][domain].name,
+ notifier_opcode);
+
+ mutex_unlock(&notifier_mutex);
+done:
+ return NOTIFY_OK;
+}
+
+static int audio_notifer_pdr_adsp_cb(struct notifier_block *this,
+ unsigned long opcode, void *data)
+{
+ return audio_notifer_service_cb(opcode,
+ AUDIO_NOTIFIER_PDR_SERVICE,
+ AUDIO_NOTIFIER_ADSP_DOMAIN);
+}
+
+static int audio_notifer_ssr_adsp_cb(struct notifier_block *this,
+ unsigned long opcode, void *data)
+{
+ if (opcode == SUBSYS_BEFORE_SHUTDOWN)
+ audio_ssr_send_nmi(data);
+
+ return audio_notifer_service_cb(opcode,
+ AUDIO_NOTIFIER_SSR_SERVICE,
+ AUDIO_NOTIFIER_ADSP_DOMAIN);
+}
+
+static int audio_notifer_ssr_modem_cb(struct notifier_block *this,
+ unsigned long opcode, void *data)
+{
+ return audio_notifer_service_cb(opcode,
+ AUDIO_NOTIFIER_SSR_SERVICE,
+ AUDIO_NOTIFIER_MODEM_DOMAIN);
+}
+
+int audio_notifier_deregister(char *client_name)
+{
+ int ret = 0;
+ int ret2;
+ struct list_head *ptr, *next;
+ struct client_data *client_data;
+
+ if (client_name == NULL) {
+ pr_err("%s: client_name is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ mutex_lock(&notifier_mutex);
+ list_for_each_safe(ptr, next, &client_list) {
+ client_data = list_entry(ptr, struct client_data,
+ list);
+ if (!strcmp(client_name, client_data->client_name)) {
+ ret2 = audio_notifer_dereg_client(client_data);
+ if (ret2 < 0) {
+ pr_err("%s: audio_notifer_dereg_client failed, ret %d\n, service %d, domain %d",
+ __func__, ret2, client_data->service,
+ client_data->domain);
+ ret = ret2;
+ continue;
+ }
+ list_del(&client_data->list);
+ kfree(client_data);
+ }
+ }
+ mutex_unlock(&notifier_mutex);
+done:
+ return ret;
+}
+EXPORT_SYMBOL(audio_notifier_deregister);
+
+int audio_notifier_register(char *client_name, int domain,
+ struct notifier_block *nb)
+{
+ int ret;
+ struct client_data *client_data;
+
+ if (client_name == NULL) {
+ pr_err("%s: client_name is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ } else if (nb == NULL) {
+ pr_err("%s: Notifier block is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ client_data = kmalloc(sizeof(*client_data), GFP_KERNEL);
+ if (client_data == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ INIT_LIST_HEAD(&client_data->list);
+ client_data->nb = nb;
+ strlcpy(client_data->client_name, client_name,
+ sizeof(client_data->client_name));
+ client_data->service = NO_SERVICE;
+ client_data->domain = domain;
+
+ mutex_lock(&notifier_mutex);
+ ret = audio_notifer_reg_client(client_data);
+ if (IS_ERR_VALUE(ret)) {
+ mutex_unlock(&notifier_mutex);
+ pr_err("%s: audio_notifer_reg_client for client %s failed ret = %d\n",
+ __func__, client_data->client_name,
+ ret);
+ kfree(client_data);
+ goto done;
+ }
+ list_add_tail(&client_data->list, &client_list);
+ mutex_unlock(&notifier_mutex);
+done:
+ return ret;
+}
+EXPORT_SYMBOL(audio_notifier_register);
+
+static int __init audio_notifier_subsys_init(void)
+{
+ int i, j;
+
+ mutex_init(&notifier_mutex);
+ INIT_LIST_HEAD(&client_list);
+ for (i = 0; i < AUDIO_NOTIFIER_MAX_SERVICES; i++) {
+ for (j = 0; j < AUDIO_NOTIFIER_MAX_DOMAINS; j++) {
+ if (service_data[i][j].state <= NO_SERVICE)
+ continue;
+
+ srcu_init_notifier_head(
+ &service_data[i][j].client_nb_list);
+ }
+ }
+
+ return 0;
+}
+subsys_initcall(audio_notifier_subsys_init);
+
+static int __init audio_notifier_init(void)
+{
+ int ret;
+
+ ret = audio_pdr_register(&pdr_nb);
+ if (IS_ERR_VALUE(ret)) {
+ pr_debug("%s: PDR register failed, ret = %d, disable service\n",
+ __func__, ret);
+ audio_notifer_disable_service(AUDIO_NOTIFIER_PDR_SERVICE);
+ }
+
+ /* Do not return error since PDR enablement is not critical */
+ return 0;
+}
+module_init(audio_notifier_init);
+
+static int __init audio_notifier_late_init(void)
+{
+ /*
+ * If pdr registration failed, register clients on next service
+ * Do in late init to ensure that SSR subsystem is initialized
+ */
+ if (!audio_notifer_is_service_enabled(AUDIO_NOTIFIER_PDR_SERVICE))
+ audio_notifer_reg_all_clients();
+
+ return 0;
+}
+late_initcall(audio_notifier_late_init);
diff --git a/drivers/soc/qcom/qdsp6v2/audio_pdr.c b/drivers/soc/qcom/qdsp6v2/audio_pdr.c
new file mode 100644
index 000000000000..642ceb5533b0
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/audio_pdr.c
@@ -0,0 +1,148 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/qdsp6v2/audio_pdr.h>
+#include <soc/qcom/service-locator.h>
+#include <soc/qcom/service-notifier.h>
+
+static struct pd_qmi_client_data audio_pdr_services[AUDIO_PDR_DOMAIN_MAX] = {
+ { /* AUDIO_PDR_DOMAIN_ADSP */
+ .client_name = "audio_pdr_adsp",
+ .service_name = "avs/audio"
+ }
+};
+
+struct srcu_notifier_head audio_pdr_cb_list;
+
+static int audio_pdr_locator_callback(struct notifier_block *this,
+ unsigned long opcode, void *data)
+{
+ unsigned long pdr_state = AUDIO_PDR_FRAMEWORK_DOWN;
+
+ if (opcode == LOCATOR_DOWN) {
+ pr_debug("%s: Service %s is down!", __func__,
+ audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].
+ service_name);
+ goto done;
+ }
+
+ memcpy(&audio_pdr_services, data,
+ sizeof(audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP]));
+ if (audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].total_domains == 1) {
+ pr_debug("%s: Service %s, returned total domains %d, ",
+ __func__,
+ audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].service_name,
+ audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].
+ total_domains);
+ pdr_state = AUDIO_PDR_FRAMEWORK_UP;
+ goto done;
+ } else
+ pr_err("%s: Service %s returned invalid total domains %d",
+ __func__,
+ audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].service_name,
+ audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].
+ total_domains);
+done:
+ srcu_notifier_call_chain(&audio_pdr_cb_list, pdr_state, NULL);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block audio_pdr_locator_nb = {
+ .notifier_call = audio_pdr_locator_callback,
+ .priority = 0,
+};
+
+int audio_pdr_register(struct notifier_block *nb)
+{
+ if (nb == NULL) {
+ pr_err("%s: Notifier block is NULL\n", __func__);
+ return -EINVAL;
+ }
+ return srcu_notifier_chain_register(&audio_pdr_cb_list, nb);
+}
+EXPORT_SYMBOL(audio_pdr_register);
+
+void *audio_pdr_service_register(int domain_id,
+ struct notifier_block *nb, int *curr_state)
+{
+ void *handle;
+
+ if ((domain_id < 0) ||
+ (domain_id >= AUDIO_PDR_DOMAIN_MAX)) {
+ pr_err("%s: Invalid service ID %d\n", __func__, domain_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ handle = service_notif_register_notifier(
+ audio_pdr_services[domain_id].domain_list[0].name,
+ audio_pdr_services[domain_id].domain_list[0].instance_id,
+ nb, curr_state);
+ if (IS_ERR_OR_NULL(handle)) {
+ pr_err("%s: Failed to register for service %s, instance %d\n",
+ __func__,
+ audio_pdr_services[domain_id].domain_list[0].name,
+ audio_pdr_services[domain_id].domain_list[0].
+ instance_id);
+ }
+ return handle;
+}
+EXPORT_SYMBOL(audio_pdr_service_register);
+
+int audio_pdr_service_deregister(void *service_handle,
+ struct notifier_block *nb)
+{
+ int ret;
+
+ if (service_handle == NULL) {
+ pr_err("%s: service handle is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = service_notif_unregister_notifier(
+ service_handle, nb);
+ if (IS_ERR_VALUE(ret))
+ pr_err("%s: Failed to deregister service ret %d\n",
+ __func__, ret);
+done:
+ return ret;
+}
+EXPORT_SYMBOL(audio_pdr_service_deregister);
+
+static int __init audio_pdr_subsys_init(void)
+{
+ srcu_init_notifier_head(&audio_pdr_cb_list);
+ return 0;
+}
+subsys_initcall(audio_pdr_subsys_init);
+
+static int __init audio_pdr_late_init(void)
+{
+ int ret;
+
+ ret = get_service_location(
+ audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].client_name,
+ audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].service_name,
+ &audio_pdr_locator_nb);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("%s get_service_location failed ret %d\n",
+ __func__, ret);
+ srcu_notifier_call_chain(&audio_pdr_cb_list,
+ AUDIO_PDR_FRAMEWORK_DOWN, NULL);
+ }
+
+ return ret;
+}
+late_initcall(audio_pdr_late_init);
+
diff --git a/drivers/soc/qcom/qdsp6v2/audio_ssr.c b/drivers/soc/qcom/qdsp6v2/audio_ssr.c
new file mode 100644
index 000000000000..a66fb2a63fae
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/audio_ssr.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/qdsp6v2/audio_ssr.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+
+#define SCM_Q6_NMI_CMD 0x1
+
+static char *audio_ssr_domains[] = {
+ "adsp",
+ "modem"
+};
+
+void *audio_ssr_register(int domain_id, struct notifier_block *nb)
+{
+ if ((domain_id < 0) ||
+ (domain_id >= AUDIO_SSR_DOMAIN_MAX)) {
+ pr_err("%s: Invalid service ID %d\n", __func__, domain_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return subsys_notif_register_notifier(
+ audio_ssr_domains[domain_id], nb);
+}
+EXPORT_SYMBOL(audio_ssr_register);
+
+int audio_ssr_deregister(void *handle, struct notifier_block *nb)
+{
+ return subsys_notif_unregister_notifier(handle, nb);
+}
+EXPORT_SYMBOL(audio_ssr_deregister);
+
+void audio_ssr_send_nmi(void *ssr_cb_data)
+{
+ struct notif_data *data = (struct notif_data *)ssr_cb_data;
+ struct scm_desc desc;
+
+ if (data && data->crashed) {
+ /* Send NMI to QDSP6 via an SCM call. */
+ if (!is_scm_armv8()) {
+ scm_call_atomic1(SCM_SVC_UTIL,
+ SCM_Q6_NMI_CMD, 0x1);
+ } else {
+ desc.args[0] = 0x1;
+ desc.arginfo = SCM_ARGS(1);
+ scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_UTIL,
+ SCM_Q6_NMI_CMD), &desc);
+ }
+ /* The write should go through before q6 is shutdown */
+ mb();
+ pr_debug("%s: Q6 NMI was sent.\n", __func__);
+ }
+}
+EXPORT_SYMBOL(audio_ssr_send_nmi);
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
index 9b44fb03cf94..83e3775ed533 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
@@ -741,7 +741,6 @@ static int msm_audio_smmu_init(struct device *dev)
{
struct dma_iommu_mapping *mapping;
int ret;
- int disable_htw = 1;
mapping = arm_iommu_create_mapping(
msm_iommu_get_bus(dev),
@@ -750,10 +749,6 @@ static int msm_audio_smmu_init(struct device *dev)
if (IS_ERR(mapping))
return PTR_ERR(mapping);
- iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
-
ret = arm_iommu_attach_device(dev, mapping);
if (ret) {
dev_err(dev, "%s: Attach failed, err = %d\n",
diff --git a/drivers/soc/qcom/qsee_ipc_irq_bridge.c b/drivers/soc/qcom/qsee_ipc_irq_bridge.c
index ab43bbb7e86a..eee42d7ba314 100644
--- a/drivers/soc/qcom/qsee_ipc_irq_bridge.c
+++ b/drivers/soc/qcom/qsee_ipc_irq_bridge.c
@@ -115,10 +115,8 @@ static struct qiib_driver_data *qiib_info;
static int qiib_driver_data_init(void)
{
qiib_info = kzalloc(sizeof(*qiib_info), GFP_KERNEL);
- if (!qiib_info) {
- QIIB_ERR("Unable to allocate info pointer\n");
+ if (!qiib_info)
return -ENOMEM;
- }
INIT_LIST_HEAD(&qiib_info->list);
mutex_init(&qiib_info->list_lock);
@@ -356,6 +354,7 @@ static int qiib_parse_node(struct device_node *node, struct qiib_dev *devp)
const char *dev_name;
uint32_t irqtype;
uint32_t irq_clear[2];
+ struct irq_data *irqtype_data;
int ret = -ENODEV;
key = "qcom,dev-name";
@@ -374,7 +373,12 @@ static int qiib_parse_node(struct device_node *node, struct qiib_dev *devp)
}
QIIB_DBG("%s: %s = %d\n", __func__, key, devp->irq_line);
- irqtype = irqd_get_trigger_type(irq_get_irq_data(devp->irq_line));
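+ /* irq_get_irq_data() may return NULL for a bad line; check before use */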
+ irqtype_data = irq_get_irq_data(devp->irq_line);
+ if (!irqtype_data) {
+ QIIB_ERR("%s: get irqdata fail:%d\n", __func__, devp->irq_line);
+ goto missing_key;
+ }
+ irqtype = irqd_get_trigger_type(irqtype_data);
QIIB_DBG("%s: irqtype = %d\n", __func__, irqtype);
key = "label";
diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c
index c08668149636..4e406f7cd379 100644
--- a/drivers/soc/qcom/rpm-smd-debug.c
+++ b/drivers/soc/qcom/rpm-smd-debug.c
@@ -104,8 +104,6 @@ static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer,
if (msm_rpm_wait_for_ack(msm_rpm_send_request(req)))
pr_err("Sending the RPM message failed\n");
- else
- pr_info("RPM message sent succesfully\n");
err_request:
msm_rpm_free_request(req);
diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c
index 03a1591e5b09..242071f52811 100644
--- a/drivers/soc/qcom/rpm-smd.c
+++ b/drivers/soc/qcom/rpm-smd.c
@@ -967,8 +967,10 @@ static struct msm_rpm_request *msm_rpm_create_request_common(
cdata->client_buf = kzalloc(buf_size, GFP_FLAG(noirq));
- if (!cdata->client_buf)
- goto cdata_alloc_fail;
+ if (!cdata->client_buf) {
+ pr_warn("Cannot allocate memory for client_buf\n");
+ goto client_buf_alloc_fail;
+ }
set_set_type(cdata->client_buf, set);
set_rsc_type(cdata->client_buf, rsc_type);
@@ -997,6 +999,8 @@ static struct msm_rpm_request *msm_rpm_create_request_common(
buf_alloc_fail:
kfree(cdata->kvp);
kvp_alloc_fail:
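+ /* client_buf is allocated before kvp, so free it on this unwind path too */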
+ kfree(cdata->client_buf);
+client_buf_alloc_fail:
kfree(cdata);
cdata_alloc_fail:
return NULL;
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index e7dbcac064aa..d9ebc1edda9c 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -418,6 +418,8 @@ const char *msm_secure_vmid_to_string(int secure_vmid)
return "VMID_WLAN";
case VMID_WLAN_CE:
return "VMID_WLAN_CE";
+ case VMID_CP_CAMERA_PREVIEW:
+ return "VMID_CP_CAMERA_PREVIEW";
case VMID_INVAL:
return "VMID_INVAL";
default:
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 981f78491ecf..504a3263253c 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -114,6 +114,7 @@ struct qmi_client_info {
};
static LIST_HEAD(qmi_client_list);
static DEFINE_MUTEX(qmi_list_lock);
+static DEFINE_MUTEX(qmi_client_release_lock);
static DEFINE_MUTEX(notif_add_lock);
@@ -417,9 +418,11 @@ static void root_service_service_exit(struct qmi_client_info *data,
* Destroy client handle and try connecting when
* service comes up again.
*/
+ mutex_lock(&qmi_client_release_lock);
data->service_connected = false;
qmi_handle_destroy(data->clnt_handle);
data->clnt_handle = NULL;
+ mutex_unlock(&qmi_client_release_lock);
}
static void root_service_exit_work(struct work_struct *work)
@@ -459,7 +462,7 @@ static int ssr_event_notify(struct notifier_block *this,
struct qmi_client_info, ssr_notifier);
struct notif_data *notif = data;
switch (code) {
- case SUBSYS_BEFORE_SHUTDOWN:
+ case SUBSYS_AFTER_SHUTDOWN:
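+ /* Handle the down path only once the root PD has fully shut down */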
pr_debug("Root PD DOWN(SSR notification), crashed?%d\n",
notif->crashed);
if (notif->crashed)
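
Two fixes above: destroying the QMI client handle is serialized behind the new qmi_client_release_lock so a concurrent sender cannot use a just-freed handle, and root-PD-down handling moves from SUBSYS_BEFORE_SHUTDOWN to SUBSYS_AFTER_SHUTDOWN. A sketch of the handle-guard half, with hypothetical stand-ins for the QMI calls (destroy_handle(), do_send()):

#include <linux/mutex.h>

struct client {
	bool service_connected;
	void *handle;
};

void destroy_handle(void *handle);	/* hypothetical destructor */
int do_send(void *handle, void *msg);	/* hypothetical send */

static DEFINE_MUTEX(release_lock);

static void client_teardown(struct client *c)
{
	mutex_lock(&release_lock);
	c->service_connected = false;
	destroy_handle(c->handle);
	c->handle = NULL;
	mutex_unlock(&release_lock);
}

static int client_send(struct client *c, void *msg)
{
	int ret = -ENETRESET;

	mutex_lock(&release_lock);	/* send and teardown serialize here */
	if (c->handle && c->service_connected)
		ret = do_send(c->handle, msg);
	mutex_unlock(&release_lock);
	return ret;
}
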
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index a1344f0780b0..6de73217bf86 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -375,23 +375,27 @@ long smcinvoke_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
nr_args = object_counts_num_buffers(req.counts) +
object_counts_num_objects(req.counts);
- if (!nr_args || req.argsize != sizeof(union smcinvoke_arg)) {
+ if (req.argsize != sizeof(union smcinvoke_arg)) {
ret = -EINVAL;
goto out;
}
- args_buf = kzalloc(nr_args * req.argsize, GFP_KERNEL);
- if (!args_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ if (nr_args) {
- ret = copy_from_user(args_buf, (void __user *)(req.args),
+ args_buf = kzalloc(nr_args * req.argsize, GFP_KERNEL);
+ if (!args_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = copy_from_user(args_buf,
+ (void __user *)(req.args),
nr_args * req.argsize);
- if (ret) {
- ret = -EFAULT;
- goto out;
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
}
inmsg_size = compute_in_msg_size(&req, args_buf);
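
The rework makes zero-argument invocations legal: the argsize sanity check stays unconditional, while the allocation and copy_from_user() run only for nr_args != 0, leaving args_buf NULL otherwise (so downstream code such as compute_in_msg_size() must tolerate a NULL buffer). A sketch of the shape; kcalloc() appears here as a hardening aside because it bounds the count * size multiplication (the union type and uptr are placeholders):

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

union smc_arg {			/* placeholder for the real arg union */
	u64 val;
};

static int fetch_args(void __user *uptr, size_t nr_args, size_t argsize,
		      void **out)
{
	void *args = NULL;

	if (argsize != sizeof(union smc_arg))
		return -EINVAL;

	if (nr_args) {
		args = kcalloc(nr_args, argsize, GFP_KERNEL); /* overflow-checked */
		if (!args)
			return -ENOMEM;
		if (copy_from_user(args, uptr, nr_args * argsize)) {
			kfree(args);
			return -EFAULT;
		}
	}

	*out = args;	/* NULL is valid when nr_args == 0 */
	return 0;
}
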
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index ae175e176aa3..1fa731776926 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -917,8 +917,24 @@ static void check_pbl_done(struct pil_tz_data *d)
err_value = __raw_readl(d->err_status);
pr_debug("PBL_DONE received from %s!\n", d->subsys_desc.name);
- if (err_value)
+ if (err_value) {
+ uint32_t rmb_err_spare0;
+ uint32_t rmb_err_spare1;
+ uint32_t rmb_err_spare2;
+
+ rmb_err_spare2 = __raw_readl(d->err_status_spare);
+ rmb_err_spare1 = __raw_readl(d->err_status_spare - 4);
+ rmb_err_spare0 = __raw_readl(d->err_status_spare - 8);
+
pr_err("PBL error status register: 0x%08x\n", err_value);
+
+ pr_err("PBL error status spare0 register: 0x%08x\n",
+ rmb_err_spare0);
+ pr_err("PBL error status spare1 register: 0x%08x\n",
+ rmb_err_spare1);
+ pr_err("PBL error status spare2 register: 0x%08x\n",
+ rmb_err_spare2);
+ }
__raw_writel(BIT(d->bits_arr[PBL_DONE]), d->irq_clear);
}
diff --git a/drivers/soc/qcom/system_stats.c b/drivers/soc/qcom/system_stats.c
index 476d2f6dca27..ba35928a991b 100644
--- a/drivers/soc/qcom/system_stats.c
+++ b/drivers/soc/qcom/system_stats.c
@@ -154,7 +154,7 @@ static int rpm_stats_write_buf(struct seq_file *m)
time = get_time_in_msec(time);
seq_printf(m, "\ttime in last mode(msec):%llu\n", time);
- time = arch_counter_get_cntpct() - rs.last_exited_at;
+ time = arch_counter_get_cntvct() - rs.last_exited_at;
time = get_time_in_sec(time);
seq_printf(m, "\ttime since last mode(sec):%llu\n", time);
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index aa20705b9adc..8f58eaa537b1 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -360,7 +360,7 @@ static void ping_other_cpus(struct msm_watchdog_data *wdog_dd)
cpumask_clear(&wdog_dd->alive_mask);
smp_mb();
for_each_cpu(cpu, cpu_online_mask) {
- if (!cpu_idle_pc_state[cpu])
+ if (!cpu_idle_pc_state[cpu] && !cpu_isolated(cpu))
smp_call_function_single(cpu, keep_alive_response,
wdog_dd, 1);
}
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index 310903b10a98..92cdadef715d 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -32,6 +32,7 @@
#define WDSP_EDGE "wdsp"
#define RESP_QUEUE_SIZE 3
#define QOS_PKT_SIZE 1024
+#define TIMEOUT_MS 1000
struct wdsp_glink_dev {
struct class *cls;
@@ -71,7 +72,19 @@ struct wdsp_glink_ch {
/* To free up the channel memory */
bool free_mem;
- /* Glink channel configuration */
+ /* Glink local channel open work */
+ struct work_struct lcl_ch_open_wrk;
+
+ /* Glink local channel close work */
+ struct work_struct lcl_ch_cls_wrk;
+
+ /* Wait for ch connect state before sending any command */
+ wait_queue_head_t ch_connect_wait;
+
+ /*
+ * Glink channel configuration. This has to be the last
+ * member of the structure as it has a variable size
+ */
struct wdsp_glink_ch_cfg ch_cfg;
};
@@ -89,12 +102,15 @@ struct wdsp_glink_priv {
struct mutex rsp_mutex;
/* Glink channel related */
+ struct mutex glink_mutex;
struct wdsp_glink_state glink_state;
struct wdsp_glink_ch **ch;
u8 no_of_channels;
struct work_struct ch_open_cls_wrk;
struct workqueue_struct *work_queue;
+ wait_queue_head_t link_state_wait;
+
struct device *dev;
};
@@ -214,6 +230,36 @@ done:
}
/*
+ * wdsp_glink_lcl_ch_open_wrk - Work function to reopen the channel
+ * when a local disconnect event occurs
+ * work: Work structure
+ */
+static void wdsp_glink_lcl_ch_open_wrk(struct work_struct *work)
+{
+ struct wdsp_glink_ch *ch;
+
+ ch = container_of(work, struct wdsp_glink_ch,
+ lcl_ch_open_wrk);
+
+ wdsp_glink_open_ch(ch);
+}
+
+/*
+ * wdsp_glink_lcl_ch_cls_wrk - Work function to close the channel locally
+ * when a remote disconnect event occurs
+ * work: Work structure
+ */
+static void wdsp_glink_lcl_ch_cls_wrk(struct work_struct *work)
+{
+ struct wdsp_glink_ch *ch;
+
+ ch = container_of(work, struct wdsp_glink_ch,
+ lcl_ch_cls_wrk);
+
+ wdsp_glink_close_ch(ch);
+}
+
+/*
* wdsp_glink_notify_state - Glink channel state information event callback
* handle: Opaque Channel handle returned by GLink
* priv: Private pointer to the channel
@@ -258,6 +304,7 @@ static void wdsp_glink_notify_state(void *handle, const void *priv,
__func__, ch->ch_cfg.latency_in_us,
ch->ch_cfg.name);
+ wake_up(&ch->ch_connect_wait);
mutex_unlock(&ch->mutex);
} else if (event == GLINK_LOCAL_DISCONNECTED) {
/*
@@ -271,6 +318,9 @@ static void wdsp_glink_notify_state(void *handle, const void *priv,
if (ch->free_mem) {
kfree(ch);
ch = NULL;
+ } else {
+ /* Open the glink channel again */
+ queue_work(wpriv->work_queue, &ch->lcl_ch_open_wrk);
}
} else if (event == GLINK_REMOTE_DISCONNECTED) {
dev_dbg(wpriv->dev, "%s: remote channel: %s disconnected remotely\n",
@@ -278,10 +328,10 @@ static void wdsp_glink_notify_state(void *handle, const void *priv,
mutex_unlock(&ch->mutex);
/*
* If remote disconnect happens, local side also has
- * to close the channel and reopen again as per glink
+ * to close the channel, as per the glink design, from a
+ * separate work queue.
*/
- if (!wdsp_glink_close_ch(ch))
- wdsp_glink_open_ch(ch);
+ queue_work(wpriv->work_queue, &ch->lcl_ch_cls_wrk);
}
}
@@ -294,16 +344,23 @@ static int wdsp_glink_close_ch(struct wdsp_glink_ch *ch)
struct wdsp_glink_priv *wpriv = ch->wpriv;
int ret = 0;
+ mutex_lock(&wpriv->glink_mutex);
+ if (ch->handle) {
+ ret = glink_close(ch->handle);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(wpriv->dev, "%s: glink_close is failed, ret = %d\n",
+ __func__, ret);
+ } else {
+ ch->handle = NULL;
+ dev_dbg(wpriv->dev, "%s: ch %s is closed\n", __func__,
+ ch->ch_cfg.name);
+ }
+ } else {
+ dev_dbg(wpriv->dev, "%s: ch %s is already closed\n", __func__,
+ ch->ch_cfg.name);
+ }
+ mutex_unlock(&wpriv->glink_mutex);
- mutex_lock(&ch->mutex);
-
- dev_dbg(wpriv->dev, "%s: ch %s closing\n", __func__, ch->ch_cfg.name);
- ret = glink_close(ch->handle);
- if (IS_ERR_VALUE(ret))
- dev_err(wpriv->dev, "%s: glink_close is failed, ret = %d\n",
- __func__, ret);
-
- mutex_unlock(&ch->mutex);
return ret;
}
@@ -318,29 +375,34 @@ static int wdsp_glink_open_ch(struct wdsp_glink_ch *ch)
struct glink_open_config open_cfg;
int ret = 0;
- memset(&open_cfg, 0, sizeof(open_cfg));
- open_cfg.options = GLINK_OPT_INITIAL_XPORT;
- open_cfg.edge = WDSP_EDGE;
- open_cfg.notify_rx = wdsp_glink_notify_rx;
- open_cfg.notify_tx_done = wdsp_glink_notify_tx_done;
- open_cfg.notify_state = wdsp_glink_notify_state;
- open_cfg.notify_rx_intent_req = wdsp_glink_notify_rx_intent_req;
- open_cfg.priv = ch;
- open_cfg.name = ch->ch_cfg.name;
-
- dev_dbg(wpriv->dev, "%s: ch->ch_cfg.name = %s, latency_in_us = %d, intents = %d\n",
- __func__, ch->ch_cfg.name, ch->ch_cfg.latency_in_us,
- ch->ch_cfg.no_of_intents);
-
- mutex_lock(&ch->mutex);
- ch->handle = glink_open(&open_cfg);
- if (IS_ERR_OR_NULL(ch->handle)) {
- dev_err(wpriv->dev, "%s: glink_open failed %s\n",
- __func__, ch->ch_cfg.name);
- ch->handle = NULL;
- ret = -EINVAL;
+ mutex_lock(&wpriv->glink_mutex);
+ if (!ch->handle) {
+ memset(&open_cfg, 0, sizeof(open_cfg));
+ open_cfg.options = GLINK_OPT_INITIAL_XPORT;
+ open_cfg.edge = WDSP_EDGE;
+ open_cfg.notify_rx = wdsp_glink_notify_rx;
+ open_cfg.notify_tx_done = wdsp_glink_notify_tx_done;
+ open_cfg.notify_state = wdsp_glink_notify_state;
+ open_cfg.notify_rx_intent_req = wdsp_glink_notify_rx_intent_req;
+ open_cfg.priv = ch;
+ open_cfg.name = ch->ch_cfg.name;
+
+ dev_dbg(wpriv->dev, "%s: ch->ch_cfg.name = %s, latency_in_us = %d, intents = %d\n",
+ __func__, ch->ch_cfg.name, ch->ch_cfg.latency_in_us,
+ ch->ch_cfg.no_of_intents);
+
+ ch->handle = glink_open(&open_cfg);
+ if (IS_ERR_OR_NULL(ch->handle)) {
+ dev_err(wpriv->dev, "%s: glink_open failed for ch %s\n",
+ __func__, ch->ch_cfg.name);
+ ch->handle = NULL;
+ ret = -EINVAL;
+ }
+ } else {
+ dev_err(wpriv->dev, "%s: ch %s is already opened\n", __func__,
+ ch->ch_cfg.name);
}
- mutex_unlock(&ch->mutex);
+ mutex_unlock(&wpriv->glink_mutex);
return ret;
}
@@ -354,7 +416,7 @@ static void wdsp_glink_close_all_ch(struct wdsp_glink_priv *wpriv)
int i;
for (i = 0; i < wpriv->no_of_channels; i++)
- if (wpriv->ch[i])
+ if (wpriv->ch && wpriv->ch[i])
wdsp_glink_close_ch(wpriv->ch[i]);
}
@@ -425,7 +487,12 @@ static void wdsp_glink_link_state_cb(struct glink_link_state_cb_info *cb_info,
}
wpriv = (struct wdsp_glink_priv *)priv;
+
+ mutex_lock(&wpriv->glink_mutex);
wpriv->glink_state.link_state = cb_info->link_state;
+ wake_up(&wpriv->link_state_wait);
+ mutex_unlock(&wpriv->glink_mutex);
+
queue_work(wpriv->work_queue, &wpriv->ch_open_cls_wrk);
}
@@ -477,6 +544,9 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
mutex_init(&ch[i]->mutex);
ch[i]->wpriv = wpriv;
+ INIT_WORK(&ch[i]->lcl_ch_open_wrk, wdsp_glink_lcl_ch_open_wrk);
+ INIT_WORK(&ch[i]->lcl_ch_cls_wrk, wdsp_glink_lcl_ch_cls_wrk);
+ init_waitqueue_head(&ch[i]->ch_connect_wait);
}
wpriv->ch = ch;
wpriv->no_of_channels = no_of_channels;
@@ -540,15 +610,26 @@ static void wdsp_glink_tx_buf_work(struct work_struct *work)
ret = glink_tx(ch->handle, tx_buf,
cpkt->payload, cpkt->payload_size,
GLINK_TX_REQ_INTENT);
- if (IS_ERR_VALUE(ret))
+ if (IS_ERR_VALUE(ret)) {
dev_err(wpriv->dev, "%s: glink tx failed, ret = %d\n",
__func__, ret);
+ /*
+ * If glink_tx() fails, free tx_buf here, as there
+ * won't be a tx_done notification to free the
+ * buffer.
+ */
+ kfree(tx_buf);
+ }
} else {
dev_err(wpriv->dev, "%s: channel %s is not in connected state\n",
__func__, ch->ch_cfg.name);
+ /*
+ * Free tx_buf here as there won't be a tx_done
+ * notification in this case either.
+ */
+ kfree(tx_buf);
}
mutex_unlock(&tx_buf->ch->mutex);
-
}
/*
@@ -678,7 +759,32 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
__func__, ret);
kfree(tx_buf);
break;
+ case WDSP_READY_PKT:
+ ret = wait_event_timeout(wpriv->link_state_wait,
+ (wpriv->glink_state.link_state ==
+ GLINK_LINK_STATE_UP),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(wpriv->dev, "%s: Link state wait timeout\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ goto free_buf;
+ }
+ ret = 0;
+ kfree(tx_buf);
+ break;
case WDSP_CMD_PKT:
+ mutex_lock(&wpriv->glink_mutex);
+ if (wpriv->glink_state.link_state == GLINK_LINK_STATE_DOWN) {
+ mutex_unlock(&wpriv->glink_mutex);
+ dev_err(wpriv->dev, "%s: Link state is Down\n",
+ __func__);
+
+ ret = -ENETRESET;
+ goto free_buf;
+ }
+ mutex_unlock(&wpriv->glink_mutex);
+
cpkt = (struct wdsp_cmd_pkt *)wpkt->payload;
dev_dbg(wpriv->dev, "%s: requested ch_name: %s\n", __func__,
cpkt->ch_name);
@@ -696,6 +802,20 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
ret = -EINVAL;
goto free_buf;
}
+
+ ret = wait_event_timeout(tx_buf->ch->ch_connect_wait,
+ (tx_buf->ch->channel_state ==
+ GLINK_CONNECTED),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(wpriv->dev, "%s: glink channel %s is not in connected state %d\n",
+ __func__, tx_buf->ch->ch_cfg.name,
+ tx_buf->ch->channel_state);
+ ret = -ETIMEDOUT;
+ goto free_buf;
+ }
+ ret = 0;
+
INIT_WORK(&tx_buf->tx_work, wdsp_glink_tx_buf_work);
queue_work(wpriv->work_queue, &tx_buf->tx_work);
break;
@@ -747,7 +867,9 @@ static int wdsp_glink_open(struct inode *inode, struct file *file)
}
init_completion(&wpriv->rsp_complete);
+ init_waitqueue_head(&wpriv->link_state_wait);
mutex_init(&wpriv->rsp_mutex);
+ mutex_init(&wpriv->glink_mutex);
file->private_data = wpriv;
goto done;
@@ -801,28 +923,39 @@ static int wdsp_glink_release(struct inode *inode, struct file *file)
goto done;
}
+ if (wpriv->glink_state.handle)
+ glink_unregister_link_state_cb(wpriv->glink_state.handle);
+
flush_workqueue(wpriv->work_queue);
+ destroy_workqueue(wpriv->work_queue);
+
/*
* Clean up glink channel memory in channel state
* callback only if close channels are called from here.
*/
if (wpriv->ch) {
- for (i = 0; i < wpriv->no_of_channels; i++)
- if (wpriv->ch[i])
+ for (i = 0; i < wpriv->no_of_channels; i++) {
+ if (wpriv->ch[i]) {
wpriv->ch[i]->free_mem = true;
+ /*
+ * A NULL channel handle means the channel is already
+ * closed; free the channel memory right here.
+ */
+ if (!wpriv->ch[i]->handle) {
+ kfree(wpriv->ch[i]);
+ wpriv->ch[i] = NULL;
+ } else {
+ wdsp_glink_close_ch(wpriv->ch[i]);
+ }
+ }
+ }
- wdsp_glink_close_all_ch(wpriv);
kfree(wpriv->ch);
wpriv->ch = NULL;
}
- if (wpriv->glink_state.handle)
- glink_unregister_link_state_cb(wpriv->glink_state.handle);
-
+ mutex_destroy(&wpriv->glink_mutex);
mutex_destroy(&wpriv->rsp_mutex);
- if (wpriv->work_queue)
- destroy_workqueue(wpriv->work_queue);
-
kfree(wpriv);
file->private_data = NULL;
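
Taken together, the wcd-dsp-glink changes serialize open/close behind glink_mutex with the handle itself as the state (open only when NULL, close only when set), move reconnects out of callback context into dedicated work items, free tx_buf on every path that will never see a tx_done, and gate writers on two wait queues: link_state_wait for GLINK_LINK_STATE_UP and ch_connect_wait for GLINK_CONNECTED. A sketch of the gating pattern, which relies on wait_event_timeout() returning 0 on timeout and the remaining jiffies otherwise (the struct is illustrative):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define CONNECT_TIMEOUT_MS 1000

struct chan {
	wait_queue_head_t connect_wait;
	int state;		/* updated elsewhere, then wake_up() is called */
};

static int wait_for_connected(struct chan *c, int connected_state)
{
	long left;

	left = wait_event_timeout(c->connect_wait,
				  c->state == connected_state,
				  msecs_to_jiffies(CONNECT_TIMEOUT_MS));
	if (!left)
		return -ETIMEDOUT;	/* condition never became true */
	return 0;
}
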
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a8c8e120c348..e0af922a0329 100755
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1201,7 +1201,7 @@ static void ion_vm_open(struct vm_area_struct *vma)
mutex_lock(&buffer->lock);
list_add(&vma_list->list, &buffer->vmas);
mutex_unlock(&buffer->lock);
- pr_debug("%s: adding %p\n", __func__, vma);
+ pr_debug("%s: adding %pK\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
@@ -1216,7 +1216,7 @@ static void ion_vm_close(struct vm_area_struct *vma)
continue;
list_del(&vma_list->list);
kfree(vma_list);
- pr_debug("%s: deleting %p\n", __func__, vma);
+ pr_debug("%s: deleting %pK\n", __func__, vma);
break;
}
mutex_unlock(&buffer->lock);
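
The %p to %pK conversions here and in the ion heap diffs below are an information-leak fix rather than a behavior change: %pK routes the pointer through the kptr_restrict sysctl, so depending on that setting the logged value is zeroed instead of exposing a real kernel address. Drop-in usage:

/* With kernel.kptr_restrict >= 1, an unprivileged reader of the log
 * sees a zeroed value here instead of the true kernel address.
 */
pr_debug("%s: adding %pK\n", __func__, vma);
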
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index aaea7bed36e1..b2e1a4c1b170 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -94,7 +94,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
/* keep this for memory release */
buffer->priv_virt = info;
- dev_dbg(dev, "Allocate buffer %p\n", buffer);
+ dev_dbg(dev, "Allocate buffer %pK\n", buffer);
return 0;
err:
@@ -107,7 +107,7 @@ static void ion_cma_free(struct ion_buffer *buffer)
struct device *dev = buffer->heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(dev, "Release buffer %p\n", buffer);
+ dev_dbg(dev, "Release buffer %pK\n", buffer);
/* release memory */
dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
sg_free_table(info->table);
@@ -123,7 +123,7 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
struct device *dev = heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
+ dev_dbg(dev, "Return buffer %pK physical address %pa\n", buffer,
&info->handle);
*addr = info->handle;
diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c
index d945b9251437..90ae7eb65b65 100644
--- a/drivers/staging/android/ion/ion_cma_secure_heap.c
+++ b/drivers/staging/android/ion/ion_cma_secure_heap.c
@@ -3,7 +3,7 @@
*
* Copyright (C) Linaro 2012
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -501,7 +501,7 @@ retry:
/* keep this for memory release */
buffer->priv_virt = info;
- dev_dbg(sheap->dev, "Allocate buffer %p\n", buffer);
+ dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
return info;
err:
@@ -634,7 +634,7 @@ retry:
sg = sg_next(sg);
}
buffer->priv_virt = info;
- dev_dbg(sheap->dev, "Allocate buffer %p\n", buffer);
+ dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
return info;
err2:
@@ -721,7 +721,7 @@ static void ion_secure_cma_free(struct ion_buffer *buffer)
struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
int ret = 0;
- dev_dbg(sheap->dev, "Release buffer %p\n", buffer);
+ dev_dbg(sheap->dev, "Release buffer %pK\n", buffer);
if (msm_secure_v2_is_supported())
ret = msm_unsecure_table(info->table);
atomic_sub(buffer->size, &sheap->total_allocated);
@@ -743,8 +743,8 @@ static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
container_of(heap, struct ion_cma_secure_heap, heap);
struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(sheap->dev, "Return buffer %p physical address 0x%pa\n", buffer,
- &info->phys);
+ dev_dbg(sheap->dev, "Return buffer %pK physical address 0x%pa\n",
+ buffer, &info->phys);
*addr = info->phys;
*len = buffer->size;
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index fd4d45ad8db2..03b2b8a38991 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -204,11 +204,16 @@ static struct page *split_page_from_secure_pool(struct ion_system_heap *heap,
split_page(page, order);
break;
}
- /* Return the remaining order-0 pages to the pool */
- if (page)
- for (j = 1; j < (1 << order); j++)
+ /*
+ * Return the remaining order-0 pages to the pool.
+ * SetPagePrivate() marks each page as secure.
+ */
+ if (page) {
+ for (j = 1; j < (1 << order); j++) {
+ SetPagePrivate(page + j);
free_buffer_page(heap, buffer, page + j, 0);
-
+ }
+ }
got_page:
mutex_unlock(&heap->split_page_mutex);
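
When a higher-order page is split out of the secure pool, the leftover order-0 pages go back into the pool; the fix tags each of them with PagePrivate first so they remain identifiable as secure. The flag is a plain page-flags round trip (both helpers below are hypothetical):

#include <linux/page-flags.h>

void pool_add(struct page *page);	/* hypothetical pool insert */
void handle_secure(struct page *page);	/* hypothetical secure handling */

static void pool_return_secure(struct page *page)
{
	SetPagePrivate(page);		/* producer: mark before pooling */
	pool_add(page);
}

static void pool_take(struct page *page)
{
	if (PagePrivate(page)) {	/* consumer: test the mark */
		handle_secure(page);
		ClearPagePrivate(page);	/* clear once handled */
	}
}
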
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index cd420c429031..176f22ba570c 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -632,7 +632,8 @@ bool is_secure_vmid_valid(int vmid)
vmid == VMID_CP_NON_PIXEL ||
vmid == VMID_CP_CAMERA ||
vmid == VMID_CP_SEC_DISPLAY ||
- vmid == VMID_CP_APP);
+ vmid == VMID_CP_APP ||
+ vmid == VMID_CP_CAMERA_PREVIEW);
}
int get_secure_vmid(unsigned long flags)
@@ -651,6 +652,8 @@ int get_secure_vmid(unsigned long flags)
return VMID_CP_SEC_DISPLAY;
if (flags & ION_FLAG_CP_APP)
return VMID_CP_APP;
+ if (flags & ION_FLAG_CP_CAMERA_PREVIEW)
+ return VMID_CP_CAMERA_PREVIEW;
return -EINVAL;
}
/* fix up the cases where the ioctl direction bits are incorrect */
@@ -708,7 +711,7 @@ long msm_ion_custom_ioctl(struct ion_client *client,
} else {
handle = ion_import_dma_buf(client, data.flush_data.fd);
if (IS_ERR(handle)) {
- pr_info("%s: Could not import handle: %p\n",
+ pr_info("%s: Could not import handle: %pK\n",
__func__, handle);
return -EINVAL;
}
@@ -721,8 +724,8 @@ long msm_ion_custom_ioctl(struct ion_client *client,
+ data.flush_data.length;
if (start && check_vaddr_bounds(start, end)) {
- pr_err("%s: virtual address %p is out of bounds\n",
- __func__, data.flush_data.vaddr);
+ pr_err("%s: virtual address %pK is out of bounds\n",
+ __func__, data.flush_data.vaddr);
ret = -EINVAL;
} else {
ret = ion_do_cache_op(
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index b3c29826834e..54a4672ff143 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -3,6 +3,8 @@
#include "ion.h"
+#define ION_BIT(nr) (1UL << (nr))
+
enum msm_ion_heap_types {
ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START,
@@ -76,39 +78,41 @@ enum cp_mem_usage {
* Flags to be used when allocating from the secure heap for
* content protection
*/
-#define ION_FLAG_CP_TOUCH (1 << 17)
-#define ION_FLAG_CP_BITSTREAM (1 << 18)
-#define ION_FLAG_CP_PIXEL (1 << 19)
-#define ION_FLAG_CP_NON_PIXEL (1 << 20)
-#define ION_FLAG_CP_CAMERA (1 << 21)
-#define ION_FLAG_CP_HLOS (1 << 22)
-#define ION_FLAG_CP_HLOS_FREE (1 << 23)
-#define ION_FLAG_CP_SEC_DISPLAY (1 << 25)
-#define ION_FLAG_CP_APP (1 << 26)
+#define ION_FLAG_CP_TOUCH ION_BIT(17)
+#define ION_FLAG_CP_BITSTREAM ION_BIT(18)
+#define ION_FLAG_CP_PIXEL ION_BIT(19)
+#define ION_FLAG_CP_NON_PIXEL ION_BIT(20)
+#define ION_FLAG_CP_CAMERA ION_BIT(21)
+#define ION_FLAG_CP_HLOS ION_BIT(22)
+#define ION_FLAG_CP_HLOS_FREE ION_BIT(23)
+#define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25)
+#define ION_FLAG_CP_APP ION_BIT(26)
+#define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27)
+
/**
* Flag to allow non-contiguous allocation of memory from secure
* heap
*/
-#define ION_FLAG_ALLOW_NON_CONTIG (1 << 24)
+#define ION_FLAG_ALLOW_NON_CONTIG ION_BIT(24)
/**
* Flag to use when allocating to indicate that a heap is secure.
*/
-#define ION_FLAG_SECURE (1 << ION_HEAP_ID_RESERVED)
+#define ION_FLAG_SECURE ION_BIT(ION_HEAP_ID_RESERVED)
/**
* Flag for clients to force contiguous memory allocation
*
* Use of this flag is carefully monitored!
*/
-#define ION_FLAG_FORCE_CONTIGUOUS (1 << 30)
+#define ION_FLAG_FORCE_CONTIGUOUS ION_BIT(30)
/*
* Used in conjunction with heaps that pool memory, to force an allocation
* to come directly from the page allocator instead of from the pool
*/
-#define ION_FLAG_POOL_FORCE_ALLOC (1 << 16)
+#define ION_FLAG_POOL_FORCE_ALLOC ION_BIT(16)
/**
* Deprecated! Please use the corresponding ION_FLAG_*
@@ -119,7 +123,7 @@ enum cp_mem_usage {
/**
* Macro should be used with ion_heap_ids defined above.
*/
-#define ION_HEAP(bit) (1 << (bit))
+#define ION_HEAP(bit) ION_BIT(bit)
#define ION_ADSP_HEAP_NAME "adsp"
#define ION_SYSTEM_HEAP_NAME "system"
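
ION_BIT() turns the flag constants into unsigned long shifts. The likely motivation is the high bits: (1 << 31) shifts into the sign bit of a signed int, which is undefined behavior in C and sign-extends when the value widens to a 64-bit flags word, while (1UL << 31) is well-defined. A compilable userspace illustration for an LP64 target:

#include <stdio.h>

#define BAD_BIT(nr)	(1 << (nr))	/* int: sign trouble at nr == 31 */
#define ION_BIT(nr)	(1UL << (nr))	/* unsigned long: well-defined */

int main(void)
{
	unsigned long bad  = BAD_BIT(31);	/* 0xffffffff80000000 on LP64 */
	unsigned long good = ION_BIT(31);	/* 0x0000000080000000 */

	printf("bad=%#lx good=%#lx\n", bad, good);
	return 0;
}
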
diff --git a/drivers/thermal/lmh_lite.c b/drivers/thermal/lmh_lite.c
index bd456d25b124..32a573d22270 100644
--- a/drivers/thermal/lmh_lite.c
+++ b/drivers/thermal/lmh_lite.c
@@ -640,7 +640,7 @@ sens_exit:
static int lmh_get_sensor_list(void)
{
- int ret = 0;
+ int ret = 0, buf_size = 0;
uint32_t size = 0, next = 0, idx = 0, count = 0;
struct scm_desc desc_arg;
struct lmh_sensor_packet *payload = NULL;
@@ -649,12 +649,13 @@ static int lmh_get_sensor_list(void)
uint32_t size;
} cmd_buf;
- payload = kzalloc(sizeof(*payload), GFP_KERNEL);
+ buf_size = PAGE_ALIGN(sizeof(*payload));
+ payload = kzalloc(buf_size, GFP_KERNEL);
if (!payload)
return -ENOMEM;
do {
- memset(payload, 0, sizeof(*payload));
+ memset(payload, 0, buf_size);
payload->count = next;
cmd_buf.addr = SCM_BUFFER_PHYS(payload);
/* payload_phys may be a physical address > 4 GB */
@@ -663,7 +664,7 @@ static int lmh_get_sensor_list(void)
lmh_sensor_packet);
desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
trace_lmh_event_call("GET_SENSORS enter");
- dmac_flush_range(payload, payload + sizeof(*payload));
+ dmac_flush_range(payload, payload + buf_size);
if (!is_scm_armv8())
ret = scm_call(SCM_SVC_LMH, LMH_GET_SENSORS,
(void *) &cmd_buf,
@@ -881,7 +882,8 @@ static int lmh_debug_read(struct lmh_debug_ops *ops, uint32_t **buf)
if (curr_size != size) {
if (payload)
devm_kfree(lmh_data->dev, payload);
- payload = devm_kzalloc(lmh_data->dev, size, GFP_KERNEL);
+ payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size),
+ GFP_KERNEL);
if (!payload) {
pr_err("payload buffer alloc failed\n");
ret = -ENOMEM;
@@ -948,7 +950,8 @@ static int lmh_debug_config_write(uint32_t cmd_id, uint32_t *buf, int size)
trace_lmh_debug_data("Config LMH", buf, size);
size_bytes = (size - 3) * sizeof(uint32_t);
- payload = devm_kzalloc(lmh_data->dev, size_bytes, GFP_KERNEL);
+ payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size_bytes),
+ GFP_KERNEL);
if (!payload) {
ret = -ENOMEM;
goto set_cfg_exit;
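
Every SCM payload in lmh_lite.c is now sized with PAGE_ALIGN(). Buffers handed to the secure world by physical address are shared at page granularity, so rounding the allocation up to whole pages, and flushing that same span, presumably keeps unrelated kernel data out of the region the secure side can touch. The shape of the pattern (ARM-specific dmac_flush_range() as used in the hunk; the wrapper itself is illustrative):

#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static void *alloc_scm_buf(size_t payload_size, size_t *out_size)
{
	size_t buf_size = PAGE_ALIGN(payload_size);	/* whole pages only */
	void *buf = kzalloc(buf_size, GFP_KERNEL);

	if (!buf)
		return NULL;

	dmac_flush_range(buf, buf + buf_size);	/* flush the full shared span */
	*out_size = buf_size;
	return buf;
}
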
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 97ab02dfc753..df3a638510c2 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -933,6 +933,9 @@ static struct of_device_id tsens_match[] = {
{ .compatible = "qcom,msmfalcon-tsens",
.data = (void *)TSENS_CALIB_FUSE_MAP_NONE,
},
+ { .compatible = "qcom,msmtriton-tsens",
+ .data = (void *)TSENS_CALIB_FUSE_MAP_NONE,
+ },
{}
};
@@ -1066,68 +1069,68 @@ static int tsens_get_sw_id_mapping_for_controller(
return 0;
}
-int tsens_get_hw_id_mapping(int sensor_sw_id, int *sensor_client_id)
+int tsens_get_hw_id_mapping(int thermal_sensor_num, int *sensor_client_id)
{
- int i = 0;
- bool id_found = false;
struct tsens_tm_device *tmdev = NULL;
struct device_node *of_node = NULL;
const struct of_device_id *id;
+ uint32_t tsens_max_sensors = 0, idx = 0, i = 0;
- tmdev = get_tsens_controller_for_client_id(sensor_sw_id);
- if (tmdev == NULL) {
- pr_debug("TSENS early init not done\n");
+ if (list_empty(&tsens_device_list)) {
+ pr_err("%s: TSENS controller not available\n", __func__);
return -EPROBE_DEFER;
}
- of_node = tmdev->pdev->dev.of_node;
- if (of_node == NULL) {
- pr_err("Invalid of_node??\n");
+ list_for_each_entry(tmdev, &tsens_device_list, list)
+ tsens_max_sensors += tmdev->tsens_num_sensor;
+
+ if (tsens_max_sensors != thermal_sensor_num) {
+ pr_err("TSENS total sensors is %d, thermal expects:%d\n",
+ tsens_max_sensors, thermal_sensor_num);
return -EINVAL;
}
- if (!of_match_node(tsens_match, of_node)) {
- pr_err("Need to read SoC specific fuse map\n");
- return -ENODEV;
- }
+ list_for_each_entry(tmdev, &tsens_device_list, list) {
+ of_node = tmdev->pdev->dev.of_node;
+ if (of_node == NULL) {
+ pr_err("Invalid of_node??\n");
+ return -EINVAL;
+ }
- id = of_match_node(tsens_match, of_node);
- if (id == NULL) {
- pr_err("can not find tsens_match of_node\n");
- return -ENODEV;
- }
+ if (!of_match_node(tsens_match, of_node)) {
+ pr_err("Need to read SoC specific fuse map\n");
+ return -ENODEV;
+ }
- if (!strcmp(id->compatible, "qcom,msm8996-tsens") ||
- (!strcmp(id->compatible, "qcom,msmcobalt-tsens")) ||
- (!strcmp(id->compatible, "qcom,msmhamster-tsens"))) {
- /* Assign a client id which will be used to get the
- * controller and hw_sensor details
- */
- while (i < tmdev->tsens_num_sensor && !id_found) {
- if (sensor_sw_id == tmdev->sensor[i].sensor_client_id) {
- *sensor_client_id =
+ id = of_match_node(tsens_match, of_node);
+ if (id == NULL) {
+ pr_err("can not find tsens_match of_node\n");
+ return -ENODEV;
+ }
+
+ if (!strcmp(id->compatible, "qcom,msm8996-tsens") ||
+ (!strcmp(id->compatible, "qcom,msmcobalt-tsens")) ||
+ (!strcmp(id->compatible, "qcom,msmhamster-tsens"))) {
+ /* Assign the client ids used to get the
+ * controller and hw_sensor details
+ */
+ for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+ sensor_client_id[idx] =
tmdev->sensor[i].sensor_client_id;
- id_found = true;
+ idx++;
}
- i++;
- }
- } else {
- /* Assign the corresponding hw sensor number which is done
- * prior to support for multiple controllres
- */
- while (i < tmdev->tsens_num_sensor && !id_found) {
- if (sensor_sw_id == tmdev->sensor[i].sensor_client_id) {
- *sensor_client_id =
+ } else {
+ /* Assign the corresponding hw sensor number
+ * as was done prior to multiple-controller support
+ */
+ for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+ sensor_client_id[idx] =
tmdev->sensor[i].sensor_hw_num;
- id_found = true;
+ idx++;
}
- i++;
}
}
- if (!id_found)
- return -EINVAL;
-
return 0;
}
EXPORT_SYMBOL(tsens_get_hw_id_mapping);
@@ -1383,7 +1386,7 @@ static int msm_tsens_get_temp(int sensor_client_id, int *temp)
}
if ((!tmdev->prev_reading_avail) && !tmdev->tsens_valid_status_check) {
- while (!((readl_relaxed(trdy_addr)) & TSENS_TRDY_MASK))
+ while (!((readl_relaxed_no_log(trdy_addr)) & TSENS_TRDY_MASK))
usleep_range(TSENS_TRDY_RDY_MIN_TIME,
TSENS_TRDY_RDY_MAX_TIME);
tmdev->prev_reading_avail = true;
@@ -1394,7 +1397,7 @@ static int msm_tsens_get_temp(int sensor_client_id, int *temp)
else
last_temp_mask = TSENS_SN_STATUS_TEMP_MASK;
- code = readl_relaxed(sensor_addr +
+ code = readl_relaxed_no_log(sensor_addr +
(sensor_hw_num << TSENS_STATUS_ADDR_OFFSET));
last_temp = code & last_temp_mask;
@@ -1406,14 +1409,14 @@ static int msm_tsens_get_temp(int sensor_client_id, int *temp)
if (code & valid_status_mask)
last_temp_valid = true;
else {
- code = readl_relaxed(sensor_addr +
+ code = readl_relaxed_no_log(sensor_addr +
(sensor_hw_num << TSENS_STATUS_ADDR_OFFSET));
last_temp2 = code & last_temp_mask;
if (code & valid_status_mask) {
last_temp = last_temp2;
last_temp2_valid = true;
} else {
- code = readl_relaxed(sensor_addr +
+ code = readl_relaxed_no_log(sensor_addr +
(sensor_hw_num <<
TSENS_STATUS_ADDR_OFFSET));
last_temp3 = code & last_temp_mask;
@@ -5435,7 +5438,8 @@ static int get_device_tree_data(struct platform_device *pdev,
tmdev->tsens_type = TSENS_TYPE3;
else if (!strcmp(id->compatible, "qcom,msmtitanium-tsens") ||
(!strcmp(id->compatible, "qcom,msmfalcon-tsens") ||
- (!strcmp(id->compatible, "qcom,msmhamster-tsens")))) {
+ (!strcmp(id->compatible, "qcom,msmtriton-tsens") ||
+ (!strcmp(id->compatible, "qcom,msmhamster-tsens"))))) {
tmdev->tsens_type = TSENS_TYPE3;
tsens_poll_check = 0;
} else if (!strcmp(id->compatible, "qcom,msm8952-tsens") ||
@@ -5457,7 +5461,8 @@ static int get_device_tree_data(struct platform_device *pdev,
(!strcmp(id->compatible, "qcom,msmtitanium-tsens")) ||
(!strcmp(id->compatible, "qcom,msmcobalt-tsens")) ||
(!strcmp(id->compatible, "qcom,msmfalcon-tsens") ||
- (!strcmp(id->compatible, "qcom,msmhamster-tsens"))))
+ (!strcmp(id->compatible, "qcom,msmtriton-tsens") ||
+ (!strcmp(id->compatible, "qcom,msmhamster-tsens")))))
tmdev->tsens_valid_status_check = true;
}
@@ -5473,7 +5478,8 @@ static int get_device_tree_data(struct platform_device *pdev,
(!strcmp(id->compatible, "qcom,msmcobalt-tsens")) ||
(!strcmp(id->compatible, "qcom,msmhamster-tsens")) ||
(!strcmp(id->compatible, "qcom,msmfalcon-tsens") ||
- (!strcmp(id->compatible, "qcom,msmtitanium-tsens")))) {
+ (!strcmp(id->compatible, "qcom,msmtriton-tsens") ||
+ (!strcmp(id->compatible, "qcom,msmtitanium-tsens"))))) {
tmdev->tsens_critical_irq =
platform_get_irq_byname(pdev,
"tsens-critical");
@@ -5700,8 +5706,6 @@ static void tsens_debugfs_init(void)
}
}
-int tsens_sensor_sw_idx = 0;
-
static int tsens_thermal_zone_register(struct tsens_tm_device *tmdev)
{
int rc = 0, i = 0;
@@ -5715,7 +5719,7 @@ static int tsens_thermal_zone_register(struct tsens_tm_device *tmdev)
char name[18];
snprintf(name, sizeof(name), "tsens_tz_sensor%d",
- tsens_sensor_sw_idx);
+ tmdev->sensor[i].sensor_client_id);
tmdev->sensor[i].mode = THERMAL_DEVICE_ENABLED;
tmdev->sensor[i].tm = tmdev;
if (tmdev->tsens_type == TSENS_TYPE3) {
@@ -5741,7 +5745,6 @@ static int tsens_thermal_zone_register(struct tsens_tm_device *tmdev)
goto fail;
}
}
- tsens_sensor_sw_idx++;
}
if (tmdev->tsens_type == TSENS_TYPE3) {
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index ff40d6fad922..17ecd61e9ee6 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -2460,7 +2460,6 @@ fail:
static int create_sensor_id_map(struct device *dev)
{
- int i = 0;
int ret = 0;
tsens_id_map = devm_kzalloc(dev,
@@ -2471,19 +2470,10 @@ static int create_sensor_id_map(struct device *dev)
return -ENOMEM;
}
- for (i = 0; i < max_tsens_num; i++) {
- ret = tsens_get_hw_id_mapping(i, &tsens_id_map[i]);
- /* If return -ENXIO, hw_id is default in sequence */
- if (ret) {
- if (ret == -ENXIO) {
- tsens_id_map[i] = i;
- ret = 0;
- } else {
- pr_err("Failed to get hw id for id:%d.err:%d\n",
- i, ret);
- goto fail;
- }
- }
+ ret = tsens_get_hw_id_mapping(max_tsens_num, tsens_id_map);
+ if (ret) {
+ pr_err("Failed to get tsens id's:%d\n", ret);
+ goto fail;
}
return ret;
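
The tsens_get_hw_id_mapping() contract changes from "translate one sw id" to "validate the expected total across every registered controller and fill a caller-provided array", which is why the per-sensor loop in msm_thermal collapses into one call. The new calling convention as the hunk above uses it (sketched; error values per the tsens change):

#include <linux/device.h>
#include <linux/slab.h>

int tsens_get_hw_id_mapping(int thermal_sensor_num, int *sensor_client_id);

static int build_sensor_map(struct device *dev, int max_tsens_num, int **out)
{
	int *map;
	int ret;

	map = devm_kzalloc(dev, max_tsens_num * sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	/* Fills map[0..max_tsens_num-1]; -EINVAL on a count mismatch,
	 * -EPROBE_DEFER if no TSENS controller has probed yet.
	 */
	ret = tsens_get_hw_id_mapping(max_tsens_num, map);
	if (ret)
		return ret;

	*out = map;
	return 0;
}
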
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 84ab45fde4ae..c78406cb3325 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -492,7 +492,7 @@ static int32_t qpnp_adc_tm_rc_check_channel_en(struct qpnp_adc_tm_chip *chip)
{
u8 adc_tm_ctl = 0, status_low = 0, status_high = 0;
int rc = 0, i = 0;
- bool ldo_en;
+ bool ldo_en = false;
for (i = 0; i < chip->max_channels_available; i++) {
rc = qpnp_adc_tm_read_reg(chip, QPNP_BTM_Mn_CTL(i),
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 3df80c73b74a..ac0eb0939ecf 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2990,6 +2990,9 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->wakeup_work);
#endif
+ /* handle any pending hub events before XHCI stops */
+ usb_flush_hub_wq();
+
mutex_lock(&usb_bus_list_lock);
usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_list_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 84df093639ac..269c1ee2da44 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -610,6 +610,12 @@ void usb_kick_hub_wq(struct usb_device *hdev)
kick_hub_wq(hub);
}
+void usb_flush_hub_wq(void)
+{
+ flush_workqueue(hub_wq);
+}
+EXPORT_SYMBOL(usb_flush_hub_wq);
+
/*
* Let the USB core know that a USB 3.0 device has sent a Function Wake Device
* Notification, which indicates it had initiated remote wakeup.
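
usb_remove_hcd() now drains hub_wq through the new usb_flush_hub_wq() export before disconnecting the root hub: flush_workqueue() blocks until every work item already queued has finished, so no pending hub event can run against a half-removed controller. The ordering idea in isolation (an illustrative remove path, not the USB core's code):

#include <linux/workqueue.h>

struct ctrl;					/* illustrative controller */
void disconnect_children(struct ctrl *c);	/* hypothetical teardown */

static struct workqueue_struct *events_wq;	/* illustrative queue */

static void controller_remove(struct ctrl *c)
{
	flush_workqueue(events_wq);	/* 1. run every already-queued event */
	disconnect_children(c);		/* 2. nothing can race the removal now */
}
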
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 9fb05bbf3e74..07867ead2413 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -123,6 +123,9 @@ static int dwc3_init_usb_phys(struct dwc3 *dwc)
return ret;
}
+ if (dwc->maximum_speed == USB_SPEED_HIGH)
+ goto generic_phy_init;
+
ret = usb_phy_init(dwc->usb3_phy);
if (ret == -EBUSY) {
/*
@@ -135,6 +138,8 @@ static int dwc3_init_usb_phys(struct dwc3 *dwc)
__func__, ret);
return ret;
}
+
+generic_phy_init:
ret = phy_init(dwc->usb2_generic_phy);
if (ret < 0)
return ret;
@@ -159,7 +164,9 @@ static int dwc3_core_reset(struct dwc3 *dwc)
/* Reset PHYs */
usb_phy_reset(dwc->usb2_phy);
- usb_phy_reset(dwc->usb3_phy);
+
+ if (dwc->maximum_speed == USB_SPEED_SUPER)
+ usb_phy_reset(dwc->usb3_phy);
/* Initialize PHYs */
ret = dwc3_init_usb_phys(dwc);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 83a265f0211e..08006d84fb38 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1249,6 +1249,7 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
struct usb_gsi_request *request;
struct gsi_channel_info *ch_info;
bool block_db, f_suspend;
+ unsigned long flags;
switch (op) {
case GSI_EP_OP_PREPARE_TRBS:
@@ -1263,11 +1264,15 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
case GSI_EP_OP_CONFIG:
request = (struct usb_gsi_request *)op_data;
dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
+ spin_lock_irqsave(&dwc->lock, flags);
gsi_configure_ep(ep, request);
+ spin_unlock_irqrestore(&dwc->lock, flags);
break;
case GSI_EP_OP_STARTXFER:
dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
+ spin_lock_irqsave(&dwc->lock, flags);
ret = gsi_startxfer_for_ep(ep);
+ spin_unlock_irqrestore(&dwc->lock, flags);
break;
case GSI_EP_OP_GET_XFER_IDX:
dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
@@ -1293,12 +1298,16 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
case GSI_EP_OP_UPDATEXFER:
request = (struct usb_gsi_request *)op_data;
dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
+ spin_lock_irqsave(&dwc->lock, flags);
ret = gsi_updatexfer_for_ep(ep, request);
+ spin_unlock_irqrestore(&dwc->lock, flags);
break;
case GSI_EP_OP_ENDXFER:
request = (struct usb_gsi_request *)op_data;
dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
+ spin_lock_irqsave(&dwc->lock, flags);
gsi_endxfer_for_ep(ep);
+ spin_unlock_irqrestore(&dwc->lock, flags);
break;
case GSI_EP_OP_SET_CLR_BLOCK_DBL:
block_db = *((bool *)op_data);
@@ -1923,7 +1932,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
usb_phy_set_suspend(mdwc->hs_phy, 1);
/* Suspend SS PHY */
- if (can_suspend_ssphy) {
+ if (dwc->maximum_speed == USB_SPEED_SUPER && can_suspend_ssphy) {
/* indicate phy about SS mode */
if (dwc3_msm_is_superspeed(mdwc))
mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
@@ -2068,7 +2077,8 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
clk_prepare_enable(mdwc->bus_aggr_clk);
/* Resume SS PHY */
- if (mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
+ if (dwc->maximum_speed == USB_SPEED_SUPER &&
+ mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
if (mdwc->typec_orientation == ORIENTATION_CC1)
mdwc->ss_phy->flags |= PHY_LANE_A;
@@ -2365,36 +2375,21 @@ static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
(u32 *)&mdwc->core_clk_rate)) {
mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
mdwc->core_clk_rate);
- } else {
- /*
- * Get Max supported clk frequency for USB Core CLK and request
- * to set the same.
- */
- mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
}
+ dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
+ mdwc->core_clk_rate);
+ ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
+ if (ret)
+ dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
+
+
mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
if (IS_ERR(mdwc->core_reset)) {
dev_err(mdwc->dev, "failed to get core_reset\n");
return PTR_ERR(mdwc->core_reset);
}
- /*
- * Get Max supported clk frequency for USB Core CLK and request
- * to set the same.
- */
- mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
- if (IS_ERR_VALUE(mdwc->core_clk_rate)) {
- dev_err(mdwc->dev, "fail to get core clk max freq.\n");
- } else {
- dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
- mdwc->core_clk_rate);
- ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
- if (ret)
- dev_err(mdwc->dev, "fail to set core_clk freq:%d\n",
- ret);
- }
-
mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
if (IS_ERR(mdwc->sleep_clk)) {
dev_err(mdwc->dev, "failed to get sleep_clk\n");
@@ -2437,9 +2432,11 @@ static int dwc3_msm_id_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
struct extcon_dev *edev = ptr;
enum dwc3_id_state id;
int cc_state;
+ int speed;
if (!edev) {
dev_err(mdwc->dev, "%s: edev null\n", __func__);
@@ -2459,6 +2456,9 @@ static int dwc3_msm_id_notifier(struct notifier_block *nb,
dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
+ speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
+ dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
+
if (mdwc->id_state != id) {
mdwc->id_state = id;
dbg_event(0xFF, "id_state", mdwc->id_state);
@@ -2476,6 +2476,7 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
struct extcon_dev *edev = ptr;
int cc_state;
+ int speed;
if (!edev) {
dev_err(mdwc->dev, "%s: edev null\n", __func__);
@@ -2496,6 +2497,9 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
+ speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
+ dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
+
mdwc->vbus_active = event;
if (dwc->is_drd && !mdwc->in_restart) {
dbg_event(0xFF, "Q RW (vbus)", mdwc->vbus_active);
@@ -3041,11 +3045,13 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
if (on) {
dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
+ mdwc->hs_phy->flags |= PHY_HOST_MODE;
+ if (dwc->maximum_speed == USB_SPEED_SUPER)
+ mdwc->ss_phy->flags |= PHY_HOST_MODE;
+
pm_runtime_get_sync(mdwc->dev);
dbg_event(0xFF, "StrtHost gync",
atomic_read(&mdwc->dev->power.usage_count));
- mdwc->hs_phy->flags |= PHY_HOST_MODE;
- mdwc->ss_phy->flags |= PHY_HOST_MODE;
usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
if (!IS_ERR(mdwc->vbus_reg))
ret = regulator_enable(mdwc->vbus_reg);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4ad994972b19..805c5e1931e1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -421,7 +421,16 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
if (dep->endpoint.ep_type == EP_TYPE_GSI)
return;
- if (dep->trb_pool && dep->trb_pool_dma) {
+ /*
+ * Clean up ep ring to avoid getting xferInProgress due to stale trbs
+ * with HWO bit set from previous composition when update transfer cmd
+ * is issued.
+ */
+ if (dep->number > 1 && dep->trb_pool && dep->trb_pool_dma) {
+ memset(&dep->trb_pool[0], 0,
+ sizeof(struct dwc3_trb) * dep->num_trbs);
+ dbg_event(dep->number, "Clr_TRB", 0);
+
dma_free_coherent(dwc->dev,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM, dep->trb_pool,
dep->trb_pool_dma);
@@ -723,17 +732,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
(dep->number & 1) ? "in" : "out");
}
- /*
- * Clean up ep ring of non-control endpoint to avoid getting xferInProgress
- * due to stale trbs with HWO bit set from previous composition when update
- * transfer cmd is issued.
- */
- if (dep->number > 1 && dep->trb_pool) {
- memset(&dep->trb_pool[0], 0,
- sizeof(struct dwc3_trb) * dep->num_trbs);
- dbg_event(dep->number, "Clr_TRB", 0);
- }
-
return 0;
}
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c89ae79763c6..9ef57e5d7d64 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -175,6 +175,9 @@ config USB_F_SUBSET
config USB_F_RNDIS
tristate
+config USB_F_QCRNDIS
+ tristate
+
config USB_F_MASS_STORAGE
tristate
@@ -318,6 +321,14 @@ config USB_CONFIGFS_ECM_SUBSET
On hardware that can't implement the full protocol,
a simple CDC subset is used, placing fewer demands on USB.
+config USB_CONFIGFS_QCRNDIS
+ bool "RNDIS"
+ depends on USB_CONFIGFS
+ depends on RNDIS_IPA
+ depends on NET
+ select USB_U_ETHER
+ select USB_F_QCRNDIS
+
config USB_CONFIGFS_RNDIS
bool "RNDIS"
depends on USB_CONFIGFS
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 9a27deac7978..a213cd4c8377 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -60,3 +60,5 @@ usb_f_cdev-y := f_cdev.o
obj-$(CONFIG_USB_F_CDEV) += usb_f_cdev.o
usb_f_qdss-y := f_qdss.o u_qdss.o
obj-$(CONFIG_USB_F_QDSS) += usb_f_qdss.o
+usb_f_qcrndis-y := f_qc_rndis.o u_data_ipa.o
+obj-$(CONFIG_USB_F_QCRNDIS) += usb_f_qcrndis.o
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index b288a848aaa0..3b7b23cfde44 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -50,6 +50,9 @@
#define MODULE_NAME "msm_usb_bridge"
#define NUM_INSTANCE 2
+#define MAX_CDEV_INST_NAME 15
+#define MAX_CDEV_FUNC_NAME 5
+
#define BRIDGE_RX_QUEUE_SIZE 8
#define BRIDGE_RX_BUF_SIZE 2048
#define BRIDGE_TX_QUEUE_SIZE 8
@@ -823,6 +826,7 @@ static void cser_free_inst(struct usb_function_instance *fi)
device_destroy(fcdev_classp, MKDEV(major, opts->port->minor));
cdev_del(&opts->port->fcdev_cdev);
usb_cser_chardev_deinit();
+ kfree(opts->func_name);
kfree(opts->port);
kfree(opts);
}
@@ -1647,16 +1651,6 @@ static struct configfs_item_operations cserial_item_ops = {
.release = cserial_attr_release,
};
-static ssize_t usb_cser_port_num_show(struct config_item *item, char *page)
-{
- return sprintf(page, "%u\n", to_f_cdev_opts(item)->port_num);
-}
-
-static ssize_t usb_cser_func_name_show(struct config_item *item, char *page)
-{
- return sprintf(page, "%s\n", to_f_cdev_opts(item)->func_name);
-}
-
static ssize_t usb_cser_status_show(struct config_item *item, char *page)
{
struct f_cdev *port = to_f_cdev_opts(item)->port;
@@ -1722,12 +1716,8 @@ static ssize_t usb_cser_status_store(struct config_item *item,
return len;
}
-CONFIGFS_ATTR_RO(usb_cser_, port_num);
-CONFIGFS_ATTR_RO(usb_cser_, func_name);
CONFIGFS_ATTR(usb_cser_, status);
static struct configfs_attribute *cserial_attrs[] = {
- &usb_cser_attr_port_num,
- &usb_cser_attr_func_name,
&usb_cser_attr_status,
NULL,
};
@@ -1748,7 +1738,7 @@ static int cser_set_inst_name(struct usb_function_instance *f, const char *name)
struct f_cdev *port;
name_len = strlen(name) + 1;
- if (name_len > 15)
+ if (name_len > MAX_CDEV_INST_NAME)
return -ENAMETOOLONG;
/* expect name as cdev.<func>.<port_num> */
@@ -1760,6 +1750,9 @@ static int cser_set_inst_name(struct usb_function_instance *f, const char *name)
/* get function name */
str_size = name_len - strlen(str);
+ if (str_size > MAX_CDEV_FUNC_NAME)
+ return -ENAMETOOLONG;
+
ptr = kstrndup(name, str_size - 1, GFP_KERNEL);
if (!ptr) {
pr_err("error:%ld\n", PTR_ERR(ptr));
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index d489e453594a..f058ab4cedaa 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -461,7 +461,7 @@ static struct usb_gadget_strings *rmnet_gsi_strings[] = {
/* rndis device descriptors */
-/* interface descriptor: */
+/* interface descriptor: Supports "Wireless" RNDIS; auto-detected by Windows */
static struct usb_interface_descriptor rndis_gsi_control_intf = {
.bLength = sizeof(rndis_gsi_control_intf),
.bDescriptorType = USB_DT_INTERFACE,
@@ -469,9 +469,9 @@ static struct usb_interface_descriptor rndis_gsi_control_intf = {
/* .bInterfaceNumber = DYNAMIC */
/* status endpoint is optional; this could be patched later */
.bNumEndpoints = 1,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
- .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR,
+ .bInterfaceClass = USB_CLASS_WIRELESS_CONTROLLER,
+ .bInterfaceSubClass = 0x01,
+ .bInterfaceProtocol = 0x03,
/* .iInterface = DYNAMIC */
};
@@ -522,15 +522,16 @@ static struct usb_interface_descriptor rndis_gsi_data_intf = {
/* .iInterface = DYNAMIC */
};
+/* Supports "Wireless" RNDIS; auto-detected by Windows */
static struct usb_interface_assoc_descriptor
rndis_gsi_iad_descriptor = {
.bLength = sizeof(rndis_gsi_iad_descriptor),
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
.bFirstInterface = 0, /* XXX, hardcoded */
.bInterfaceCount = 2, /* control + data */
- .bFunctionClass = USB_CLASS_COMM,
- .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ .bFunctionClass = USB_CLASS_WIRELESS_CONTROLLER,
+ .bFunctionSubClass = 0x01,
+ .bFunctionProtocol = 0x03,
/* .iFunction = DYNAMIC */
};
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index aa186781ef22..5e50fe245a59 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -1876,7 +1876,8 @@ struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
dev->function.disable = mtp_function_disable;
dev->function.setup = mtp_ctrlreq_configfs;
dev->function.free_func = mtp_free;
- dev->is_ptp = mtp_config;
+ dev->is_ptp = !mtp_config;
+ fi->f = &dev->function;
return &dev->function;
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 7ad798ace1e5..4e35ed9654b7 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -333,6 +333,77 @@ static struct usb_descriptor_header *ncm_hs_function[] = {
NULL,
};
+/* Super Speed Support */
+static struct usb_endpoint_descriptor ncm_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_notify_comp_desc = {
+ .bLength = sizeof(ncm_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ncm_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_in_comp_desc = {
+ .bLength = sizeof(ncm_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ncm_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_out_comp_desc = {
+ .bLength = sizeof(ncm_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ncm_ss_function[] = {
+ (struct usb_descriptor_header *) &ncm_iad_desc,
+ /* CDC NCM control descriptors */
+ (struct usb_descriptor_header *) &ncm_control_intf,
+ (struct usb_descriptor_header *) &ncm_header_desc,
+ (struct usb_descriptor_header *) &ncm_union_desc,
+ (struct usb_descriptor_header *) &ecm_desc,
+ (struct usb_descriptor_header *) &ncm_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ncm_data_nop_intf,
+ (struct usb_descriptor_header *) &ncm_data_intf,
+ (struct usb_descriptor_header *) &ncm_ss_in_desc,
+ (struct usb_descriptor_header *) &ncm_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_comp_desc,
+ NULL,
+};
+
/* string descriptors: */
#define STRING_CTRL_IDX 0
@@ -1431,8 +1502,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
hs_ncm_notify_desc.bEndpointAddress =
fs_ncm_notify_desc.bEndpointAddress;
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ncm_ss_in_desc.bEndpointAddress =
+ fs_ncm_in_desc.bEndpointAddress;
+ ncm_ss_out_desc.bEndpointAddress =
+ fs_ncm_out_desc.bEndpointAddress;
+ ncm_ss_notify_desc.bEndpointAddress =
+ fs_ncm_notify_desc.bEndpointAddress;
+ }
+
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
- NULL);
+ ncm_ss_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index bc319d4fe16c..316967415aa9 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -25,16 +25,17 @@
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/atomic.h>
#include "u_ether.h"
-#include "u_qc_ether.h"
#include "rndis.h"
-#include "u_bam_data.h"
+#include "u_data_ipa.h"
#include <linux/rndis_ipa.h>
+#include "configfs.h"
unsigned int rndis_dl_max_xfer_size = 9216;
module_param(rndis_dl_max_xfer_size, uint, S_IRUGO | S_IWUSR);
@@ -86,7 +87,7 @@ MODULE_PARM_DESC(rndis_dl_max_xfer_size,
*/
struct f_rndis_qc {
- struct qc_gether port;
+ struct usb_function func;
u8 ctrl_id, data_id;
u8 ethaddr[ETH_ALEN];
u32 vendorID;
@@ -94,27 +95,27 @@ struct f_rndis_qc {
u8 pkt_alignment_factor;
u32 max_pkt_size;
const char *manufacturer;
- int config;
+ struct rndis_params *params;
atomic_t ioctl_excl;
atomic_t open_excl;
struct usb_ep *notify;
struct usb_request *notify_req;
atomic_t notify_count;
- struct data_port bam_port;
- enum transport_type xport;
+ struct gadget_ipa_port bam_port;
u8 port_num;
+ u16 cdc_filter;
bool net_ready_trigger;
};
static struct ipa_usb_init_params rndis_ipa_params;
static spinlock_t rndis_lock;
static bool rndis_ipa_supported;
-static void rndis_qc_open(struct qc_gether *geth);
+static void rndis_qc_open(struct f_rndis_qc *rndis);
static inline struct f_rndis_qc *func_to_rndis_qc(struct usb_function *f)
{
- return container_of(f, struct f_rndis_qc, port.func);
+ return container_of(f, struct f_rndis_qc, func);
}
/* peak (theoretical) bulk transfer rate in bits-per-second */
@@ -322,10 +323,20 @@ static struct usb_endpoint_descriptor rndis_qc_ss_notify_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
- .wMaxPacketSize = cpu_to_le16(STATUS_BYTECOUNT),
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
.bInterval = RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
};
+static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
+ .bLength = sizeof(ss_intr_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
.bLength = sizeof(ss_intr_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
@@ -333,7 +344,16 @@ static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
/* the following 3 values can be tweaked if necessary */
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
- .wBytesPerInterval = cpu_to_le16(STATUS_BYTECOUNT),
+ .wBytesPerInterval = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+ .bLength = sizeof(ss_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
};
static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
@@ -407,7 +427,7 @@ struct f_rndis_qc *_rndis_qc;
static inline int rndis_qc_lock(atomic_t *excl)
{
- if (atomic_inc_return(excl) == 1) {
+ if (atomic_inc_return(excl) == 1)
return 0;
atomic_dec(excl);
@@ -421,46 +441,6 @@ static inline void rndis_qc_unlock(atomic_t *excl)
/*-------------------------------------------------------------------------*/
-static struct sk_buff *rndis_qc_add_header(struct qc_gether *port,
- struct sk_buff *skb)
-{
- struct sk_buff *skb2;
-
- skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
- if (skb2)
- rndis_add_hdr(skb2);
-
- dev_kfree_skb_any(skb);
- return skb2;
-}
-
-int rndis_qc_rm_hdr(struct qc_gether *port,
- struct sk_buff *skb,
- struct sk_buff_head *list)
-{
- /* tmp points to a struct rndis_packet_msg_type */
- __le32 *tmp = (void *)skb->data;
-
- /* MessageType, MessageLength */
- if (cpu_to_le32(RNDIS_MSG_PACKET)
- != get_unaligned(tmp++)) {
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
- tmp++;
-
- /* DataOffset, DataLength */
- if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
- dev_kfree_skb_any(skb);
- return -EOVERFLOW;
- }
- skb_trim(skb, get_unaligned_le32(tmp++));
-
- skb_queue_tail(list, skb);
- return 0;
-}
-
-
static void rndis_qc_response_available(void *_rndis)
{
struct f_rndis_qc *rndis = _rndis;
@@ -496,12 +476,12 @@ static void rndis_qc_response_complete(struct usb_ep *ep,
int status = req->status;
struct usb_composite_dev *cdev;
- if (!rndis->port.func.config || !rndis->port.func.config->cdev) {
+ if (!rndis->func.config || !rndis->func.config->cdev) {
pr_err("%s(): cdev or config is NULL.\n", __func__);
return;
}
- cdev = rndis->port.func.config->cdev;
+ cdev = rndis->func.config->cdev;
/* after TX:
* - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
* - RNDIS_RESPONSE_AVAILABLE (status/irq)
@@ -544,7 +524,7 @@ static void rndis_qc_command_complete(struct usb_ep *ep,
u32 ul_max_xfer_size, dl_max_xfer_size;
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
- status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
+ status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
if (status < 0)
pr_err("RNDIS command error %d, %d/%d\n",
status, req->actual, req->length);
@@ -552,8 +532,8 @@ static void rndis_qc_command_complete(struct usb_ep *ep,
buf = (rndis_init_msg_type *)req->buf;
if (buf->MessageType == RNDIS_MSG_INIT) {
- ul_max_xfer_size = rndis_get_ul_max_xfer_size(rndis->config);
- u_bam_data_set_ul_max_xfer_size(ul_max_xfer_size);
+ ul_max_xfer_size = rndis_get_ul_max_xfer_size(rndis->params);
+ ipa_data_set_ul_max_xfer_size(ul_max_xfer_size);
/*
* For consistent data throughput from IPA, it is required to
* fine tune aggregation byte limit as 7KB. RNDIS IPA driver
@@ -565,11 +545,11 @@ static void rndis_qc_command_complete(struct usb_ep *ep,
*/
if (rndis_dl_max_xfer_size)
dl_max_xfer_size = min_t(u32, rndis_dl_max_xfer_size,
- rndis_get_dl_max_xfer_size(rndis->config));
+ rndis_get_dl_max_xfer_size(rndis->params));
else
dl_max_xfer_size =
- rndis_get_dl_max_xfer_size(rndis->config);
- u_bam_data_set_dl_max_xfer_size(dl_max_xfer_size);
+ rndis_get_dl_max_xfer_size(rndis->params);
+ ipa_data_set_dl_max_xfer_size(dl_max_xfer_size);
}
}
@@ -612,11 +592,11 @@ rndis_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
u32 n;
/* return the result */
- buf = rndis_get_next_response(rndis->config, &n);
+ buf = rndis_get_next_response(rndis->params, &n);
if (buf) {
memcpy(req->buf, buf, n);
req->complete = rndis_qc_response_complete;
- rndis_free_response(rndis->config, buf);
+ rndis_free_response(rndis->params, buf);
value = n;
}
/* else stalls ... spec says to avoid that */
@@ -647,11 +627,31 @@ invalid:
return value;
}
+struct net_device *rndis_qc_get_net(const char *netname)
+{
+ struct net_device *net_dev;
+
+ net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Decrement net_dev refcount as it was incremented in
+ * dev_get_by_name().
+ */
+ dev_put(net_dev);
+ return net_dev;
+}
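
The lookup-then-release pattern above relies on the gadget layer keeping
rndis0 alive for as long as the function is bound. A minimal userspace
sketch of that discipline (illustrative names, not kernel APIs):

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct net_obj {
	char name[16];
	int refcnt;
};

static struct net_obj rndis0 = { "rndis0", 1 };

/* models dev_get_by_name(): a successful lookup takes a reference */
static struct net_obj *obj_get_by_name(const char *name)
{
	if (strcmp(name, rndis0.name))
		return NULL;
	rndis0.refcnt++;
	return &rndis0;
}

/* models dev_put(): the caller drops the lookup reference */
static void obj_put(struct net_obj *o)
{
	o->refcnt--;
}

int main(void)
{
	struct net_obj *o = obj_get_by_name("rndis0");

	assert(o);
	obj_put(o);	/* safe only because lifetime is guaranteed elsewhere */
	printf("%s refcnt=%d\n", o->name, o->refcnt);
	return 0;
}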
static int rndis_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ struct f_rndis_qc_opts *opts;
struct usb_composite_dev *cdev = f->config->cdev;
+ int src_connection_idx;
+ int dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
/* we know alt == 0 */
@@ -672,35 +672,28 @@ static int rndis_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
struct net_device *net;
rndis->net_ready_trigger = false;
- if (rndis->port.in_ep->driver_data) {
+ if (rndis->bam_port.in->driver_data) {
DBG(cdev, "reset rndis\n");
- /* rndis->port is needed for disconnecting the BAM data
+ /* bam_port is needed for disconnecting the BAM data
* path. Only after the BAM data path is disconnected,
* we can disconnect the port from the network layer.
*/
- bam_data_disconnect(&rndis->bam_port, USB_FUNC_RNDIS,
- rndis->port_num);
-
- if (rndis->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
- gether_qc_disconnect_name(&rndis->port,
- "rndis0");
+ ipa_data_disconnect(&rndis->bam_port,
+ USB_IPA_FUNC_RNDIS);
}
- if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
+ if (!rndis->bam_port.in->desc || !rndis->bam_port.out->desc) {
DBG(cdev, "init rndis\n");
if (config_ep_by_speed(cdev->gadget, f,
- rndis->port.in_ep) ||
+ rndis->bam_port.in) ||
config_ep_by_speed(cdev->gadget, f,
- rndis->port.out_ep)) {
- rndis->port.in_ep->desc = NULL;
- rndis->port.out_ep->desc = NULL;
+ rndis->bam_port.out)) {
+ rndis->bam_port.in->desc = NULL;
+ rndis->bam_port.out->desc = NULL;
goto fail;
}
}
- /* Avoid ZLPs; they can be troublesome. */
- rndis->port.is_zlp_ok = false;
-
/* RNDIS should be in the "RNDIS uninitialized" state,
* either never activated or after rndis_uninit().
*
@@ -713,30 +706,37 @@ static int rndis_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
* very long time. We need another call to the link layer
* code -- gether_updown(...bool) maybe -- to do it right.
*/
- rndis->port.cdc_filter = 0;
+ rndis->cdc_filter = 0;
rndis->bam_port.cdev = cdev;
- rndis->bam_port.func = &rndis->port.func;
- rndis->bam_port.in = rndis->port.in_ep;
- rndis->bam_port.out = rndis->port.out_ep;
-
- if (bam_data_connect(&rndis->bam_port, rndis->xport,
- rndis->port_num, USB_FUNC_RNDIS))
+ rndis->bam_port.func = &rndis->func;
+ ipa_data_port_select(USB_IPA_FUNC_RNDIS);
+ usb_bam_type = usb_bam_get_bam_type(cdev->gadget->name);
+
+ src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+ rndis->port_num);
+ dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+ rndis->port_num);
+ if (src_connection_idx < 0 || dst_connection_idx < 0) {
+ pr_err("%s: usb_bam_get_connection_idx failed\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (ipa_data_connect(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+ src_connection_idx, dst_connection_idx))
goto fail;
DBG(cdev, "RNDIS RX/TX early activation ...\n");
- if (rndis->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
- net = gether_qc_connect_name(&rndis->port, "rndis0",
- false);
- } else {
- rndis_qc_open(&rndis->port);
- net = gether_qc_get_net("rndis0");
- }
+ rndis_qc_open(rndis);
+ net = rndis_qc_get_net("rndis0");
if (IS_ERR(net))
return PTR_ERR(net);
+ opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+ opts->net = net;
- rndis_set_param_dev(rndis->config, net,
- &rndis->port.cdc_filter);
+ rndis_set_param_dev(rndis->params, net,
+ &rndis->cdc_filter);
} else
goto fail;
@@ -753,18 +753,13 @@ static void rndis_qc_disable(struct usb_function *f)
if (!rndis->notify->driver_data)
return;
- pr_info("rndis deactivated\n");
+ DBG(cdev, "rndis deactivated\n");
- rndis_uninit(rndis->config);
- bam_data_disconnect(&rndis->bam_port, USB_FUNC_RNDIS, rndis->port_num);
- if (rndis->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
- gether_qc_disconnect_name(&rndis->port, "rndis0");
+ rndis_uninit(rndis->params);
+ ipa_data_disconnect(&rndis->bam_port, USB_IPA_FUNC_RNDIS);
- if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA &&
- gadget_is_dwc3(cdev->gadget)) {
- msm_ep_unconfig(rndis->port.out_ep);
- msm_ep_unconfig(rndis->port.in_ep);
- }
+ msm_ep_unconfig(rndis->bam_port.out);
+ msm_ep_unconfig(rndis->bam_port.in);
usb_ep_disable(rndis->notify);
rndis->notify->driver_data = NULL;
}
@@ -789,11 +784,11 @@ static void rndis_qc_suspend(struct usb_function *f)
* host case. In the Windows case, the RNDIS state machine is
* already updated upon receipt of PACKET_FILTER.
*/
- rndis_flow_control(rndis->config, true);
+ rndis_flow_control(rndis->params, true);
pr_debug("%s(): Disconnecting\n", __func__);
}
- bam_data_suspend(&rndis->bam_port, rndis->port_num, USB_FUNC_RNDIS,
+ ipa_data_suspend(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
remote_wakeup_allowed);
pr_debug("rndis suspended\n");
}
@@ -816,12 +811,11 @@ static void rndis_qc_resume(struct usb_function *f)
else
remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
- bam_data_resume(&rndis->bam_port, rndis->port_num, USB_FUNC_RNDIS,
- remote_wakeup_allowed);
+ ipa_data_resume(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+ remote_wakeup_allowed);
if (!remote_wakeup_allowed) {
- if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA)
- rndis_qc_open(&rndis->port);
+ rndis_qc_open(rndis);
/*
* A Linux host doesn't send RNDIS_MSG_INIT or a non-zero value
* with RNDIS_MESSAGE_PACKET_FILTER after performing bus
* resume. Hence trigger the USB IPA transfer functionality
* explicitly here. The Windows host case is already handled
* by the RNDIS state machine.
*/
- rndis_flow_control(rndis->config, false);
+ rndis_flow_control(rndis->params, false);
}
pr_debug("%s: RNDIS resume completed\n", __func__);
@@ -844,26 +838,23 @@ static void rndis_qc_resume(struct usb_function *f)
* not used to tell whether the link should send packets or not.
*/
-static void rndis_qc_open(struct qc_gether *geth)
+static void rndis_qc_open(struct f_rndis_qc *rndis)
{
- struct f_rndis_qc *rndis = func_to_rndis_qc(&geth->func);
- struct usb_composite_dev *cdev = geth->func.config->cdev;
+ struct usb_composite_dev *cdev = rndis->func.config->cdev;
DBG(cdev, "%s\n", __func__);
- rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3,
+ rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3,
rndis_qc_bitrate(cdev->gadget) / 100);
- rndis_signal_connect(rndis->config);
+ rndis_signal_connect(rndis->params);
}
-static void rndis_qc_close(struct qc_gether *geth)
+void ipa_data_flow_control_enable(bool enable, struct rndis_params *param)
{
- struct f_rndis_qc *rndis = func_to_rndis_qc(&geth->func);
-
- DBG(geth->func.config->cdev, "%s\n", __func__);
-
- rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
- rndis_signal_disconnect(rndis->config);
+ if (enable)
+ ipa_data_stop_rndis_ipa(USB_IPA_FUNC_RNDIS);
+ else
+ ipa_data_start_rndis_ipa(USB_IPA_FUNC_RNDIS);
}
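
Note the inversion in ipa_data_flow_control_enable(): "enable" pauses the
IPA data path and "disable" restarts it. A hedged sketch of that mapping,
with hypothetical helpers standing in for the driver calls:

#include <stdbool.h>
#include <stdio.h>

static void stop_pipes(void)  { puts("data path paused");  }
static void start_pipes(void) { puts("data path resumed"); }

/* enable == true means the host asked us to hold traffic */
static void flow_control(bool enable)
{
	if (enable)
		stop_pipes();
	else
		start_pipes();
}

int main(void)
{
	flow_control(true);	/* host throttles: stop RNDIS IPA */
	flow_control(false);	/* host reopens: start RNDIS IPA */
	return 0;
}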
/*-------------------------------------------------------------------------*/
@@ -875,9 +866,35 @@ rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ struct rndis_params *params;
int status;
struct usb_ep *ep;
+ /* maybe allocate device-global string IDs */
+ if (rndis_qc_string_defs[0].id == 0) {
+
+ /* control interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[0].id = status;
+ rndis_qc_control_intf.iInterface = status;
+
+ /* data interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[1].id = status;
+ rndis_qc_data_intf.iInterface = status;
+
+ /* IAD iFunction label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[2].id = status;
+ rndis_qc_iad_descriptor.iFunction = status;
+ }
+
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
@@ -902,13 +919,13 @@ rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_in_desc);
if (!ep)
goto fail;
- rndis->port.in_ep = ep;
+ rndis->bam_port.in = ep;
ep->driver_data = cdev; /* claim */
ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_out_desc);
if (!ep)
goto fail;
- rndis->port.out_ep = ep;
+ rndis->bam_port.out = ep;
ep->driver_data = cdev; /* claim */
/* NOTE: a status/notification endpoint is, strictly speaking,
@@ -972,33 +989,30 @@ rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
}
- rndis->port.open = rndis_qc_open;
- rndis->port.close = rndis_qc_close;
-
- status = rndis_register(rndis_qc_response_available, rndis,
- bam_data_flow_control_enable);
- if (status < 0)
+ params = rndis_register(rndis_qc_response_available, rndis,
+ ipa_data_flow_control_enable);
+ if (IS_ERR(params))
goto fail;
- rndis->config = status;
+ rndis->params = params;
- rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
- rndis_set_host_mac(rndis->config, rndis->ethaddr);
+ rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, 0);
+ rndis_set_host_mac(rndis->params, rndis->ethaddr);
if (rndis->manufacturer && rndis->vendorID &&
- rndis_set_param_vendor(rndis->config, rndis->vendorID,
+ rndis_set_param_vendor(rndis->params, rndis->vendorID,
rndis->manufacturer))
goto fail;
pr_debug("%s(): max_pkt_per_xfer:%d\n", __func__,
rndis->ul_max_pkt_per_xfer);
- rndis_set_max_pkt_xfer(rndis->config, rndis->ul_max_pkt_per_xfer);
+ rndis_set_max_pkt_xfer(rndis->params, rndis->ul_max_pkt_per_xfer);
/* In case of aggregated packets, the QC device will request
* alignment to 4 (2^2).
*/
pr_debug("%s(): pkt_alignment_factor:%d\n", __func__,
rndis->pkt_alignment_factor);
- rndis_set_pkt_alignment_factor(rndis->config,
+ rndis_set_pkt_alignment_factor(rndis->params,
rndis->pkt_alignment_factor);
/* NOTE: all that is done without knowing or caring about
@@ -1009,7 +1023,7 @@ rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
gadget_is_superspeed(c->cdev->gadget) ? "super" :
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
- rndis->port.in_ep->name, rndis->port.out_ep->name,
+ rndis->bam_port.in->name, rndis->bam_port.out->name,
rndis->notify->name);
return 0;
@@ -1029,24 +1043,31 @@ fail:
/* we might as well release our claims on endpoints */
if (rndis->notify)
rndis->notify->driver_data = NULL;
- if (rndis->port.out_ep->desc)
- rndis->port.out_ep->driver_data = NULL;
- if (rndis->port.in_ep->desc)
- rndis->port.in_ep->driver_data = NULL;
+ if (rndis->bam_port.out->desc)
+ rndis->bam_port.out->driver_data = NULL;
+ if (rndis->bam_port.in->desc)
+ rndis->bam_port.in->driver_data = NULL;
pr_err("%s: can't bind, err %d\n", f->name, status);
return status;
}
+static void rndis_qc_free(struct usb_function *f)
+{
+ struct f_rndis_qc_opts *opts;
+
+ opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+ opts->refcnt--;
+}
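
rndis_qc_free() pairs with the refcount bump taken when a function is
allocated from its instance. A toy model of that pairing (assumed flow,
not the driver's API):

#include <assert.h>
#include <stdio.h>

struct opts { int refcnt; };

static void func_alloc(struct opts *o) { o->refcnt++; }

static void func_free(struct opts *o)
{
	assert(o->refcnt > 0);	/* every free must match an alloc */
	o->refcnt--;
}

int main(void)
{
	struct opts o = { 0 };

	func_alloc(&o);		/* function allocated from the instance */
	func_free(&o);		/* .free_func callback */
	assert(o.refcnt == 0);	/* instance may now be torn down */
	printf("refcnt=%d\n", o.refcnt);
	return 0;
}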
+
static void
rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_rndis_qc *rndis = func_to_rndis_qc(f);
- unsigned long flags;
pr_debug("rndis_qc_unbind: free\n");
- rndis_deregister(rndis->config);
+ rndis_deregister(rndis->params);
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
@@ -1055,23 +1076,17 @@ rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
kfree(rndis->notify_req->buf);
usb_ep_free_request(rndis->notify, rndis->notify_req);
- if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
- /*
- * call flush_workqueue to make sure that any pending
- * disconnect_work() from u_bam_data.c file is being
- * flushed before calling this rndis_ipa_cleanup API
- * as rndis ipa disconnect API is required to be
- * called before this.
- */
- bam_data_flush_workqueue();
- rndis_ipa_cleanup(rndis_ipa_params.private);
- rndis_ipa_supported = false;
- }
+ /*
+ * call flush_workqueue to make sure that any pending
+ * disconnect_work() from u_bam_data.c file is being
+ * flushed before calling this rndis_ipa_cleanup API
+ * as rndis ipa disconnect API is required to be
+ * called before this.
+ */
+ ipa_data_flush_workqueue();
+ rndis_ipa_cleanup(rndis_ipa_params.private);
+ rndis_ipa_supported = false;
- spin_lock_irqsave(&rndis_lock, flags);
- kfree(rndis);
- _rndis_qc = NULL;
- spin_unlock_irqrestore(&rndis_lock, flags);
}
void rndis_ipa_reset_trigger(void)
@@ -1099,7 +1114,6 @@ void rndis_net_ready_notify(void)
{
struct f_rndis_qc *rndis;
unsigned long flags;
- int port_num;
spin_lock_irqsave(&rndis_lock, flags);
rndis = _rndis_qc;
@@ -1117,19 +1131,7 @@ void rndis_net_ready_notify(void)
pr_debug("%s: Set net_ready_trigger", __func__);
rndis->net_ready_trigger = true;
spin_unlock_irqrestore(&rndis_lock, flags);
- port_num = (u_bam_data_func_to_port(USB_FUNC_RNDIS,
- RNDIS_QC_ACTIVE_PORT));
- if (port_num < 0)
- return;
- bam_data_start_rx_tx(port_num);
-}
-
-
-/* Some controllers can't support RNDIS ... */
-static inline bool can_support_rndis_qc(struct usb_configuration *c)
-{
- /* everything else is *presumably* fine */
- return true;
+ ipa_data_start_rx_tx(USB_IPA_FUNC_RNDIS);
}
/**
@@ -1144,84 +1146,42 @@ static inline bool can_support_rndis_qc(struct usb_configuration *c)
* Caller must have called @gether_setup(). Caller is also responsible
* for calling @gether_cleanup() before module unload.
*/
-int
-rndis_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
-{
- return rndis_qc_bind_config_vendor(c, ethaddr, 0, NULL, 1, 0, NULL);
-}
-int
-rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
- u32 vendorID, const char *manufacturer,
- u8 max_pkt_per_xfer,
- u8 pkt_alignment_factor,
- char *xport_name)
+static struct
+usb_function *rndis_qc_bind_config_vendor(struct usb_function_instance *fi,
+ u32 vendorID, const char *manufacturer,
+ u8 max_pkt_per_xfer, u8 pkt_alignment_factor)
{
+ struct f_rndis_qc_opts *opts = container_of(fi,
+ struct f_rndis_qc_opts, func_inst);
struct f_rndis_qc *rndis;
int status;
- if (!can_support_rndis_qc(c) || !ethaddr) {
- pr_debug("%s: invalid argument\n", __func__);
- return -EINVAL;
- }
-
- /* maybe allocate device-global string IDs */
- if (rndis_qc_string_defs[0].id == 0) {
-
- /* control interface label */
- status = usb_string_id(c->cdev);
- if (status < 0)
- return status;
- rndis_qc_string_defs[0].id = status;
- rndis_qc_control_intf.iInterface = status;
-
- /* data interface label */
- status = usb_string_id(c->cdev);
- if (status < 0)
- return status;
- rndis_qc_string_defs[1].id = status;
- rndis_qc_data_intf.iInterface = status;
-
- /* IAD iFunction label */
- status = usb_string_id(c->cdev);
- if (status < 0)
- return status;
- rndis_qc_string_defs[2].id = status;
- rndis_qc_iad_descriptor.iFunction = status;
- }
-
/* allocate and initialize one new instance */
status = -ENOMEM;
- rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
- if (!rndis) {
- pr_err("%s: fail allocate and initialize new instance\n",
- __func__);
- goto fail;
- }
- rndis->xport = str_to_xport(xport_name);
+ opts = container_of(fi, struct f_rndis_qc_opts, func_inst);
- /* export host's Ethernet address in CDC format */
- if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
- gether_qc_get_macs(rndis_ipa_params.device_ethaddr,
- rndis_ipa_params.host_ethaddr);
- pr_debug("setting host_ethaddr=%pM, device_ethaddr=%pM\n",
- rndis_ipa_params.host_ethaddr,
- rndis_ipa_params.device_ethaddr);
- rndis_ipa_supported = true;
- ether_addr_copy(rndis->ethaddr, &rndis_ipa_params.host_ethaddr);
- rndis_ipa_params.device_ready_notify = rndis_net_ready_notify;
- } else
- ether_addr_copy(rndis->ethaddr, ethaddr);
+ opts->refcnt++;
+ rndis = opts->rndis;
- rndis->vendorID = vendorID;
- rndis->manufacturer = manufacturer;
+ rndis->vendorID = opts->vendor_id;
+ rndis->manufacturer = opts->manufacturer;
+ /* export host's Ethernet address in CDC format */
+ random_ether_addr(rndis_ipa_params.host_ethaddr);
+ random_ether_addr(rndis_ipa_params.device_ethaddr);
+ pr_debug("setting host_ethaddr=%pM, device_ethaddr=%pM\n",
+ rndis_ipa_params.host_ethaddr,
+ rndis_ipa_params.device_ethaddr);
+ rndis_ipa_supported = true;
+ ether_addr_copy(rndis->ethaddr, rndis_ipa_params.host_ethaddr);
+ rndis_ipa_params.device_ready_notify = rndis_net_ready_notify;
/* if max_pkt_per_xfer was not configured set to default value */
rndis->ul_max_pkt_per_xfer =
max_pkt_per_xfer ? max_pkt_per_xfer :
DEFAULT_MAX_PKT_PER_XFER;
- u_bam_data_set_ul_max_pkt_num(rndis->ul_max_pkt_per_xfer);
+ ipa_data_set_ul_max_pkt_num(rndis->ul_max_pkt_per_xfer);
/*
* Check no RNDIS aggregation, and alignment if not mentioned,
@@ -1241,47 +1201,35 @@ rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
DEFAULT_PKT_ALIGNMENT_FACTOR;
/* RNDIS activates when the host changes this filter */
- rndis->port.cdc_filter = 0;
+ rndis->cdc_filter = 0;
- /* RNDIS has special (and complex) framing */
- rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
- rndis->port.wrap = rndis_qc_add_header;
- rndis->port.unwrap = rndis_qc_rm_hdr;
-
- rndis->port.func.name = "rndis";
- rndis->port.func.strings = rndis_qc_strings;
+ rndis->func.name = "rndis";
+ rndis->func.strings = rndis_qc_strings;
/* descriptors are per-instance copies */
- rndis->port.func.bind = rndis_qc_bind;
- rndis->port.func.unbind = rndis_qc_unbind;
- rndis->port.func.set_alt = rndis_qc_set_alt;
- rndis->port.func.setup = rndis_qc_setup;
- rndis->port.func.disable = rndis_qc_disable;
- rndis->port.func.suspend = rndis_qc_suspend;
- rndis->port.func.resume = rndis_qc_resume;
+ rndis->func.bind = rndis_qc_bind;
+ rndis->func.unbind = rndis_qc_unbind;
+ rndis->func.set_alt = rndis_qc_set_alt;
+ rndis->func.setup = rndis_qc_setup;
+ rndis->func.disable = rndis_qc_disable;
+ rndis->func.suspend = rndis_qc_suspend;
+ rndis->func.resume = rndis_qc_resume;
+ rndis->func.free_func = rndis_qc_free;
_rndis_qc = rndis;
- if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
- status = rndis_ipa_init(&rndis_ipa_params);
- if (status) {
- pr_err("%s: failed to init rndis_ipa\n", __func__);
- goto fail;
- }
- }
-
- status = usb_add_function(c, &rndis->port.func);
+ status = rndis_ipa_init(&rndis_ipa_params);
if (status) {
- if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA)
- rndis_ipa_cleanup(rndis_ipa_params.private);
- goto fail;
+ pr_err("%s: failed to init rndis_ipa\n", __func__);
+ return ERR_PTR(status);
}
- return 0;
+ return &rndis->func;
+}
-fail:
- kfree(rndis);
- _rndis_qc = NULL;
- return status;
+static struct usb_function *qcrndis_alloc(struct usb_function_instance *fi)
+{
+ return rndis_qc_bind_config_vendor(fi, 0, NULL, 1, 0);
}
static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
@@ -1370,24 +1318,100 @@ static struct miscdevice rndis_qc_device = {
.fops = &rndis_qc_fops,
};
-static int rndis_qc_init(void)
+static void qcrndis_free_inst(struct usb_function_instance *f)
{
+ struct f_rndis_qc *rndis;
+ struct f_rndis_qc_opts *opts = container_of(f,
+ struct f_rndis_qc_opts, func_inst);
+ unsigned long flags;
+
+ rndis = opts->rndis;
+ misc_deregister(&rndis_qc_device);
+
+ ipa_data_free(USB_IPA_FUNC_RNDIS);
+ spin_lock_irqsave(&rndis_lock, flags);
+ kfree(rndis);
+ opts->rndis = NULL;
+ _rndis_qc = NULL;
+ kfree(opts);
+ spin_unlock_irqrestore(&rndis_lock, flags);
+}
+
+static int qcrndis_set_inst_name(struct usb_function_instance *fi,
+ const char *name)
+{
+ struct f_rndis_qc_opts *opts = container_of(fi,
+ struct f_rndis_qc_opts, func_inst);
+ struct f_rndis_qc *rndis;
+ int name_len;
int ret;
- pr_info("initialize rndis QC instance\n");
+ name_len = strlen(name) + 1;
+ if (name_len > MAX_INST_NAME_LEN)
+ return -ENAMETOOLONG;
+ pr_debug("initialize rndis QC instance\n");
+ rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
+ if (!rndis) {
+ pr_err("%s: fail allocate and initialize new instance\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ opts->rndis = rndis;
ret = misc_register(&rndis_qc_device);
if (ret)
pr_err("rndis QC driver failed to register\n");
spin_lock_init(&rndis_lock);
- ret = bam_data_setup(USB_FUNC_RNDIS, RNDIS_QC_NO_PORTS);
+ ret = ipa_data_setup(USB_IPA_FUNC_RNDIS);
if (ret) {
pr_err("bam_data_setup failed err: %d\n", ret);
+ kfree(rndis);
return ret;
}
- return ret;
+ return 0;
+}
+
+static inline
+struct f_rndis_qc_opts *to_f_qc_rndis_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_rndis_qc_opts,
+ func_inst.group);
+}
+
+static void qcrndis_attr_release(struct config_item *item)
+{
+ struct f_rndis_qc_opts *opts = to_f_qc_rndis_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations qcrndis_item_ops = {
+ .release = qcrndis_attr_release,
+};
+
+static struct config_item_type qcrndis_func_type = {
+ .ct_item_ops = &qcrndis_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct usb_function_instance *qcrndis_alloc_inst(void)
+{
+ struct f_rndis_qc_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ opts->func_inst.set_inst_name = qcrndis_set_inst_name;
+ opts->func_inst.free_func_inst = qcrndis_free_inst;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &qcrndis_func_type);
+
+ return &opts->func_inst;
}
static void rndis_qc_cleanup(void)
@@ -1416,3 +1440,27 @@ bool rndis_qc_get_skip_ep_config(void)
{
return rndis_ipa_params.skip_ep_cfg;
}
+
+DECLARE_USB_FUNCTION(qcrndis, qcrndis_alloc_inst, qcrndis_alloc);
+
+static int __init usb_qcrndis_init(void)
+{
+ int ret;
+
+ ret = usb_function_register(&qcrndisusb_func);
+ if (ret) {
+ pr_err("%s: failed to register diag %d\n", __func__, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static void __exit usb_qcrndis_exit(void)
+{
+ usb_function_unregister(&qcrndisusb_func);
+ rndis_qc_cleanup();
+}
+
+module_init(usb_qcrndis_init);
+module_exit(usb_qcrndis_exit);
+MODULE_DESCRIPTION("USB RNDIS Function Driver");
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index b0e7b65b84bd..98ac1ff58323 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -596,6 +596,7 @@ static int rndis_init_response(struct rndis_params *params,
resp->AFListOffset = cpu_to_le32(0);
resp->AFListSize = cpu_to_le32(0);
+ params->ul_max_xfer_size = le32_to_cpu(resp->MaxTransferSize);
params->resp_avail(params->v);
return 0;
}
@@ -799,7 +800,7 @@ EXPORT_SYMBOL_GPL(rndis_set_host_mac);
*/
int rndis_msg_parser(struct rndis_params *params, u8 *buf)
{
- u32 MsgType, MsgLength;
+ u32 MsgType, MsgLength, major, minor, max_transfer_size;
__le32 *tmp;
if (!buf)
@@ -822,6 +823,19 @@ int rndis_msg_parser(struct rndis_params *params, u8 *buf)
case RNDIS_MSG_INIT:
pr_debug("%s: RNDIS_MSG_INIT\n",
__func__);
+ tmp++; /* skip RequestID */
+ major = get_unaligned_le32(tmp++);
+ minor = get_unaligned_le32(tmp++);
+ max_transfer_size = get_unaligned_le32(tmp++);
+
+ params->host_rndis_major_ver = major;
+ params->host_rndis_minor_ver = minor;
+ params->dl_max_xfer_size = max_transfer_size;
+
+ pr_debug("%s(): RNDIS Host Major:%d Minor:%d version\n",
+ __func__, major, minor);
+ pr_debug("%s(): UL Max Transfer size:%x\n", __func__,
+ max_transfer_size);
+
params->state = RNDIS_INITIALIZED;
return rndis_init_response(params, (rndis_init_msg_type *)buf);
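
The INIT parsing above walks the REMOTE_NDIS_INITIALIZE_MSG header with
little-endian reads. A self-contained userspace sketch of the same wire
layout (the message bytes are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* analogous to get_unaligned_le32() */
static uint32_t le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* type, length, request id, major, minor, max transfer size */
	const uint8_t msg[24] = {
		0x02, 0x00, 0x00, 0x00,	/* MessageType = RNDIS_MSG_INIT */
		0x18, 0x00, 0x00, 0x00,	/* MessageLength = 24 */
		0x01, 0x00, 0x00, 0x00,	/* RequestID = 1 */
		0x01, 0x00, 0x00, 0x00,	/* MajorVersion = 1 */
		0x00, 0x00, 0x00, 0x00,	/* MinorVersion = 0 */
		0x00, 0x40, 0x00, 0x00,	/* MaxTransferSize = 16384 */
	};
	const uint8_t *p = msg + 8;		/* past type and length */
	uint32_t req = le32(p); p += 4;		/* skip RequestID */
	uint32_t major = le32(p); p += 4;
	uint32_t minor = le32(p); p += 4;
	uint32_t max_xfer = le32(p);

	printf("req=%u ver=%u.%u dl_max_xfer=%u\n", (unsigned)req,
	       (unsigned)major, (unsigned)minor, (unsigned)max_xfer);
	return 0;
}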
@@ -1013,6 +1027,18 @@ int rndis_set_param_medium(struct rndis_params *params, u32 medium, u32 speed)
}
EXPORT_SYMBOL_GPL(rndis_set_param_medium);
+u32 rndis_get_dl_max_xfer_size(struct rndis_params *params)
+{
+ pr_debug("%s:\n", __func__);
+ return params->dl_max_xfer_size;
+}
+
+u32 rndis_get_ul_max_xfer_size(struct rndis_params *params)
+{
+ pr_debug("%s:\n", __func__);
+ return params->ul_max_xfer_size;
+}
+
void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer)
{
pr_debug("%s:\n", __func__);
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 939c3bebe015..3d130b0576fc 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -199,6 +199,10 @@ typedef struct rndis_params
void *v;
struct list_head resp_queue;
+ u32 host_rndis_major_ver;
+ u32 host_rndis_minor_ver;
+ u32 ul_max_xfer_size;
+ u32 dl_max_xfer_size;
} rndis_params;
/* RNDIS Message parser and other useless functions */
@@ -213,6 +217,8 @@ int rndis_set_param_vendor(struct rndis_params *params, u32 vendorID,
int rndis_set_param_medium(struct rndis_params *params, u32 medium,
u32 speed);
void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer);
+u32 rndis_get_ul_max_xfer_size(struct rndis_params *params);
+u32 rndis_get_dl_max_xfer_size(struct rndis_params *params);
void rndis_add_hdr(struct sk_buff *skb);
int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
struct sk_buff_head *list);
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
index 3a5b1e2da2e6..56e7dea427ec 100644
--- a/drivers/usb/gadget/function/u_data_ipa.c
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -22,37 +22,49 @@
#include <linux/termios.h>
#include <linux/usb_bam.h>
-#include "usb_gadget_xport.h"
+#include "u_data_ipa.h"
-#define IPA_N_PORTS 4
struct ipa_data_ch_info {
- struct usb_request *rx_req;
- struct usb_request *tx_req;
- unsigned long flags;
- unsigned id;
- enum transport_type trans;
- enum gadget_type gtype;
- bool is_connected;
- unsigned port_num;
- spinlock_t port_lock;
-
- struct work_struct connect_w;
- struct work_struct disconnect_w;
- struct work_struct suspend_w;
- struct work_struct resume_w;
-
- u32 src_pipe_idx;
- u32 dst_pipe_idx;
- u8 src_connection_idx;
- u8 dst_connection_idx;
- enum usb_ctrl usb_bam_type;
- struct gadget_ipa_port *port_usb;
+ struct usb_request *rx_req;
+ struct usb_request *tx_req;
+ unsigned long flags;
+ unsigned id;
+ enum ipa_func_type func_type;
+ bool is_connected;
+ unsigned port_num;
+ spinlock_t port_lock;
+
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+ struct work_struct suspend_w;
+ struct work_struct resume_w;
+
+ u32 src_pipe_idx;
+ u32 dst_pipe_idx;
+ u8 src_connection_idx;
+ u8 dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
+ struct gadget_ipa_port *port_usb;
+ struct usb_gadget *gadget;
+ atomic_t pipe_connect_notified;
struct usb_bam_connect_ipa_params ipa_params;
};
-static int n_ipa_ports;
+struct rndis_data_ch_info {
+ /* downlink (device->host, i.e. the IN path) configuration */
+ u32 dl_max_transfer_size;
+ /* uplink (host->device, i.e. the OUT path) configuration */
+ u32 ul_max_transfer_size;
+ u32 ul_max_packets_number;
+ bool ul_aggregation_enable;
+ u32 prod_clnt_hdl;
+ u32 cons_clnt_hdl;
+ void *priv;
+};
+
static struct workqueue_struct *ipa_data_wq;
struct ipa_data_ch_info *ipa_data_ports[IPA_N_PORTS];
+static struct rndis_data_ch_info *rndis_data;
/**
* ipa_data_endless_complete() - completion callback for endless TX/RX request
* @ep: USB endpoint for which this completion happen
@@ -132,6 +144,56 @@ static void ipa_data_stop_endless_xfer(struct ipa_data_ch_info *port, bool in)
}
}
+/*
+ * Called when IPA notifies us that the network interface is up.
+ * Starts the transfers on the bulk endpoints.
+ * (As an optimization, the pipes and BAM to IPA are already connected.)
+ */
+void ipa_data_start_rx_tx(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+
+ pr_debug("%s: Triggered: starting tx, rx", __func__);
+ /* queue in & out requests */
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s: port is NULL, can't start tx, rx", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->port_usb || !port->port_usb->in ||
+ !port->port_usb->out) {
+ pr_err("%s: Can't start tx, rx, ep not enabled", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ if (!port->rx_req || !port->tx_req) {
+ pr_err("%s: No request d->rx_req=%p, d->tx_req=%p", __func__,
+ port->rx_req, port->tx_req);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ if (!port->is_connected) {
+ pr_debug("%s: pipes are disconnected", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ /* queue in & out requests */
+ pr_debug("%s: Starting rx", __func__);
+ if (port->port_usb->out)
+ ipa_data_start_endless_xfer(port, false);
+
+ pr_debug("%s: Starting tx", __func__);
+ if (port->port_usb->in)
+ ipa_data_start_endless_xfer(port, true);
+}
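
ipa_data_start_rx_tx() validates all state under the port lock, then drops
the lock before queuing the (potentially slow) endless transfers. A compact
pthread model of that check-under-lock pattern (hypothetical names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
	pthread_mutex_t lock;
	bool is_connected;
	bool have_reqs;
};

static void start_xfer(const char *dir) { printf("start %s\n", dir); }

static void start_rx_tx(struct port *p)
{
	pthread_mutex_lock(&p->lock);
	if (!p->is_connected || !p->have_reqs) {
		pthread_mutex_unlock(&p->lock);
		return;		/* nothing to start yet */
	}
	pthread_mutex_unlock(&p->lock);

	/* queue requests outside the lock, like the endless xfers above */
	start_xfer("rx");
	start_xfer("tx");
}

int main(void)
{
	struct port p = { PTHREAD_MUTEX_INITIALIZER, true, true };

	start_rx_tx(&p);
	return 0;
}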
/**
* ipa_data_disconnect_work() - Perform USB IPA BAM disconnect
* @w: disconnect work
@@ -166,6 +228,23 @@ static void ipa_data_disconnect_work(struct work_struct *w)
if (ret)
pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ /*
+ * NOTE: the USB and IPA BAM related pipes must be disconnected
+ * before calling the IPA tethered function's disconnect API,
+ * because that API deletes the dependency graph with IPA RM,
+ * which would otherwise result in IPA not pulling data even
+ * though data is pending on the USB BAM producer pipe.
+ */
+ if (atomic_xchg(&port->pipe_connect_notified, 0) == 1) {
+ void *priv;
+
+ priv = rndis_qc_get_ipa_priv();
+ rndis_ipa_pipe_disconnect_notify(priv);
+ }
+ }
+
if (port->ipa_params.prod_clnt_hdl)
usb_bam_free_fifos(port->usb_bam_type,
port->dst_connection_idx);
@@ -173,6 +252,12 @@ static void ipa_data_disconnect_work(struct work_struct *w)
usb_bam_free_fifos(port->usb_bam_type,
port->src_connection_idx);
+ /*
+ * Decrement the usage count that was incremented upon cable
+ * connect, or upon cable disconnect while suspended.
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+
pr_debug("%s(): disconnect work completed.\n", __func__);
}
@@ -186,15 +271,15 @@ static void ipa_data_disconnect_work(struct work_struct *w)
* switch is being trigger. This API performs restoring USB endpoint operation
* and disable USB endpoint used for accelerated path.
*/
-void ipa_data_disconnect(struct gadget_ipa_port *gp, u8 port_num)
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func)
{
struct ipa_data_ch_info *port;
unsigned long flags;
struct usb_gadget *gadget = NULL;
- pr_debug("dev:%p port number:%d\n", gp, port_num);
- if (port_num >= n_ipa_ports) {
- pr_err("invalid ipa portno#%d\n", port_num);
+ pr_debug("dev:%p port number:%d\n", gp, func);
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid ipa portno#%d\n", func);
return;
}
@@ -203,9 +288,9 @@ void ipa_data_disconnect(struct gadget_ipa_port *gp, u8 port_num)
return;
}
- port = ipa_data_ports[port_num];
+ port = ipa_data_ports[func];
if (!port) {
- pr_err("port %u is NULL", port_num);
+ pr_err("port %u is NULL", func);
return;
}
@@ -223,8 +308,7 @@ void ipa_data_disconnect(struct gadget_ipa_port *gp, u8 port_num)
* complete function will be called, where we try
* to obtain the spinlock as well.
*/
- if (gadget_is_dwc3(gadget))
- msm_ep_unconfig(port->port_usb->in);
+ msm_ep_unconfig(port->port_usb->in);
spin_unlock_irqrestore(&port->port_lock, flags);
usb_ep_disable(port->port_usb->in);
spin_lock_irqsave(&port->port_lock, flags);
@@ -232,8 +316,7 @@ void ipa_data_disconnect(struct gadget_ipa_port *gp, u8 port_num)
}
if (port->port_usb->out) {
- if (gadget_is_dwc3(gadget))
- msm_ep_unconfig(port->port_usb->out);
+ msm_ep_unconfig(port->port_usb->out);
spin_unlock_irqrestore(&port->port_lock, flags);
usb_ep_disable(port->port_usb->out);
spin_lock_irqsave(&port->port_lock, flags);
@@ -257,14 +340,14 @@ void ipa_data_disconnect(struct gadget_ipa_port *gp, u8 port_num)
*/
static void configure_fifo(enum usb_ctrl bam_type, u8 idx, struct usb_ep *ep)
{
- struct u_bam_data_connect_info bam_info;
struct sps_mem_buffer data_fifo = {0};
+ u32 usb_bam_pipe_idx;
get_bam2bam_connection_info(bam_type, idx,
- &bam_info.usb_bam_pipe_idx,
+ &usb_bam_pipe_idx,
NULL, &data_fifo, NULL);
msm_data_fifo_config(ep, data_fifo.phys_base, data_fifo.size,
- bam_info.usb_bam_pipe_idx);
+ usb_bam_pipe_idx);
}
/**
@@ -308,8 +391,21 @@ static void ipa_data_connect_work(struct work_struct *w)
return;
}
+ /*
+ * Check whether connect_w already ran during this RNDIS resume:
+ * explicit flow control starts the data transfers after
+ * ipa_data_connect(), so a second invocation is a no-op.
+ */
+ if (port->is_connected) {
+ pr_debug("IPA connect is already done & Transfers started\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_gadget_autopm_put_async(port->gadget);
+ return;
+ }
+
gport->ipa_consumer_ep = -1;
gport->ipa_producer_ep = -1;
+
if (gport->out) {
port->rx_req = usb_ep_alloc_request(gport->out, GFP_ATOMIC);
if (!port->rx_req) {
@@ -341,8 +437,7 @@ static void ipa_data_connect_work(struct work_struct *w)
/* update IPA parameters here. */
port->ipa_params.usb_connection_speed = gadget->speed;
- if (gadget_is_dwc3(gadget))
- port->ipa_params.reset_pipe_after_lpm =
+ port->ipa_params.reset_pipe_after_lpm =
msm_dwc3_reset_ep_after_lpm(gadget);
port->ipa_params.skip_ep_cfg = true;
port->ipa_params.keep_ipa_awake = true;
@@ -354,49 +449,35 @@ static void ipa_data_connect_work(struct work_struct *w)
usb_bam_alloc_fifos(port->usb_bam_type,
port->src_connection_idx);
- if (gadget_is_dwc3(gadget)) {
- sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
- | MSM_PRODUCER | port->src_pipe_idx;
- port->rx_req->length = 32*1024;
- port->rx_req->udc_priv = sps_params;
- configure_fifo(port->usb_bam_type,
- port->src_connection_idx,
- port->port_usb->out);
- ret = msm_ep_config(gport->out, port->rx_req,
- GFP_ATOMIC);
- if (ret) {
- pr_err("msm_ep_config() failed for OUT EP\n");
- usb_bam_free_fifos(port->usb_bam_type,
- port->src_connection_idx);
- goto free_rx_tx_req;
- }
- } else {
- sps_params = (MSM_SPS_MODE | port->src_pipe_idx |
- MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
- port->rx_req->udc_priv = sps_params;
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+ | MSM_PRODUCER | port->src_pipe_idx;
+ port->rx_req->length = 32*1024;
+ port->rx_req->udc_priv = sps_params;
+ configure_fifo(port->usb_bam_type,
+ port->src_connection_idx,
+ port->port_usb->out);
+ ret = msm_ep_config(gport->out);
+ if (ret) {
+ pr_err("msm_ep_config() failed for OUT EP\n");
+ usb_bam_free_fifos(port->usb_bam_type,
+ port->src_connection_idx);
+ goto free_rx_tx_req;
}
}
if (gport->in) {
usb_bam_alloc_fifos(port->usb_bam_type,
port->dst_connection_idx);
- if (gadget_is_dwc3(gadget)) {
- sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
- port->dst_pipe_idx;
- port->tx_req->length = 32*1024;
- port->tx_req->udc_priv = sps_params;
- configure_fifo(port->usb_bam_type,
- port->dst_connection_idx, gport->in);
- ret = msm_ep_config(gport->in, port->tx_req,
- GFP_ATOMIC);
- if (ret) {
- pr_err("msm_ep_config() failed for IN EP\n");
- goto unconfig_msm_ep_out;
- }
- } else {
- sps_params = (MSM_SPS_MODE | port->dst_pipe_idx |
- MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
- port->tx_req->udc_priv = sps_params;
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+ port->dst_pipe_idx;
+ port->tx_req->length = 32*1024;
+ port->tx_req->udc_priv = sps_params;
+ configure_fifo(port->usb_bam_type,
+ port->dst_connection_idx, gport->in);
+ ret = msm_ep_config(gport->in);
+ if (ret) {
+ pr_err("msm_ep_config() failed for IN EP\n");
+ goto unconfig_msm_ep_out;
}
}
@@ -410,13 +491,20 @@ static void ipa_data_connect_work(struct work_struct *w)
if (gport->out) {
pr_debug("configure bam ipa connect for USB OUT\n");
port->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ port->ipa_params.notify = rndis_qc_get_ipa_rx_cb();
+ port->ipa_params.priv = rndis_qc_get_ipa_priv();
+ port->ipa_params.skip_ep_cfg =
+ rndis_qc_get_skip_ep_config();
+ }
+
ret = usb_bam_connect_ipa(port->usb_bam_type,
&port->ipa_params);
if (ret) {
pr_err("usb_bam_connect_ipa out failed err:%d\n", ret);
goto unconfig_msm_ep_in;
}
- gadget->bam2bam_func_enabled = true;
gport->ipa_consumer_ep = port->ipa_params.ipa_cons_ep_idx;
is_ipa_disconnected = false;
@@ -425,30 +513,71 @@ static void ipa_data_connect_work(struct work_struct *w)
if (gport->in) {
pr_debug("configure bam ipa connect for USB IN\n");
port->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
- port->ipa_params.dst_client = IPA_CLIENT_USB_DPL_CONS;
+
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ port->ipa_params.notify = rndis_qc_get_ipa_tx_cb();
+ port->ipa_params.priv = rndis_qc_get_ipa_priv();
+ port->ipa_params.skip_ep_cfg =
+ rndis_qc_get_skip_ep_config();
+ }
+
+ if (port->func_type == USB_IPA_FUNC_DPL)
+ port->ipa_params.dst_client = IPA_CLIENT_USB_DPL_CONS;
ret = usb_bam_connect_ipa(port->usb_bam_type,
&port->ipa_params);
if (ret) {
pr_err("usb_bam_connect_ipa IN failed err:%d\n", ret);
goto disconnect_usb_bam_ipa_out;
}
- gadget->bam2bam_func_enabled = true;
gport->ipa_producer_ep = port->ipa_params.ipa_prod_ep_idx;
is_ipa_disconnected = false;
}
- pr_debug("ipa_producer_ep:%d ipa_consumer_ep:%d\n",
- gport->ipa_producer_ep,
- gport->ipa_consumer_ep);
+ /* For DPL need to update_ipa_pipes to qti */
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ rndis_data->prod_clnt_hdl =
+ port->ipa_params.prod_clnt_hdl;
+ rndis_data->cons_clnt_hdl =
+ port->ipa_params.cons_clnt_hdl;
+ rndis_data->priv = port->ipa_params.priv;
+
+ pr_debug("ul_max_transfer_size:%d\n",
+ rndis_data->ul_max_transfer_size);
+ pr_debug("ul_max_packets_number:%d\n",
+ rndis_data->ul_max_packets_number);
+ pr_debug("dl_max_transfer_size:%d\n",
+ rndis_data->dl_max_transfer_size);
+
+ ret = rndis_ipa_pipe_connect_notify(
+ rndis_data->cons_clnt_hdl,
+ rndis_data->prod_clnt_hdl,
+ rndis_data->ul_max_transfer_size,
+ rndis_data->ul_max_packets_number,
+ rndis_data->dl_max_transfer_size,
+ rndis_data->priv);
+ if (ret) {
+ pr_err("%s: failed to connect IPA: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ atomic_set(&port->pipe_connect_notified, 1);
+ }
- gqti_ctrl_update_ipa_pipes(NULL, DPL_QTI_CTRL_PORT_NO,
+ pr_debug("ipa_producer_ep:%d ipa_consumer_ep:%d\n",
gport->ipa_producer_ep,
gport->ipa_consumer_ep);
pr_debug("src_bam_idx:%d dst_bam_idx:%d\n",
port->src_connection_idx, port->dst_connection_idx);
+ /* Don't queue the transfers yet, only after network stack is up */
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ pr_debug("%s: Not starting now, waiting for network notify",
+ __func__);
+ return;
+ }
+
if (gport->out)
ipa_data_start_endless_xfer(port, false);
if (gport->in)
@@ -496,7 +625,7 @@ free_rx_req:
* initiate USB BAM IPA connection. This API is enabling accelerated endpoints
* and schedule connect_work() which establishes USB IPA BAM communication.
*/
-int ipa_data_connect(struct gadget_ipa_port *gp, u8 port_num,
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
u8 src_connection_idx, u8 dst_connection_idx)
{
struct ipa_data_ch_info *port;
@@ -504,10 +633,10 @@ int ipa_data_connect(struct gadget_ipa_port *gp, u8 port_num,
int ret;
pr_debug("dev:%p port#%d src_connection_idx:%d dst_connection_idx:%d\n",
- gp, port_num, src_connection_idx, dst_connection_idx);
+ gp, func, src_connection_idx, dst_connection_idx);
- if (port_num >= n_ipa_ports) {
- pr_err("invalid portno#%d\n", port_num);
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid portno#%d\n", func);
ret = -ENODEV;
goto err;
}
@@ -518,10 +647,11 @@ int ipa_data_connect(struct gadget_ipa_port *gp, u8 port_num,
goto err;
}
- port = ipa_data_ports[port_num];
+ port = ipa_data_ports[func];
spin_lock_irqsave(&port->port_lock, flags);
port->port_usb = gp;
+ port->gadget = gp->cdev->gadget;
port->src_connection_idx = src_connection_idx;
port->dst_connection_idx = dst_connection_idx;
port->usb_bam_type = usb_bam_get_bam_type(gp->cdev->gadget->name);
@@ -565,6 +695,19 @@ int ipa_data_connect(struct gadget_ipa_port *gp, u8 port_num,
goto err_usb_in;
}
+ /* Wait for host to enable flow_control */
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ ret = 0;
+ goto err_usb_in;
+ }
+
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work (due to cable disconnect)
+ * or in suspend work.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+
queue_work(ipa_data_wq, &port->connect_w);
spin_unlock_irqrestore(&port->port_lock, flags);
@@ -642,6 +785,12 @@ static void ipa_data_stop(void *param, enum usb_bam_pipe_dir dir)
}
}
+void ipa_data_flush_workqueue(void)
+{
+ pr_debug("%s(): Flushing workqueue\n", __func__);
+ flush_workqueue(ipa_data_wq);
+}
+
/**
* ipa_data_suspend() - Initiate USB BAM IPA suspend functionality
* @gp: Gadget IPA port
@@ -650,15 +799,14 @@ static void ipa_data_stop(void *param, enum usb_bam_pipe_dir dir)
* It is being used to initiate USB BAM IPA suspend functionality
* for USB bus suspend functionality.
*/
-void ipa_data_suspend(struct gadget_ipa_port *gp, u8 port_num)
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled)
{
struct ipa_data_ch_info *port;
- int ret;
-
- pr_debug("dev:%p port number:%d\n", gp, port_num);
+ unsigned long flags;
- if (port_num >= n_ipa_ports) {
- pr_err("invalid ipa portno#%d\n", port_num);
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid ipa portno#%d\n", func);
return;
}
@@ -666,14 +814,61 @@ void ipa_data_suspend(struct gadget_ipa_port *gp, u8 port_num)
pr_err("data port is null\n");
return;
}
+ pr_debug("%s: suspended port %d\n", __func__, func);
- port = ipa_data_ports[port_num];
+ port = ipa_data_ports[func];
if (!port) {
- pr_err("port %u is NULL", port_num);
+ pr_err("%s(): Port is NULL.\n", __func__);
+ return;
+ }
+
+ /* suspend with remote wakeup disabled */
+ if (!remote_wakeup_enabled) {
+ /*
+ * When remote wakeup is disabled, IPA BAM is disconnected
+ * because it cannot send new data until the USB bus is resumed.
+ * Endpoint descriptors info is saved before it gets reset by
+ * the BAM disconnect API. This lets us restore this info when
+ * the USB bus is resumed.
+ */
+ gp->in_ep_desc_backup = gp->in->desc;
+ gp->out_ep_desc_backup = gp->out->desc;
+
+ pr_debug("in_ep_desc_backup = %p, out_ep_desc_backup = %p",
+ gp->in_ep_desc_backup,
+ gp->out_ep_desc_backup);
+
+ ipa_data_disconnect(gp, func);
return;
}
+ spin_lock_irqsave(&port->port_lock, flags);
+ queue_work(ipa_data_wq, &port->suspend_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
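
The no-remote-wakeup suspend path above saves the endpoint descriptor
pointers before the BAM disconnect clears them, so resume can restore and
reconnect. A minimal save/restore model (not the driver's types):

#include <assert.h>
#include <stdio.h>

struct ep { const char *desc; };

int main(void)
{
	struct ep in = { "in-desc" };
	const char *backup;

	backup = in.desc;	/* ipa_data_suspend(): save before disconnect */
	in.desc = NULL;		/* BAM disconnect resets the descriptor */

	in.desc = backup;	/* ipa_data_resume(): restore, then reconnect */
	assert(in.desc);
	printf("restored %s\n", in.desc);
	return 0;
}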
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+ struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+ suspend_w);
+ unsigned long flags;
+ int ret;
+
pr_debug("%s: suspend started\n", __func__);
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ /* In the RNDIS case the host enables flow_control, which invokes
+ * connect_w. If that is delayed, suspend_w may run before connect_w.
+ * connect_w may then never start at all, e.g. if the cable is
+ * disconnected or the host changes configuration (RNDIS --> MBIM).
+ * For these cases don't do the runtime_put, as there was no _get yet,
+ * and detect this condition on disconnect so no extra pm_runtime_get
+ * is done for the SUSPEND --> DISCONNECT scenario.
+ */
+ if (!port->is_connected) {
+ pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
ret = usb_bam_register_wake_cb(port->usb_bam_type,
port->dst_connection_idx, NULL, port);
if (ret) {
@@ -685,7 +880,23 @@ void ipa_data_suspend(struct gadget_ipa_port *gp, u8 port_num)
usb_bam_register_start_stop_cbs(port->usb_bam_type,
port->dst_connection_idx, ipa_data_start,
ipa_data_stop, port);
+ /*
+ * release the lock here because ipa_data_start() or
+ * ipa_data_stop(), called from usb_bam_suspend(),
+ * re-acquires the port lock.
+ */
+ spin_unlock_irqrestore(&port->port_lock, flags);
usb_bam_suspend(port->usb_bam_type, &port->ipa_params);
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ /*
+ * Decrement usage count after IPA handshake is done
+ * to allow gadget parent to go to lpm. This counter was
+ * incremented upon cable connect.
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
}
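
The autopm comments above describe a strict pairing: every
usb_gadget_autopm_get_noresume() (cable connect or resume) must be matched
by exactly one usb_gadget_autopm_put_async() (disconnect work or suspend
work), or the gadget parent can never enter low power mode. A toy counter
model of that invariant:

#include <assert.h>
#include <stdio.h>

static int usage;

static void autopm_get(void) { usage++; }

static void autopm_put(void)
{
	assert(usage > 0);	/* an unmatched put is a bug */
	usage--;
}

int main(void)
{
	autopm_get();		/* cable connect */
	autopm_put();		/* suspend work, after IPA handshake */
	autopm_get();		/* resume */
	autopm_put();		/* disconnect work */
	assert(usage == 0);	/* parent may now suspend */
	printf("balanced, usage=%d\n", usage);
	return 0;
}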
/**
@@ -696,17 +907,20 @@ void ipa_data_suspend(struct gadget_ipa_port *gp, u8 port_num)
* It is being used to initiate USB resume functionality
* for USB bus resume case.
*/
-void ipa_data_resume(struct gadget_ipa_port *gp, u8 port_num)
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled)
{
struct ipa_data_ch_info *port;
unsigned long flags;
struct usb_gadget *gadget = NULL;
- int ret;
+ u8 src_connection_idx;
+ u8 dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
- pr_debug("dev:%p port number:%d\n", gp, port_num);
+ pr_debug("dev:%p port number:%d\n", gp, func);
- if (port_num >= n_ipa_ports) {
- pr_err("invalid ipa portno#%d\n", port_num);
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid ipa portno#%d\n", func);
return;
}
@@ -715,12 +929,66 @@ void ipa_data_resume(struct gadget_ipa_port *gp, u8 port_num)
return;
}
- port = ipa_data_ports[port_num];
+ port = ipa_data_ports[func];
if (!port) {
- pr_err("port %u is NULL", port_num);
+ pr_err("port %u is NULL", func);
+ return;
+ }
+
+ gadget = gp->cdev->gadget;
+ /* resume with remote wakeup disabled */
+ if (!remote_wakeup_enabled) {
+ /* Restore endpoint descriptors info. */
+ gp->in->desc = gp->in_ep_desc_backup;
+ gp->out->desc = gp->out_ep_desc_backup;
+
+ pr_debug("in_ep_desc_backup = %p, out_ep_desc_backup = %p",
+ gp->in_ep_desc_backup,
+ gp->out_ep_desc_backup);
+ usb_bam_type = usb_bam_get_bam_type(gadget->name);
+ src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+ 0);
+ dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+ 0);
+ ipa_data_connect(gp, func,
+ src_connection_idx, dst_connection_idx);
return;
}
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ /*
+ * Increment the usage count here to disallow gadget
+ * parent suspend. The counter is decremented after the
+ * IPA handshake is done, in disconnect work (due to
+ * cable disconnect) or in ipa_data_disconnect() while
+ * suspended.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(ipa_data_wq, &port->resume_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+ struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+ resume_w);
+ struct usb_gadget *gadget;
+ unsigned long flags;
+ int ret;
+
+ pr_debug("%s: resume started\n", __func__);
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->port_usb || !port->port_usb->cdev) {
+ pr_err("port_usb or cdev is NULL");
+ goto exit;
+ }
+
+ if (!port->port_usb->cdev->gadget) {
+ pr_err("port_usb->cdev->gadget is NULL");
+ goto exit;
+ }
+
gadget = port->port_usb->cdev->gadget;
@@ -750,6 +1018,7 @@ void ipa_data_resume(struct gadget_ipa_port *gp, u8 port_num)
usb_bam_resume(port->usb_bam_type, &port->ipa_params);
}
+exit:
spin_unlock_irqrestore(&port->port_lock, flags);
}
@@ -762,12 +1031,12 @@ void ipa_data_resume(struct gadget_ipa_port *gp, u8 port_num)
*
* Return: 0 in case of success, otherwise errno.
*/
-static int ipa_data_port_alloc(int portno)
+static int ipa_data_port_alloc(enum ipa_func_type func)
{
struct ipa_data_ch_info *port = NULL;
- if (ipa_data_ports[portno] != NULL) {
- pr_debug("port %d already allocated.\n", portno);
+ if (ipa_data_ports[func] != NULL) {
+ pr_debug("port %d already allocated.\n", func);
return 0;
}
@@ -775,29 +1044,29 @@ static int ipa_data_port_alloc(int portno)
if (!port)
return -ENOMEM;
- ipa_data_ports[portno] = port;
+ ipa_data_ports[func] = port;
- pr_debug("port:%p with portno:%d allocated\n", port, portno);
+ pr_debug("port:%p with portno:%d allocated\n", port, func);
return 0;
}
/**
* ipa_data_port_select() - Select particular port for BAM2BAM IPA mode
* @portno: port number to be used by particular USB function
- * @gtype: USB gadget function type
+ * @func_type: USB gadget function type
*
* It is being used by USB function driver to select which BAM2BAM IPA
* port particular USB function wants to use.
*
*/
-void ipa_data_port_select(int portno, enum gadget_type gtype)
+void ipa_data_port_select(enum ipa_func_type func)
{
struct ipa_data_ch_info *port = NULL;
- pr_debug("portno:%d\n", portno);
+ pr_debug("portno:%d\n", func);
- port = ipa_data_ports[portno];
- port->port_num = portno;
+ port = ipa_data_ports[func];
+ port->port_num = func;
port->is_connected = false;
spin_lock_init(&port->port_lock);
@@ -808,14 +1077,30 @@ void ipa_data_port_select(int portno, enum gadget_type gtype)
if (!work_pending(&port->disconnect_w))
INIT_WORK(&port->disconnect_w, ipa_data_disconnect_work);
+ INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+ INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
+
port->ipa_params.src_client = IPA_CLIENT_USB_PROD;
port->ipa_params.dst_client = IPA_CLIENT_USB_CONS;
- port->gtype = gtype;
+ port->func_type = func;
};
+void ipa_data_free(enum ipa_func_type func)
+{
+ pr_debug("freeing %d IPA BAM port", func);
+
+ kfree(ipa_data_ports[func]);
+ ipa_data_ports[func] = NULL;
+ if (func == USB_IPA_FUNC_RNDIS) {
+ kfree(rndis_data);
+ rndis_data = NULL;
+ }
+ if (ipa_data_wq) {
+ destroy_workqueue(ipa_data_wq);
+ ipa_data_wq = NULL;
+ }
+}
+
/**
* ipa_data_setup() - setup BAM2BAM IPA port
- * @no_ipa_port: total number of BAM2BAM IPA port to support
+ * @func: USB IPA function type to set up
*
* Each USB function that wants to use a BAM2BAM IPA port calls
* this to allocate and initialize the port for its function type.
@@ -823,32 +1108,34 @@ void ipa_data_port_select(int portno, enum gadget_type gtype)
*
* Return: 0 in case of success, otherwise errno.
*/
-int ipa_data_setup(unsigned int no_ipa_port)
+int ipa_data_setup(enum ipa_func_type func)
{
- int i, ret;
+ int ret;
- pr_debug("requested %d IPA BAM ports", no_ipa_port);
+ pr_debug("requested %d IPA BAM port", func);
- if (!no_ipa_port || no_ipa_port > IPA_N_PORTS) {
- pr_err("Invalid num of ports count:%d\n", no_ipa_port);
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("Invalid num of ports count:%d\n", func);
return -EINVAL;
}
- for (i = 0; i < no_ipa_port; i++) {
- n_ipa_ports++;
- ret = ipa_data_port_alloc(i);
- if (ret) {
- n_ipa_ports--;
- pr_err("Failed to alloc port:%d\n", i);
+ ret = ipa_data_port_alloc(func);
+ if (ret) {
+ pr_err("Failed to alloc port:%d\n", func);
+ return ret;
+ }
+
+ if (func == USB_IPA_FUNC_RNDIS) {
+ rndis_data = kzalloc(sizeof(*rndis_data), GFP_KERNEL);
+ if (!rndis_data) {
+ pr_err("%s: fail allocate and initialize new instance\n",
+ __func__);
goto free_ipa_ports;
}
}
-
- pr_debug("n_ipa_ports:%d\n", n_ipa_ports);
-
if (ipa_data_wq) {
pr_debug("ipa_data_wq is already setup.");
return 0;
}
ipa_data_wq = alloc_workqueue("k_usb_ipa_data",
@@ -856,20 +1143,111 @@ int ipa_data_setup(unsigned int no_ipa_port)
if (!ipa_data_wq) {
pr_err("Failed to create workqueue\n");
ret = -ENOMEM;
- goto free_ipa_ports;
+ goto free_rndis_data;
}
return 0;
+free_rndis_data:
+ if (func == USB_IPA_FUNC_RNDIS)
+ kfree(rndis_data);
free_ipa_ports:
- for (i = 0; i < n_ipa_ports; i++) {
- kfree(ipa_data_ports[i]);
- ipa_data_ports[i] = NULL;
- if (ipa_data_wq) {
- destroy_workqueue(ipa_data_wq);
- ipa_data_wq = NULL;
- }
- }
+ kfree(ipa_data_ports[func]);
+ ipa_data_ports[func] = NULL;
return ret;
}
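
ipa_data_setup() uses the usual goto unwind ladder: each failure frees only
what was allocated before it, in reverse order. A standalone sketch of that
shape (plain malloc stands in for the port, rndis_data and workqueue):

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	int ret;
	void *port, *data, *wq;

	port = malloc(64);		/* ipa_data_port_alloc() */
	if (!port)
		return -1;

	data = malloc(64);		/* rndis_data */
	if (!data) {
		ret = -1;
		goto free_port;
	}

	wq = malloc(64);		/* alloc_workqueue() */
	if (!wq) {
		ret = -1;
		goto free_data;
	}

	/* success: hand the resources over (freed here for the demo) */
	free(wq); free(data); free(port);
	return 0;

free_data:
	free(data);
free_port:
	free(port);
	return ret;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}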
+
+void ipa_data_set_ul_max_xfer_size(u32 max_transfer_size)
+{
+ if (!max_transfer_size) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+ rndis_data->ul_max_transfer_size = max_transfer_size;
+ pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+void ipa_data_set_dl_max_xfer_size(u32 max_transfer_size)
+{
+
+ if (!max_transfer_size) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+ rndis_data->dl_max_transfer_size = max_transfer_size;
+ pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+void ipa_data_set_ul_max_pkt_num(u8 max_packets_number)
+{
+ if (!max_packets_number) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+
+ rndis_data->ul_max_packets_number = max_packets_number;
+
+ if (max_packets_number > 1)
+ rndis_data->ul_aggregation_enable = true;
+ else
+ rndis_data->ul_aggregation_enable = false;
+
+ pr_debug("%s(): ul_aggregation enable:%d ul_max_packets_number:%d\n",
+ __func__, rndis_data->ul_aggregation_enable,
+ max_packets_number);
+}
+
+void ipa_data_start_rndis_ipa(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+
+ pr_debug("%s\n", __func__);
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s: port is NULL", __func__);
+ return;
+ }
+
+ if (atomic_read(&port->pipe_connect_notified)) {
+ pr_debug("%s: Transfers already started?\n", __func__);
+ return;
+ }
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work due to cable disconnect
+ * or in suspend work.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(ipa_data_wq, &port->connect_w);
+}
+
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s: port is NULL", __func__);
+ return;
+ }
+
+ if (!atomic_read(&port->pipe_connect_notified))
+ return;
+
+ rndis_ipa_reset_trigger();
+ ipa_data_stop_endless_xfer(port, true);
+ ipa_data_stop_endless_xfer(port, false);
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* check if USB cable is disconnected or not */
+ if (port->port_usb) {
+ msm_ep_unconfig(port->port_usb->in);
+ msm_ep_unconfig(port->port_usb->out);
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ queue_work(ipa_data_wq, &port->disconnect_w);
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.h b/drivers/usb/gadget/function/u_data_ipa.h
index b7d47ab1bb04..a1c1055bd8ef 100644
--- a/drivers/usb/gadget/function/u_data_ipa.h
+++ b/drivers/usb/gadget/function/u_data_ipa.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,23 +13,78 @@
#ifndef __U_DATA_IPA_H
#define __U_DATA_IPA_H
-#include "usb_gadget_xport.h"
+#include <linux/usb/composite.h>
+#include <linux/rndis_ipa.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/miscdevice.h>
+#include <linux/ipa_usb.h>
+#include <linux/usb_bam.h>
+
+enum ipa_func_type {
+ USB_IPA_FUNC_ECM,
+ USB_IPA_FUNC_MBIM,
+ USB_IPA_FUNC_RMNET,
+ USB_IPA_FUNC_RNDIS,
+ USB_IPA_FUNC_DPL,
+ USB_IPA_NUM_FUNCS,
+};
+
+/* Max Number of IPA data ports supported */
+#define IPA_N_PORTS USB_IPA_NUM_FUNCS
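
The enum above doubles as the index into the port array, with
USB_IPA_NUM_FUNCS as the sentinel used for the bounds checks throughout
u_data_ipa.c. A small sketch of that convention:

#include <stdio.h>

enum ipa_func_type {
	USB_IPA_FUNC_ECM, USB_IPA_FUNC_MBIM, USB_IPA_FUNC_RMNET,
	USB_IPA_FUNC_RNDIS, USB_IPA_FUNC_DPL, USB_IPA_NUM_FUNCS,
};

static void *ports[USB_IPA_NUM_FUNCS];

static void *port_get(enum ipa_func_type func)
{
	if (func >= USB_IPA_NUM_FUNCS)	/* same check as ipa_data_connect() */
		return NULL;
	return ports[func];
}

int main(void)
{
	printf("rndis port = %p\n", port_get(USB_IPA_FUNC_RNDIS));
	return 0;
}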
struct gadget_ipa_port {
struct usb_composite_dev *cdev;
struct usb_function *func;
+ int rx_buffer_size;
struct usb_ep *in;
struct usb_ep *out;
int ipa_consumer_ep;
int ipa_producer_ep;
+ const struct usb_endpoint_descriptor *in_ep_desc_backup;
+ const struct usb_endpoint_descriptor *out_ep_desc_backup;
+
+};
+
+/* for configfs support */
+#define MAX_INST_NAME_LEN 40
+
+struct f_rndis_qc_opts {
+ struct usb_function_instance func_inst;
+ struct f_rndis_qc *rndis;
+ u32 vendor_id;
+ const char *manufacturer;
+ struct net_device *net;
+ int refcnt;
};
-void ipa_data_port_select(int portno, enum gadget_type gtype);
-void ipa_data_disconnect(struct gadget_ipa_port *gp, u8 port_num);
-int ipa_data_connect(struct gadget_ipa_port *gp, u8 port_num,
+void ipa_data_port_select(enum ipa_func_type func);
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func);
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
u8 src_connection_idx, u8 dst_connection_idx);
-int ipa_data_setup(unsigned int no_ipa_port);
-void ipa_data_resume(struct gadget_ipa_port *gp, u8 port_num);
-void ipa_data_suspend(struct gadget_ipa_port *gp, u8 port_num);
+int ipa_data_setup(enum ipa_func_type func);
+void ipa_data_free(enum ipa_func_type func);
+
+void ipa_data_flush_workqueue(void);
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled);
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled);
+
+void ipa_data_set_ul_max_xfer_size(u32 ul_max_xfer_size);
+
+void ipa_data_set_dl_max_xfer_size(u32 dl_max_transfer_size);
+
+void ipa_data_set_ul_max_pkt_num(u8 ul_max_packets_number);
+
+void ipa_data_start_rx_tx(enum ipa_func_type func);
+
+void ipa_data_start_rndis_ipa(enum ipa_func_type func);
+
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func);
+void *rndis_qc_get_ipa_priv(void);
+void *rndis_qc_get_ipa_rx_cb(void);
+bool rndis_qc_get_skip_ep_config(void);
+void *rndis_qc_get_ipa_tx_cb(void);
+void rndis_ipa_reset_trigger(void);
#endif
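
With this rework the whole API is keyed on ipa_func_type rather than a (port number, gadget type) pair, and ipa_data_setup()/ipa_data_free() now manage one slot per function. A small stand-alone sketch of that slot management (struct contents and helper names are hypothetical stand-ins for the driver's internals):

#include <stdio.h>
#include <stdlib.h>

enum ipa_func_type {
	USB_IPA_FUNC_ECM, USB_IPA_FUNC_MBIM, USB_IPA_FUNC_RMNET,
	USB_IPA_FUNC_RNDIS, USB_IPA_FUNC_DPL, USB_IPA_NUM_FUNCS,
};

struct ipa_data_ch_info { enum ipa_func_type func; };

/* one slot per function, mirroring IPA_N_PORTS == USB_IPA_NUM_FUNCS */
static struct ipa_data_ch_info *ports[USB_IPA_NUM_FUNCS];

static int setup(enum ipa_func_type func)
{
	if (ports[func])
		return -1;                      /* already set up */
	ports[func] = calloc(1, sizeof(*ports[func]));
	if (!ports[func])
		return -1;
	ports[func]->func = func;
	return 0;
}

static void teardown(enum ipa_func_type func)
{
	free(ports[func]);
	ports[func] = NULL;
}

int main(void)
{
	if (!setup(USB_IPA_FUNC_RNDIS))
		printf("rndis data port ready in slot %d\n", USB_IPA_FUNC_RNDIS);
	teardown(USB_IPA_FUNC_RNDIS);
	return 0;
}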
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dd7669331d00..b30831ef4014 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -116,17 +116,20 @@ int xhci_halt(struct xhci_hcd *xhci)
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
if (!ret) {
xhci->xhc_state |= XHCI_STATE_HALTED;
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
-
- if (timer_pending(&xhci->cmd_timer)) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Cleanup command queue");
- del_timer(&xhci->cmd_timer);
- xhci_cleanup_command_queue(xhci);
- }
- } else
+ } else {
xhci_warn(xhci, "Host not halted after %u microseconds.\n",
XHCI_MAX_HALT_USEC);
+ }
+
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
+ if (timer_pending(&xhci->cmd_timer)) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Cleanup command queue");
+ del_timer(&xhci->cmd_timer);
+ xhci_cleanup_command_queue(xhci);
+ }
+
return ret;
}
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 82ae0b4fe135..915080a5b817 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -168,8 +168,12 @@ static void *usbpd_ipc_log;
#define PS_HARD_RESET_TIME 25
#define PS_SOURCE_ON 400
#define PS_SOURCE_OFF 750
+#define SWAP_SOURCE_START_TIME 20
#define VDM_BUSY_TIME 50
+/* tPSHardReset + tSafe0V + tSrcRecover + tSrcTurnOn */
+#define SNK_HARD_RESET_RECOVER_TIME (35 + 650 + 1000 + 275)
+
#define PD_CAPS_COUNT 50
#define PD_MAX_MSG_ID 7
@@ -242,6 +246,12 @@ static void *usbpd_ipc_log;
static int min_sink_current = 900;
module_param(min_sink_current, int, S_IRUSR | S_IWUSR);
+static bool ss_host;
+module_param(ss_host, bool, S_IRUSR | S_IWUSR);
+
+static bool ss_dev = true;
+module_param(ss_dev, bool, S_IRUSR | S_IWUSR);
+
static const u32 default_src_caps[] = { 0x36019096 }; /* VSafe5V @ 1.5A */
static const u32 default_snk_caps[] = { 0x2601905A, /* 5V @ 900mA */
@@ -318,6 +328,7 @@ static const unsigned int usbpd_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_USB_CC,
+ EXTCON_USB_SPEED,
EXTCON_NONE,
};
@@ -586,6 +597,11 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
/* Defer starting USB host mode until after PD */
}
+ /* Set CC back to DRP toggle for the next disconnect */
+ val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+
pd->rx_msg_len = 0;
pd->rx_msg_type = 0;
pd->rx_msgid = -1;
@@ -615,10 +631,15 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
- pd->in_pr_swap = false;
pd->current_state = PE_SRC_SEND_CAPABILITIES;
- usbpd_dbg(&pd->dev, "Enter %s\n",
- usbpd_state_strings[pd->current_state]);
+ if (pd->in_pr_swap) {
+ pd->in_pr_swap = false;
+ hrtimer_start(&pd->timer,
+ ms_to_ktime(SWAP_SOURCE_START_TIME),
+ HRTIMER_MODE_REL);
+ break;
+ }
+
/* fall-through */
case PE_SRC_SEND_CAPABILITIES:
@@ -685,6 +706,10 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
SVDM_CMD_TYPE_INITIATOR, 0,
NULL, 0);
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
+ is_cable_flipped(pd));
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED,
+ ss_host);
extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 1);
}
@@ -755,6 +780,8 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
EXTCON_USB_CC,
is_cable_flipped(pd));
extcon_set_cable_state_(pd->extcon,
+ EXTCON_USB_SPEED, ss_dev);
+ extcon_set_cable_state_(pd->extcon,
EXTCON_USB, 1);
}
}
@@ -787,18 +814,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->in_pr_swap = false;
pd->current_voltage = 5000000;
- if (!pd->vbus_present) {
- /* can get here during a hard reset and we lost vbus */
- pd->current_state = PE_SNK_DISCOVERY;
- hrtimer_start(&pd->timer, ms_to_ktime(2000),
- HRTIMER_MODE_REL);
- break;
- }
-
- /*
- * If VBUS is already present go and skip ahead to
- * PE_SNK_WAIT_FOR_CAPABILITIES.
- */
pd->current_state = PE_SNK_WAIT_FOR_CAPABILITIES;
/* fall-through */
@@ -852,6 +867,8 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->current_dr = DR_UFP;
extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
is_cable_flipped(pd));
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED,
+ ss_dev);
extcon_set_cable_state_(pd->extcon, EXTCON_USB, 1);
pd_phy_update_roles(pd->current_dr, pd->current_pr);
}
@@ -867,8 +884,28 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
POWER_SUPPLY_PROP_VOLTAGE_MAX, &val);
pd->current_voltage = pd->requested_voltage;
- /* recursive call; go back to beginning state */
- usbpd_set_state(pd, PE_SNK_STARTUP);
+ /* max time for hard reset to toggle vbus off/on */
+ hrtimer_start(&pd->timer,
+ ms_to_ktime(SNK_HARD_RESET_RECOVER_TIME),
+ HRTIMER_MODE_REL);
+ break;
+
+ case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
+ val.intval = pd->requested_current = 0; /* suspend charging */
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &val);
+
+ pd->in_explicit_contract = false;
+
+ /*
+ * need to update PR bit in message header so that
+ * proper GoodCRC is sent when receiving next PS_RDY
+ */
+ pd_phy_update_roles(pd->current_dr, PR_SRC);
+
+ /* wait for PS_RDY */
+ hrtimer_start(&pd->timer, ms_to_ktime(PS_SOURCE_OFF),
+ HRTIMER_MODE_REL);
break;
default:
@@ -1194,12 +1231,14 @@ static void dr_swap(struct usbpd *pd)
extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 0);
extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
is_cable_flipped(pd));
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED, ss_dev);
extcon_set_cable_state_(pd->extcon, EXTCON_USB, 1);
pd->current_dr = DR_UFP;
} else if (pd->current_dr == DR_UFP) {
extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
is_cable_flipped(pd));
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED, ss_host);
extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 1);
pd->current_dr = DR_DFP;
@@ -1251,18 +1290,20 @@ static void usbpd_sm(struct work_struct *w)
pd->caps_count = 0;
pd->hard_reset_count = 0;
pd->src_cap_id = 0;
+ pd->requested_voltage = 0;
+ pd->requested_current = 0;
memset(&pd->received_pdos, 0, sizeof(pd->received_pdos));
val.intval = 0;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
- if (pd->current_pr == PR_SRC) {
+ if (pd->current_pr == PR_SRC)
regulator_disable(pd->vbus);
- if (pd->vconn_enabled) {
- regulator_disable(pd->vconn);
- pd->vconn_enabled = false;
- }
+
+ if (pd->vconn_enabled) {
+ regulator_disable(pd->vconn);
+ pd->vconn_enabled = false;
}
if (pd->current_dr == DR_UFP)
@@ -1345,6 +1386,8 @@ static void usbpd_sm(struct work_struct *w)
extcon_set_cable_state_(pd->extcon,
EXTCON_USB_CC, is_cable_flipped(pd));
extcon_set_cable_state_(pd->extcon,
+ EXTCON_USB_SPEED, ss_host);
+ extcon_set_cable_state_(pd->extcon,
EXTCON_USB_HOST, 1);
} else if (pd->caps_count >= PD_CAPS_COUNT) {
usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
@@ -1367,7 +1410,7 @@ static void usbpd_sm(struct work_struct *w)
pd->hard_reset_count = 0;
pd->pd_connected = true; /* we know peer is PD capable */
- val.intval = POWER_SUPPLY_TYPE_USB_PD;
+ val.intval = pd->psy_type = POWER_SUPPLY_TYPE_USB_PD;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_TYPE, &val);
@@ -1415,6 +1458,9 @@ static void usbpd_sm(struct work_struct *w)
dr_swap(pd);
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
} else if (ctrl_recvd == MSG_PR_SWAP) {
+ /* lock in current mode */
+ set_power_role(pd, pd->current_pr);
+
/* we'll happily accept Src->Sink requests anytime */
ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
if (ret) {
@@ -1424,7 +1470,9 @@ static void usbpd_sm(struct work_struct *w)
}
pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF;
- queue_work(pd->wq, &pd->sm_work);
+ hrtimer_start(&pd->timer,
+ ms_to_ktime(SRC_TRANSITION_TIME),
+ HRTIMER_MODE_REL);
break;
} else {
if (data_recvd == MSG_VDM)
@@ -1444,28 +1492,13 @@ static void usbpd_sm(struct work_struct *w)
usbpd_set_state(pd, PE_SRC_TRANSITION_TO_DEFAULT);
break;
- case PE_SNK_DISCOVERY:
- if (!pd->vbus_present) {
- /* Hard reset and VBUS didn't come back? */
- power_supply_get_property(pd->usb_psy,
- POWER_SUPPLY_PROP_TYPE, &val);
- if (val.intval == POWER_SUPPLY_TYPEC_NONE) {
- pd->typec_mode = POWER_SUPPLY_TYPEC_NONE;
- queue_work(pd->wq, &pd->sm_work);
- }
- break;
- }
-
- usbpd_set_state(pd, PE_SNK_WAIT_FOR_CAPABILITIES);
- break;
-
case PE_SNK_WAIT_FOR_CAPABILITIES:
if (data_recvd == MSG_SOURCE_CAPABILITIES) {
val.intval = 1;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
- val.intval = POWER_SUPPLY_TYPE_USB_PD;
+ val.intval = pd->psy_type = POWER_SUPPLY_TYPE_USB_PD;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_TYPE, &val);
@@ -1559,6 +1592,9 @@ static void usbpd_sm(struct work_struct *w)
dr_swap(pd);
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
} else if (ctrl_recvd == MSG_PR_SWAP) {
+ /* lock in current mode */
+ set_power_role(pd, pd->current_pr);
+
/* TODO: should we Reject in certain circumstances? */
ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
if (ret) {
@@ -1568,19 +1604,7 @@ static void usbpd_sm(struct work_struct *w)
}
pd->in_pr_swap = true;
- pd->current_state = PE_PRS_SNK_SRC_TRANSITION_TO_OFF;
- /* turn off sink */
- pd->in_explicit_contract = false;
-
- /*
- * need to update PR bit in message header so that
- * proper GoodCRC is sent when receiving next PS_RDY
- */
- pd->current_pr = PR_SRC;
- pd_phy_update_roles(pd->current_dr, pd->current_pr);
-
- hrtimer_start(&pd->timer, ms_to_ktime(PS_SOURCE_OFF),
- HRTIMER_MODE_REL);
+ usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
} else {
if (data_recvd == MSG_VDM)
@@ -1590,6 +1614,20 @@ static void usbpd_sm(struct work_struct *w)
}
break;
+ case PE_SNK_TRANSITION_TO_DEFAULT:
+ if (pd->vbus_present) {
+ usbpd_set_state(pd, PE_SNK_STARTUP);
+ } else {
+ /* Hard reset and VBUS didn't come back? */
+ power_supply_get_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPEC_MODE, &val);
+ if (val.intval == POWER_SUPPLY_TYPEC_NONE) {
+ pd->typec_mode = POWER_SUPPLY_TYPEC_NONE;
+ queue_work(pd->wq, &pd->sm_work);
+ }
+ }
+ break;
+
case PE_SRC_SOFT_RESET:
case PE_SNK_SOFT_RESET:
/* Reset protocol layer */
@@ -1663,16 +1701,24 @@ static void usbpd_sm(struct work_struct *w)
}
pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF;
- /* fall-through */
+ hrtimer_start(&pd->timer, ms_to_ktime(SRC_TRANSITION_TIME),
+ HRTIMER_MODE_REL);
+ break;
+
case PE_PRS_SRC_SNK_TRANSITION_TO_OFF:
pd->in_pr_swap = true;
pd->in_explicit_contract = false;
regulator_disable(pd->vbus);
- set_power_role(pd, PR_SINK); /* switch Rp->Rd */
+
+ /* PE_PRS_SRC_SNK_Assert_Rd */
pd->current_pr = PR_SINK;
+ set_power_role(pd, pd->current_pr);
pd_phy_update_roles(pd->current_dr, pd->current_pr);
+ /* allow time for Vbus discharge, must be < tSrcSwapStdby */
+ msleep(500);
+
ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
if (ret) {
usbpd_err(&pd->dev, "Error sending PS_RDY\n");
@@ -1699,19 +1745,7 @@ static void usbpd_sm(struct work_struct *w)
}
pd->in_pr_swap = true;
- pd->current_state = PE_PRS_SNK_SRC_TRANSITION_TO_OFF;
- /* turn off sink */
- pd->in_explicit_contract = false;
-
- /*
- * need to update PR bit in message header so that
- * proper GoodCRC is sent when receiving next PS_RDY
- */
- pd->current_pr = PR_SRC;
- pd_phy_update_roles(pd->current_dr, pd->current_pr);
-
- hrtimer_start(&pd->timer, ms_to_ktime(PS_SOURCE_OFF),
- HRTIMER_MODE_REL);
+ usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
@@ -1720,14 +1754,20 @@ static void usbpd_sm(struct work_struct *w)
break;
}
+ /* PE_PRS_SNK_SRC_Assert_Rp */
+ pd->current_pr = PR_SRC;
+ set_power_role(pd, pd->current_pr);
pd->current_state = PE_PRS_SNK_SRC_SOURCE_ON;
+
/* fall-through */
+
case PE_PRS_SNK_SRC_SOURCE_ON:
- set_power_role(pd, PR_SRC);
ret = regulator_enable(pd->vbus);
if (ret)
usbpd_err(&pd->dev, "Unable to enable vbus\n");
+ msleep(200); /* allow time for VBUS ramp-up; must be < tNewSrc */
+
ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
if (ret) {
usbpd_err(&pd->dev, "Error sending PS_RDY\n");
@@ -1748,6 +1788,20 @@ static void usbpd_sm(struct work_struct *w)
pd->rx_msg_type = pd->rx_msg_len = 0;
}
+static inline const char *src_current(enum power_supply_typec_mode typec_mode)
+{
+ switch (typec_mode) {
+ case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+ return "default";
+ case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+ return "medium - 1.5A";
+ case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+ return "high - 3.0A";
+ default:
+ return "";
+ }
+}
+
static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
{
struct usbpd *pd = container_of(nb, struct usbpd, psy_nb);
@@ -1811,11 +1865,12 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
* until the HW bug is fixed: in which disconnection won't be reported
* on VBUS loss alone unless pullup is also removed from CC.
*/
- if ((pd->hard_reset || pd->in_pr_swap) &&
- typec_mode == POWER_SUPPLY_TYPEC_NONE &&
- pd->psy_type != POWER_SUPPLY_TYPE_USB) {
+ if (typec_mode == POWER_SUPPLY_TYPEC_NONE &&
+ (pd->in_pr_swap ||
+ (pd->psy_type != POWER_SUPPLY_TYPE_USB &&
+ pd->current_state == PE_SNK_TRANSITION_TO_DEFAULT))) {
usbpd_dbg(&pd->dev, "Ignoring disconnect due to %s\n",
- pd->hard_reset ? "hard reset" : "PR swap");
+ pd->in_pr_swap ? "PR swap" : "hard reset");
return 0;
}
@@ -1828,8 +1883,9 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
psy_type = val.intval;
- usbpd_dbg(&pd->dev, "typec mode:%d present:%d type:%d\n", typec_mode,
- pd->vbus_present, psy_type);
+ usbpd_dbg(&pd->dev, "typec mode:%d present:%d type:%d orientation:%d\n",
+ typec_mode, pd->vbus_present, psy_type,
+ usbpd_get_plug_orientation(pd));
/* any change? */
if (pd->typec_mode == typec_mode && pd->psy_type == psy_type)
@@ -1848,8 +1904,10 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
- usbpd_info(&pd->dev, "Type-C Source connected\n");
- if (pd->current_pr != PR_SINK) {
+ usbpd_info(&pd->dev, "Type-C Source (%s) connected\n",
+ src_current(typec_mode));
+ if (pd->current_pr != PR_SINK ||
+ pd->current_state == PE_SNK_TRANSITION_TO_DEFAULT) {
pd->current_pr = PR_SINK;
queue_work(pd->wq, &pd->sm_work);
}
@@ -1858,7 +1916,9 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
/* Source states */
case POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE:
case POWER_SUPPLY_TYPEC_SINK:
- usbpd_info(&pd->dev, "Type-C Sink connected\n");
+ usbpd_info(&pd->dev, "Type-C Sink%s connected\n",
+ typec_mode == POWER_SUPPLY_TYPEC_SINK ?
+ "" : " (powered)");
if (pd->current_pr != PR_SRC) {
pd->current_pr = PR_SRC;
queue_work(pd->wq, &pd->sm_work);
@@ -2229,12 +2289,15 @@ struct usbpd *devm_usbpd_get_by_phandle(struct device *dev, const char *phandle)
struct platform_device *pdev;
struct device *pd_dev;
+ if (!usbpd_class.p) /* usbpd_init() not yet called */
+ return ERR_PTR(-EAGAIN);
+
if (!dev->of_node)
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EINVAL);
pd_np = of_parse_phandle(dev->of_node, phandle, 0);
if (!pd_np)
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-ENXIO);
pdev = of_find_device_by_node(pd_np);
if (!pdev)
@@ -2244,7 +2307,8 @@ struct usbpd *devm_usbpd_get_by_phandle(struct device *dev, const char *phandle)
match_usbpd_device);
if (!pd_dev) {
platform_device_put(pdev);
- return ERR_PTR(-ENODEV);
+ /* device was found but may not have probed yet, so defer */
+ return ERR_PTR(-EPROBE_DEFER);
}
ptr = devres_alloc(devm_usbpd_put, sizeof(*ptr), GFP_KERNEL);
@@ -2256,7 +2320,7 @@ struct usbpd *devm_usbpd_get_by_phandle(struct device *dev, const char *phandle)
pd = dev_get_drvdata(pd_dev);
if (!pd)
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EPROBE_DEFER);
*ptr = pd;
devres_add(dev, ptr);
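
A quick sanity check on the new recovery budget: SNK_HARD_RESET_RECOVER_TIME sums the four PD timing terms named in its comment, so the hrtimer that replaces the old recursive jump to PE_SNK_STARTUP waits out the full worst-case VBUS off/on window before PE_SNK_TRANSITION_TO_DEFAULT re-examines vbus_present:

#include <stdio.h>

/* terms from the macro's comment, in milliseconds */
#define T_PS_HARD_RESET  35
#define T_SAFE_0V       650
#define T_SRC_RECOVER  1000
#define T_SRC_TURN_ON   275

int main(void)
{
	printf("SNK_HARD_RESET_RECOVER_TIME = %d ms\n",
	       T_PS_HARD_RESET + T_SAFE_0V + T_SRC_RECOVER + T_SRC_TURN_ON);
	/* prints 1960 ms, just under two seconds */
	return 0;
}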
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index a5368cdf2254..55918d47a21a 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -193,6 +193,7 @@ enum mdss_qos_settings {
MDSS_QOS_REMAPPER,
MDSS_QOS_IB_NOCR,
MDSS_QOS_WB2_WRITE_GATHER_EN,
+ MDSS_QOS_WB_QOS,
MDSS_QOS_MAX,
};
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 57e18a7dc5e1..4e68952d33a9 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -45,6 +45,18 @@
#define VDDA_UA_ON_LOAD 100000 /* uA units */
#define VDDA_UA_OFF_LOAD 100 /* uA units */
+#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
+static u32 supported_modes[] = {
+ HDMI_VFRMT_640x480p60_4_3,
+ HDMI_VFRMT_720x480p60_4_3, HDMI_VFRMT_720x480p60_16_9,
+ HDMI_VFRMT_1280x720p60_16_9,
+ HDMI_VFRMT_1920x1080p60_16_9,
+ HDMI_VFRMT_3840x2160p24_16_9, HDMI_VFRMT_3840x2160p30_16_9,
+ HDMI_VFRMT_3840x2160p60_16_9,
+ HDMI_VFRMT_4096x2160p24_256_135, HDMI_VFRMT_4096x2160p30_256_135,
+ HDMI_VFRMT_4096x2160p60_256_135, HDMI_EVFRMT_4096x2160p24_16_9
+};
+
static void mdss_dp_put_dt_clk_data(struct device *dev,
struct dss_module_power *module_power)
{
@@ -789,17 +801,34 @@ void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *dp)
cap = &dp->dpcd;
- data = dp->lane_cnt - 1;
- data <<= 4;
+ data |= (2 << 13); /* default: LSCLK divider = 1/4 LCLK */
+
+ /* Color Format */
+ switch (dp->panel_data.panel_info.out_format) {
+ case MDP_Y_CBCR_H2V2:
+ data |= (1 << 11); /* YUV420 */
+ break;
+ case MDP_Y_CBCR_H2V1:
+ data |= (2 << 11); /* YUV422 */
+ break;
+ default:
+ data |= (0 << 11); /* RGB */
+ break;
+ }
+
+ /* Scrambler reset enable */
+ if (cap->scrambler_reset)
+ data |= (1 << 10);
+
+ if (dp->edid.color_depth != 6)
+ data |= 0x100; /* Default: 8 bits */
+
+ /* Num of Lanes */
+ data |= ((dp->lane_cnt - 1) << 4);
if (cap->enhanced_frame)
data |= 0x40;
- if (dp->edid.color_depth == 8) {
- /* 0 == 6 bits, 1 == 8 bits */
- data |= 0x100; /* bit 8 */
- }
-
if (!timing->interlaced) /* progressive */
data |= 0x04;
@@ -863,6 +892,8 @@ static int dp_audio_info_setup(struct platform_device *pdev,
mdss_dp_set_safe_to_exit_level(&dp_ctrl->ctrl_io, dp_ctrl->lane_cnt);
mdss_dp_audio_enable(&dp_ctrl->ctrl_io, true);
+ dp_ctrl->wait_for_audio_comp = true;
+
return rc;
} /* dp_audio_info_setup */
@@ -885,6 +916,17 @@ static int dp_get_audio_edid_blk(struct platform_device *pdev,
return rc;
} /* dp_get_audio_edid_blk */
+static void dp_audio_codec_teardown_done(struct platform_device *pdev)
+{
+ struct mdss_dp_drv_pdata *dp = platform_get_drvdata(pdev);
+
+ if (!dp) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ pr_debug("audio codec teardown done\n");
+ complete_all(&dp->audio_comp);
+}
+
static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp)
{
int ret = 0;
@@ -906,6 +948,8 @@ static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp)
dp_get_audio_edid_blk;
dp->ext_audio_data.codec_ops.cable_status =
dp_get_cable_status;
+ dp->ext_audio_data.codec_ops.teardown_done =
+ dp_audio_codec_teardown_done;
if (!dp->pdev->dev.of_node) {
pr_err("%s cannot find dp dev.of_node\n", __func__);
@@ -936,8 +980,6 @@ end:
return ret;
}
-#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
-
static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
{
struct mdss_panel_info *pinfo;
@@ -949,7 +991,6 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
return -EINVAL;
}
- dp_drv->ds_data.ds_registered = false;
ret = hdmi_get_supported_mode(&timing, &dp_drv->ds_data, vic);
pinfo = &dp_drv->panel_data.panel_info;
@@ -981,12 +1022,21 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
pinfo->lcdc.hsync_skew = 0;
pinfo->is_pluggable = true;
+ dp_drv->bpp = pinfo->bpp;
+
pr_debug("update res. vic= %d, pclk_rate = %llu\n",
dp_drv->vic, pinfo->clk_rate);
return 0;
} /* dp_init_panel_info */
+static inline void mdss_dp_set_audio_switch_node(
+ struct mdss_dp_drv_pdata *dp, int val)
+{
+ if (dp && dp->ext_audio_data.intf_ops.notify)
+ dp->ext_audio_data.intf_ops.notify(dp->ext_pdev,
+ val);
+}
int mdss_dp_on(struct mdss_panel_data *pdata)
{
@@ -1054,6 +1104,9 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
goto exit;
}
+ mdss_dp_phy_share_lane_config(&dp_drv->phy_io,
+ orientation, dp_drv->dpcd.max_lane_count);
+
pr_debug("link_rate = 0x%x\n", dp_drv->link_rate);
dp_drv->power_data[DP_CTRL_PM].clk_config[0].rate =
@@ -1096,6 +1149,7 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
pr_debug("mainlink ready\n");
dp_drv->power_on = true;
+ mdss_dp_set_audio_switch_node(dp_drv, true);
pr_debug("End-\n");
exit:
@@ -1119,14 +1173,15 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
mutex_lock(&dp_drv->train_mutex);
reinit_completion(&dp_drv->idle_comp);
-
- mdss_dp_state_ctrl(&dp_drv->ctrl_io, 0);
+ mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
if (dp_drv->link_clks_on)
mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false);
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, false);
+ mdss_dp_audio_enable(&dp_drv->ctrl_io, false);
+
mdss_dp_irq_disable(dp_drv);
mdss_dp_config_gpios(dp_drv, false);
@@ -1147,14 +1202,6 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
return 0;
}
-static inline void mdss_dp_set_audio_switch_node(
- struct mdss_dp_drv_pdata *dp, int val)
-{
- if (dp && dp->ext_audio_data.intf_ops.notify)
- dp->ext_audio_data.intf_ops.notify(dp->ext_pdev,
- val);
-}
-
static void mdss_dp_send_cable_notification(
struct mdss_dp_drv_pdata *dp, int val)
{
@@ -1169,6 +1216,38 @@ static void mdss_dp_send_cable_notification(
dp->ext_audio_data.type, val);
}
+static void mdss_dp_audio_codec_wait(struct mdss_dp_drv_pdata *dp)
+{
+ const unsigned long audio_completion_timeout = HZ * 3; /* 3 s in jiffies */
+ int ret = 0;
+
+ if (!dp->wait_for_audio_comp)
+ return;
+
+ reinit_completion(&dp->audio_comp);
+ ret = wait_for_completion_timeout(&dp->audio_comp,
+ audio_completion_timeout);
+ if (ret <= 0)
+ pr_warn("audio codec teardown timed out\n");
+
+ dp->wait_for_audio_comp = false;
+}
+
+static void mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, bool enable)
+{
+ if (enable) {
+ mdss_dp_send_cable_notification(dp, enable);
+ } else {
+ mdss_dp_set_audio_switch_node(dp, enable);
+ mdss_dp_audio_codec_wait(dp);
+ mdss_dp_send_cable_notification(dp, enable);
+ }
+
+ pr_debug("notify state %s done\n",
+ enable ? "ENABLE" : "DISABLE");
+}
+
static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
{
struct mdss_dp_drv_pdata *dp_drv = NULL;
@@ -1183,6 +1262,10 @@ static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
panel_data);
+ dp_drv->ds_data.ds_registered = true;
+ dp_drv->ds_data.modes_num = ARRAY_SIZE(supported_modes);
+ dp_drv->ds_data.modes = supported_modes;
+
dp_drv->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
edid_init_data.kobj = dp_drv->kobj;
edid_init_data.ds_data = dp_drv->ds_data;
@@ -1236,15 +1319,19 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
mdss_dp_aux_init(dp_drv);
+ mdss_dp_phy_initialize(dp_drv);
+ mdss_dp_ctrl_reset(&dp_drv->ctrl_io);
mdss_dp_phy_reset(&dp_drv->ctrl_io);
mdss_dp_aux_reset(&dp_drv->ctrl_io);
- mdss_dp_phy_initialize(dp_drv);
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, true);
pr_debug("Ctrl_hw_rev =0x%x, phy hw_rev =0x%x\n",
mdss_dp_get_ctrl_hw_version(&dp_drv->ctrl_io),
mdss_dp_get_phy_hw_version(&dp_drv->phy_io));
+ pr_debug("plug Orientation = %d\n",
+ usbpd_get_plug_orientation(dp_drv->pd));
+
mdss_dp_phy_aux_setup(&dp_drv->phy_io);
mdss_dp_irq_enable(dp_drv);
@@ -1264,8 +1351,7 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
goto edid_error;
}
- mdss_dp_send_cable_notification(dp_drv, true);
- mdss_dp_set_audio_switch_node(dp_drv, true);
+ mdss_dp_notify_clients(dp_drv, true);
dp_drv->dp_initialized = true;
return ret;
@@ -1702,16 +1788,15 @@ static void mdss_dp_do_link_train(struct mdss_dp_drv_pdata *dp)
static void mdss_dp_event_work(struct work_struct *work)
{
struct mdss_dp_drv_pdata *dp = NULL;
- struct delayed_work *dw = to_delayed_work(work);
unsigned long flag;
- u32 todo = 0, dp_config_pkt[2];
+ u32 todo = 0, config;
- if (!dw) {
+ if (!work) {
pr_err("invalid work structure\n");
return;
}
- dp = container_of(dw, struct mdss_dp_drv_pdata, dwork);
+ dp = container_of(work, struct mdss_dp_drv_pdata, work);
spin_lock_irqsave(&dp->event_lock, flag);
todo = dp->current_event;
@@ -1756,11 +1841,9 @@ static void mdss_dp_event_work(struct work_struct *work)
SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0);
break;
case EV_USBPD_DP_CONFIGURE:
- dp_config_pkt[0] = SVDM_HDR(USB_C_DP_SID, VDM_VERSION, 0x1,
- SVDM_CMD_TYPE_INITIATOR, DP_VDM_CONFIGURE);
- dp_config_pkt[1] = mdss_dp_usbpd_gen_config_pkt(dp);
+ config = mdss_dp_usbpd_gen_config_pkt(dp);
usbpd_send_svdm(dp->pd, USB_C_DP_SID, DP_VDM_CONFIGURE,
- SVDM_CMD_TYPE_INITIATOR, 0x1, dp_config_pkt, 0x2);
+ SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1);
break;
default:
pr_err("Unknown event:%d\n", todo);
@@ -1771,8 +1854,7 @@ static void dp_send_events(struct mdss_dp_drv_pdata *dp, u32 events)
{
spin_lock(&dp->event_lock);
dp->current_event = events;
- queue_delayed_work(dp->workq,
- &dp->dwork, HZ);
+ queue_work(dp->workq, &dp->work);
spin_unlock(&dp->event_lock);
}
@@ -1848,7 +1930,7 @@ static int mdss_dp_event_setup(struct mdss_dp_drv_pdata *dp)
return -EPERM;
}
- INIT_DELAYED_WORK(&dp->dwork, mdss_dp_event_work);
+ INIT_WORK(&dp->work, mdss_dp_event_work);
return 0;
}
@@ -1883,8 +1965,7 @@ static void usbpd_disconnect_callback(struct usbpd_svid_handler *hdlr)
mutex_lock(&dp_drv->pd_msg_mutex);
dp_drv->cable_connected = false;
mutex_unlock(&dp_drv->pd_msg_mutex);
- mdss_dp_send_cable_notification(dp_drv, false);
- mdss_dp_set_audio_switch_node(dp_drv, false);
+ mdss_dp_notify_clients(dp_drv, false);
}
static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
@@ -1968,8 +2049,7 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
}
break;
case DP_VDM_CONFIGURE:
- if ((dp_drv->cable_connected == true)
- || (cmd_type == SVDM_CMD_TYPE_RESP_ACK)) {
+ if (cmd_type == SVDM_CMD_TYPE_RESP_ACK) {
dp_drv->alt_mode.current_state = DP_CONFIGURE_DONE;
pr_debug("config USBPD to DP done\n");
mdss_dp_host_init(&dp_drv->panel_data);
@@ -2135,6 +2215,8 @@ static int mdss_dp_probe(struct platform_device *pdev)
mdss_dp_device_register(dp_drv);
dp_drv->inited = true;
+ dp_drv->wait_for_audio_comp = false;
+ init_completion(&dp_drv->audio_comp);
pr_debug("done\n");
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index 4710cf7a98e2..6c391f6f7de0 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -399,6 +399,7 @@ struct mdss_dp_drv_pdata {
struct completion train_comp;
struct completion idle_comp;
struct completion video_comp;
+ struct completion audio_comp;
struct mutex aux_mutex;
struct mutex train_mutex;
struct mutex pd_msg_mutex;
@@ -423,10 +424,11 @@ struct mdss_dp_drv_pdata {
char delay_start;
u32 bpp;
struct dp_statistic dp_stat;
+ bool wait_for_audio_comp;
/* event */
struct workqueue_struct *workq;
- struct delayed_work dwork;
+ struct work_struct work;
u32 current_event;
spinlock_t event_lock;
spinlock_t lock;
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 584d2edc364e..119e2a2b05cf 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -374,7 +374,19 @@ static int dp_aux_read_buf(struct mdss_dp_drv_pdata *ep, u32 addr,
/*
* edid standard header bytes
*/
-static char edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+static u8 edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+
+static bool dp_edid_is_valid_header(u8 *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edid_hdr); i++) {
+ if (buf[i] != edid_hdr[i])
+ return false;
+ }
+
+ return true;
+}
int dp_edid_buf_error(char *buf, int len)
{
@@ -396,11 +408,6 @@ int dp_edid_buf_error(char *buf, int len)
return -EINVAL;
}
- if (strncmp(buf, edid_hdr, strlen(edid_hdr))) {
- pr_err("Error: header\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -510,11 +517,20 @@ char mdss_dp_gen_link_clk(struct mdss_panel_info *pinfo, char lane_cnt)
pr_debug("clk_rate=%llu, bpp= %d, lane_cnt=%d\n",
pinfo->clk_rate, pinfo->bpp, lane_cnt);
- min_link_rate = (pinfo->clk_rate * 10) /
- (lane_cnt * encoding_factx10);
- min_link_rate = (min_link_rate * pinfo->bpp)
- / (DP_LINK_RATE_MULTIPLIER);
+
+ /*
+ * The max pixel clock supported is 675 MHz. The
+ * calculation below is ordered so that min_link_rate
+ * stays within 32-bit limits; any change to this
+ * section of code should preserve that property.
+ */
+ min_link_rate = pinfo->clk_rate
+ / (lane_cnt * encoding_factx10);
min_link_rate /= ln_to_link_ratio;
+ min_link_rate = (min_link_rate * pinfo->bpp);
+ min_link_rate = (u32)div_u64(min_link_rate * 10,
+ DP_LINK_RATE_MULTIPLIER);
pr_debug("min_link_rate = %d\n", min_link_rate);
@@ -699,10 +715,11 @@ static int dp_aux_chan_ready(struct mdss_dp_drv_pdata *ep)
int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
{
- struct edp_buf *rp;
- int cnt, rlen;
- int ret = 0;
- int blk_num = 0;
+ struct edp_buf *rp = &dp->rxp;
+ int rlen, ret = 0;
+ int edid_blk = 0, blk_num = 0, retries = 10;
+ bool edid_parsing_done = false;
+ const u8 cea_tag = 0x02;
ret = dp_aux_chan_ready(dp);
if (ret) {
@@ -710,70 +727,56 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
return ret;
}
- for (cnt = 5; cnt; cnt--) {
- rlen = dp_aux_read_buf
- (dp, EDID_START_ADDRESS, EDID_BLOCK_SIZE, 1);
- if (rlen > 0) {
- pr_debug("cnt=%d, block=%d, rlen=%d\n",
- cnt, blk_num, rlen);
-
- rp = &dp->rxp;
- if (!dp_edid_buf_error(rp->data, rp->len))
- break;
+ do {
+ rlen = dp_aux_read_buf(dp, EDID_START_ADDRESS +
+ (blk_num * EDID_BLOCK_SIZE),
+ EDID_BLOCK_SIZE, 1);
+ if (rlen != EDID_BLOCK_SIZE) {
+ pr_err("Read failed. rlen=%d\n", rlen);
+ continue;
}
- }
- if ((cnt <= 0) && (rlen != EDID_BLOCK_SIZE)) {
- pr_err("Read failed. rlen=%d\n", rlen);
- return -EINVAL;
- }
+ pr_debug("blk_num=%d, rlen=%d\n", blk_num, rlen);
- rp = &dp->rxp;
+ if (dp_edid_is_valid_header(rp->data)) {
+ if (dp_edid_buf_error(rp->data, rp->len))
+ continue;
- dp_extract_edid_manufacturer(&dp->edid, rp->data);
- dp_extract_edid_product(&dp->edid, rp->data);
- dp_extract_edid_version(&dp->edid, rp->data);
- dp_extract_edid_ext_block_cnt(&dp->edid, rp->data);
- dp_extract_edid_video_support(&dp->edid, rp->data);
- dp_extract_edid_feature(&dp->edid, rp->data);
- dp_extract_edid_detailed_timing_description(&dp->edid, rp->data);
- /* for the first block initialize the edid buffer size */
- dp->edid_buf_size = 0;
+ if (edid_parsing_done) {
+ blk_num++;
+ continue;
+ }
- pr_debug("edid extension = %d\n",
- dp->edid.ext_block_cnt);
+ dp_extract_edid_manufacturer(&dp->edid, rp->data);
+ dp_extract_edid_product(&dp->edid, rp->data);
+ dp_extract_edid_version(&dp->edid, rp->data);
+ dp_extract_edid_ext_block_cnt(&dp->edid, rp->data);
+ dp_extract_edid_video_support(&dp->edid, rp->data);
+ dp_extract_edid_feature(&dp->edid, rp->data);
+ dp_extract_edid_detailed_timing_description(&dp->edid,
+ rp->data);
- memcpy(dp->edid_buf, rp->data, EDID_BLOCK_SIZE);
- dp->edid_buf_size += EDID_BLOCK_SIZE;
+ edid_parsing_done = true;
+ } else {
+ edid_blk++;
+ blk_num++;
- if (!dp->edid.ext_block_cnt)
- return 0;
+ /* fix dongle byte shift issue */
+ if (edid_blk == 1 && rp->data[0] != cea_tag) {
+ u8 tmp[EDID_BLOCK_SIZE - 1];
- for (blk_num = 1; blk_num <= dp->edid.ext_block_cnt;
- blk_num++) {
- for (cnt = 5; cnt; cnt--) {
- rlen = dp_aux_read_buf
- (dp, EDID_START_ADDRESS +
- (blk_num * EDID_BLOCK_SIZE),
- EDID_BLOCK_SIZE, 1);
- if (rlen > 0) {
- pr_debug("cnt=%d, blk_num=%d, rlen=%d\n",
- cnt, blk_num, rlen);
- rp = &dp->rxp;
- if (!dp_edid_buf_error(rp->data, rp->len))
- break;
+ memcpy(tmp, rp->data, EDID_BLOCK_SIZE - 1);
+ rp->data[0] = cea_tag;
+ memcpy(rp->data + 1, tmp, EDID_BLOCK_SIZE - 1);
}
}
- if ((cnt <= 0) && (rlen != EDID_BLOCK_SIZE)) {
- pr_err("Read failed. rlen=%d\n", rlen);
- return -EINVAL;
- }
+ memcpy(dp->edid_buf + (edid_blk * EDID_BLOCK_SIZE),
+ rp->data, EDID_BLOCK_SIZE);
- memcpy(dp->edid_buf + (blk_num * EDID_BLOCK_SIZE),
- rp->data, EDID_BLOCK_SIZE);
- dp->edid_buf_size += EDID_BLOCK_SIZE;
- }
+ if (edid_blk == dp->edid.ext_block_cnt)
+ return 0;
+ } while (retries--);
return 0;
}
@@ -1113,17 +1116,17 @@ static void dp_host_train_set(struct mdss_dp_drv_pdata *ep, int train)
}
char vm_pre_emphasis[4][4] = {
- {0x00, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
- {0x00, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
- {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
- {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
+ {0x00, 0x09, 0x11, 0x0C}, /* pe0, 0 db */
+ {0x00, 0x0A, 0x10, 0xFF}, /* pe1, 3.5 db */
+ {0x00, 0x0C, 0xFF, 0xFF}, /* pe2, 6.0 db */
+ {0x00, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
};
/* voltage swing; 0.2 V and 1.0 V are not supported */
char vm_voltage_swing[4][4] = {
- {0x0a, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
- {0x07, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
- {0x1A, 0x1E, 0xFF, 0xFF}, /* sw1, 0.8 v */
+ {0x07, 0x0f, 0x12, 0x1E}, /* sw0, 0.4v */
+ {0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6 v */
+ {0x18, 0x1F, 0xFF, 0xFF}, /* sw2, 0.8 v */
{0x1E, 0xFF, 0xFF, 0xFF} /* sw1, 1.2 v, optional */
};
@@ -1211,7 +1214,7 @@ static int dp_start_link_train_1(struct mdss_dp_drv_pdata *ep)
static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
{
- int tries;
+ int tries = 0;
int ret = 0;
int usleep_time;
char pattern;
@@ -1223,12 +1226,12 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
else
pattern = 0x02;
- dp_host_train_set(ep, pattern); /* train_2 */
- dp_voltage_pre_emphasise_set(ep);
dp_train_pattern_set_write(ep, pattern | 0x20);/* train_2 */
- tries = 0;
- while (1) {
+ do {
+ dp_voltage_pre_emphasise_set(ep);
+ dp_host_train_set(ep, pattern);
+
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
@@ -1240,14 +1243,13 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
}
tries++;
- if (tries > 5) {
+ if (tries > 4) {
ret = -1;
break;
}
dp_sink_train_set_adjust(ep);
- dp_voltage_pre_emphasise_set(ep);
- }
+ } while (1);
return ret;
}
@@ -1319,7 +1321,6 @@ static void dp_clear_training_pattern(struct mdss_dp_drv_pdata *ep)
int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
{
int ret = 0;
- int usleep_time;
ret = dp_aux_chan_ready(dp);
if (ret) {
@@ -1340,8 +1341,6 @@ train_start:
mdss_dp_state_ctrl(&dp->ctrl_io, 0);
dp_clear_training_pattern(dp);
- usleep_time = dp->dpcd.training_read_interval;
- usleep_range(usleep_time, usleep_time);
ret = dp_start_link_train_1(dp);
if (ret < 0) {
@@ -1356,8 +1355,6 @@ train_start:
pr_debug("Training 1 completed successfully\n");
- mdss_dp_state_ctrl(&dp->ctrl_io, 0);
- dp_clear_training_pattern(dp);
ret = dp_start_link_train_2(dp);
if (ret < 0) {
if (dp_link_rate_down_shift(dp) == 0) {
@@ -1375,7 +1372,8 @@ train_start:
clear:
dp_clear_training_pattern(dp);
if (ret != -1) {
- mdss_dp_setup_tr_unit(&dp->ctrl_io);
+ mdss_dp_setup_tr_unit(&dp->ctrl_io, dp->link_rate,
+ dp->lane_cnt, dp->vic);
mdss_dp_state_ctrl(&dp->ctrl_io, ST_SEND_VIDEO);
pr_debug("State_ctrl set to SEND_VIDEO\n");
}
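
One detail worth spelling out: the header check this file drops used strncmp(buf, edid_hdr, strlen(edid_hdr)), and since edid_hdr begins with 0x00, strlen() returned 0 and the comparison always passed; the old check was a no-op. The new dp_edid_is_valid_header() compares all eight bytes. The same test as a memcmp one-liner (a sketch; the driver keeps its explicit loop):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t edid_hdr[8] =
	{0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};

static bool edid_is_valid_header(const uint8_t *buf)
{
	/* unlike strncmp, memcmp does not stop at the embedded 0x00 */
	return memcmp(buf, edid_hdr, sizeof(edid_hdr)) == 0;
}

int main(void)
{
	uint8_t good[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
	uint8_t bad[8]  = {0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09};

	printf("good: %d, bad: %d\n",
	       edid_is_valid_header(good), edid_is_valid_header(bad));
	return 0;
}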
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index bdf5d92f7053..92acb910e0c3 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -32,6 +32,29 @@
#define AUDIO_FREQ_48 48000
#define DP_AUDIO_FREQ_COUNT 3
+enum mdss_dp_pin_assignment {
+ PIN_ASSIGNMENT_A,
+ PIN_ASSIGNMENT_B,
+ PIN_ASSIGNMENT_C,
+ PIN_ASSIGNMENT_D,
+ PIN_ASSIGNMENT_E,
+ PIN_ASSIGNMENT_F,
+ PIN_ASSIGNMENT_MAX,
+};
+
+static const char *mdss_dp_pin_name(u8 pin)
+{
+ switch (pin) {
+ case PIN_ASSIGNMENT_A: return "PIN_ASSIGNMENT_A";
+ case PIN_ASSIGNMENT_B: return "PIN_ASSIGNMENT_B";
+ case PIN_ASSIGNMENT_C: return "PIN_ASSIGNMENT_C";
+ case PIN_ASSIGNMENT_D: return "PIN_ASSIGNMENT_D";
+ case PIN_ASSIGNMENT_E: return "PIN_ASSIGNMENT_E";
+ case PIN_ASSIGNMENT_F: return "PIN_ASSIGNMENT_F";
+ default: return "UNKNOWN";
+ }
+}
+
static const uint32_t naud_value[DP_AUDIO_FREQ_COUNT][DP_AUDIO_FREQ_COUNT] = {
{ 10125, 16875, 33750 },
{ 5625, 9375, 18750 },
@@ -143,6 +166,18 @@ void mdss_dp_aux_reset(struct dss_io_data *ctrl_io)
writel_relaxed(aux_ctrl, ctrl_io->base + DP_AUX_CTRL);
}
+/* reset DP controller */
+void mdss_dp_ctrl_reset(struct dss_io_data *ctrl_io)
+{
+ u32 sw_reset = readl_relaxed(ctrl_io->base + DP_SW_RESET);
+
+ sw_reset |= BIT(0);
+ writel_relaxed(sw_reset, ctrl_io->base + DP_SW_RESET);
+ udelay(1000);
+ sw_reset &= ~BIT(0);
+ writel_relaxed(sw_reset, ctrl_io->base + DP_SW_RESET);
+}
+
/* reset DP Mainlink */
void mdss_dp_mainlink_reset(struct dss_io_data *ctrl_io)
{
@@ -284,13 +319,47 @@ void mdss_dp_sw_mvid_nvid(struct dss_io_data *ctrl_io)
writel_relaxed(0x3c, ctrl_io->base + DP_SOFTWARE_NVID);
}
-void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io)
+void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
+ u8 ln_cnt, u32 res)
{
- /* Current Tr unit configuration supports only 1080p */
+ u32 dp_tu = 0x0;
+ u32 valid_boundary = 0x0;
+ u32 valid_boundary2 = 0x0;
+ struct dp_vc_tu_mapping_table const *tu_entry = tu_table;
+
writel_relaxed(0x21, ctrl_io->base + DP_MISC1_MISC0);
- writel_relaxed(0x0f0016, ctrl_io->base + DP_VALID_BOUNDARY);
- writel_relaxed(0x1f, ctrl_io->base + DP_TU);
- writel_relaxed(0x0, ctrl_io->base + DP_VALID_BOUNDARY_2);
+
+ for (; tu_entry != tu_table + ARRAY_SIZE(tu_table); ++tu_entry) {
+ if ((tu_entry->vic == res) &&
+ (tu_entry->lanes == ln_cnt) &&
+ (tu_entry->lrate == link_rate))
+ break;
+ }
+
+ if (tu_entry == tu_table + ARRAY_SIZE(tu_table)) {
+ pr_err("requested ln_cnt=%d, lrate=0x%x not supported\n",
+ ln_cnt, link_rate);
+ return;
+ }
+
+ dp_tu |= tu_entry->tu_size_minus1;
+ valid_boundary |= tu_entry->valid_boundary_link;
+ valid_boundary |= (tu_entry->delay_start_link << 16);
+
+ valid_boundary2 |= (tu_entry->valid_lower_boundary_link << 1);
+ valid_boundary2 |= (tu_entry->upper_boundary_count << 16);
+ valid_boundary2 |= (tu_entry->lower_boundary_count << 20);
+
+ if (tu_entry->boundary_moderation_en)
+ valid_boundary2 |= BIT(0);
+
+ writel_relaxed(valid_boundary, ctrl_io->base + DP_VALID_BOUNDARY);
+ writel_relaxed(dp_tu, ctrl_io->base + DP_TU);
+ writel_relaxed(valid_boundary2, ctrl_io->base + DP_VALID_BOUNDARY_2);
+
+ pr_debug("valid_boundary=0x%x, valid_boundary2=0x%x\n",
+ valid_boundary, valid_boundary2);
+ pr_debug("dp_tu=0x%x\n", dp_tu);
}
void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
@@ -431,9 +500,23 @@ void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status)
u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp)
{
+ u8 pin_cfg, pin;
u32 config = 0;
- config |= (dp->alt_mode.dp_cap.dlink_pin_config << 8);
+ pin_cfg = dp->alt_mode.dp_cap.dlink_pin_config;
+
+ for (pin = PIN_ASSIGNMENT_A; pin < PIN_ASSIGNMENT_MAX; pin++) {
+ if (pin_cfg & BIT(pin))
+ break;
+ }
+
+ if (pin == PIN_ASSIGNMENT_MAX)
+ pin = PIN_ASSIGNMENT_C;
+
+ pr_debug("pin assignment: %s\n", mdss_dp_pin_name(pin));
+
+ config |= BIT(pin) << 8;
+
config |= (0x1 << 2); /* configure for DPv1.3 */
config |= 0x2; /* Configuring for UFP_D */
@@ -441,6 +524,17 @@ u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp)
return config;
}
+void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io,
+ u8 orientation, u8 ln_cnt)
+{
+ u32 info = 0x0;
+
+ info |= (ln_cnt & 0x0F);
+ info |= ((orientation & 0x0F) << 4);
+ pr_debug("Shared Info = 0x%x\n", info);
+ writel_relaxed(info, phy_io->base + DP_PHY_SPARE0);
+}
+
void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io, char link_rate)
{
u32 acr_ctrl = 0;
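
mdss_dp_usbpd_gen_config_pkt() previously copied the sink's entire pin-assignment bitmask into the config VDO; it now selects the lowest advertised assignment (falling back to C) and sets exactly one bit. A self-contained sketch of that selection and packing; the bit positions mirror the diff, everything else is a hypothetical stand-in:

#include <stdint.h>
#include <stdio.h>

enum pin_assignment { PIN_A, PIN_B, PIN_C, PIN_D, PIN_E, PIN_F, PIN_MAX };

static uint32_t gen_config_pkt(uint8_t pin_cfg)
{
	uint32_t config = 0;
	int pin;

	/* pick the lowest pin assignment the sink advertises */
	for (pin = PIN_A; pin < PIN_MAX; pin++)
		if (pin_cfg & (1u << pin))
			break;

	if (pin == PIN_MAX)
		pin = PIN_C;                /* fallback, as in the driver */

	config |= (1u << pin) << 8;         /* single pin-assignment bit */
	config |= 0x1 << 2;                 /* configure for DPv1.3 */
	config |= 0x2;                      /* configure for UFP_D */
	return config;
}

int main(void)
{
	/* sink advertises assignments C and D -> C wins: prints 0x00000406 */
	printf("config=0x%08x\n",
	       (unsigned)gen_config_pkt((1u << PIN_C) | (1u << PIN_D)));
	return 0;
}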
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h
index 5eb9d092476f..cf2286f9b58a 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.h
+++ b/drivers/video/fbdev/msm/mdss_dp_util.h
@@ -150,6 +150,8 @@
#define DP_PHY_AUX_INTERRUPT_MASK (0x00000044)
#define DP_PHY_AUX_INTERRUPT_CLEAR (0x00000048)
+#define DP_PHY_SPARE0 0x00A8
+
#define QSERDES_TX0_OFFSET 0x0400
#define QSERDES_TX1_OFFSET 0x0800
@@ -200,17 +202,72 @@ struct edp_cmd {
char next; /* next command */
};
+struct dp_vc_tu_mapping_table {
+ u32 vic;
+ u8 lanes;
+ u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20) */
+ u8 bpp;
+ u8 valid_boundary_link;
+ u16 delay_start_link;
+ bool boundary_moderation_en;
+ u8 valid_lower_boundary_link;
+ u8 upper_boundary_count;
+ u8 lower_boundary_count;
+ u8 tu_size_minus1;
+};
+
+static const struct dp_vc_tu_mapping_table tu_table[] = {
+ {HDMI_VFRMT_640x480p60_4_3, 4, 06, 24,
+ 0x07, 0x0056, false, 0x00, 0x00, 0x00, 0x3b},
+ {HDMI_VFRMT_640x480p60_4_3, 2, 06, 24,
+ 0x0e, 0x004f, false, 0x00, 0x00, 0x00, 0x3b},
+ {HDMI_VFRMT_640x480p60_4_3, 1, 06, 24,
+ 0x15, 0x0039, false, 0x00, 0x00, 0x00, 0x2c},
+ {HDMI_VFRMT_720x480p60_4_3, 1, 06, 24,
+ 0x13, 0x0038, true, 0x12, 0x0c, 0x0b, 0x24},
+ {HDMI_VFRMT_720x480p60_16_9, 1, 06, 24,
+ 0x13, 0x0038, true, 0x12, 0x0c, 0x0b, 0x24},
+ {HDMI_VFRMT_1280x720p60_16_9, 4, 06, 24,
+ 0x0c, 0x0020, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1280x720p60_16_9, 2, 06, 24,
+ 0x16, 0x0015, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1280x720p60_16_9, 1, 10, 24,
+ 0x21, 0x001a, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_1920x1080p60_16_9, 4, 06, 24,
+ 0x16, 0x000f, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1920x1080p60_16_9, 2, 10, 24,
+ 0x21, 0x0011, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_1920x1080p60_16_9, 1, 20, 24,
+ 0x21, 0x001a, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p24_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p30_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p60_16_9, 4, 20, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p24_256_135, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p30_256_135, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p60_256_135, 4, 20, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_EVFRMT_4096x2160p24_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+};
+
int dp_aux_read(void *ep, struct edp_cmd *cmds);
int dp_aux_write(void *ep, struct edp_cmd *cmd);
void mdss_dp_state_ctrl(struct dss_io_data *ctrl_io, u32 data);
u32 mdss_dp_get_ctrl_hw_version(struct dss_io_data *ctrl_io);
u32 mdss_dp_get_phy_hw_version(struct dss_io_data *phy_io);
+void mdss_dp_ctrl_reset(struct dss_io_data *ctrl_io);
void mdss_dp_aux_reset(struct dss_io_data *ctrl_io);
void mdss_dp_mainlink_reset(struct dss_io_data *ctrl_io);
void mdss_dp_phy_reset(struct dss_io_data *ctrl_io);
void mdss_dp_switch_usb3_phy_to_dp_mode(struct dss_io_data *tcsr_reg_io);
void mdss_dp_assert_phy_reset(struct dss_io_data *ctrl_io, bool assert);
-void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io);
+void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
+ u8 ln_cnt, u32 res);
void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io);
void mdss_dp_hpd_configure(struct dss_io_data *ctrl_io, bool enable);
void mdss_dp_aux_ctrl(struct dss_io_data *ctrl_io, bool enable);
@@ -231,6 +288,8 @@ void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status);
u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp);
void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
struct lane_mapping l_map);
+void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io,
+ u8 orientation, u8 ln_cnt);
void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io,
char link_rate);
void mdss_dp_audio_setup_sdps(struct dss_io_data *ctrl_io);
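
As a cross-check of the new table: packing the 1920x1080p60, 4-lane, 1.62 Gbps entry through the register layout used in mdss_dp_setup_tr_unit() reproduces exactly the values the old code hard-wired for 1080p (DP_TU = 0x1f, DP_VALID_BOUNDARY = 0x0f0016, DP_VALID_BOUNDARY_2 = 0x0). A short sketch of that packing:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tu_entry {
	uint8_t  valid_boundary_link;
	uint16_t delay_start_link;
	bool     boundary_moderation_en;
	uint8_t  valid_lower_boundary_link;
	uint8_t  upper_boundary_count;
	uint8_t  lower_boundary_count;
	uint8_t  tu_size_minus1;
};

/* same shifts as mdss_dp_setup_tr_unit() in the diff above */
static void pack_tu_regs(const struct tu_entry *e,
			 uint32_t *tu, uint32_t *vb, uint32_t *vb2)
{
	*tu  = e->tu_size_minus1;
	*vb  = e->valid_boundary_link | ((uint32_t)e->delay_start_link << 16);
	*vb2 = ((uint32_t)e->valid_lower_boundary_link << 1) |
	       ((uint32_t)e->upper_boundary_count << 16) |
	       ((uint32_t)e->lower_boundary_count << 20) |
	       (e->boundary_moderation_en ? 1u : 0u);
}

int main(void)
{
	/* 1080p60, 4 lanes, 1.62 Gbps entry from the table above */
	const struct tu_entry e = { 0x16, 0x000f, false, 0, 0, 0, 0x1f };
	uint32_t tu, vb, vb2;

	pack_tu_regs(&e, &tu, &vb, &vb2);
	printf("DP_TU=0x%x DP_VALID_BOUNDARY=0x%x DP_VALID_BOUNDARY_2=0x%x\n",
	       (unsigned)tu, (unsigned)vb, (unsigned)vb2);
	/* prints 0x1f, 0xf0016, 0x0: the old hard-coded register values */
	return 0;
}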
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index c145f72c3c70..66cd99720afa 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -357,7 +357,7 @@ static int mdss_dsi_panel_power_lp(struct mdss_panel_data *pdata, int enable)
static int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata,
int power_state)
{
- int ret;
+ int ret = 0;
struct mdss_panel_info *pinfo;
if (pdata == NULL) {
@@ -383,7 +383,11 @@ static int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata,
switch (power_state) {
case MDSS_PANEL_POWER_OFF:
- ret = mdss_dsi_panel_power_off(pdata);
+ case MDSS_PANEL_POWER_LCD_DISABLED:
+ /* if LCD has not been disabled, then disable it now */
+ if ((pinfo->panel_power_state != MDSS_PANEL_POWER_LCD_DISABLED)
+ && (pinfo->panel_power_state != MDSS_PANEL_POWER_OFF))
+ ret = mdss_dsi_panel_power_off(pdata);
break;
case MDSS_PANEL_POWER_ON:
if (mdss_dsi_is_panel_on_lp(pdata))
@@ -2469,6 +2473,7 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
int power_state;
u32 mode;
struct mdss_panel_info *pinfo;
if (pdata == NULL) {
pr_err("%s: Invalid input data\n", __func__);
@@ -2529,6 +2534,20 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
rc = mdss_dsi_blank(pdata, power_state);
rc = mdss_dsi_off(pdata, power_state);
break;
+ case MDSS_EVENT_DISABLE_PANEL:
+ /* disable esd thread */
+ disable_esd_thread();
+
+ /* disable backlight */
+ ctrl_pdata->panel_data.set_backlight(pdata, 0);
+
+ /* send the off commands */
+ ctrl_pdata->off(pdata);
+
+ /* disable panel power */
+ rc = mdss_dsi_panel_power_ctrl(pdata,
+ MDSS_PANEL_POWER_LCD_DISABLED);
+ break;
case MDSS_EVENT_CONT_SPLASH_FINISH:
if (ctrl_pdata->off_cmds.link_state == DSI_LP_MODE)
rc = mdss_dsi_blank(pdata, MDSS_PANEL_POWER_OFF);
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
index bd1854092c6a..7091dc2f38b9 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.h
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -614,6 +614,7 @@ int mdss_dsi_wait_for_lane_idle(struct mdss_dsi_ctrl_pdata *ctrl);
irqreturn_t mdss_dsi_isr(int irq, void *ptr);
irqreturn_t hw_vsync_handler(int irq, void *data);
+void disable_esd_thread(void);
void mdss_dsi_irq_handler_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
void mdss_dsi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index e8d68059581f..01fc01425a3a 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -667,6 +667,11 @@ static void mdss_dsi_panel_bl_ctrl(struct mdss_panel_data *pdata,
* for the backlight brightness. If the brightness is less
* than it, the controller can malfunction.
*/
+ pr_debug("%s: bl_level:%d\n", __func__, bl_level);
+
+ /* do not allow the backlight to change while the panel is in disable mode */
+ if (pdata->panel_disable_mode && (bl_level != 0))
+ return;
if ((bl_level < pdata->panel_info.bl_min) && (bl_level != 0))
bl_level = pdata->panel_info.bl_min;
@@ -851,6 +856,48 @@ static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata,
return 0;
}
+static void mdss_dsi_parse_mdp_kickoff_threshold(struct device_node *np,
+ struct mdss_panel_info *pinfo)
+{
+ int len, rc;
+ const u32 *src;
+ u32 tmp;
+ u32 max_delay_us;
+
+ pinfo->mdp_koff_thshold = false;
+ src = of_get_property(np, "qcom,mdss-mdp-kickoff-threshold", &len);
+ if (!src || (len < 2 * sizeof(u32)))
+ return;
+
+ rc = of_property_read_u32(np, "qcom,mdss-mdp-kickoff-delay", &tmp);
+ if (!rc)
+ pinfo->mdp_koff_delay = tmp;
+ else
+ return;
+
+ if (pinfo->mipi.frame_rate == 0) {
+ pr_err("cannot enable guard window, unexpected panel fps\n");
+ return;
+ }
+
+ pinfo->mdp_koff_thshold_low = be32_to_cpu(src[0]);
+ pinfo->mdp_koff_thshold_high = be32_to_cpu(src[1]);
+ max_delay_us = 1000000 / pinfo->mipi.frame_rate;
+
+ /* enable the feature if threshold is valid */
+ if ((pinfo->mdp_koff_thshold_low < pinfo->mdp_koff_thshold_high) &&
+ ((pinfo->mdp_koff_delay > 0) &&
+ (pinfo->mdp_koff_delay < max_delay_us)))
+ pinfo->mdp_koff_thshold = true;
+
+ pr_debug("panel kickoff thshold:[%d, %d] delay:%d (max:%d) enable:%d\n",
+ pinfo->mdp_koff_thshold_low,
+ pinfo->mdp_koff_thshold_high,
+ pinfo->mdp_koff_delay,
+ max_delay_us,
+ pinfo->mdp_koff_thshold);
+}
+
static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger,
char *trigger_key)
{
@@ -2492,6 +2539,8 @@ static int mdss_panel_parse_dt(struct device_node *np,
rc = of_property_read_u32(np, "qcom,mdss-mdp-transfer-time-us", &tmp);
pinfo->mdp_transfer_time_us = (!rc ? tmp : DEFAULT_MDP_TRANSFER_TIME);
+ mdss_dsi_parse_mdp_kickoff_threshold(np, pinfo);
+
pinfo->mipi.lp11_init = of_property_read_bool(np,
"qcom,mdss-dsi-lp11-init");
rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp);
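
mdss_dsi_parse_mdp_kickoff_threshold() bounds the kickoff delay by one frame period, 1000000 / frame_rate microseconds; at 60 fps that is 16666 us. A small sketch of the resulting validity check, with the delay bound read as a conjunction to match the driver check (all values hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool kickoff_threshold_valid(unsigned lo, unsigned hi,
				    unsigned delay_us, unsigned fps)
{
	unsigned max_delay_us;

	if (fps == 0)
		return false;           /* cannot size the guard window */
	max_delay_us = 1000000 / fps;   /* one frame period */
	return lo < hi && delay_us > 0 && delay_us < max_delay_us;
}

int main(void)
{
	/* at 60 fps the delay must stay under 16666 us */
	printf("%d\n", kickoff_threshold_valid(100, 200, 1000, 60));  /* 1 */
	printf("%d\n", kickoff_threshold_valid(100, 200, 20000, 60)); /* 0 */
	return 0;
}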
diff --git a/drivers/video/fbdev/msm/mdss_dsi_status.c b/drivers/video/fbdev/msm/mdss_dsi_status.c
index bf545ae311f2..4208c2c43efb 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_status.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_status.c
@@ -101,6 +101,16 @@ irqreturn_t hw_vsync_handler(int irq, void *data)
}
/*
+ * disable_esd_thread() - Cancels work item for the esd check.
+ */
+void disable_esd_thread(void)
+{
+ if (pstatus_data &&
+ cancel_delayed_work(&pstatus_data->check_status))
+ pr_debug("esd thread killed\n");
+}
+
+/*
* fb_event_callback() - Call back function for the fb_register_client()
* notifying events
* @self : notifier block
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 50c7015c6731..fc8d3898351e 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -1950,6 +1950,9 @@ static int mdss_fb_blank(int blank_mode, struct fb_info *info)
pdata->panel_info.is_lpm_mode = false;
}
+ if (pdata->panel_disable_mode)
+ mdss_mdp_enable_panel_disable_mode(mfd, false);
+
return mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index 4f1435d006b2..2047a047b537 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -698,7 +698,6 @@ static ssize_t hdmi_edid_sysfs_rda_3d_modes(struct device *dev,
}
}
- DEV_DBG("%s: '%s'\n", __func__, buf);
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
return ret;
@@ -1567,7 +1566,9 @@ static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
frame_data = (active_h + blank_h) * (active_v + blank_v);
if (frame_data) {
- int refresh_rate_khz = (pixel_clk * khz_to_hz) / frame_data;
+ u64 refresh_rate = (u64)pixel_clk * khz_to_hz * khz_to_hz;
+
+ do_div(refresh_rate, frame_data);
timing.active_h = active_h;
timing.front_porch_h = front_porch_h;
@@ -1582,19 +1583,24 @@ static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
(front_porch_v + pulse_width_v);
timing.active_low_v = active_low_v;
timing.pixel_freq = pixel_clk;
- timing.refresh_rate = refresh_rate_khz * khz_to_hz;
+ timing.refresh_rate = refresh_rate;
timing.interlaced = interlaced;
timing.supported = true;
timing.ar = aspect_ratio_4_3 ? HDMI_RES_AR_4_3 :
(aspect_ratio_5_4 ? HDMI_RES_AR_5_4 :
HDMI_RES_AR_16_9);
- DEV_DBG("%s: new res: %dx%d%s@%dHz\n", __func__,
+ DEV_DBG("%s: new res: %dx%d%s@%d.%d%d%dHz\n", __func__,
timing.active_h, timing.active_v,
interlaced ? "i" : "p",
- timing.refresh_rate / khz_to_hz);
-
- rc = hdmi_set_resv_timing_info(&timing);
+ timing.refresh_rate / khz_to_hz,
+ (timing.refresh_rate % khz_to_hz) / 100,
+ (timing.refresh_rate % 100) / 10,
+ timing.refresh_rate % 10);
+
+ rc = hdmi_get_video_id_code(&timing, NULL);
+ if (rc < 0)
+ rc = hdmi_set_resv_timing_info(&timing);
} else {
DEV_ERR("%s: Invalid frame data\n", __func__);
rc = -EINVAL;
@@ -1642,6 +1648,7 @@ static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl,
u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
struct hdmi_edid_sink_data *sink_data = &edid_ctrl->sink_data;
struct disp_mode_info *disp_mode_list = sink_data->disp_mode_list;
+ u32 i = 0;
if (video_format >= HDMI_VFRMT_MAX) {
DEV_ERR("%s: video format: %s is not supported\n", __func__,
@@ -1653,6 +1660,15 @@ static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl,
video_format, msm_hdmi_mode_2string(video_format),
supported ? "Supported" : "Not-Supported");
+ for (i = 0; i < sink_data->num_of_elements; i++) {
+ u32 vic = disp_mode_list[i].video_format;
+
+ if (vic == video_format) {
+ DEV_DBG("%s: vic %d already added\n", __func__, vic);
+ return;
+ }
+ }
+
if (!ret && supported) {
/* todo: MHL */
disp_mode_list[sink_data->num_of_elements].video_format =
@@ -1970,6 +1986,7 @@ static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
const u8 *svd = NULL;
u32 has60hz_mode = false;
u32 has50hz_mode = false;
+ u32 desc_offset = 0;
bool read_block0_res = false;
struct hdmi_edid_sink_data *sink_data = NULL;
@@ -2033,103 +2050,66 @@ static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
if (video_format == HDMI_VFRMT_640x480p60_4_3)
has480p = true;
}
- } else if (!num_of_cea_blocks || read_block0_res) {
- /* Detailed timing descriptors */
- u32 desc_offset = 0;
- /*
- * * Maximum 4 timing descriptor in block 0 - No CEA
- * extension in this case
- * * EDID_FIRST_TIMING_DESC[0x36] - 1st detailed timing
- * descriptor
- * * EDID_DETAIL_TIMING_DESC_BLCK_SZ[0x12] - Each detailed
- * timing descriptor has block size of 18
- */
- while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
- hdmi_edid_detail_desc(edid_ctrl,
- edid_blk0+0x36+desc_offset,
- &video_format);
-
- DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
-
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
-
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
-
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
- }
- desc_offset += 0x12;
- ++i;
- }
- } else if (1 == num_of_cea_blocks) {
- u32 desc_offset = 0;
-
- /*
- * Read from both block 0 and block 1
- * Read EDID block[0] as above
- */
- while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
- hdmi_edid_detail_desc(edid_ctrl,
- edid_blk0+0x36+desc_offset,
- &video_format);
+ }
- DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
+ i = 0;
+ /* Read DTD resolutions from block0 */
+ while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
+ hdmi_edid_detail_desc(edid_ctrl,
+ edid_blk0+0x36+desc_offset,
+ &video_format);
+
+ DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
+ __func__, __LINE__,
+ msm_hdmi_mode_2string(video_format));
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
+ hdmi_edid_add_sink_video_format(edid_ctrl,
+ video_format);
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
+ if (video_format == HDMI_VFRMT_640x480p60_4_3)
+ has480p = true;
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
- }
- desc_offset += 0x12;
- ++i;
+ /* Make a note of the preferred video format */
+ if (i == 0) {
+ sink_data->preferred_video_format =
+ video_format;
}
+ desc_offset += 0x12;
+ ++i;
+ }
- /*
- * * Parse block 1 - CEA extension byte offset of first
- * detailed timing generation - offset is relevant to
- * the offset of block 1
- * * EDID_CEA_EXTENSION_FIRST_DESC[0x82]: Offset to CEA
- * extension first timing desc - indicate the offset of
- * the first detailed timing descriptor
- * * EDID_BLOCK_SIZE = 0x80 Each page size in the EDID ROM
- */
- desc_offset = edid_blk1[0x02];
- while (0 != edid_blk1[desc_offset]) {
- hdmi_edid_detail_desc(edid_ctrl,
- edid_blk1+desc_offset,
- &video_format);
-
- DEV_DBG("[%s:%d] Block-1 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
+ /*
+ * Parse block 1 - the CEA extension. The byte offset of the
+ * first detailed timing descriptor is relative to the start
+ * of block 1:
+ * * EDID_CEA_EXTENSION_FIRST_DESC[0x82]: offset within the
+ * block of the first detailed timing descriptor
+ * * EDID_BLOCK_SIZE = 0x80: size of each page in the EDID ROM
+ */
+ desc_offset = edid_blk1[0x02];
+ i = 0;
+	while (0 != edid_blk1[desc_offset]) {
+ hdmi_edid_detail_desc(edid_ctrl,
+ edid_blk1+desc_offset,
+ &video_format);
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
+ DEV_DBG("[%s:%d] Block-1 Adding vid fmt = [%s]\n",
+ __func__, __LINE__,
+ msm_hdmi_mode_2string(video_format));
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
- }
- desc_offset += 0x12;
- ++i;
+ hdmi_edid_add_sink_video_format(edid_ctrl,
+ video_format);
+ if (video_format == HDMI_VFRMT_640x480p60_4_3)
+ has480p = true;
+
+ /* Make a note of the preferred video format */
+ if (i == 0) {
+ sink_data->preferred_video_format =
+ video_format;
}
+ desc_offset += 0x12;
+ ++i;
}
std_blk = 0;
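
The consolidated loops above implement the standard EDID descriptor walk. For reference, a minimal sketch of that traversal, assuming the EDID 1.3/CEA-861 layout the driver relies on; walk_block0_dtds and handle_dtd are hypothetical names, not part of the patch:

#include <linux/types.h>

#define EDID_FIRST_TIMING_DESC		0x36
#define EDID_DETAIL_TIMING_DESC_SZ	0x12	/* 18 bytes per DTD */

static void walk_block0_dtds(const u8 *edid_blk0,
		void (*handle_dtd)(const u8 *dtd))
{
	u32 offset = 0;
	int i;

	/* block 0 carries at most four DTDs; a zero lead byte ends the walk */
	for (i = 0; i < 4 && edid_blk0[EDID_FIRST_TIMING_DESC + offset]; i++) {
		handle_dtd(edid_blk0 + EDID_FIRST_TIMING_DESC + offset);
		offset += EDID_DETAIL_TIMING_DESC_SZ;
	}
}
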
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
index 9ed909e9a387..c9fc8ba8bfdb 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -560,7 +560,7 @@ int msm_hdmi_get_timing_info(
int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
struct hdmi_util_ds_data *ds_data, u32 mode)
{
- int ret;
+ int ret, i = 0;
if (!info)
return -EINVAL;
@@ -570,9 +570,23 @@ int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
ret = msm_hdmi_get_timing_info(info, mode);
- if (!ret && ds_data && ds_data->ds_registered && ds_data->ds_max_clk) {
- if (info->pixel_freq > ds_data->ds_max_clk)
- info->supported = false;
+ if (!ret && ds_data && ds_data->ds_registered) {
+ if (ds_data->ds_max_clk) {
+ if (info->pixel_freq > ds_data->ds_max_clk)
+ info->supported = false;
+ }
+
+ if (ds_data->modes_num) {
+ u32 *modes = ds_data->modes;
+
+ for (i = 0; i < ds_data->modes_num; i++) {
+ if (info->video_format == *modes++)
+ break;
+ }
+
+ if (i == ds_data->modes_num)
+ info->supported = false;
+ }
}
return ret;
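
The new modes_num/modes fields let a registered downstream device restrict which sink formats count as supported. A sketch of the same scan factored into a predicate; hdmi_ds_mode_allowed is a hypothetical name:

#include <linux/types.h>

static bool hdmi_ds_mode_allowed(const struct hdmi_util_ds_data *ds_data,
		u32 video_format)
{
	u32 i;

	/* linear scan of the downstream whitelist */
	for (i = 0; i < ds_data->modes_num; i++) {
		if (ds_data->modes[i] == video_format)
			return true;
	}

	return false;
}
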
@@ -625,7 +639,7 @@ int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
{
int i, vic = -1;
struct msm_hdmi_mode_timing_info supported_timing = {0};
- u32 ret;
+ u32 ret, pclk_delta, pclk, fps_delta, fps;
if (!timing_in) {
pr_err("invalid input\n");
@@ -633,9 +647,16 @@ int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
}
/* active_low_h, active_low_v and interlaced are not checked against */
- for (i = 0; i < HDMI_VFRMT_MAX; i++) {
+ for (i = 1; i < HDMI_VFRMT_MAX; i++) {
ret = hdmi_get_supported_mode(&supported_timing, ds_data, i);
+ pclk = supported_timing.pixel_freq;
+ fps = supported_timing.refresh_rate;
+
+	/* as per the standard, a deviation of up to 0.5% is allowed */
+ pclk_delta = (pclk / HDMI_KHZ_TO_HZ) * 5;
+ fps_delta = (fps / HDMI_KHZ_TO_HZ) * 5;
+
if (ret || !supported_timing.supported)
continue;
if (timing_in->active_h != supported_timing.active_h)
@@ -654,9 +675,11 @@ int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
continue;
if (timing_in->back_porch_v != supported_timing.back_porch_v)
continue;
- if (timing_in->pixel_freq != supported_timing.pixel_freq)
+ if (timing_in->pixel_freq < (pclk - pclk_delta) ||
+ timing_in->pixel_freq > (pclk + pclk_delta))
continue;
- if (timing_in->refresh_rate != supported_timing.refresh_rate)
+ if (timing_in->refresh_rate < (fps - fps_delta) ||
+ timing_in->refresh_rate > (fps + fps_delta))
continue;
vic = (int)supported_timing.video_format;
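
The delta arithmetic above yields 0.5% because 5/1000 = 0.005, assuming HDMI_KHZ_TO_HZ is 1000. For example, 1080p60 at pclk = 148500 kHz gives delta = (148500 / 1000) * 5 = 740 kHz with integer division, so pixel clocks in [147760, 149240] kHz match. A sketch of the check as a standalone predicate:

#include <linux/types.h>

static bool within_half_percent(u32 value, u32 reference)
{
	u32 delta = (reference / 1000) * 5;	/* 5/1000 == 0.5% */

	return value >= (reference - delta) &&
	       value <= (reference + delta);
}
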
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h
index e65cf915fe92..8a7e4d1ebafc 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h
@@ -459,6 +459,8 @@ struct hdmi_tx_ddc_ctrl {
struct hdmi_util_ds_data {
bool ds_registered;
u32 ds_max_clk;
+ u32 modes_num;
+ u32 *modes;
};
static inline int hdmi_tx_get_v_total(const struct msm_hdmi_mode_timing_info *t)
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 81e3438befca..6845b386807b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1992,6 +1992,8 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
set_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map);
set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map); /* cdp supported */
mdata->enable_cdp = false; /* disable cdp */
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 921391dc4bde..8ac63aaaefce 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -122,6 +122,11 @@
*/
#define MDSS_MDP_DS_OVERFETCH_SIZE 5
+#define QOS_LUT_NRT_READ 0x0
+#define QOS_LUT_CWB_READ 0xe4000000
+#define PANIC_LUT_NRT_READ 0x0
+#define ROBUST_LUT_NRT_READ 0xFFFF
+
/* hw cursor can only be setup in highest mixer stage */
#define HW_CURSOR_STAGE(mdata) \
(((mdata)->max_target_zorder + MDSS_MDP_STAGE_0) - 1)
@@ -394,6 +399,8 @@ struct mdss_mdp_ctl_intfs_ops {
enum dynamic_switch_modes mode, bool pre);
/* called before do any register programming from commit thread */
void (*pre_programming)(struct mdss_mdp_ctl *ctl);
+ /* called to do any interface programming for the panel disable mode */
+ void (*panel_disable_cfg)(struct mdss_mdp_ctl *ctl, bool disable);
/* to update lineptr, [1..yres] - enable, 0 - disable */
int (*update_lineptr)(struct mdss_mdp_ctl *ctl, bool enable);
@@ -405,7 +412,7 @@ struct mdss_mdp_cwb {
struct list_head data_queue;
int valid;
u32 wb_idx;
- struct mdp_output_layer *layer;
+ struct mdp_output_layer layer;
void *priv_data;
struct msm_sync_pt_data cwb_sync_pt_data;
struct blocking_notifier_head notifier_head;
@@ -1863,6 +1870,8 @@ int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *ctl, int frame_cnt);
int mdss_mdp_cmd_get_autorefresh_mode(struct mdss_mdp_ctl *ctl);
int mdss_mdp_ctl_cmd_set_autorefresh(struct mdss_mdp_ctl *ctl, int frame_cnt);
int mdss_mdp_ctl_cmd_get_autorefresh(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_enable_panel_disable_mode(struct msm_fb_data_type *mfd,
+ bool disable_panel);
int mdss_mdp_pp_get_version(struct mdp_pp_feature_version *version);
int mdss_mdp_layer_pre_commit_cwb(struct msm_fb_data_type *mfd,
struct mdp_layer_commit_v1 *commit);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index ebc7d2144eb9..eb1e0b5c47a6 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -3424,6 +3424,7 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
mutex_lock(&cwb->queue_lock);
cwb_data = list_first_entry_or_null(&cwb->data_queue,
struct mdss_mdp_wb_data, next);
+	if (cwb_data)
+		__list_del_entry(&cwb_data->next);
mutex_unlock(&cwb->queue_lock);
if (cwb_data == NULL) {
pr_err("no output buffer for cwb\n");
@@ -3453,14 +3454,14 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
sctl->opmode |= MDSS_MDP_CTL_OP_WFD_MODE;
/* Select CWB data point */
- data_point = (cwb->layer->flags & MDP_COMMIT_CWB_DSPP) ? 0x4 : 0;
+ data_point = (cwb->layer.flags & MDP_COMMIT_CWB_DSPP) ? 0x4 : 0;
writel_relaxed(data_point, mdata->mdp_base + mdata->ppb_ctl[2]);
if (sctl)
writel_relaxed(data_point + 1,
mdata->mdp_base + mdata->ppb_ctl[3]);
- /* Flush WB */
- ctl->flush_bits |= BIT(16);
+ /* Flush WB and CTL */
+ ctl->flush_bits |= BIT(16) | BIT(17);
opmode = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_TOP) | ctl->opmode;
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, opmode);
@@ -3469,6 +3470,10 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
sctl->opmode;
mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, opmode);
}
+
+ /* Increase commit count to signal CWB release fence */
+ atomic_inc(&cwb->cwb_sync_pt_data.commit_cnt);
+
goto cwb_setup_done;
cwb_setup_fail:
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index 76fd2d12ac95..294e05c2fbb0 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -541,6 +541,10 @@ enum mdss_mdp_writeback_index {
#define MDSS_MDP_REG_WB_N16_INIT_PHASE_Y_C12 0x06C
#define MDSS_MDP_REG_WB_OUT_SIZE 0x074
#define MDSS_MDP_REG_WB_ALPHA_X_VALUE 0x078
+#define MDSS_MDP_REG_WB_DANGER_LUT 0x084
+#define MDSS_MDP_REG_WB_SAFE_LUT 0x088
+#define MDSS_MDP_REG_WB_CREQ_LUT 0x08c
+#define MDSS_MDP_REG_WB_QOS_CTRL 0x090
#define MDSS_MDP_REG_WB_CSC_BASE 0x260
#define MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS 0x2B0
#define MDSS_MDP_REG_WB_CDP_CTRL 0x2B4
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 2c2dc6f18fd9..4eb121f01aca 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -73,6 +73,7 @@ struct mdss_mdp_cmd_ctx {
struct mutex clk_mtx;
spinlock_t clk_lock;
spinlock_t koff_lock;
+ spinlock_t ctlstart_lock;
struct work_struct gate_clk_work;
struct delayed_work delayed_off_clk_work;
struct work_struct pp_done_work;
@@ -144,15 +145,11 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
u32 init;
u32 height;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
-
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
if (!mixer) {
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
- if (!mixer) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ if (!mixer)
goto exit;
- }
}
init = mdss_mdp_pingpong_read(mixer->pingpong_base,
@@ -160,10 +157,8 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
height = mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
- if (height < init) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ if (height < init)
goto exit;
- }
cnt = mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_INT_COUNT_VAL) & 0xffff;
@@ -173,13 +168,21 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
else
cnt -= init;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
-
pr_debug("cnt=%d init=%d height=%d\n", cnt, init, height);
exit:
return cnt;
}
+static inline u32 mdss_mdp_cmd_line_count_wrapper(struct mdss_mdp_ctl *ctl)
+{
+ u32 ret;
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ ret = mdss_mdp_cmd_line_count(ctl);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ return ret;
+}
+
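
The split keeps the raw register read free of clock votes, so the kickoff path, which already holds the MDP clocks and runs under a spinlock, can call it directly while generic callers use the wrapper. A sketch of the same bracketing pattern with a hypothetical helper:

static u32 mdp_read_clocked(u32 (*read_fn)(struct mdss_mdp_ctl *),
		struct mdss_mdp_ctl *ctl)
{
	u32 val;

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);	/* vote the clocks on */
	val = read_fn(ctl);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);	/* drop the vote */

	return val;
}
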
static int mdss_mdp_tearcheck_enable(struct mdss_mdp_ctl *ctl, bool enable)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -295,9 +298,9 @@ static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_mixer *mixer,
__func__, pinfo->yres, vclks_line, te->sync_cfg_height,
te->vsync_init_val, te->rd_ptr_irq, te->start_pos,
te->wr_ptr_irq);
- pr_debug("thrd_start =%d thrd_cont=%d pp_split=%d\n",
+ pr_debug("thrd_start =%d thrd_cont=%d pp_split=%d hw_vsync_mode:%d\n",
te->sync_threshold_start, te->sync_threshold_continue,
- ctx->pingpong_split_slave);
+ ctx->pingpong_split_slave, pinfo->mipi.hw_vsync_mode);
pingpong_base = mixer->pingpong_base;
@@ -2130,6 +2133,88 @@ static int mdss_mdp_cmd_panel_on(struct mdss_mdp_ctl *ctl,
}
/*
+ * This function will be called from the sysfs node to tear down or restore
+ * any dependencies of the interface to disable the panel
+ */
+void mdss_mdp_cmd_panel_disable_cfg(struct mdss_mdp_ctl *ctl,
+ bool disable)
+{
+ struct mdss_panel_info *pinfo, *spinfo = NULL;
+ struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+
+ pinfo = &ctl->panel_data->panel_info;
+ mutex_lock(&ctl->offlock);
+
+ if ((pinfo->sim_panel_mode == SIM_MODE) ||
+ ((!ctl->panel_data->panel_disable_mode) &&
+ (pinfo->mipi.hw_vsync_mode == 0))) {
+ pr_err("te already in simulaiton mode\n");
+ goto exit;
+ }
+
+ ctx = (struct mdss_mdp_cmd_ctx *)ctl->intf_ctx[MASTER_CTX];
+ if (is_pingpong_split(ctl->mfd)) {
+ sctx = (struct mdss_mdp_cmd_ctx *)ctl->intf_ctx[SLAVE_CTX];
+ } else if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+ struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+
+ if (sctl) {
+ sctx = (struct mdss_mdp_cmd_ctx *)
+ sctl->intf_ctx[MASTER_CTX];
+ spinfo = &sctl->panel_data->panel_info;
+ }
+ }
+
+ if (disable) {
+ /* cache the te params */
+ memcpy(&pinfo->te_cached, &pinfo->te,
+ sizeof(struct mdss_mdp_pp_tear_check));
+ pinfo->mipi.hw_vsync_mode = 0;
+
+ if (spinfo) {
+ spinfo->mipi.hw_vsync_mode = 0;
+ memcpy(&spinfo->te_cached, &spinfo->te,
+ sizeof(struct mdss_mdp_pp_tear_check));
+ }
+
+ pr_debug("%s: update info\n", __func__);
+ /* update the te information to use sim mode */
+ mdss_panel_override_te_params(pinfo);
+ if (spinfo)
+ mdss_panel_override_te_params(spinfo);
+
+ pr_debug("%s: reconfig tear check\n", __func__);
+ /* reconfigure tear check, remove dependency to external te */
+ if (mdss_mdp_cmd_tearcheck_setup(ctx, false)) {
+ pr_warn("%s: ctx%d tearcheck setup failed\n", __func__,
+ ctx->current_pp_num);
+ } else {
+ if (sctx && mdss_mdp_cmd_tearcheck_setup(sctx, false))
+ pr_warn("%s: ctx%d tearcheck setup failed\n",
+ __func__, sctx->current_pp_num);
+ }
+ } else {
+		/*
+		 * Restore the cached panel information; the actual
+		 * hardware programming will happen during restore.
+		 */
+ pr_debug("%s: reset tear check\n", __func__);
+ memcpy(&pinfo->te, &pinfo->te_cached,
+ sizeof(struct mdss_mdp_pp_tear_check));
+ pinfo->mipi.hw_vsync_mode = 1;
+
+ if (spinfo) {
+ spinfo->mipi.hw_vsync_mode = 1;
+ memcpy(&spinfo->te, &spinfo->te_cached,
+ sizeof(struct mdss_mdp_pp_tear_check));
+ }
+ }
+
+exit:
+ mutex_unlock(&ctl->offlock);
+}
+
+/*
* This function will be called from the sysfs node to enable and disable the
* feature with master ctl only.
*/
@@ -2595,12 +2680,42 @@ static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
return 0;
}
+static bool wait_for_read_ptr_if_late(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_ctl *sctl, struct mdss_panel_info *pinfo)
+{
+ u32 line_count;
+ u32 sline_count = 0;
+ bool ret = true;
+ u32 low_threshold = pinfo->mdp_koff_thshold_low;
+ u32 high_threshold = pinfo->mdp_koff_thshold_high;
+
+ /* read the line count */
+ line_count = mdss_mdp_cmd_line_count(ctl);
+ if (sctl)
+ sline_count = mdss_mdp_cmd_line_count(sctl);
+
+	/* if the line count is inside the window, return false so the transfer can start now */
+ if (((line_count > low_threshold) && (line_count < high_threshold)) &&
+ (!sctl || ((sline_count > low_threshold) &&
+ (sline_count < high_threshold))))
+ ret = false;
+
+ pr_debug("threshold:[%d, %d]\n", low_threshold, high_threshold);
+ pr_debug("line:%d sline:%d ret:%d\n", line_count, sline_count, ret);
+ MDSS_XLOG(line_count, sline_count, ret);
+
+ return ret;
+}
static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
- struct mdss_mdp_cmd_ctx *ctx)
+ struct mdss_mdp_ctl *sctl, struct mdss_mdp_cmd_ctx *ctx)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
bool is_pp_split = is_pingpong_split(ctl->mfd);
+ struct mdss_panel_info *pinfo = NULL;
+
+ if (ctl->panel_data)
+ pinfo = &ctl->panel_data->panel_info;
MDSS_XLOG(ctx->autorefresh_state);
@@ -2625,9 +2740,33 @@ static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
} else {
+
+		/*
+		 * Some panels require that the MDP read pointer is within a
+		 * certain range of the panel scanlines before the transfer
+		 * can be triggered. If that is the case, make sure the
+		 * scanline is within the limits before starting.
+		 * Acquire a spinlock for this operation to raise the
+		 * priority of this thread and preserve the context, so that
+		 * as little time as possible passes between the scanline
+		 * check and the kickoff.
+		 */
+ if (pinfo && pinfo->mdp_koff_thshold) {
+ spin_lock(&ctx->ctlstart_lock);
+ if (wait_for_read_ptr_if_late(ctl, sctl, pinfo)) {
+ spin_unlock(&ctx->ctlstart_lock);
+ usleep_range(pinfo->mdp_koff_delay,
+ pinfo->mdp_koff_delay + 10);
+ spin_lock(&ctx->ctlstart_lock);
+ }
+ }
+
/* SW Kickoff */
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
MDSS_XLOG(0x11, ctx->autorefresh_state);
+
+ if (pinfo && pinfo->mdp_koff_thshold)
+ spin_unlock(&ctx->ctlstart_lock);
}
}
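
A sketch of the scanline-gated kickoff above, folded into a hypothetical helper. Note the patch itself checks the window once and sleeps at most once; the retry loop here is only one possible variant:

static void kickoff_when_in_window(struct mdss_mdp_cmd_ctx *ctx,
		struct mdss_mdp_ctl *ctl, struct mdss_mdp_ctl *sctl,
		struct mdss_panel_info *pinfo)
{
	spin_lock(&ctx->ctlstart_lock);

	while (wait_for_read_ptr_if_late(ctl, sctl, pinfo)) {
		/* outside the safe window: drop the lock before sleeping */
		spin_unlock(&ctx->ctlstart_lock);
		usleep_range(pinfo->mdp_koff_delay,
			     pinfo->mdp_koff_delay + 10);
		spin_lock(&ctx->ctlstart_lock);
	}

	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);	/* SW kickoff */
	spin_unlock(&ctx->ctlstart_lock);
}
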
@@ -2759,7 +2898,7 @@ static int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
}
/* Kickoff */
- __mdss_mdp_kickoff(ctl, ctx);
+ __mdss_mdp_kickoff(ctl, sctl, ctx);
mdss_mdp_cmd_post_programming(ctl);
@@ -3185,6 +3324,7 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
init_completion(&ctx->autorefresh_done);
spin_lock_init(&ctx->clk_lock);
spin_lock_init(&ctx->koff_lock);
+ spin_lock_init(&ctx->ctlstart_lock);
mutex_init(&ctx->clk_mtx);
mutex_init(&ctx->mdp_rdptr_lock);
mutex_init(&ctx->mdp_wrptr_lock);
@@ -3475,12 +3615,13 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
ctl->ops.wait_pingpong = mdss_mdp_cmd_wait4pingpong;
ctl->ops.add_vsync_handler = mdss_mdp_cmd_add_vsync_handler;
ctl->ops.remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler;
- ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count;
+ ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count_wrapper;
ctl->ops.restore_fnc = mdss_mdp_cmd_restore;
ctl->ops.early_wake_up_fnc = mdss_mdp_cmd_early_wake_up;
ctl->ops.reconfigure = mdss_mdp_cmd_reconfigure;
ctl->ops.pre_programming = mdss_mdp_cmd_pre_programming;
ctl->ops.update_lineptr = mdss_mdp_cmd_update_lineptr;
+ ctl->ops.panel_disable_cfg = mdss_mdp_cmd_panel_disable_cfg;
pr_debug("%s:-\n", __func__);
return 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 40b10e368309..e6e03e7d54b2 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -124,6 +124,30 @@ static inline void mdp_wb_write(struct mdss_mdp_writeback_ctx *ctx,
writel_relaxed(val, ctx->base + reg);
}
+static void mdss_mdp_set_qos_wb(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_writeback_ctx *ctx)
+{
+ u32 wb_qos_setup = QOS_LUT_NRT_READ;
+ struct mdss_mdp_cwb *cwb = NULL;
+ struct mdss_overlay_private *mdp5_data;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!test_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map))
+ return;
+
+ mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+ cwb = &mdp5_data->cwb;
+
+ if (cwb->valid)
+ wb_qos_setup = QOS_LUT_CWB_READ;
+ else
+ wb_qos_setup = QOS_LUT_NRT_READ;
+
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DANGER_LUT, PANIC_LUT_NRT_READ);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_SAFE_LUT, ROBUST_LUT_NRT_READ);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_CREQ_LUT, wb_qos_setup);
+}
+
static void mdss_mdp_set_ot_limit_wb(struct mdss_mdp_writeback_ctx *ctx,
int is_wfd)
{
@@ -447,7 +471,7 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
cwb = &mdp5_data->cwb;
ctx = (struct mdss_mdp_writeback_ctx *)cwb->priv_data;
- buffer = &cwb->layer->buffer;
+ buffer = &cwb->layer.buffer;
ctx->opmode = 0;
ctx->img_width = buffer->width;
@@ -495,6 +519,8 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
if (ctl->mdata->default_ot_wr_limit || ctl->mdata->default_ot_rd_limit)
mdss_mdp_set_ot_limit_wb(ctx, false);
+ mdss_mdp_set_qos_wb(ctl, ctx);
+
return ret;
}
@@ -897,6 +923,8 @@ static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
ctl->mdata->default_ot_rd_limit)
mdss_mdp_set_ot_limit_wb(ctx, true);
+ mdss_mdp_set_qos_wb(ctl, ctx);
+
wb_args = (struct mdss_mdp_writeback_arg *) arg;
if (!wb_args)
return -ENOENT;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 91d4332700b6..0f0df2256f74 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -2285,12 +2285,12 @@ end:
return ret;
}
-int __is_cwb_requested(uint32_t output_layer_flags)
+int __is_cwb_requested(uint32_t commit_flags)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int req = 0;
- req = output_layer_flags & MDP_COMMIT_CWB_EN;
+ req = commit_flags & MDP_COMMIT_CWB_EN;
if (req && !test_bit(MDSS_CAPS_CWB_SUPPORTED, mdata->mdss_caps_map)) {
pr_err("CWB not supported");
return -ENODEV;
@@ -2330,7 +2330,7 @@ int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
return -EINVAL;
if (commit->output_layer) {
- ret = __is_cwb_requested(commit->output_layer->flags);
+ ret = __is_cwb_requested(commit->flags);
if (IS_ERR_VALUE(ret)) {
return ret;
} else if (ret) {
@@ -2493,7 +2493,7 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
}
if (commit->output_layer) {
- rc = __is_cwb_requested(commit->output_layer->flags);
+ rc = __is_cwb_requested(commit->flags);
if (IS_ERR_VALUE(rc)) {
return rc;
} else if (rc) {
@@ -2553,7 +2553,7 @@ int mdss_mdp_layer_pre_commit_cwb(struct msm_fb_data_type *mfd,
return rc;
}
- mdp5_data->cwb.layer = commit->output_layer;
+ mdp5_data->cwb.layer = *commit->output_layer;
mdp5_data->cwb.wb_idx = commit->output_layer->writeback_ndx;
mutex_lock(&mdp5_data->cwb.queue_lock);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 9dda467e53cc..965d4a6cfb5e 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -3243,6 +3243,110 @@ static ssize_t mdss_mdp_dyn_pu_store(struct device *dev,
return count;
}
+
+static ssize_t mdss_mdp_panel_disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret = 0;
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+ struct mdss_mdp_ctl *ctl;
+ struct mdss_panel_data *pdata;
+
+ if (!mfd) {
+ pr_err("Invalid mfd structure\n");
+ return -EINVAL;
+ }
+
+ ctl = mfd_to_ctl(mfd);
+ if (!ctl) {
+ pr_err("Invalid ctl structure\n");
+ return -EINVAL;
+ }
+
+ pdata = dev_get_platdata(&mfd->pdev->dev);
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ pdata->panel_disable_mode);
+
+ return ret;
+}
+
+int mdss_mdp_enable_panel_disable_mode(struct msm_fb_data_type *mfd,
+ bool disable_panel)
+{
+ struct mdss_mdp_ctl *ctl;
+ int ret = 0;
+ struct mdss_panel_data *pdata;
+
+ ctl = mfd_to_ctl(mfd);
+ if (!ctl) {
+ pr_err("Invalid ctl structure\n");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ pdata = dev_get_platdata(&mfd->pdev->dev);
+
+ pr_debug("config panel %d\n", disable_panel);
+ if (disable_panel) {
+ /* first set the flag that we enter this mode */
+ pdata->panel_disable_mode = true;
+
+ /*
+ * setup any interface config that needs to change before
+ * disabling the panel
+ */
+ if (ctl->ops.panel_disable_cfg)
+ ctl->ops.panel_disable_cfg(ctl, disable_panel);
+
+ /* disable panel */
+ ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DISABLE_PANEL,
+ NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+ if (ret)
+ pr_err("failed to disable panel! %d\n", ret);
+ } else {
+ /* restore any interface configuration */
+ if (ctl->ops.panel_disable_cfg)
+ ctl->ops.panel_disable_cfg(ctl, disable_panel);
+
+ /*
+ * no other action is needed when reconfiguring, since all the
+ * re-configuration will happen during restore
+ */
+ pdata->panel_disable_mode = false;
+ }
+
+ return ret;
+}
+
+static ssize_t mdss_mdp_panel_disable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ int disable_panel, rc;
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+
+ if (!mfd) {
+ pr_err("Invalid mfd structure\n");
+ rc = -EINVAL;
+ return rc;
+ }
+
+ rc = kstrtoint(buf, 10, &disable_panel);
+ if (rc) {
+ pr_err("kstrtoint failed. rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_debug("disable panel: %d ++\n", disable_panel);
+ /* we only support disabling the panel from sysfs */
+ if (disable_panel)
+ mdss_mdp_enable_panel_disable_mode(mfd, true);
+
+ return len;
+}
+
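
A usage sketch for the new sysfs node, assuming fb0 is the primary display; the exact path depends on the framebuffer index:

/*
 *   echo 1 > /sys/class/graphics/fb0/msm_disable_panel
 *   cat /sys/class/graphics/fb0/msm_disable_panel
 *
 * Only the disable direction is accepted from sysfs; recovery happens
 * on the next unblank, when the cached TE configuration is restored.
 */
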
static ssize_t mdss_mdp_cmd_autorefresh_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -3433,6 +3537,8 @@ static DEVICE_ATTR(msm_misr_en, S_IRUGO | S_IWUSR,
mdss_mdp_misr_show, mdss_mdp_misr_store);
static DEVICE_ATTR(msm_cmd_autorefresh_en, S_IRUGO | S_IWUSR,
mdss_mdp_cmd_autorefresh_show, mdss_mdp_cmd_autorefresh_store);
+static DEVICE_ATTR(msm_disable_panel, S_IRUGO | S_IWUSR,
+ mdss_mdp_panel_disable_show, mdss_mdp_panel_disable_store);
static DEVICE_ATTR(vsync_event, S_IRUGO, mdss_mdp_vsync_show_event, NULL);
static DEVICE_ATTR(lineptr_event, S_IRUGO, mdss_mdp_lineptr_show_event, NULL);
static DEVICE_ATTR(lineptr_value, S_IRUGO | S_IWUSR | S_IWGRP,
@@ -3454,6 +3560,7 @@ static struct attribute *mdp_overlay_sysfs_attrs[] = {
&dev_attr_dyn_pu.attr,
&dev_attr_msm_misr_en.attr,
&dev_attr_msm_cmd_autorefresh_en.attr,
+ &dev_attr_msm_disable_panel.attr,
&dev_attr_hist_event.attr,
&dev_attr_bl_event.attr,
&dev_attr_ad_event.attr,
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
index 48235c5b85ba..5bd627853c59 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
@@ -1570,7 +1570,7 @@ static int pp_pa_dither_cache_params_v1_7(
res_data->strength = dither_data.strength;
res_data->matrix_sz = MDP_DITHER_DATA_V1_7_SZ;
ret = copy_from_user(res_data->matrix_data,
- (u8 *)dither_data.matrix_data,
+ (u8 *)((unsigned long)dither_data.matrix_data),
(MDP_DITHER_DATA_V1_7_SZ * sizeof(u32)));
if (ret) {
pr_err("failed to copy the dither matrix ret %d sz %zd", ret,
@@ -1700,8 +1700,8 @@ static int pp_igc_lut_cache_params_v3(struct mdp_igc_lut_data *config,
v3_cache_data->table_fmt = v3_kernel_data->table_fmt;
} else {
ret = copy_from_user(v3_cache_data->c0_c1_data,
- (u8 *)v3_usr_config.c0_c1_data,
- len * sizeof(u32));
+ (u8 *)((unsigned long)v3_usr_config.c0_c1_data),
+ len * sizeof(u32));
if (ret) {
pr_err("copy from user failed for c0_c1_data size %zd ret %d\n",
len * sizeof(u32), ret);
@@ -1709,8 +1709,8 @@ static int pp_igc_lut_cache_params_v3(struct mdp_igc_lut_data *config,
goto igc_config_exit;
}
ret = copy_from_user(v3_cache_data->c2_data,
- (u8 *)v3_usr_config.c2_data,
- len * sizeof(u32));
+ (u8 *)((unsigned long)v3_usr_config.c2_data),
+ len * sizeof(u32));
if (ret) {
pr_err("copy from user failed for c2_data size %zd ret %d\n",
len * sizeof(u32), ret);
@@ -1846,7 +1846,7 @@ static int pp_igc_lut_cache_params_pipe_v3(
}
} else {
if (copy_from_user(v3_cache_data->c0_c1_data,
- (u8 *)v3_usr_config.c0_c1_data,
+ (u8 *)((unsigned long)v3_usr_config.c0_c1_data),
IGC_LUT_ENTRIES * sizeof(u32))) {
pr_err("error in copying the c0_c1_data of size %zd\n",
IGC_LUT_ENTRIES * sizeof(u32));
@@ -1854,7 +1854,7 @@ static int pp_igc_lut_cache_params_pipe_v3(
goto igc_config_exit;
}
if (copy_from_user(v3_cache_data->c2_data,
- (u8 *)v3_usr_config.c2_data,
+ (u8 *)((unsigned long)v3_usr_config.c2_data),
IGC_LUT_ENTRIES * sizeof(u32))) {
pr_err("error in copying the c2_data of size %zd\n",
IGC_LUT_ENTRIES * sizeof(u32));
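
These hunks cast what are presumably 64-bit UAPI fields holding user pointers; going through unsigned long first avoids "cast to pointer from integer of different size" warnings on 32-bit builds. A sketch of the pattern as a helper with a hypothetical name:

#include <linux/types.h>

static void __user *uapi_u64_to_ptr(u64 field)
{
	/* truncate through unsigned long so 32-bit builds stay silent */
	return (void __user *)(unsigned long)field;
}
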
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index b2b647dcc017..463d26643dde 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -104,6 +104,7 @@ enum {
MDSS_PANEL_POWER_ON,
MDSS_PANEL_POWER_LP1,
MDSS_PANEL_POWER_LP2,
+ MDSS_PANEL_POWER_LCD_DISABLED,
};
enum {
@@ -265,6 +266,7 @@ enum mdss_intf_events {
MDSS_EVENT_DSI_RESET_WRITE_PTR,
MDSS_EVENT_PANEL_TIMING_SWITCH,
MDSS_EVENT_DEEP_COLOR,
+ MDSS_EVENT_DISABLE_PANEL,
MDSS_EVENT_MAX,
};
@@ -633,6 +635,10 @@ struct mdss_panel_info {
u32 saved_fporch;
/* current fps, once is programmed in hw */
int current_fps;
+ u32 mdp_koff_thshold_low;
+ u32 mdp_koff_thshold_high;
+ bool mdp_koff_thshold;
+ u32 mdp_koff_delay;
int panel_max_fps;
int panel_max_vtotal;
@@ -692,6 +698,7 @@ struct mdss_panel_info {
char panel_name[MDSS_MAX_PANEL_LEN];
struct mdss_mdp_pp_tear_check te;
+ struct mdss_mdp_pp_tear_check te_cached;
/*
* Value of 2 only when single DSI is configured with 2 DSC
@@ -789,6 +796,12 @@ struct mdss_panel_data {
/* To store dsc cfg name passed by bootloader */
char dsc_cfg_np_name[MDSS_MAX_PANEL_LEN];
struct mdss_panel_data *next;
+
+ /*
+ * Set when the power of the panel is disabled while dsi/mdp
+ * are still on; panel will recover after unblank
+ */
+ bool panel_disable_mode;
};
struct mdss_panel_debugfs_info {
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index 9a00eff9ade9..eab7bcaaa156 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -36,6 +36,8 @@
#include "mdss_smmu.h"
#include "mdss_debug.h"
+#define SZ_4G 0xF0000000 /* note: 3.75 GiB, not a full 4 GiB */
+
static DEFINE_MUTEX(mdp_iommu_lock);
void mdss_iommu_lock(void)
@@ -536,13 +538,13 @@ int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev)
}
static struct mdss_smmu_domain mdss_mdp_unsec = {
- "mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_128K, (SZ_1G - SZ_128K)};
+ "mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_128K, (SZ_4G - SZ_128K)};
static struct mdss_smmu_domain mdss_rot_unsec = {
- NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128K, (SZ_1G - SZ_128K)};
+ NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128K, (SZ_4G - SZ_128K)};
static struct mdss_smmu_domain mdss_mdp_sec = {
- "mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_1G, SZ_2G};
+ "mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_128K, (SZ_4G - SZ_128K)};
static struct mdss_smmu_domain mdss_rot_sec = {
- NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_1G, SZ_2G};
+ NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_128K, (SZ_4G - SZ_128K)};
static const struct of_device_id mdss_smmu_dt_match[] = {
{ .compatible = "qcom,smmu_mdp_unsec", .data = &mdss_mdp_unsec},
@@ -571,7 +573,6 @@ int mdss_smmu_probe(struct platform_device *pdev)
struct mdss_smmu_domain smmu_domain;
const struct of_device_id *match;
struct dss_module_power *mp;
- int disable_htw = 1;
char name[MAX_CLIENT_NAME_LEN];
const __be32 *address = NULL, *size = NULL;
@@ -665,13 +666,6 @@ int mdss_smmu_probe(struct platform_device *pdev)
goto disable_power;
}
- rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- pr_err("couldn't disable coherent HTW\n");
- goto release_mapping;
- }
-
if (smmu_domain.domain == MDSS_IOMMU_DOMAIN_SECURE ||
smmu_domain.domain == MDSS_IOMMU_DOMAIN_ROT_SECURE) {
int secure_vmid = VMID_CP_PIXEL;
diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/video/fbdev/msm/msm_ext_display.c
index e229f52057d4..4899231787f2 100644
--- a/drivers/video/fbdev/msm/msm_ext_display.c
+++ b/drivers/video/fbdev/msm/msm_ext_display.c
@@ -365,6 +365,7 @@ static int msm_ext_disp_hpd(struct platform_device *pdev,
ext_disp->ops->get_audio_edid_blk = NULL;
ext_disp->ops->cable_status = NULL;
ext_disp->ops->get_intf_id = NULL;
+ ext_disp->ops->teardown_done = NULL;
}
ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
@@ -463,6 +464,20 @@ end:
return ret;
}
+static void msm_ext_disp_teardown_done(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct msm_ext_disp_init_data *data = NULL;
+
+ ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+ if (ret || !data) {
+ pr_err("invalid input");
+ return;
+ }
+
+	if (data->codec_ops.teardown_done)
+		data->codec_ops.teardown_done(data->pdev);
+}
+
static int msm_ext_disp_get_intf_id(struct platform_device *pdev)
{
int ret = 0;
@@ -545,6 +560,8 @@ static int msm_ext_disp_notify(struct platform_device *pdev,
msm_ext_disp_cable_status;
ext_disp->ops->get_intf_id =
msm_ext_disp_get_intf_id;
+ ext_disp->ops->teardown_done =
+ msm_ext_disp_teardown_done;
}
switch_set_state(&ext_disp->audio_sdev, (int)new_state);
@@ -614,6 +631,7 @@ static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack)
ext_disp->ops->get_audio_edid_blk = NULL;
ext_disp->ops->cable_status = NULL;
ext_disp->ops->get_intf_id = NULL;
+ ext_disp->ops->teardown_done = NULL;
}
ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index 866bb18efefe..e818f5ac7a26 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mount.h>
+#include <linux/file.h>
#include "ecryptfs_kernel.h"
struct ecryptfs_open_req {
@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
(*lower_file) = dentry_open(&req.path, flags, cred);
if (!IS_ERR(*lower_file))
- goto out;
+ goto have_file;
if ((flags & O_ACCMODE) == O_RDONLY) {
rc = PTR_ERR((*lower_file));
goto out;
@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
mutex_unlock(&ecryptfs_kthread_ctl.mux);
wake_up(&ecryptfs_kthread_ctl.wait);
wait_for_completion(&req.done);
- if (IS_ERR(*lower_file))
+ if (IS_ERR(*lower_file)) {
rc = PTR_ERR(*lower_file);
+ goto out;
+ }
+have_file:
+ if ((*lower_file)->f_op->mmap == NULL) {
+ fput(*lower_file);
+ *lower_file = NULL;
+ rc = -EMEDIUMTYPE;
+ }
out:
return rc;
}
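
The hunk rejects lower files whose file_operations lack mmap (procfs files, for example), returning -EMEDIUMTYPE and dropping the reference. A sketch of the same guard factored out; the helper name is hypothetical:

#include <linux/file.h>
#include <linux/fs.h>

static int ecryptfs_reject_non_mmapable(struct file **lower_file)
{
	if ((*lower_file)->f_op->mmap)
		return 0;

	fput(*lower_file);		/* drop the reference we hold */
	*lower_file = NULL;
	return -EMEDIUMTYPE;
}
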
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b15e6edb8f2c..933f1866b811 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3602,6 +3602,7 @@ int ext4_can_truncate(struct inode *inode)
int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
+#if 0
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
@@ -3725,6 +3726,12 @@ out_dio:
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
+#else
+ /*
+ * Disabled as per b/28760453
+ */
+ return -EOPNOTSUPP;
+#endif
}
int ext4_inode_attach_jinode(struct inode *inode)
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
index e8671942c2a0..785af63acabd 100644
--- a/fs/fuse/passthrough.c
+++ b/fs/fuse/passthrough.c
@@ -71,10 +71,12 @@ static ssize_t fuse_passthrough_read_write_iter(struct kiocb *iocb,
struct fuse_file *ff;
struct file *fuse_file, *passthrough_filp;
struct inode *fuse_inode, *passthrough_inode;
+ struct fuse_conn *fc;
ff = iocb->ki_filp->private_data;
fuse_file = iocb->ki_filp;
passthrough_filp = ff->passthrough_filp;
+ fc = ff->fc;
/* lock passthrough file to prevent it from being released */
get_file(passthrough_filp);
@@ -88,7 +90,9 @@ static ssize_t fuse_passthrough_read_write_iter(struct kiocb *iocb,
ret_val = passthrough_filp->f_op->write_iter(iocb, iter);
if (ret_val >= 0 || ret_val == -EIOCBQUEUED) {
+ spin_lock(&fc->lock);
fsstack_copy_inode_size(fuse_inode, passthrough_inode);
+ spin_unlock(&fc->lock);
fsstack_copy_attr_times(fuse_inode, passthrough_inode);
}
} else {
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 361ab4ee42fc..ec649c92d270 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
if (IS_ERR(sb))
return ERR_CAST(sb);
+ /*
+ * procfs isn't actually a stacking filesystem; however, there is
+ * too much magic going on inside it to permit stacking things on
+ * top of it
+ */
+ sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
+
if (!proc_parse_options(options, ns)) {
deactivate_locked_super(sb);
return ERR_PTR(-EINVAL);
diff --git a/include/dt-bindings/clock/qcom,gcc-msmfalcon.h b/include/dt-bindings/clock/qcom,gcc-msmfalcon.h
index 6860d78e020e..aa76fbad5083 100644
--- a/include/dt-bindings/clock/qcom,gcc-msmfalcon.h
+++ b/include/dt-bindings/clock/qcom,gcc-msmfalcon.h
@@ -14,173 +14,191 @@
#ifndef _DT_BINDINGS_CLK_MSM_GCC_FALCON_H
#define _DT_BINDINGS_CLK_MSM_GCC_FALCON_H
-#define BIMC_HMSS_AXI_CLK_SRC 0
-#define BLSP1_QUP1_I2C_APPS_CLK_SRC 1
-#define BLSP1_QUP1_SPI_APPS_CLK_SRC 2
-#define BLSP1_QUP2_I2C_APPS_CLK_SRC 3
-#define BLSP1_QUP2_SPI_APPS_CLK_SRC 4
-#define BLSP1_QUP3_I2C_APPS_CLK_SRC 5
-#define BLSP1_QUP3_SPI_APPS_CLK_SRC 6
-#define BLSP1_QUP4_I2C_APPS_CLK_SRC 7
-#define BLSP1_QUP4_SPI_APPS_CLK_SRC 8
-#define BLSP1_UART1_APPS_CLK_SRC 9
-#define BLSP1_UART2_APPS_CLK_SRC 10
-#define BLSP2_QUP1_I2C_APPS_CLK_SRC 11
-#define BLSP2_QUP1_SPI_APPS_CLK_SRC 12
-#define BLSP2_QUP2_I2C_APPS_CLK_SRC 13
-#define BLSP2_QUP2_SPI_APPS_CLK_SRC 14
-#define BLSP2_QUP3_I2C_APPS_CLK_SRC 15
-#define BLSP2_QUP3_SPI_APPS_CLK_SRC 16
-#define BLSP2_QUP4_I2C_APPS_CLK_SRC 17
-#define BLSP2_QUP4_SPI_APPS_CLK_SRC 18
-#define BLSP2_UART1_APPS_CLK_SRC 19
-#define BLSP2_UART2_APPS_CLK_SRC 20
-#define GCC_AGGRE2_UFS_AXI_CLK 21
-#define GCC_AGGRE2_USB3_AXI_CLK 22
-#define GCC_BIMC_GFX_CLK 23
-#define GCC_BIMC_HMSS_AXI_CLK 24
-#define GCC_BIMC_MSS_Q6_AXI_CLK 25
-#define GCC_BLSP1_AHB_CLK 26
-#define GCC_BLSP1_QUP1_I2C_APPS_CLK 27
-#define GCC_BLSP1_QUP1_SPI_APPS_CLK 28
-#define GCC_BLSP1_QUP2_I2C_APPS_CLK 29
-#define GCC_BLSP1_QUP2_SPI_APPS_CLK 30
-#define GCC_BLSP1_QUP3_I2C_APPS_CLK 31
-#define GCC_BLSP1_QUP3_SPI_APPS_CLK 32
-#define GCC_BLSP1_QUP4_I2C_APPS_CLK 33
-#define GCC_BLSP1_QUP4_SPI_APPS_CLK 34
-#define GCC_BLSP1_UART1_APPS_CLK 35
-#define GCC_BLSP1_UART2_APPS_CLK 36
-#define GCC_BLSP2_AHB_CLK 37
-#define GCC_BLSP2_QUP1_I2C_APPS_CLK 38
-#define GCC_BLSP2_QUP1_SPI_APPS_CLK 39
-#define GCC_BLSP2_QUP2_I2C_APPS_CLK 40
-#define GCC_BLSP2_QUP2_SPI_APPS_CLK 41
-#define GCC_BLSP2_QUP3_I2C_APPS_CLK 42
-#define GCC_BLSP2_QUP3_SPI_APPS_CLK 43
-#define GCC_BLSP2_QUP4_I2C_APPS_CLK 44
-#define GCC_BLSP2_QUP4_SPI_APPS_CLK 45
-#define GCC_BLSP2_UART1_APPS_CLK 46
-#define GCC_BLSP2_UART2_APPS_CLK 47
-#define GCC_BOOT_ROM_AHB_CLK 48
-#define GCC_CFG_NOC_USB2_AXI_CLK 49
-#define GCC_CFG_NOC_USB3_AXI_CLK 50
-#define GCC_GLM_AHB_CLK 51
-#define GCC_GLM_CLK 52
-#define GCC_GP1_CLK 53
-#define GCC_GP2_CLK 54
-#define GCC_GP3_CLK 55
-#define GCC_GPU_BIMC_GFX_CLK 56
-#define GCC_GPU_BIMC_GFX_SRC_CLK 57
-#define GCC_GPU_CFG_AHB_CLK 58
-#define GCC_GPU_SNOC_DVM_GFX_CLK 59
-#define GCC_HMSS_AHB_CLK 60
-#define GCC_HMSS_DVM_BUS_CLK 61
-#define GCC_HMSS_RBCPR_CLK 62
-#define GCC_MMSS_NOC_CFG_AHB_CLK 63
-#define GCC_MMSS_QM_AHB_CLK 64
-#define GCC_MMSS_QM_CORE_CLK 65
-#define GCC_MMSS_SYS_NOC_AXI_CLK 66
-#define GCC_PDM2_CLK 67
-#define GCC_PDM_AHB_CLK 68
-#define GCC_PRNG_AHB_CLK 69
-#define GCC_QSPI_AHB_CLK 70
-#define GCC_QSPI_SER_CLK 71
-#define GCC_SDCC1_AHB_CLK 72
-#define GCC_SDCC1_APPS_CLK 73
-#define GCC_SDCC1_ICE_CORE_CLK 74
-#define GCC_SDCC2_AHB_CLK 75
-#define GCC_SDCC2_APPS_CLK 76
-#define GCC_UFS_AHB_CLK 77
-#define GCC_UFS_AXI_CLK 78
-#define GCC_UFS_ICE_CORE_CLK 79
-#define GCC_UFS_PHY_AUX_CLK 80
-#define GCC_UFS_RX_SYMBOL_0_CLK 81
-#define GCC_UFS_RX_SYMBOL_1_CLK 82
-#define GCC_UFS_TX_SYMBOL_0_CLK 83
-#define GCC_UFS_UNIPRO_CORE_CLK 84
-#define GCC_USB20_MASTER_CLK 85
-#define GCC_USB20_MOCK_UTMI_CLK 86
-#define GCC_USB20_SLEEP_CLK 87
-#define GCC_USB30_MASTER_CLK 88
-#define GCC_USB30_MOCK_UTMI_CLK 89
-#define GCC_USB30_SLEEP_CLK 90
-#define GCC_USB3_PHY_AUX_CLK 91
-#define GCC_USB3_PHY_PIPE_CLK 92
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK 93
-#define GCC_WCSS_AHB_S0_CLK 94
-#define GCC_WCSS_AXI_M_CLK 95
-#define GCC_WCSS_ECAHB_CLK 96
-#define GCC_WCSS_SHDREG_AHB_CLK 97
-#define GLM_CLK_SRC 98
-#define GP1_CLK_SRC 99
-#define GP2_CLK_SRC 100
-#define GP3_CLK_SRC 101
-#define GPLL0 102
-#define GPLL0_OUT_AUX 103
-#define GPLL0_OUT_AUX2 104
-#define GPLL0_OUT_EARLY 105
-#define GPLL0_OUT_MAIN 106
-#define GPLL0_OUT_TEST 107
-#define GPLL1 108
-#define GPLL1_OUT_AUX 109
-#define GPLL1_OUT_AUX2 110
-#define GPLL1_OUT_EARLY 111
-#define GPLL1_OUT_MAIN 112
-#define GPLL1_OUT_TEST 113
-#define GPLL2 114
-#define GPLL2_OUT_AUX 115
-#define GPLL2_OUT_AUX2 116
-#define GPLL2_OUT_EARLY 117
-#define GPLL2_OUT_MAIN 118
-#define GPLL2_OUT_TEST 119
-#define GPLL3 120
-#define GPLL3_OUT_AUX 121
-#define GPLL3_OUT_AUX2 122
-#define GPLL3_OUT_EARLY 123
-#define GPLL3_OUT_MAIN 124
-#define GPLL3_OUT_TEST 125
-#define GPLL4 126
-#define GPLL4_OUT_AUX 127
-#define GPLL4_OUT_AUX2 128
-#define GPLL4_OUT_EARLY 129
-#define GPLL4_OUT_MAIN 130
-#define GPLL4_OUT_TEST 131
-#define GPLL5 132
-#define GPLL5_OUT_AUX 133
-#define GPLL5_OUT_AUX2 134
-#define GPLL5_OUT_EARLY 135
-#define GPLL5_OUT_MAIN 136
-#define GPLL5_OUT_TEST 137
-#define GPLL6 138
-#define GPLL6_OUT_AUX 139
-#define GPLL6_OUT_AUX2 140
-#define GPLL6_OUT_EARLY 141
-#define GPLL6_OUT_MAIN 142
-#define GPLL6_OUT_TEST 143
-#define HMSS_AHB_CLK_SRC 144
-#define HMSS_GPLL0_CLK_SRC 145
-#define HMSS_GPLL4_CLK_SRC 146
-#define HMSS_RBCPR_CLK_SRC 147
-#define MMSS_QM_CORE_CLK_SRC 148
-#define PDM2_CLK_SRC 149
-#define QSPI_SER_CLK_SRC 150
-#define SDCC1_APPS_CLK_SRC 151
-#define SDCC1_ICE_CORE_CLK_SRC 152
-#define SDCC2_APPS_CLK_SRC 153
-#define UFS_AXI_CLK_SRC 154
-#define UFS_ICE_CORE_CLK_SRC 155
-#define UFS_PHY_AUX_CLK_SRC 156
-#define UFS_UNIPRO_CORE_CLK_SRC 157
-#define USB20_MASTER_CLK_SRC 158
-#define USB20_MOCK_UTMI_CLK_SRC 159
-#define USB30_MASTER_CLK_SRC 160
-#define USB30_MOCK_UTMI_CLK_SRC 161
-#define USB3_PHY_AUX_CLK_SRC 162
+/* Hardware/Dummy/Voter clocks */
+#define GCC_XO 0
+#define GCC_GPLL0_EARLY_DIV 1
+#define GCC_GPLL1_EARLY_DIV 2
+#define GCC_CE1_AHB_M_CLK 3
+#define GCC_CE1_AXI_M_CLK 4
-#define UFS_GDSC 0
-#define USB_30_GDSC 1
+/* RCGs and Branches */
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 10
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 11
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 12
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 13
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 14
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 15
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 16
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 17
+#define BLSP1_UART1_APPS_CLK_SRC 18
+#define BLSP1_UART2_APPS_CLK_SRC 19
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 20
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 21
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 22
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 23
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 24
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 25
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 26
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 27
+#define BLSP2_UART1_APPS_CLK_SRC 28
+#define BLSP2_UART2_APPS_CLK_SRC 29
+#define GCC_AGGRE2_UFS_AXI_CLK 30
+#define GCC_AGGRE2_USB3_AXI_CLK 31
+#define GCC_BIMC_GFX_CLK 32
+#define GCC_BIMC_HMSS_AXI_CLK 33
+#define GCC_BIMC_MSS_Q6_AXI_CLK 34
+#define GCC_BLSP1_AHB_CLK 35
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 36
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 37
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 38
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 39
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 40
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 41
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 42
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 43
+#define GCC_BLSP1_UART1_APPS_CLK 44
+#define GCC_BLSP1_UART2_APPS_CLK 45
+#define GCC_BLSP2_AHB_CLK 46
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 47
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 48
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 49
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 50
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 51
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 52
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 53
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 54
+#define GCC_BLSP2_UART1_APPS_CLK 55
+#define GCC_BLSP2_UART2_APPS_CLK 56
+#define GCC_BOOT_ROM_AHB_CLK 57
+#define GCC_CFG_NOC_USB2_AXI_CLK 58
+#define GCC_CFG_NOC_USB3_AXI_CLK 59
+#define GCC_DCC_AHB_CLK 60
+#define GCC_GP1_CLK 61
+#define GCC_GP2_CLK 62
+#define GCC_GP3_CLK 63
+#define GCC_GPU_BIMC_GFX_CLK 64
+#define GCC_GPU_BIMC_GFX_SRC_CLK 65
+#define GCC_GPU_CFG_AHB_CLK 66
+#define GCC_GPU_GPLL0_CLK 67
+#define GCC_GPU_GPLL0_DIV_CLK 68
+#define GCC_GPU_SNOC_DVM_GFX_CLK 69
+#define GCC_HMSS_AHB_CLK 70
+#define GCC_HMSS_DVM_BUS_CLK 71
+#define GCC_HMSS_RBCPR_CLK 72
+#define GCC_MMSS_GPLL0_CLK 73
+#define GCC_MMSS_GPLL0_DIV_CLK 74
+#define GCC_MMSS_NOC_CFG_AHB_CLK 75
+#define GCC_MMSS_SYS_NOC_AXI_CLK 76
+#define GCC_MSS_CFG_AHB_CLK 77
+#define GCC_MSS_GPLL0_DIV_CLK 78
+#define GCC_MSS_MNOC_BIMC_AXI_CLK 79
+#define GCC_MSS_Q6_BIMC_AXI_CLK 80
+#define GCC_MSS_SNOC_AXI_CLK 81
+#define GCC_PDM2_CLK 82
+#define GCC_PDM_AHB_CLK 83
+#define GCC_PRNG_AHB_CLK 84
+#define GCC_QSPI_AHB_CLK 85
+#define GCC_QSPI_SER_CLK 86
+#define GCC_RX0_USB2_CLKREF_CLK 87
+#define GCC_RX1_USB2_CLKREF_CLK 88
+#define GCC_RX2_QLINK_CLKREF_CLK 89
+#define GCC_SDCC1_AHB_CLK 90
+#define GCC_SDCC1_APPS_CLK 91
+#define GCC_SDCC1_ICE_CORE_CLK 92
+#define GCC_SDCC2_AHB_CLK 93
+#define GCC_SDCC2_APPS_CLK 94
+#define GCC_UFS_AHB_CLK 95
+#define GCC_UFS_AXI_CLK 96
+#define GCC_UFS_CLKREF_CLK 97
+#define GCC_UFS_ICE_CORE_CLK 98
+#define GCC_UFS_PHY_AUX_CLK 99
+#define GCC_UFS_RX_SYMBOL_0_CLK 100
+#define GCC_UFS_RX_SYMBOL_1_CLK 101
+#define GCC_UFS_TX_SYMBOL_0_CLK 102
+#define GCC_UFS_UNIPRO_CORE_CLK 103
+#define GCC_USB20_MASTER_CLK 104
+#define GCC_USB20_MOCK_UTMI_CLK 105
+#define GCC_USB20_SLEEP_CLK 106
+#define GCC_USB30_MASTER_CLK 107
+#define GCC_USB30_MOCK_UTMI_CLK 108
+#define GCC_USB30_SLEEP_CLK 109
+#define GCC_USB3_CLKREF_CLK 110
+#define GCC_USB3_PHY_AUX_CLK 111
+#define GCC_USB3_PHY_PIPE_CLK 112
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 113
+#define GP1_CLK_SRC 114
+#define GP2_CLK_SRC 115
+#define GP3_CLK_SRC 116
+#define GPLL0 117
+#define GPLL0_OUT_AUX 118
+#define GPLL0_OUT_AUX2 119
+#define GPLL0_OUT_EARLY 120
+#define GPLL0_OUT_MAIN 121
+#define GPLL0_OUT_TEST 122
+#define GPLL1 123
+#define GPLL1_OUT_AUX 124
+#define GPLL1_OUT_AUX2 125
+#define GPLL1_OUT_EARLY 126
+#define GPLL1_OUT_MAIN 127
+#define GPLL1_OUT_TEST 128
+#define GPLL2 129
+#define GPLL2_OUT_AUX 130
+#define GPLL2_OUT_AUX2 131
+#define GPLL2_OUT_EARLY 132
+#define GPLL2_OUT_MAIN 133
+#define GPLL2_OUT_TEST 134
+#define GPLL3 135
+#define GPLL3_OUT_AUX 136
+#define GPLL3_OUT_AUX2 137
+#define GPLL3_OUT_EARLY 138
+#define GPLL3_OUT_MAIN 139
+#define GPLL3_OUT_TEST 140
+#define GPLL4 141
+#define GPLL4_OUT_AUX 142
+#define GPLL4_OUT_AUX2 143
+#define GPLL4_OUT_EARLY 144
+#define GPLL4_OUT_MAIN 145
+#define GPLL4_OUT_TEST 146
+#define GPLL5 147
+#define GPLL5_OUT_AUX 148
+#define GPLL5_OUT_AUX2 149
+#define GPLL5_OUT_EARLY 150
+#define GPLL5_OUT_MAIN 151
+#define GPLL5_OUT_TEST 152
+#define GPLL6 153
+#define GPLL6_OUT_AUX 154
+#define GPLL6_OUT_AUX2 155
+#define GPLL6_OUT_EARLY 156
+#define GPLL6_OUT_MAIN 157
+#define GPLL6_OUT_TEST 158
+#define HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 159
+#define HMSS_AHB_CLK_SRC 160
+#define HMSS_GPLL0_CLK_SRC 161
+#define HMSS_GPLL4_CLK_SRC 162
+#define HMSS_RBCPR_CLK_SRC 163
+#define PDM2_CLK_SRC 164
+#define QSPI_SER_CLK_SRC 165
+#define SDCC1_APPS_CLK_SRC 166
+#define SDCC1_ICE_CORE_CLK_SRC 167
+#define SDCC2_APPS_CLK_SRC 168
+#define UFS_AXI_CLK_SRC 169
+#define UFS_ICE_CORE_CLK_SRC 170
+#define UFS_PHY_AUX_CLK_SRC 171
+#define UFS_UNIPRO_CORE_CLK_SRC 172
+#define USB20_MASTER_CLK_SRC 173
+#define USB20_MOCK_UTMI_CLK_SRC 174
+#define USB30_MASTER_CLK_SRC 175
+#define USB30_MOCK_UTMI_CLK_SRC 176
+#define USB3_PHY_AUX_CLK_SRC 177
+#define GPLL0_OUT_MSSCC 178
+#define GCC_UFS_AXI_HW_CTL_CLK 179
+#define GCC_UFS_ICE_CORE_HW_CTL_CLK 180
+#define GCC_UFS_PHY_AUX_HW_CTL_CLK 181
+#define GCC_UFS_UNIPRO_CORE_HW_CTL_CLK 182
+#define HLOS1_VOTE_TURING_ADSP_SMMU_CLK 183
+#define HLOS2_VOTE_TURING_ADSP_SMMU_CLK 184
+/* Block resets */
#define GCC_QUSB2PHY_PRIM_BCR 0
#define GCC_QUSB2PHY_SEC_BCR 1
#define GCC_UFS_BCR 2
@@ -191,52 +209,4 @@
#define GCC_USB_30_BCR 7
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8
-/* RPM controlled clocks */
-#define RPM_CE1_CLK 1
-#define RPM_CE1_A_CLK 2
-#define RPM_CXO_CLK_SRC 3
-#define RPM_BIMC_CLK 4
-#define RPM_BIMC_A_CLK 5
-#define RPM_CNOC_CLK 6
-#define RPM_CNOC_A_CLK 7
-#define RPM_SNOC_CLK 8
-#define RPM_SNOC_A_CLK 9
-#define RPM_CNOC_PERIPH_CLK 10
-#define RPM_CNOC_PERIPH_A_CLK 11
-#define RPM_CNOC_PERIPH_KEEPALIVE_A_CLK 12
-#define RPM_LN_BB_CLK1 13
-#define RPM_LN_BB_CLK1_AO 14
-#define RPM_LN_BB_CLK1_PIN 15
-#define RPM_LN_BB_CLK1_PIN_AO 16
-#define RPM_BIMC_MSMBUS_CLK 17
-#define RPM_BIMC_MSMBUS_A_CLK 18
-#define RPM_CNOC_MSMBUS_CLK 19
-#define RPM_CNOC_MSMBUS_A_CLK 20
-#define RPM_CXO_CLK_SRC_AO 21
-#define RPM_CXO_DWC3_CLK 22
-#define RPM_CXO_LPM_CLK 23
-#define RPM_CXO_OTG_CLK 24
-#define RPM_CXO_PIL_LPASS_CLK 25
-#define RPM_CXO_PIL_SSC_CLK 26
-#define RPM_CXO_PIL_SPSS_CLK 27
-#define RPM_DIV_CLK1 28
-#define RPM_DIV_CLK1_AO 29
-#define RPM_IPA_CLK 30
-#define RPM_IPA_A_CLK 31
-#define RPM_MCD_CE1_CLK 32
-#define RPM_MMSSNOC_AXI_CLK 33
-#define RPM_MMSSNOC_AXI_A_CLK 34
-#define RPM_QCEDEV_CE1_CLK 35
-#define RPM_QCRYPTO_CE1_CLK 36
-#define RPM_QDSS_CLK 37
-#define RPM_QDSS_A_CLK 38
-#define RPM_QSEECOM_CE1_CLK 39
-#define RPM_RF_CLK2 40
-#define RPM_RF_CLK2_AO 41
-#define RPM_SCM_CE1_CLK 42
-#define RPM_SNOC_MSMBUS_CLK 43
-#define RPM_SNOC_MSMBUS_A_CLK 44
-#define RPM_AGGRE2_NOC_CLK 45
-#define RPM_AGGRE2_NOC_A_CLK 46
-
#endif
diff --git a/include/dt-bindings/clock/qcom,gpu-msmfalcon.h b/include/dt-bindings/clock/qcom,gpu-msmfalcon.h
index 427c6aae05d3..2ef1e34db3a1 100644
--- a/include/dt-bindings/clock/qcom,gpu-msmfalcon.h
+++ b/include/dt-bindings/clock/qcom,gpu-msmfalcon.h
@@ -14,27 +14,32 @@
#ifndef _DT_BINDINGS_CLK_MSM_GPU_FALCON_H
#define _DT_BINDINGS_CLK_MSM_GPU_FALCON_H
-#define GFX3D_CLK_SRC 0
-#define GPU_PLL0_PLL 1
-#define GPU_PLL0_PLL_OUT_AUX 2
-#define GPU_PLL0_PLL_OUT_AUX2 3
-#define GPU_PLL0_PLL_OUT_EARLY 4
-#define GPU_PLL0_PLL_OUT_MAIN 5
-#define GPU_PLL0_PLL_OUT_TEST 6
-#define GPU_PLL1_PLL 7
-#define GPU_PLL1_PLL_OUT_AUX 8
-#define GPU_PLL1_PLL_OUT_AUX2 9
-#define GPU_PLL1_PLL_OUT_EARLY 10
-#define GPU_PLL1_PLL_OUT_MAIN 11
-#define GPU_PLL1_PLL_OUT_TEST 12
-#define GPUCC_CXO_CLK 13
-#define GPUCC_GFX3D_CLK 14
-#define GPUCC_RBBMTIMER_CLK 15
-#define GPUCC_RBCPR_CLK 16
-#define RBBMTIMER_CLK_SRC 18
-#define RBCPR_CLK_SRC 19
+#define GFX3D_CLK_SRC 0
+#define GPU_PLL0_PLL 1
+#define GPU_PLL0_PLL_OUT_AUX 2
+#define GPU_PLL0_PLL_OUT_AUX2 3
+#define GPU_PLL0_PLL_OUT_EARLY 4
+#define GPU_PLL0_PLL_OUT_MAIN 5
+#define GPU_PLL0_PLL_OUT_TEST 6
+#define GPU_PLL1_PLL 7
+#define GPU_PLL1_PLL_OUT_AUX 8
+#define GPU_PLL1_PLL_OUT_AUX2 9
+#define GPU_PLL1_PLL_OUT_EARLY 10
+#define GPU_PLL1_PLL_OUT_MAIN 11
+#define GPU_PLL1_PLL_OUT_TEST 12
+#define GPUCC_CXO_CLK 13
+#define GPUCC_GFX3D_CLK 14
+#define GPUCC_RBBMTIMER_CLK 15
+#define GPUCC_RBCPR_CLK 16
+#define RBBMTIMER_CLK_SRC 17
+#define RBCPR_CLK_SRC 18
-#define GPU_CX_GDSC 0
-#define GPU_GX_GDSC 1
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#define GPUCC_GPU_CX_BCR 0
+#define GPUCC_GPU_GX_BCR 1
+#define GPUCC_RBCPR_BCR 2
+#define GPUCC_SPDM_BCR 3
#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index ee37c5c22f5f..bcaa1a552e8e 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -1,5 +1,6 @@
/*
* Copyright 2015 Linaro Limited
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -38,47 +39,31 @@
#define RPM_SFPB_CLK 20
#define RPM_SFPB_A_CLK 21
-/* msm8916 */
-#define RPM_SMD_XO_CLK_SRC 0
-#define RPM_SMD_XO_A_CLK_SRC 1
-#define RPM_SMD_PCNOC_CLK 2
-#define RPM_SMD_PCNOC_A_CLK 3
-#define RPM_SMD_SNOC_CLK 4
-#define RPM_SMD_SNOC_A_CLK 5
-#define RPM_SMD_BIMC_CLK 6
-#define RPM_SMD_BIMC_A_CLK 7
-#define RPM_SMD_QDSS_CLK 8
-#define RPM_SMD_QDSS_A_CLK 9
-#define RPM_SMD_BB_CLK1 10
-#define RPM_SMD_BB_CLK1_A 11
-#define RPM_SMD_BB_CLK2 12
-#define RPM_SMD_BB_CLK2_A 13
-#define RPM_SMD_RF_CLK1 14
-#define RPM_SMD_RF_CLK1_A 15
-#define RPM_SMD_RF_CLK2 16
-#define RPM_SMD_RF_CLK2_A 17
-#define RPM_SMD_BB_CLK1_PIN 18
-#define RPM_SMD_BB_CLK1_A_PIN 19
-#define RPM_SMD_BB_CLK2_PIN 20
-#define RPM_SMD_BB_CLK2_A_PIN 21
-#define RPM_SMD_RF_CLK1_PIN 22
-#define RPM_SMD_RF_CLK1_A_PIN 23
-#define RPM_SMD_RF_CLK2_PIN 24
-#define RPM_SMD_RF_CLK2_A_PIN 25
+/* msm8916 and msm8996 */
+#define RPM_XO_CLK_SRC 0
+#define RPM_XO_A_CLK_SRC 1
+#define RPM_PCNOC_CLK 2
+#define RPM_PCNOC_A_CLK 3
+#define RPM_SNOC_CLK 4
+#define RPM_SNOC_A_CLK 5
+#define RPM_BIMC_CLK 6
+#define RPM_BIMC_A_CLK 7
+#define RPM_QDSS_CLK 8
+#define RPM_QDSS_A_CLK 9
#define RPM_BB_CLK1 10
#define RPM_BB_CLK1_A 11
-#define RPM_BB_CLK2 12
-#define RPM_BB_CLK2_A 13
-#define RPM_RF_CLK1 14
-#define RPM_RF_CLK1_A 15
-#define RPM_RF_CLK2 16
-#define RPM_RF_CLK2_A 17
-#define RPM_BB_CLK1_PIN 18
-#define RPM_BB_CLK1_A_PIN 19
-#define RPM_BB_CLK2_PIN 20
-#define RPM_BB_CLK2_A_PIN 21
-#define RPM_RF_CLK1_PIN 22
-#define RPM_RF_CLK1_A_PIN 23
+#define RPM_BB_CLK1_PIN 12
+#define RPM_BB_CLK1_A_PIN 13
+#define RPM_BB_CLK2 14
+#define RPM_BB_CLK2_A 15
+#define RPM_BB_CLK2_PIN 16
+#define RPM_BB_CLK2_A_PIN 17
+#define RPM_RF_CLK1 18
+#define RPM_RF_CLK1_A 19
+#define RPM_RF_CLK1_PIN 20
+#define RPM_RF_CLK1_A_PIN 21
+#define RPM_RF_CLK2 22
+#define RPM_RF_CLK2_A 23
#define RPM_RF_CLK2_PIN 24
#define RPM_RF_CLK2_A_PIN 25
#define RPM_AGGR1_NOC_CLK 26
@@ -101,5 +86,47 @@
#define RPM_DIV_CLK3_AO 43
#define RPM_LN_BB_CLK 44
#define RPM_LN_BB_A_CLK 45
+#define RPM_LN_BB_CLK1 46
+#define RPM_LN_BB_CLK1_AO 47
+#define RPM_LN_BB_CLK1_PIN 48
+#define RPM_LN_BB_CLK1_PIN_AO 49
+#define RPM_LN_BB_CLK2 50
+#define RPM_LN_BB_CLK2_AO 51
+#define RPM_LN_BB_CLK2_PIN 52
+#define RPM_LN_BB_CLK2_PIN_AO 53
+#define RPM_LN_BB_CLK3 54
+#define RPM_LN_BB_CLK3_AO 55
+#define RPM_LN_BB_CLK3_PIN 56
+#define RPM_LN_BB_CLK3_PIN_AO 57
+#define RPM_CNOC_PERIPH_CLK 58
+#define RPM_CNOC_PERIPH_A_CLK 59
+
+/* Voter clocks */
+#define MMSSNOC_AXI_CLK 60
+#define MMSSNOC_AXI_A_CLK 61
+#define MMSSNOC_GDS_CLK 62
+#define BIMC_MSMBUS_CLK 63
+#define BIMC_MSMBUS_A_CLK 64
+#define CNOC_MSMBUS_CLK 65
+#define CNOC_MSMBUS_A_CLK 66
+#define PNOC_KEEPALIVE_A_CLK 67
+#define PNOC_MSMBUS_CLK 68
+#define PNOC_MSMBUS_A_CLK 69
+#define PNOC_PM_CLK 70
+#define PNOC_SPS_CLK 71
+#define MCD_CE1_CLK 72
+#define QCEDEV_CE1_CLK 73
+#define QCRYPTO_CE1_CLK 74
+#define QSEECOM_CE1_CLK 75
+#define SCM_CE1_CLK 76
+#define SNOC_MSMBUS_CLK 77
+#define SNOC_MSMBUS_A_CLK 78
+#define CXO_DWC3_CLK 79
+#define CXO_LPM_CLK 80
+#define CXO_OTG_CLK 81
+#define CXO_PIL_LPASS_CLK 82
+#define CXO_PIL_SSC_CLK 83
+#define CXO_PIL_CDSP_CLK 84
+#define CNOC_PERIPH_KEEPALIVE_A_CLK 85
#endif
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 1a96fdaa33d5..e133705d794a 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -26,6 +26,10 @@ SUBSYS(cpu)
SUBSYS(cpuacct)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_SCHEDTUNE)
+SUBSYS(schedtune)
+#endif
+
#if IS_ENABLED(CONFIG_BLK_CGROUP)
SUBSYS(io)
#endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 59915ea5373c..0eab4811ee92 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -53,6 +53,7 @@ extern int nr_cpu_ids;
* cpu_present_mask - has bit 'cpu' set iff cpu is populated
* cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
* cpu_active_mask - has bit 'cpu' set iff cpu available to migration
+ * cpu_isolated_mask- has bit 'cpu' set iff cpu isolated
*
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
*
@@ -89,25 +90,30 @@ extern const struct cpumask *const cpu_possible_mask;
extern const struct cpumask *const cpu_online_mask;
extern const struct cpumask *const cpu_present_mask;
extern const struct cpumask *const cpu_active_mask;
+extern const struct cpumask *const cpu_isolated_mask;
#if NR_CPUS > 1
#define num_online_cpus() cpumask_weight(cpu_online_mask)
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
+#define num_isolated_cpus() cpumask_weight(cpu_isolated_mask)
#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
+#define cpu_isolated(cpu) cpumask_test_cpu((cpu), cpu_isolated_mask)
#else
#define num_online_cpus() 1U
#define num_possible_cpus() 1U
#define num_present_cpus() 1U
#define num_active_cpus() 1U
+#define num_isolated_cpus() 0U
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
#define cpu_active(cpu) ((cpu) == 0)
+#define cpu_isolated(cpu) ((cpu) == 0)
#endif
/* verify cpu argument to cpumask_* operators */
@@ -714,12 +720,14 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+#define for_each_isolated_cpu(cpu) for_each_cpu((cpu), cpu_isolated_mask)
/* Wrappers for arch boot code to manipulate normally-constant masks */
void set_cpu_possible(unsigned int cpu, bool possible);
void set_cpu_present(unsigned int cpu, bool present);
void set_cpu_online(unsigned int cpu, bool online);
void set_cpu_active(unsigned int cpu, bool active);
+void set_cpu_isolated(unsigned int cpu, bool isolated);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
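
A sketch of how a caller might combine the new isolation accessors with the existing mask iterators; pick_non_isolated_cpu is a hypothetical helper:

#include <linux/cpumask.h>

static int pick_non_isolated_cpu(const struct cpumask *candidates)
{
	int cpu;

	for_each_cpu(cpu, candidates) {
		if (cpu_online(cpu) && !cpu_isolated(cpu))
			return cpu;
	}

	return -1;	/* all candidates isolated or offline */
}
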
diff --git a/include/linux/device.h b/include/linux/device.h
index 9f27351c6b9c..4b4e2d5ce6e7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1023,6 +1023,7 @@ static inline bool device_supports_offline(struct device *dev)
extern void lock_device_hotplug(void);
extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
+extern void lock_device_hotplug_assert(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 768c44d9ea8b..0ae23ddbc528 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -144,10 +144,10 @@ the appropriate macros. */
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
#define MSG_MASK_TBL_CNT 25
-#define APPS_EVENT_LAST_ID 0x0B14
+#define APPS_EVENT_LAST_ID 0x0B2A
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 118
+#define MSG_SSID_0_LAST 120
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -163,7 +163,7 @@ the appropriate macros. */
#define MSG_SSID_7 4600
#define MSG_SSID_7_LAST 4615
#define MSG_SSID_8 5000
-#define MSG_SSID_8_LAST 5032
+#define MSG_SSID_8_LAST 5033
#define MSG_SSID_9 5500
#define MSG_SSID_9_LAST 5516
#define MSG_SSID_10 6000
@@ -193,7 +193,7 @@ the appropriate macros. */
#define MSG_SSID_22 10350
#define MSG_SSID_22_LAST 10377
#define MSG_SSID_23 10400
-#define MSG_SSID_23_LAST 10415
+#define MSG_SSID_23_LAST 10416
#define MSG_SSID_24 0xC000
#define MSG_SSID_24_LAST 0xC063
@@ -336,7 +336,9 @@ static const uint32_t msg_bld_masks_0[] = {
MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
MSG_LVL_MED,
MSG_LVL_MED,
- MSG_LVL_HIGH
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL
};
static const uint32_t msg_bld_masks_1[] = {
@@ -535,7 +537,8 @@ static const uint32_t msg_bld_masks_8[] = {
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_MED,
- MSG_LVL_MED
+ MSG_LVL_MED,
+ MSG_LVL_HIGH
};
static const uint32_t msg_bld_masks_9[] = {
@@ -848,13 +851,14 @@ static const uint32_t msg_bld_masks_23[] = {
MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_LOW,
+ MSG_LVL_LOW,
MSG_LVL_LOW
};
/* LOG CODES */
static const uint32_t log_code_last_tbl[] = {
0x0, /* EQUIP ID 0 */
- 0x1966, /* EQUIP ID 1 */
+ 0x1A02, /* EQUIP ID 1 */
0x0, /* EQUIP ID 2 */
0x0, /* EQUIP ID 3 */
0x4910, /* EQUIP ID 4 */
diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h
index aa9fcfe73162..ddd126c0fd85 100644
--- a/include/linux/dma-mapping-fast.h
+++ b/include/linux/dma-mapping-fast.h
@@ -16,6 +16,8 @@
#include <linux/iommu.h>
#include <linux/io-pgtable-fast.h>
+struct dma_iommu_mapping;
+
struct dma_fast_smmu_mapping {
struct device *dev;
struct iommu_domain *domain;
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2ead22dd74a0..952adcacc4cf 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -53,6 +53,7 @@ enum hrtimer_restart {
*
* 0x00 inactive
* 0x01 enqueued into rbtree
+ * 0x02 timer is pinned to a cpu
*
* The callback state is not part of the timer->state because clearing it would
* mean touching the timer after the callback, this makes it impossible to free
@@ -72,6 +73,8 @@ enum hrtimer_restart {
*/
#define HRTIMER_STATE_INACTIVE 0x00
#define HRTIMER_STATE_ENQUEUED 0x01
+#define HRTIMER_PINNED_SHIFT 1
+#define HRTIMER_STATE_PINNED (1 << HRTIMER_PINNED_SHIFT)
/**
* struct hrtimer - the basic hrtimer structure
@@ -357,6 +360,9 @@ DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
/* Exported timer functions: */
+/* To be used only from cpusets */
+extern void hrtimer_quiesce_cpu(void *cpup);
+
/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
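
HRTIMER_STATE_PINNED occupies bit 1 of timer->state, next to the enqueued bit. A sketch of how core timer code might test it; this is internal hrtimer state and the helper below is illustrative, not added by the patch:

#include <linux/hrtimer.h>

static bool hrtimer_state_pinned(const struct hrtimer *timer)
{
	/* bit 0: enqueued into rbtree, bit 1: pinned to a cpu */
	return timer->state & HRTIMER_STATE_PINNED;
}
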
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 56855724271c..c34a68ce901a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -121,7 +121,6 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_NESTING, /* two stages of translation */
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_PT_BASE_ADDR,
DOMAIN_ATTR_SECURE_VMID,
DOMAIN_ATTR_ATOMIC,
@@ -650,8 +649,8 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}
-static int iommu_dma_supported(struct iommu_domain *domain, struct device *dev,
- u64 mask)
+static inline int iommu_dma_supported(struct iommu_domain *domain,
+ struct device *dev, u64 mask)
{
return -EINVAL;
}
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 81da2aaa01e5..e66d388651b8 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -551,6 +551,7 @@ struct ipa_sys_connect_params {
bool skip_ep_cfg;
bool keep_ipa_awake;
bool napi_enabled;
+ bool recycle_enabled;
};
/**
diff --git a/include/linux/msm_dma_iommu_mapping.h b/include/linux/msm_dma_iommu_mapping.h
index 76451faa2073..73e69383b9b6 100644
--- a/include/linux/msm_dma_iommu_mapping.h
+++ b/include/linux/msm_dma_iommu_mapping.h
@@ -90,7 +90,7 @@ static inline void msm_dma_unmap_sg(struct device *dev,
{
}
-int msm_dma_unmap_all_for_dev(struct device *dev)
+static inline int msm_dma_unmap_all_for_dev(struct device *dev)
{
return 0;
}
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index 873a778d5370..59ba776b5f9b 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -108,6 +108,7 @@ struct msm_ext_disp_audio_codec_ops {
struct msm_ext_disp_audio_edid_blk *blk);
int (*cable_status)(struct platform_device *pdev, u32 vote);
int (*get_intf_id)(struct platform_device *pdev);
+ void (*teardown_done)(struct platform_device *pdev);
};
/*
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index c95a529b029b..fb2607dd365b 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -13,6 +13,14 @@
#define MSM_GSI_H
#include <linux/types.h>
+enum gsi_ver {
+ GSI_VER_ERR = 0,
+ GSI_VER_1_0 = 1,
+ GSI_VER_1_2 = 2,
+ GSI_VER_1_3 = 3,
+ GSI_VER_MAX,
+};
+
enum gsi_status {
GSI_STATUS_SUCCESS = 0,
GSI_STATUS_ERROR = 1,
@@ -65,6 +73,7 @@ enum gsi_intr_type {
/**
* gsi_per_props - Peripheral related properties
*
+ * @gsi: GSI core version
* @ee: EE where this driver and peripheral driver runs
* @intr: control interrupt type
* @intvec: write data for MSI write
@@ -87,6 +96,7 @@ enum gsi_intr_type {
*
*/
struct gsi_per_props {
+ enum gsi_ver ver;
unsigned int ee;
enum gsi_intr_type intr;
uint32_t intvec;
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index c2fa3ecb0dce..146efefde2a1 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -10,30 +10,96 @@
struct percpu_rw_semaphore {
struct rcu_sync rss;
- unsigned int __percpu *fast_read_ctr;
+ unsigned int __percpu *read_count;
struct rw_semaphore rw_sem;
- atomic_t slow_read_ctr;
- wait_queue_head_t write_waitq;
+ wait_queue_head_t writer;
+ int readers_block;
};
-extern void percpu_down_read(struct percpu_rw_semaphore *);
-extern int percpu_down_read_trylock(struct percpu_rw_semaphore *);
-extern void percpu_up_read(struct percpu_rw_semaphore *);
+extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+extern void __percpu_up_read(struct percpu_rw_semaphore *);
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+ might_sleep();
+
+ rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+ preempt_disable();
+ /*
+ * We are in an RCU-sched read-side critical section, so the writer
+ * cannot both change sem->state from readers_fast and start checking
+ * counters while we are here. So if we see !sem->state, we know that
+ * the writer won't be checking until we're past the preempt_enable()
+	 * and that once the synchronize_sched() is done, the writer will see
+	 * anything we did within this RCU-sched read-side critical section.
+ */
+ __this_cpu_inc(*sem->read_count);
+ if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ __percpu_down_read(sem, false); /* Unconditional memory barrier */
+ preempt_enable();
+ /*
+ * The barrier() from preempt_enable() prevents the compiler from
+ * bleeding the critical section out.
+ */
+}
+
+static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+{
+ int ret = 1;
+
+ preempt_disable();
+ /*
+ * Same as in percpu_down_read().
+ */
+ __this_cpu_inc(*sem->read_count);
+ if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+ preempt_enable();
+ /*
+ * The barrier() from preempt_enable() prevents the compiler from
+ * bleeding the critical section out.
+ */
+
+ if (ret)
+ rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+
+ return ret;
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+ /*
+ * The barrier() in preempt_disable() prevents the compiler from
+ * bleeding the critical section out.
+ */
+ preempt_disable();
+ /*
+ * Same as in percpu_down_read().
+ */
+ if (likely(rcu_sync_is_idle(&sem->rss)))
+ __this_cpu_dec(*sem->read_count);
+ else
+ __percpu_up_read(sem); /* Unconditional memory barrier */
+ preempt_enable();
+
+ rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+}
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
const char *, struct lock_class_key *);
+
extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
-#define percpu_init_rwsem(brw) \
+#define percpu_init_rwsem(sem) \
({ \
static struct lock_class_key rwsem_key; \
- __percpu_init_rwsem(brw, #brw, &rwsem_key); \
+ __percpu_init_rwsem(sem, #sem, &rwsem_key); \
})
-
#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
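
The reworked percpu_rw_semaphore keeps the same caller contract; only the internals move to the read_count/readers_block scheme, with the reader fast path now inlined above. A minimal usage sketch, where my_sem and the surrounding functions are purely illustrative:

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore my_sem;

static int my_setup(void)
{
	return percpu_init_rwsem(&my_sem);
}

static void my_reader(void)
{
	percpu_down_read(&my_sem);	/* usually just a per-cpu increment */
	/* ... read-side critical section ... */
	percpu_up_read(&my_sem);
}

static void my_writer(void)
{
	percpu_down_write(&my_sem);	/* forces readers onto the slow path */
	/* ... exclusive section ... */
	percpu_up_write(&my_sem);
}
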
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 03853d956b41..c477f60c3f01 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -218,6 +218,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_PD_ACTIVE,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_PARALLEL_DISABLE,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/qcom_tspp.h b/include/linux/qcom_tspp.h
new file mode 100644
index 000000000000..28e6695fb057
--- /dev/null
+++ b/include/linux/qcom_tspp.h
@@ -0,0 +1,99 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_TSPP_H_
+#define _MSM_TSPP_H_
+
+struct tspp_data_descriptor {
+ void *virt_base; /* logical address of the actual data */
+ phys_addr_t phys_base; /* physical address of the actual data */
+ u32 size; /* size of buffer in bytes */
+ int id; /* unique identifier */
+ void *user; /* user-defined data */
+};
+
+enum tspp_key_parity {
+ TSPP_KEY_PARITY_EVEN,
+ TSPP_KEY_PARITY_ODD
+};
+
+struct tspp_key {
+ enum tspp_key_parity parity;
+ int lsb;
+ int msb;
+};
+
+enum tspp_source {
+ TSPP_SOURCE_TSIF0,
+ TSPP_SOURCE_TSIF1,
+ TSPP_SOURCE_MEM,
+ TSPP_SOURCE_NONE = -1
+};
+
+enum tspp_mode {
+ TSPP_MODE_DISABLED,
+ TSPP_MODE_PES,
+ TSPP_MODE_RAW,
+ TSPP_MODE_RAW_NO_SUFFIX
+};
+
+enum tspp_tsif_mode {
+ TSPP_TSIF_MODE_LOOPBACK, /* loopback mode */
+ TSPP_TSIF_MODE_1, /* without sync */
+ TSPP_TSIF_MODE_2 /* with sync signal */
+};
+
+struct tspp_filter {
+ int pid;
+ int mask;
+ enum tspp_mode mode;
+ unsigned int priority; /* 0 - 15 */
+ int decrypt;
+ enum tspp_source source;
+};
+
+struct tspp_select_source {
+ enum tspp_source source;
+ enum tspp_tsif_mode mode;
+ int clk_inverse;
+ int data_inverse;
+ int sync_inverse;
+ int enable_inverse;
+};
+
+typedef void (tspp_notifier)(int channel_id, void *user);
+typedef void* (tspp_allocator)(int channel_id, u32 size,
+ phys_addr_t *phys_base, void *user);
+typedef void (tspp_memfree)(int channel_id, u32 size,
+ void *virt_base, phys_addr_t phys_base, void *user);
+
+/* Kernel API functions */
+int tspp_open_stream(u32 dev, u32 channel_id,
+ struct tspp_select_source *source);
+int tspp_close_stream(u32 dev, u32 channel_id);
+int tspp_open_channel(u32 dev, u32 channel_id);
+int tspp_close_channel(u32 dev, u32 channel_id);
+int tspp_get_ref_clk_counter(u32 dev,
+ enum tspp_source source, u32 *tcr_counter);
+int tspp_add_filter(u32 dev, u32 channel_id, struct tspp_filter *filter);
+int tspp_remove_filter(u32 dev, u32 channel_id, struct tspp_filter *filter);
+int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key);
+int tspp_register_notification(u32 dev, u32 channel_id, tspp_notifier *notify,
+ void *data, u32 timer_ms);
+int tspp_unregister_notification(u32 dev, u32 channel_id);
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id);
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id);
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count,
+ u32 size, u32 int_freq, tspp_allocator *alloc,
+ tspp_memfree *memfree, void *user);
+
+#endif /* _MSM_TSPP_H_ */
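
A sketch of the kernel API flow this header exposes, assuming device 0 and channel 0; the callback, PID, and mask values are illustrative only:

/* Drain ready buffers whenever the driver signals new data. */
static void my_tspp_notify(int channel_id, void *user)
{
	const struct tspp_data_descriptor *desc;

	while ((desc = tspp_get_buffer(0, channel_id)) != NULL) {
		/* ... consume desc->virt_base / desc->size ... */
		tspp_release_buffer(0, channel_id, desc->id);
	}
}

static int my_tspp_setup(void)
{
	struct tspp_select_source src = {
		.source = TSPP_SOURCE_TSIF0,
		.mode = TSPP_TSIF_MODE_2,	/* with sync signal */
	};
	struct tspp_filter flt = {
		.pid = 0x100,			/* illustrative PID */
		.mask = 0x1fff,
		.mode = TSPP_MODE_RAW,
		.priority = 0,
		.source = TSPP_SOURCE_TSIF0,
	};
	int rc;

	rc = tspp_open_channel(0, 0);
	if (rc)
		return rc;
	rc = tspp_open_stream(0, 0, &src);
	if (rc)
		goto err;
	rc = tspp_add_filter(0, 0, &flt);
	if (rc)
		goto err;
	return tspp_register_notification(0, 0, my_tspp_notify, NULL, 0);
err:
	tspp_close_channel(0, 0);
	return rc;
}
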
diff --git a/include/linux/qdsp6v2/apr.h b/include/linux/qdsp6v2/apr.h
index 84c822234e00..29deb3ca5ac7 100644
--- a/include/linux/qdsp6v2/apr.h
+++ b/include/linux/qdsp6v2/apr.h
@@ -172,8 +172,8 @@ inline int apr_fill_hdr(void *handle, uint32_t *buf, uint16_t src_port,
int apr_send_pkt(void *handle, uint32_t *buf);
int apr_deregister(void *handle);
-void subsys_notif_register(struct notifier_block *mod_notif,
- struct notifier_block *lp_notif);
+void subsys_notif_register(char *client_name, int domain,
+ struct notifier_block *nb);
int apr_get_dest_id(char *dest);
uint16_t apr_get_data_src(struct apr_hdr *hdr);
void change_q6_state(int state);
diff --git a/include/linux/qdsp6v2/audio_notifier.h b/include/linux/qdsp6v2/audio_notifier.h
new file mode 100644
index 000000000000..3587b49a05c6
--- /dev/null
+++ b/include/linux/qdsp6v2/audio_notifier.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __AUDIO_NOTIFIER_H_
+#define __AUDIO_NOTIFIER_H_
+
+/* State of the notifier domain */
+enum {
+ AUDIO_NOTIFIER_SERVICE_DOWN,
+ AUDIO_NOTIFIER_SERVICE_UP
+};
+
+/* Service order determines connection priority
+ * Highest number connected first
+ */
+enum {
+ AUDIO_NOTIFIER_SSR_SERVICE,
+ AUDIO_NOTIFIER_PDR_SERVICE,
+ AUDIO_NOTIFIER_MAX_SERVICES
+};
+
+enum {
+ AUDIO_NOTIFIER_ADSP_DOMAIN,
+ AUDIO_NOTIFIER_MODEM_DOMAIN,
+ AUDIO_NOTIFIER_MAX_DOMAINS
+};
+
+/* Structure populated in void *data of nb function
+ * callback used for audio_notifier_register
+ */
+struct audio_notifier_cb_data {
+ int service;
+ int domain;
+};
+
+#ifdef CONFIG_MSM_QDSP6_NOTIFIER
+
+/*
+ * Use audio_notifier_register to register any audio
+ * clients who need to be notified of events on a remote processor.
+ * This API will determine and register the client with
+ * the best available subsystem (SSR or PDR) for that
+ * domain (Adsp or Modem). When an event is sent from that
+ * domain the notifier block callback function will be called.
+ *
+ * client_name - A unique user name defined by the client.
+ * If the same name is used for multiple calls, each will
+ * be tracked and called back separately, and a single call
+ * to deregister will delete them all.
+ * domain - Domain the client wants to get events from.
+ * AUDIO_NOTIFIER_ADSP_DOMAIN
+ * AUDIO_NOTIFIER_MODEM_DOMAIN
+ * *nb - Pointer to a notifier block. Provide a callback function
+ * to be notified of an event on that domain.
+ *
+ * nb_func(struct notifier_block *this, unsigned long opcode, void *data)
+ * this - pointer to own nb
+ * opcode - event from registered domain
+ * AUDIO_NOTIFIER_SERVICE_DOWN
+ * AUDIO_NOTIFIER_SERVICE_UP
+ * *data - pointer to struct audio_notifier_cb_data
+ *
+ * Returns: Success: 0
+ * Error: -#
+ */
+int audio_notifier_register(char *client_name, int domain,
+ struct notifier_block *nb);
+
+/*
+ * Use audio_notifier_deregister to deregister the clients from
+ * all domains registered using audio_notifier_register that
+ * match the client name.
+ *
+ * client_name - Unique user name used in audio_notifier_register.
+ * Returns: Success: 0
+ * Error: -#
+ */
+int audio_notifier_deregister(char *client_name);
+
+#else
+
+static inline int audio_notifier_register(char *client_name, int domain,
+ struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int audio_notifier_deregister(char *client_name)
+{
+ return 0;
+}
+
+#endif /* CONFIG_MSM_QDSP6_NOTIFIER */
+
+#endif
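
A usage sketch of the notifier API above; the client name, callback body, and NOTIFY_OK return value (from linux/notifier.h) are illustrative:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/qdsp6v2/audio_notifier.h>

static int my_adsp_cb(struct notifier_block *this,
		      unsigned long opcode, void *data)
{
	struct audio_notifier_cb_data *cb = data;

	if (opcode == AUDIO_NOTIFIER_SERVICE_DOWN)
		pr_debug("service %d domain %d down\n",
			 cb->service, cb->domain);
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_adsp_cb,
};

static int my_register(void)
{
	/* "my_client" must be unique per user of the API */
	return audio_notifier_register("my_client",
				       AUDIO_NOTIFIER_ADSP_DOMAIN, &my_nb);
}
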
diff --git a/include/linux/qdsp6v2/audio_pdr.h b/include/linux/qdsp6v2/audio_pdr.h
new file mode 100644
index 000000000000..b8eb1be3ee64
--- /dev/null
+++ b/include/linux/qdsp6v2/audio_pdr.h
@@ -0,0 +1,101 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __AUDIO_PDR_H_
+#define __AUDIO_PDR_H_
+
+enum {
+ AUDIO_PDR_DOMAIN_ADSP,
+ AUDIO_PDR_DOMAIN_MAX
+};
+
+enum {
+ AUDIO_PDR_FRAMEWORK_DOWN,
+ AUDIO_PDR_FRAMEWORK_UP
+};
+
+#ifdef CONFIG_MSM_QDSP6_PDR
+
+/*
+ * Use audio_pdr_register to register with the PDR subsystem. This
+ * should be done before module late init; otherwise notification
+ * of the AUDIO_PDR_FRAMEWORK_UP event cannot be guaranteed.
+ *
+ * *nb - Pointer to a notifier block. Provide a callback function
+ * to be notified once the PDR framework has been initialized.
+ * Callback will receive either the AUDIO_PDR_FRAMEWORK_DOWN
+ * or AUDIO_PDR_FRAMEWORK_UP ioctl depending on the state of
+ * the PDR framework.
+ *
+ * Returns: Success: 0
+ * Failure: Error code
+ */
+int audio_pdr_register(struct notifier_block *nb);
+
+/*
+ * Use audio_pdr_service_register to register with a PDR service.
+ * The function should be called only after the nb callback registered
+ * with audio_pdr_register has been called back with the
+ * AUDIO_PDR_FRAMEWORK_UP ioctl.
+ *
+ * domain_id - Domain to use, example: AUDIO_PDR_DOMAIN_ADSP
+ * *nb - Pointer to a notifier block. Provide a callback function
+ * that will be notified of the state of the domain
+ * requested. The ioctls received by the callback are
+ * defined in service-notifier.h.
+ *
+ * Returns: Success: Client handle
+ * Failure: Pointer error code
+ */
+void *audio_pdr_service_register(int domain_id,
+ struct notifier_block *nb, int *curr_state);
+
+/*
+ * Use audio_pdr_service_deregister to deregister from a PDR
+ * service that was registered using the audio_pdr_service_register
+ * API.
+ *
+ * *service_handle - Service handle returned by audio_pdr_service_register
+ * *nb - Pointer to the notifier block used in the call to
+ *       audio_pdr_service_register.
+ *
+ * Returns: Success: 0
+ *          Failure: Error code
+ */
+int audio_pdr_service_deregister(void *service_handle,
+ struct notifier_block *nb);
+
+#else
+
+static inline int audio_pdr_register(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+
+static inline void *audio_pdr_service_register(int domain_id,
+ struct notifier_block *nb,
+ int *curr_state)
+{
+ return NULL;
+}
+
+static inline int audio_pdr_service_deregister(void *service_handle,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+#endif /* CONFIG_MSM_QDSP6_PDR */
+
+#endif
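
A sketch of the two-stage PDR flow described above: register early for the framework-up event, then register for the ADSP domain service. All names besides the API calls are illustrative:

#include <linux/notifier.h>
#include <linux/qdsp6v2/audio_pdr.h>

static void *my_pdr_handle;
static struct notifier_block my_service_nb;	/* callback not shown */

static int my_fw_cb(struct notifier_block *this,
		    unsigned long opcode, void *data)
{
	int curr_state;

	if (opcode == AUDIO_PDR_FRAMEWORK_UP)
		my_pdr_handle = audio_pdr_service_register(
				AUDIO_PDR_DOMAIN_ADSP,
				&my_service_nb, &curr_state);
	return NOTIFY_OK;
}

static struct notifier_block my_fw_nb = {
	.notifier_call = my_fw_cb,
};

static int my_init(void)
{
	/* must run before module late init, per the comment above */
	return audio_pdr_register(&my_fw_nb);
}
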
diff --git a/include/linux/qdsp6v2/audio_ssr.h b/include/linux/qdsp6v2/audio_ssr.h
new file mode 100644
index 000000000000..a807021ba7ca
--- /dev/null
+++ b/include/linux/qdsp6v2/audio_ssr.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __AUDIO_SSR_H_
+#define __AUDIO_SSR_H_
+
+enum {
+ AUDIO_SSR_DOMAIN_ADSP,
+ AUDIO_SSR_DOMAIN_MODEM,
+ AUDIO_SSR_DOMAIN_MAX
+};
+
+#ifdef CONFIG_MSM_QDSP6_SSR
+
+/*
+ * Use audio_ssr_register to register with the SSR subsystem
+ *
+ * domain_id - Service to use, example: AUDIO_SSR_DOMAIN_ADSP
+ * *nb - Pointer to a notifier block. Provide a callback function
+ * to be notified of an event for that service. The ioctls
+ * used by the callback are defined in subsystem_notif.h.
+ *
+ * Returns: Success: Client handle
+ * Failure: Pointer error code
+ */
+void *audio_ssr_register(int domain_id, struct notifier_block *nb);
+
+/*
+ * Use audio_ssr_deregister to deregister from the SSR subsystem
+ *
+ * handle - Handle received from audio_ssr_register
+ * *nb - Pointer to the notifier block used in the call to
+ *       audio_ssr_register.
+ *
+ * Returns: Success: 0
+ * Failure: Error code
+ */
+int audio_ssr_deregister(void *handle, struct notifier_block *nb);
+
+
+/*
+ * Use audio_ssr_send_nmi to force a RAM dump on ADSP
+ * down event.
+ *
+ * *ssr_cb_data - *data received from notifier callback
+ */
+void audio_ssr_send_nmi(void *ssr_cb_data);
+
+#else
+
+static inline void *audio_ssr_register(int domain_id,
+ struct notifier_block *nb)
+{
+ return NULL;
+}
+
+static inline int audio_ssr_deregister(void *handle, struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void audio_ssr_send_nmi(void *ssr_cb_data)
+{
+}
+
+#endif /* CONFIG_MSM_QDSP6_SSR */
+
+#endif
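
The equivalent SSR sketch; the handle returned by registration must be kept for deregistration, and the surrounding names are illustrative:

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/qdsp6v2/audio_ssr.h>

static void *my_ssr_handle;
static struct notifier_block my_ssr_nb;		/* callback not shown */

static int my_ssr_setup(void)
{
	my_ssr_handle = audio_ssr_register(AUDIO_SSR_DOMAIN_ADSP,
					   &my_ssr_nb);
	if (IS_ERR_OR_NULL(my_ssr_handle))
		return -ENODEV;
	return 0;
}

static void my_ssr_teardown(void)
{
	audio_ssr_deregister(my_ssr_handle, &my_ssr_nb);
}
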
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index b13ebe50c3d6..7c12823894df 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -212,6 +212,7 @@ struct pmic_revid_data {
u8 pmic_type;
u8 pmic_subtype;
const char *pmic_name;
+ int fab_id;
};
#ifdef CONFIG_QPNP_REVID
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index a63a33e6196e..ece7ed9a4a70 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -59,6 +59,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
}
extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_enter_start(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4701e0403167..a395d8a9ff73 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -363,6 +363,41 @@ extern cpumask_var_t cpu_isolated_map;
extern int runqueue_is_locked(int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_isolate_count(const cpumask_t *mask, bool include_offline);
+extern int sched_isolate_cpu(int cpu);
+extern int sched_unisolate_cpu(int cpu);
+extern int sched_unisolate_cpu_unlocked(int cpu);
+#else
+static inline int sched_isolate_count(const cpumask_t *mask,
+ bool include_offline)
+{
+ cpumask_t count_mask;
+
+ if (include_offline)
+ cpumask_andnot(&count_mask, mask, cpu_online_mask);
+ else
+ return 0;
+
+ return cpumask_weight(&count_mask);
+}
+
+static inline int sched_isolate_cpu(int cpu)
+{
+ return 0;
+}
+
+static inline int sched_unisolate_cpu(int cpu)
+{
+ return 0;
+}
+
+static inline int sched_unisolate_cpu_unlocked(int cpu)
+{
+ return 0;
+}
+#endif
+
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
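
A sketch of how the isolation API above might be driven; the cpu number and do_latency_sensitive_work() are hypothetical, and per the stubs everything degrades to a no-op without CONFIG_HOTPLUG_CPU:

#include <linux/sched.h>

static void my_isolate_demo(void)
{
	int cpu = 2;	/* illustrative */

	if (sched_isolate_cpu(cpu) == 0) {
		/*
		 * cpu_isolated(cpu) is now true; the scheduler and
		 * housekeeping code steer work away from it.
		 */
		do_latency_sensitive_work();	/* hypothetical */
		sched_unisolate_cpu(cpu);
	}
}
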
@@ -409,6 +444,8 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
+extern void watchdog_enable(unsigned int cpu);
+extern void watchdog_disable(unsigned int cpu);
#else
static inline void touch_softlockup_watchdog_sched(void)
{
@@ -425,6 +462,12 @@ static inline void touch_all_softlockup_watchdogs(void)
static inline void lockup_detector_init(void)
{
}
+static inline void watchdog_enable(unsigned int cpu)
+{
+}
+static inline void watchdog_disable(unsigned int cpu)
+{
+}
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 1f9c2c734b20..9fe71c774543 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -64,6 +64,7 @@ extern unsigned int sysctl_sched_pred_alert_freq;
extern unsigned int sysctl_sched_freq_aggregate;
extern unsigned int sysctl_sched_enable_thread_grouping;
extern unsigned int sysctl_sched_freq_aggregate_threshold_pct;
+extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;
#else /* CONFIG_SCHED_HMP */
@@ -120,6 +121,22 @@ extern int sysctl_sched_rt_runtime;
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif
+#ifdef CONFIG_SCHED_TUNE
+extern unsigned int sysctl_sched_cfs_boost;
+int sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos);
+static inline unsigned int get_sysctl_sched_cfs_boost(void)
+{
+ return sysctl_sched_cfs_boost;
+}
+#else
+static inline unsigned int get_sysctl_sched_cfs_boost(void)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
#endif
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 5bf3ddade19c..1732697ea419 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -161,7 +161,15 @@ extern void __tick_nohz_task_switch(void);
#else
static inline int housekeeping_any_cpu(void)
{
- return smp_processor_id();
+ cpumask_t available;
+ int cpu;
+
+ cpumask_andnot(&available, cpu_online_mask, cpu_isolated_mask);
+ cpu = cpumask_any(&available);
+ if (cpu >= nr_cpu_ids)
+ cpu = smp_processor_id();
+
+ return cpu;
}
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
@@ -187,7 +195,7 @@ static inline bool is_housekeeping_cpu(int cpu)
if (tick_nohz_full_enabled())
return cpumask_test_cpu(cpu, housekeeping_mask);
#endif
- return true;
+ return !cpu_isolated(cpu);
}
static inline void housekeeping_affine(struct task_struct *t)
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 7a5602e19e87..b1617e8932b2 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -182,6 +182,9 @@ extern void set_timer_slack(struct timer_list *time, int slack_hz);
*/
#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
+/* To be used only from cpusets */
+extern void timer_quiesce_cpu(void *cpup);
+
/*
* Timer-statistics info:
*/
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 3740366d9fc5..cef429cf3dce 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -502,7 +502,7 @@ extern void usb_hc_died(struct usb_hcd *hcd);
extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
extern void usb_wakeup_notification(struct usb_device *hdev,
unsigned int portnum);
-
+extern void usb_flush_hub_wq(void);
extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
index fb650ab2693f..75d0912aa459 100644
--- a/include/media/msm_cam_sensor.h
+++ b/include/media/msm_cam_sensor.h
@@ -2,6 +2,7 @@
#define __LINUX_MSM_CAM_SENSOR_H
#include <uapi/media/msm_cam_sensor.h>
+#include <uapi/media/msm_camsensor_sdk.h>
#include <linux/compat.h>
@@ -72,6 +73,16 @@ struct csid_cfg_data32 {
} cfg;
};
+struct msm_ir_led_cfg_data_t32 {
+ enum msm_ir_led_cfg_type_t cfg_type;
+ int32_t pwm_duty_on_ns;
+ int32_t pwm_period_ns;
+};
+
+struct msm_ir_cut_cfg_data_t32 {
+ enum msm_ir_cut_cfg_type_t cfg_type;
+};
+
struct eeprom_read_t32 {
compat_uptr_t dbuffer;
uint32_t num_bytes;
@@ -258,6 +269,12 @@ struct msm_flash_cfg_data_t32 {
#define VIDIOC_MSM_FLASH_CFG32 \
_IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct msm_flash_cfg_data_t32)
+
+#define VIDIOC_MSM_IR_LED_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 14, struct msm_ir_led_cfg_data_t32)
+
+#define VIDIOC_MSM_IR_CUT_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_cut_cfg_data_t32)
#endif
#endif
diff --git a/include/net/cnss.h b/include/net/cnss.h
index 7fce7db19e54..be58e32e6c7a 100644
--- a/include/net/cnss.h
+++ b/include/net/cnss.h
@@ -121,6 +121,8 @@ extern void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver);
extern int cnss_get_fw_files(struct cnss_fw_files *pfw_files);
extern int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
u32 target_type, u32 target_version);
+extern void cnss_get_qca9377_fw_files(struct cnss_fw_files *pfw_files,
+ u32 size, u32 tufello_dual_fw);
extern int cnss_request_bus_bandwidth(int bandwidth);
diff --git a/include/soc/qcom/core_ctl.h b/include/soc/qcom/core_ctl.h
deleted file mode 100644
index 08b43058b37c..000000000000
--- a/include/soc/qcom/core_ctl.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __SOC_QCOM_CORE_CTL_H
-#define __SOC_QCOM_CORE_CTL_H
-
-enum {
- CORE_CTL_EVAL_NEED,
- CORE_CTL_SET_BUSY,
- CORE_CTL_N_TRACE_EVENTS,
-};
-
-extern void core_ctl_block_hotplug(void);
-extern void core_ctl_unblock_hotplug(void);
-extern s64 core_ctl_get_time(void);
-extern struct cpufreq_policy *core_ctl_get_policy(int cpu);
-extern void core_ctl_put_policy(struct cpufreq_policy *policy);
-extern struct device *core_ctl_find_cpu_device(unsigned cpu);
-extern int core_ctl_online_core(unsigned int cpu);
-extern int core_ctl_offline_core(unsigned int cpu);
-
-#define USE_CORE_CTL_TRACE
-extern void core_ctl_trace(int type, int cpu, int arg1, int arg2, int arg3);
-
-#endif
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 8704b2e7cfbc..7e2f32883aa4 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -24,8 +24,6 @@ struct icnss_driver_ops {
void (*shutdown)(struct device *dev);
int (*reinit)(struct device *dev);
void (*crash_shutdown)(void *pdev);
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
int (*pm_suspend)(struct device *dev);
int (*pm_resume)(struct device *dev);
int (*suspend_noirq)(struct device *dev);
@@ -125,5 +123,6 @@ extern int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count,
u16 buf_len);
extern int icnss_wlan_set_dfs_nol(const void *info, u16 info_len);
extern int icnss_wlan_get_dfs_nol(void *info, u16 info_len);
+extern bool icnss_is_qmi_disable(void);
#endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index ffa65569ce18..59971c08ed74 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -37,6 +37,7 @@ enum vmid {
VMID_CP_APP = 0x12,
VMID_WLAN = 0x18,
VMID_WLAN_CE = 0x19,
+ VMID_CP_CAMERA_PREVIEW = 0x1D,
VMID_LAST,
VMID_INVAL = -1
};
diff --git a/include/soc/qcom/smem.h b/include/soc/qcom/smem.h
index b5425dd7eaea..4117b0d47b0d 100644
--- a/include/soc/qcom/smem.h
+++ b/include/soc/qcom/smem.h
@@ -22,11 +22,11 @@ enum {
SMEM_DSPS,
SMEM_WCNSS,
SMEM_MODEM_Q6_FW,
+ SMEM_CDSP = SMEM_MODEM_Q6_FW,
SMEM_RPM,
SMEM_TZ,
SMEM_SPSS,
SMEM_HYP,
- SMEM_CDSP,
NUM_SMEM_SUBSYSTEMS,
};
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index e1bdca690cc9..06b72b262395 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -2943,7 +2943,7 @@ struct asm_aac_enc_cfg_v2_t {
* number of channels at the input.
* The number of channels must not change during encoding.
*/
- uint32_t channel_cfg;
+ uint16_t channel_cfg;
/*
* Number of samples per second.
@@ -3678,6 +3678,8 @@ struct asm_softvolume_params {
#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3 0x00010DDC
+#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4 0x0001320C
+
#define ASM_MEDIA_FMT_EVRCB_FS 0x00010BEF
#define ASM_MEDIA_FMT_EVRCWB_FS 0x00010BF0
@@ -3780,6 +3782,56 @@ struct asm_multi_channel_pcm_fmt_blk_v3 {
*/
} __packed;
+struct asm_multi_channel_pcm_fmt_blk_v4 {
+ uint16_t num_channels;
+/*
+ * Number of channels
+ * Supported values: 1 to 8
+ */
+
+ uint16_t bits_per_sample;
+/*
+ * Number of bits per sample per channel
+ * Supported values: 16, 24, 32
+ */
+
+ uint32_t sample_rate;
+/*
+ * Number of samples per second
+ * Supported values: 2000 to 48000, 96000, 192000 Hz
+ */
+
+ uint16_t is_signed;
+/* Flag that indicates that PCM samples are signed (1) */
+
+ uint16_t sample_word_size;
+/*
+ * Size in bits of the word that holds a sample of a channel.
+ * Supported values: 12,24,32
+ */
+
+ uint8_t channel_mapping[8];
+/*
+ * Each element, i, in the array describes channel i inside the buffer where
+ * 0 <= i < num_channels. Unused channels are set to 0.
+ */
+ uint16_t endianness;
+/*
+ * Flag to indicate the endianness of the pcm sample
+ * Supported values: 0 - Little endian (all other formats)
+ * 1 - Big endian (AIFF)
+ */
+ uint16_t mode;
+/*
+ * Mode to provide additional info about the pcm input data.
+ * Supported values: 0 - Default QFs (Q15 for 16b, Q23 for packed 24b,
+ * Q31 for unpacked 24b or 32b)
+ * 15 - for 16 bit
+ * 23 - for 24b packed or 8.24 format
+ * 31 - for 24b unpacked or 32bit
+ */
+} __packed;
+
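For reference, a sketch of how the new v4 block might be filled for signed 24-bit stereo carried in 32-bit words; the values follow the field comments above, and the channel ids stand in for the PCM channel map definitions:

	struct asm_multi_channel_pcm_fmt_blk_v4 fmt = {
		.num_channels = 2,
		.bits_per_sample = 24,
		.sample_rate = 48000,
		.is_signed = 1,
		.sample_word_size = 32,		/* 24-bit samples in 32-bit words */
		.channel_mapping = { 1, 2 },	/* e.g. FL, FR; illustrative ids */
		.endianness = 0,		/* little endian */
		.mode = 0,			/* default QFs */
	};
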
/*
* Payload of the multichannel PCM configuration parameters in
* the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3 media format.
@@ -3790,6 +3842,16 @@ struct asm_multi_channel_pcm_fmt_blk_param_v3 {
struct asm_multi_channel_pcm_fmt_blk_v3 param;
} __packed;
+/*
+ * Payload of the multichannel PCM configuration parameters in
+ * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4 media format.
+ */
+struct asm_multi_channel_pcm_fmt_blk_param_v4 {
+ struct apr_hdr hdr;
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+ struct asm_multi_channel_pcm_fmt_blk_v4 param;
+} __packed;
+
struct asm_stream_cmd_set_encdec_param {
u32 param_id;
/* ID of the parameter. */
@@ -3825,6 +3887,79 @@ struct asm_dec_ddp_endp_param_v2 {
int endp_param_value;
} __packed;
+/*
+ * Payload of the multichannel PCM encoder configuration parameters in
+ * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4 media format.
+ */
+
+struct asm_multi_channel_pcm_enc_cfg_v4 {
+ struct apr_hdr hdr;
+ struct asm_stream_cmd_set_encdec_param encdec;
+ struct asm_enc_cfg_blk_param_v2 encblk;
+ uint16_t num_channels;
+ /*
+ * Number of PCM channels.
+ * @values
+ * - 0 -- Native mode
+	 * - 1 to 8 -- fixed number of channels
+ * Native mode indicates that encoding must be performed with the number
+ * of channels at the input.
+ */
+ uint16_t bits_per_sample;
+ /*
+ * Number of bits per sample per channel.
+ * @values 16, 24
+ */
+ uint32_t sample_rate;
+ /*
+ * Number of samples per second.
+ * @values 0, 8000 to 48000 Hz
+ * A value of 0 indicates the native sampling rate. Encoding is
+ * performed at the input sampling rate.
+ */
+ uint16_t is_signed;
+ /*
+ * Flag that indicates the PCM samples are signed (1). Currently, only
+ * signed PCM samples are supported.
+ */
+ uint16_t sample_word_size;
+ /*
+ * The size in bits of the word that holds a sample of a channel.
+ * @values 16, 24, 32
+	 * sample_word_size = 16.
+ * sample_word_size = 1.
+ * 24-bit samples can be placed in 32-bit words or in consecutive
+ * 24-bit words.
+ * - If sample_word_size = 32, 24-bit samples are placed in the
+ * most significant 24 bits of a 32-bit word.
+ * - If sample_word_size = 24, 24-bit samples are placed in
+ * 24-bit words. @tablebulletend
+ */
+ uint8_t channel_mapping[8];
+ /*
+ * Channel mapping array expected at the encoder output.
+ * Channel[i] mapping describes channel i inside the buffer, where
+ * 0 @le i < num_channels. All valid used channels must be present at
+ * the beginning of the array.
+ * If Native mode is set for the channels, this field is ignored.
+ * @values See Section @xref{dox:PcmChannelDefs}
+ */
+ uint16_t endianness;
+ /*
+ * Flag to indicate the endianness of the pcm sample
+ * Supported values: 0 - Little endian (all other formats)
+ * 1 - Big endian (AIFF)
+ */
+ uint16_t mode;
+ /*
+ * Mode to provide additional info about the pcm input data.
+ * Supported values: 0 - Default QFs (Q15 for 16b, Q23 for packed 24b,
+ * Q31 for unpacked 24b or 32b)
+ * 15 - for 16 bit
+ * 23 - for 24b packed or 8.24 format
+ * 31 - for 24b unpacked or 32bit
+ */
+} __packed;
/*
* Payload of the multichannel PCM encoder configuration parameters in
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
index 9ed6510cd0e1..31f7c02b54b3 100644
--- a/include/sound/q6afe-v2.h
+++ b/include/sound/q6afe-v2.h
@@ -281,7 +281,7 @@ void afe_set_cal_mode(u16 port_id, enum afe_cal_mode afe_cal_mode);
int afe_port_start(u16 port_id, union afe_port_config *afe_config,
u32 rate);
int afe_port_start_v2(u16 port_id, union afe_port_config *afe_config,
- u32 rate, u16 afe_in_channels,
+ u32 rate, u16 afe_in_channels, u16 afe_in_bit_width,
struct afe_enc_config *enc_config);
int afe_spk_prot_feed_back_cfg(int src_port, int dst_port,
int l_ch, int r_ch, u32 enable);
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 8525f2e7f738..f08bd73edb59 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -97,6 +97,24 @@
#define ASM_SHIFT_GAPLESS_MODE_FLAG 31
#define ASM_SHIFT_LAST_BUFFER_FLAG 30
+#define ASM_LITTLE_ENDIAN 0
+#define ASM_BIG_ENDIAN 1
+
+/* PCM_MEDIA_FORMAT_Version */
+enum {
+ PCM_MEDIA_FORMAT_V2 = 0,
+ PCM_MEDIA_FORMAT_V3,
+ PCM_MEDIA_FORMAT_V4,
+};
+
+/* PCM format modes in DSP */
+enum {
+ DEFAULT_QF = 0,
+ Q15 = 15,
+ Q23 = 23,
+ Q31 = 31,
+};
+
/* payload structure bytes */
#define READDONE_IDX_STATUS 0
#define READDONE_IDX_BUFADD_LSW 1
@@ -245,6 +263,9 @@ int q6asm_open_read_v2(struct audio_client *ac, uint32_t format,
int q6asm_open_read_v3(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample);
+int q6asm_open_read_v4(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample);
+
int q6asm_open_write(struct audio_client *ac, uint32_t format
/*, uint16_t bits_per_sample*/);
@@ -257,6 +278,9 @@ int q6asm_open_shared_io(struct audio_client *ac,
int q6asm_open_write_v3(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample);
+int q6asm_open_write_v4(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample);
+
int q6asm_stream_open_write_v2(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample, int32_t stream_id,
bool is_gapless_mode);
@@ -265,6 +289,10 @@ int q6asm_stream_open_write_v3(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample, int32_t stream_id,
bool is_gapless_mode);
+int q6asm_stream_open_write_v4(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample, int32_t stream_id,
+ bool is_gapless_mode);
+
int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
uint32_t passthrough_flag);
@@ -339,6 +367,8 @@ int q6asm_stream_cmd_nowait(struct audio_client *ac, int cmd,
void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac,
uint32_t *size, uint32_t *idx);
+int q6asm_cpu_buf_release(int dir, struct audio_client *ac);
+
void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
uint32_t *size, uint32_t *idx);
@@ -367,6 +397,13 @@ int q6asm_enc_cfg_blk_pcm_v3(struct audio_client *ac,
bool use_back_flavor, u8 *channel_map,
uint16_t sample_word_size);
+int q6asm_enc_cfg_blk_pcm_v4(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ uint16_t bits_per_sample, bool use_default_chmap,
+ bool use_back_flavor, u8 *channel_map,
+ uint16_t sample_word_size, uint16_t endianness,
+ uint16_t mode);
+
int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac,
uint32_t rate, uint32_t channels,
uint16_t bits_per_sample);
@@ -376,6 +413,13 @@ int q6asm_enc_cfg_blk_pcm_format_support_v3(struct audio_client *ac,
uint16_t bits_per_sample,
uint16_t sample_word_size);
+int q6asm_enc_cfg_blk_pcm_format_support_v4(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ uint16_t bits_per_sample,
+ uint16_t sample_word_size,
+ uint16_t endianness,
+ uint16_t mode);
+
int q6asm_set_encdec_chan_map(struct audio_client *ac,
uint32_t num_channels);
@@ -425,6 +469,17 @@ int q6asm_media_format_block_pcm_format_support_v3(struct audio_client *ac,
char *channel_map,
uint16_t sample_word_size);
+int q6asm_media_format_block_pcm_format_support_v4(struct audio_client *ac,
+ uint32_t rate,
+ uint32_t channels,
+ uint16_t bits_per_sample,
+ int stream_id,
+ bool use_default_chmap,
+ char *channel_map,
+ uint16_t sample_word_size,
+ uint16_t endianness,
+ uint16_t mode);
+
int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels,
bool use_default_chmap, char *channel_map);
@@ -442,6 +497,15 @@ int q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac,
uint16_t bits_per_sample,
uint16_t sample_word_size);
+int q6asm_media_format_block_multi_ch_pcm_v4(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ bool use_default_chmap,
+ char *channel_map,
+ uint16_t bits_per_sample,
+ uint16_t sample_word_size,
+ uint16_t endianness,
+ uint16_t mode);
+
int q6asm_media_format_block_aac(struct audio_client *ac,
struct asm_aac_cfg *cfg);
diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h
index 5adcbcf660ba..2beb9b38a46a 100644
--- a/include/sound/wcd-dsp-mgr.h
+++ b/include/sound/wcd-dsp-mgr.h
@@ -36,6 +36,9 @@ enum wdsp_cmpnt_type {
};
enum wdsp_event_type {
+ /* Initialization related */
+ WDSP_EVENT_POST_INIT,
+
/* Image download related */
WDSP_EVENT_PRE_DLOAD_CODE,
WDSP_EVENT_DLOAD_SECTION,
@@ -44,6 +47,8 @@ enum wdsp_event_type {
WDSP_EVENT_POST_DLOAD_DATA,
WDSP_EVENT_DLOAD_FAILED,
+ WDSP_EVENT_READ_SECTION,
+
/* DSP boot related */
WDSP_EVENT_PRE_BOOTUP,
WDSP_EVENT_DO_BOOT,
@@ -60,8 +65,14 @@ enum wdsp_event_type {
WDSP_EVENT_RESUME,
};
-enum wdsp_intr {
+enum wdsp_signal {
+ /* Hardware generated interrupts signalled to manager */
WDSP_IPC1_INTR,
+ WDSP_ERR_INTR,
+
+ /* Other signals */
+ WDSP_CDC_DOWN_SIGNAL,
+ WDSP_CDC_UP_SIGNAL,
};
/*
@@ -86,14 +97,21 @@ struct wdsp_img_section {
u8 *data;
};
+struct wdsp_err_signal_arg {
+ bool mem_dumps_enabled;
+ u32 remote_start_addr;
+ size_t dump_size;
+};
+
/*
* wdsp_ops: ops/function callbacks for manager driver
* @register_cmpnt_ops: components will use this to register
* their own ops to manager driver
* @get_dev_for_cmpnt: components can use this to get handle
* to struct device * of any other component
- * @intr_handler: callback to notify manager driver that interrupt
- * has occurred.
+ * @signal_handler: callback to notify manager driver that signal
+ * has occurred. Cannot be called from interrupt
+ * context as this can sleep
* @vote_for_dsp: notifies manager that dsp should be booted up
* @suspend: notifies manager that one component wants to suspend.
* Manager will make sure to suspend all components in order
@@ -108,8 +126,8 @@ struct wdsp_mgr_ops {
struct wdsp_cmpnt_ops *ops);
struct device *(*get_dev_for_cmpnt)(struct device *wdsp_dev,
enum wdsp_cmpnt_type type);
- int (*intr_handler)(struct device *wdsp_dev,
- enum wdsp_intr intr);
+ int (*signal_handler)(struct device *wdsp_dev,
+ enum wdsp_signal signal, void *arg);
int (*vote_for_dsp)(struct device *wdsp_dev, bool vote);
int (*suspend)(struct device *wdsp_dev);
int (*resume)(struct device *wdsp_dev);
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 2df03af6f328..e8bc27f3467e 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -1082,49 +1082,6 @@ DEFINE_EVENT(perf_cl_peak_timer_status, perf_cl_peak_exit_timer_stop,
timer_rate, mode)
);
-TRACE_EVENT(core_ctl_eval_need,
-
- TP_PROTO(unsigned int cpu, unsigned int old_need,
- unsigned int new_need, unsigned int updated),
- TP_ARGS(cpu, old_need, new_need, updated),
- TP_STRUCT__entry(
- __field(u32, cpu)
- __field(u32, old_need)
- __field(u32, new_need)
- __field(u32, updated)
- ),
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->old_need = old_need;
- __entry->new_need = new_need;
- __entry->updated = updated;
- ),
- TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu,
- __entry->old_need, __entry->new_need, __entry->updated)
-);
-
-TRACE_EVENT(core_ctl_set_busy,
-
- TP_PROTO(unsigned int cpu, unsigned int busy,
- unsigned int old_is_busy, unsigned int is_busy),
- TP_ARGS(cpu, busy, old_is_busy, is_busy),
- TP_STRUCT__entry(
- __field(u32, cpu)
- __field(u32, busy)
- __field(u32, old_is_busy)
- __field(u32, is_busy)
- ),
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->busy = busy;
- __entry->old_is_busy = old_is_busy;
- __entry->is_busy = is_busy;
- ),
- TP_printk("cpu=%u, busy=%u, old_is_busy=%u, new_is_busy=%u",
- __entry->cpu, __entry->busy, __entry->old_is_busy,
- __entry->is_busy)
-);
-
#endif /* _TRACE_POWER_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 1ef5ec3eaf70..daf69b7df534 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1244,6 +1244,84 @@ TRACE_EVENT(sched_get_nr_running_avg,
__entry->avg, __entry->big_avg, __entry->iowait_avg)
);
+TRACE_EVENT(core_ctl_eval_need,
+
+ TP_PROTO(unsigned int cpu, unsigned int old_need,
+ unsigned int new_need, unsigned int updated),
+ TP_ARGS(cpu, old_need, new_need, updated),
+ TP_STRUCT__entry(
+ __field(u32, cpu)
+ __field(u32, old_need)
+ __field(u32, new_need)
+ __field(u32, updated)
+ ),
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->old_need = old_need;
+ __entry->new_need = new_need;
+ __entry->updated = updated;
+ ),
+ TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu,
+ __entry->old_need, __entry->new_need, __entry->updated)
+);
+
+TRACE_EVENT(core_ctl_set_busy,
+
+ TP_PROTO(unsigned int cpu, unsigned int busy,
+ unsigned int old_is_busy, unsigned int is_busy),
+ TP_ARGS(cpu, busy, old_is_busy, is_busy),
+ TP_STRUCT__entry(
+ __field(u32, cpu)
+ __field(u32, busy)
+ __field(u32, old_is_busy)
+ __field(u32, is_busy)
+ ),
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->busy = busy;
+ __entry->old_is_busy = old_is_busy;
+ __entry->is_busy = is_busy;
+ ),
+ TP_printk("cpu=%u, busy=%u, old_is_busy=%u, new_is_busy=%u",
+ __entry->cpu, __entry->busy, __entry->old_is_busy,
+ __entry->is_busy)
+);
+
+/**
+ * sched_isolate - called when cores are isolated/unisolated
+ *
+ * @requested_cpu: cpu requested to be isolated/unisolated
+ * @isolated_cpus: bitmask of the cpus currently isolated
+ * @start_time: sched_clock() timestamp taken when the operation began;
+ *              the event records the elapsed time in us
+ * @isolate: 1 if isolating, 0 if unisolating
+ *
+ */
+TRACE_EVENT(sched_isolate,
+
+ TP_PROTO(unsigned int requested_cpu, unsigned int isolated_cpus,
+ u64 start_time, unsigned char isolate),
+
+ TP_ARGS(requested_cpu, isolated_cpus, start_time, isolate),
+
+ TP_STRUCT__entry(
+ __field(u32, requested_cpu)
+ __field(u32, isolated_cpus)
+ __field(u32, time)
+ __field(unsigned char, isolate)
+ ),
+
+ TP_fast_assign(
+ __entry->requested_cpu = requested_cpu;
+ __entry->isolated_cpus = isolated_cpus;
+ __entry->time = div64_u64(sched_clock() - start_time, 1000);
+ __entry->isolate = isolate;
+ ),
+
+ TP_printk("iso cpu=%u cpus=0x%x time=%u us isolated=%d",
+ __entry->requested_cpu, __entry->isolated_cpus,
+ __entry->time, __entry->isolate)
+);
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h
index 691df1b2689b..97eefc665130 100644
--- a/include/trace/events/trace_msm_low_power.h
+++ b/include/trace/events/trace_msm_low_power.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -43,6 +43,54 @@ TRACE_EVENT(cpu_power_select,
__entry->next_event_us)
);
+TRACE_EVENT(cpu_pred_select,
+
+ TP_PROTO(u32 predtype, u64 predicted, u32 tmr_time),
+
+ TP_ARGS(predtype, predicted, tmr_time),
+
+ TP_STRUCT__entry(
+ __field(u32, predtype)
+ __field(u64, predicted)
+ __field(u32, tmr_time)
+ ),
+
+ TP_fast_assign(
+ __entry->predtype = predtype;
+ __entry->predicted = predicted;
+ __entry->tmr_time = tmr_time;
+ ),
+
+ TP_printk("pred:%u time:%lu tmr_time:%u",
+ __entry->predtype, (unsigned long)__entry->predicted,
+ __entry->tmr_time)
+);
+
+TRACE_EVENT(cpu_pred_hist,
+
+ TP_PROTO(int idx, u32 resi, u32 sample, u32 tmr),
+
+ TP_ARGS(idx, resi, sample, tmr),
+
+ TP_STRUCT__entry(
+ __field(int, idx)
+ __field(u32, resi)
+ __field(u32, sample)
+ __field(u32, tmr)
+ ),
+
+ TP_fast_assign(
+ __entry->idx = idx;
+ __entry->resi = resi;
+ __entry->sample = sample;
+ __entry->tmr = tmr;
+ ),
+
+ TP_printk("idx:%d resi:%u sample:%u tmr:%u",
+ __entry->idx, __entry->resi,
+ __entry->sample, __entry->tmr)
+);
+
TRACE_EVENT(cpu_idle_enter,
TP_PROTO(int index),
@@ -144,6 +192,64 @@ TRACE_EVENT(cluster_exit,
__entry->from_idle)
);
+TRACE_EVENT(cluster_pred_select,
+
+ TP_PROTO(const char *name, int index, u32 sleep_us,
+ u32 latency, int pred, u32 pred_us),
+
+ TP_ARGS(name, index, sleep_us, latency, pred, pred_us),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, index)
+ __field(u32, sleep_us)
+ __field(u32, latency)
+ __field(int, pred)
+ __field(u32, pred_us)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->index = index;
+ __entry->sleep_us = sleep_us;
+ __entry->latency = latency;
+ __entry->pred = pred;
+ __entry->pred_us = pred_us;
+ ),
+
+ TP_printk("name:%s idx:%d sleep_time:%u latency:%u pred:%d pred_us:%u",
+ __entry->name, __entry->index, __entry->sleep_us,
+ __entry->latency, __entry->pred, __entry->pred_us)
+);
+
+TRACE_EVENT(cluster_pred_hist,
+
+ TP_PROTO(const char *name, int idx, u32 resi,
+ u32 sample, u32 tmr),
+
+ TP_ARGS(name, idx, resi, sample, tmr),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, idx)
+ __field(u32, resi)
+ __field(u32, sample)
+ __field(u32, tmr)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->idx = idx;
+ __entry->resi = resi;
+ __entry->sample = sample;
+ __entry->tmr = tmr;
+ ),
+
+ TP_printk("name:%s idx:%d resi:%u sample:%u tmr:%u",
+ __entry->name, __entry->idx, __entry->resi,
+ __entry->sample, __entry->tmr)
+);
+
TRACE_EVENT(pre_pc_cb,
TP_PROTO(int tzflag),
diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h
index 427e4899ed69..a768696c90f8 100644
--- a/include/uapi/linux/dvb/dmx.h
+++ b/include/uapi/linux/dvb/dmx.h
@@ -32,6 +32,11 @@
#define DMX_FILTER_SIZE 16
+/* Min recording chunk upon which event is generated */
+#define DMX_REC_BUFF_CHUNK_MIN_SIZE (100*188)
+
+#define DMX_MAX_DECODER_BUFFER_NUM (32)
+
enum dmx_output
{
DMX_OUT_DECODER, /* Streaming directly to decoder. */
@@ -108,6 +113,41 @@ struct dmx_sct_filter_params
#define DMX_KERNEL_CLIENT 0x8000
};
+enum dmx_video_codec {
+ DMX_VIDEO_CODEC_MPEG2,
+ DMX_VIDEO_CODEC_H264,
+ DMX_VIDEO_CODEC_VC1
+};
+
+/* Index entries types */
+#define DMX_IDX_RAI 0x00000001
+#define DMX_IDX_PUSI 0x00000002
+#define DMX_IDX_MPEG_SEQ_HEADER 0x00000004
+#define DMX_IDX_MPEG_GOP 0x00000008
+#define DMX_IDX_MPEG_FIRST_SEQ_FRAME_START 0x00000010
+#define DMX_IDX_MPEG_FIRST_SEQ_FRAME_END 0x00000020
+#define DMX_IDX_MPEG_I_FRAME_START 0x00000040
+#define DMX_IDX_MPEG_I_FRAME_END 0x00000080
+#define DMX_IDX_MPEG_P_FRAME_START 0x00000100
+#define DMX_IDX_MPEG_P_FRAME_END 0x00000200
+#define DMX_IDX_MPEG_B_FRAME_START 0x00000400
+#define DMX_IDX_MPEG_B_FRAME_END 0x00000800
+#define DMX_IDX_H264_SPS 0x00001000
+#define DMX_IDX_H264_PPS 0x00002000
+#define DMX_IDX_H264_FIRST_SPS_FRAME_START 0x00004000
+#define DMX_IDX_H264_FIRST_SPS_FRAME_END 0x00008000
+#define DMX_IDX_H264_IDR_START 0x00010000
+#define DMX_IDX_H264_IDR_END 0x00020000
+#define DMX_IDX_H264_NON_IDR_START 0x00040000
+#define DMX_IDX_H264_NON_IDR_END 0x00080000
+#define DMX_IDX_VC1_SEQ_HEADER 0x00100000
+#define DMX_IDX_VC1_ENTRY_POINT 0x00200000
+#define DMX_IDX_VC1_FIRST_SEQ_FRAME_START 0x00400000
+#define DMX_IDX_VC1_FIRST_SEQ_FRAME_END 0x00800000
+#define DMX_IDX_VC1_FRAME_START 0x01000000
+#define DMX_IDX_VC1_FRAME_END 0x02000000
+#define DMX_IDX_H264_ACCESS_UNIT_DEL 0x04000000
+#define DMX_IDX_H264_SEI 0x08000000
struct dmx_pes_filter_params
{
@@ -116,11 +156,457 @@ struct dmx_pes_filter_params
dmx_output_t output;
dmx_pes_type_t pes_type;
__u32 flags;
+
+ /*
+ * The following configures when the event
+ * DMX_EVENT_NEW_REC_CHUNK will be triggered.
+ * When new recorded data is received with size
+	 * When new recorded data is received with size
+	 * equal to or larger than this value, a new event
+	 * will be triggered. This is relevant only when
+	 * output is DMX_OUT_TS_TAP or DMX_OUT_TSDEMUX_TAP;
+	 * the size must be at least DMX_REC_BUFF_CHUNK_MIN_SIZE
+	 * and smaller than the buffer size.
+ __u32 rec_chunk_size;
+
+ enum dmx_video_codec video_codec;
+};
+
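A userspace sketch of the new rec_chunk_size field: a TS-tap recording filter that raises DMX_EVENT_NEW_REC_CHUNK roughly every 64 KB. The fd, PID, and chunk size are illustrative; the chunk size must be at least DMX_REC_BUFF_CHUNK_MIN_SIZE and smaller than the buffer:

#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

static int start_recording(int fd)
{
	struct dmx_pes_filter_params p = {
		.pid = 0x100,			/* illustrative */
		.input = DMX_IN_FRONTEND,
		.output = DMX_OUT_TS_TAP,
		.pes_type = DMX_PES_OTHER,
		.flags = DMX_IMMEDIATE_START,
		.rec_chunk_size = 348 * 188,	/* ~64 KB, >= the minimum */
	};

	return ioctl(fd, DMX_SET_PES_FILTER, &p);
}
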
+struct dmx_buffer_status {
+ /* size of buffer in bytes */
+ unsigned int size;
+
+ /* fullness of buffer in bytes */
+ unsigned int fullness;
+
+ /*
+ * How many bytes are free
+	 * How many bytes are free;
+	 * the same as: size - fullness - 1
+ unsigned int free_bytes;
+
+ /* read pointer offset in bytes */
+ unsigned int read_offset;
+
+ /* write pointer offset in bytes */
+ unsigned int write_offset;
+
+ /* non-zero if data error occurred */
+ int error;
+};
+
+/* Events associated with each demux filter */
+enum dmx_event {
+ /* New PES packet is ready to be consumed */
+ DMX_EVENT_NEW_PES = 0x00000001,
+
+ /* New section is ready to be consumed */
+ DMX_EVENT_NEW_SECTION = 0x00000002,
+
+ /* New recording chunk is ready to be consumed */
+ DMX_EVENT_NEW_REC_CHUNK = 0x00000004,
+
+ /* New PCR value is ready */
+ DMX_EVENT_NEW_PCR = 0x00000008,
+
+ /* Overflow */
+ DMX_EVENT_BUFFER_OVERFLOW = 0x00000010,
+
+ /* Section was dropped due to CRC error */
+ DMX_EVENT_SECTION_CRC_ERROR = 0x00000020,
+
+ /* End-of-stream, no more data from this filter */
+ DMX_EVENT_EOS = 0x00000040,
+
+ /* New Elementary Stream data is ready */
+ DMX_EVENT_NEW_ES_DATA = 0x00000080,
+
+ /* Data markers */
+ DMX_EVENT_MARKER = 0x00000100,
+
+ /* New indexing entry is ready */
+ DMX_EVENT_NEW_INDEX_ENTRY = 0x00000200,
+
+ /*
+ * Section filter timer expired. This is notified
+ * when timeout is configured to section filter
+ * (dmx_sct_filter_params) and no sections were
+ * received for the given time.
+ */
+ DMX_EVENT_SECTION_TIMEOUT = 0x00000400,
+
+ /* Scrambling bits change between clear and scrambled */
+ DMX_EVENT_SCRAMBLING_STATUS_CHANGE = 0x00000800
+};
+
+enum dmx_oob_cmd {
+ /* End-of-stream, no more data from this filter */
+ DMX_OOB_CMD_EOS,
+
+ /* Data markers */
+ DMX_OOB_CMD_MARKER,
+};
+
+/* Flags passed in filter events */
+
+/* Continuity counter error was detected */
+#define DMX_FILTER_CC_ERROR 0x01
+
+/* Discontinuity indicator was set */
+#define DMX_FILTER_DISCONTINUITY_INDICATOR 0x02
+
+/* PES length in PES header is not correct */
+#define DMX_FILTER_PES_LENGTH_ERROR 0x04
+
+
+/* PES info associated with DMX_EVENT_NEW_PES event */
+struct dmx_pes_event_info {
+ /* Offset at which PES information starts */
+ __u32 base_offset;
+
+ /*
+ * Start offset at which PES data
+ * from the stream starts.
+ * Equal to base_offset if PES data
+ * starts from the beginning.
+ */
+ __u32 start_offset;
+
+ /* Total length holding the PES information */
+ __u32 total_length;
+
+ /* Actual length holding the PES data */
+ __u32 actual_length;
+
+ /* Local receiver timestamp in 27MHz */
+ __u64 stc;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+
+ /*
+ * Number of TS packets with Transport Error Indicator (TEI)
+ * found while constructing the PES.
+ */
+ __u32 transport_error_indicator_counter;
+
+ /* Number of continuity errors found while constructing the PES */
+ __u32 continuity_error_counter;
+
+ /* Total number of TS packets holding the PES */
+ __u32 ts_packets_num;
+};
+
+/* Section info associated with DMX_EVENT_NEW_SECTION event */
+struct dmx_section_event_info {
+ /* Offset at which section information starts */
+ __u32 base_offset;
+
+ /*
+ * Start offset at which section data
+ * from the stream starts.
+ * Equal to base_offset if section data
+ * starts from the beginning.
+ */
+ __u32 start_offset;
+
+ /* Total length holding the section information */
+ __u32 total_length;
+
+ /* Actual length holding the section data */
+ __u32 actual_length;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+};
+
+/* Recording info associated with DMX_EVENT_NEW_REC_CHUNK event */
+struct dmx_rec_chunk_event_info {
+ /* Offset at which recording chunk starts */
+ __u32 offset;
+
+ /* Size of recording chunk in bytes */
+ __u32 size;
+};
+
+/* PCR info associated with DMX_EVENT_NEW_PCR event */
+struct dmx_pcr_event_info {
+ /* Local timestamp in 27MHz
+ * when PCR packet was received
+ */
+ __u64 stc;
+
+ /* PCR value in 27MHz */
+ __u64 pcr;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+};
+
+/*
+ * Elementary stream data information associated
+ * with DMX_EVENT_NEW_ES_DATA event
+ */
+struct dmx_es_data_event_info {
+ /* Buffer user-space handle */
+ int buf_handle;
+
+ /*
+ * Cookie to provide when releasing the buffer
+ * using the DMX_RELEASE_DECODER_BUFFER ioctl command
+ */
+ int cookie;
+
+ /* Offset of data from the beginning of the buffer */
+ __u32 offset;
+
+ /* Length of data in buffer (in bytes) */
+ __u32 data_len;
+
+ /* Indication whether PTS value is valid */
+ int pts_valid;
+
+ /* PTS value associated with the buffer */
+ __u64 pts;
+
+ /* Indication whether DTS value is valid */
+ int dts_valid;
+
+ /* DTS value associated with the buffer */
+ __u64 dts;
+
+ /* STC value associated with the buffer in 27MHz */
+ __u64 stc;
+
+ /*
+ * Number of TS packets with Transport Error Indicator (TEI) set
+ * in the TS packet header since last reported event
+ */
+ __u32 transport_error_indicator_counter;
+
+ /* Number of continuity errors since last reported event */
+ __u32 continuity_error_counter;
+
+ /* Total number of TS packets processed since last reported event */
+ __u32 ts_packets_num;
+
+ /*
+ * Number of dropped bytes due to insufficient buffer space,
+ * since last reported event
+ */
+ __u32 ts_dropped_bytes;
+};
+
+/* Marker details associated with DMX_EVENT_MARKER event */
+struct dmx_marker_event_info {
+ /* Marker id */
+ __u64 id;
+};
+
+/* Indexing information associated with DMX_EVENT_NEW_INDEX_ENTRY event */
+struct dmx_index_event_info {
+ /* Index entry type, one of DMX_IDX_* */
+ __u64 type;
+
+ /*
+ * The PID the index entry belongs to.
+	 * In the case of a recording filter, multiple PIDs may exist in the
+	 * same filter through the DMX_ADD_PID ioctl, and each can be indexed
+	 * separately.
+ */
+ __u16 pid;
+
+ /*
+ * The TS packet number in the recorded data at which
+ * the indexing event is found.
+ */
+ __u64 match_tsp_num;
+
+ /*
+	 * The TS packet number in the recorded data that precedes
+	 * match_tsp_num and has the PUSI flag set.
+ */
+ __u64 last_pusi_tsp_num;
+
+ /* STC associated with match_tsp_num, in 27MHz */
+ __u64 stc;
+};
+
+/* Scrambling information associated with DMX_EVENT_SCRAMBLING_STATUS_CHANGE */
+struct dmx_scrambling_status_event_info {
+ /*
+	 * The PID whose scrambling bits status changed.
+	 * In the case of a recording filter, multiple PIDs may exist in the
+	 * same filter through the DMX_ADD_PID ioctl, and each may have a
+	 * different scrambling bits status.
+ */
+ __u16 pid;
+
+ /* old value of scrambling bits */
+ __u8 old_value;
+
+ /* new value of scrambling bits */
+ __u8 new_value;
+};
+
+/*
+ * Filter's event returned through DMX_GET_EVENT.
+ * poll() with POLLPRI blocks until events are available.
+ */
+struct dmx_filter_event {
+ enum dmx_event type;
+
+ union {
+ struct dmx_pes_event_info pes;
+ struct dmx_section_event_info section;
+ struct dmx_rec_chunk_event_info recording_chunk;
+ struct dmx_pcr_event_info pcr;
+ struct dmx_es_data_event_info es_data;
+ struct dmx_marker_event_info marker;
+ struct dmx_index_event_info index;
+ struct dmx_scrambling_status_event_info scrambling_status;
+ } params;
+};
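A sketch of the intended consumption pattern; the queue-drain convention (the ioctl failing once the queue is empty) is an assumption about this vendor extension:

#include <poll.h>
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

/* Block until an event is pending, then drain the queue. */
static int drain_filter_events(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
	struct dmx_filter_event ev;

	if (poll(&pfd, 1, -1) < 0)
		return -1;

	while (ioctl(fd, DMX_GET_EVENT, &ev) == 0) {
		switch (ev.type) {
		case DMX_EVENT_NEW_REC_CHUNK:
			/* consume ev.params.recording_chunk.offset/.size */
			break;
		case DMX_EVENT_BUFFER_OVERFLOW:
			return -1;	/* the overflow event can't be masked */
		default:
			break;
		}
	}
	return 0;
}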
+
+/* Filter's buffer requirement returned in dmx_caps */
+struct dmx_buffer_requirement {
+ /* Buffer size alignment, 0 means no special requirement */
+ __u32 size_alignment;
+
+ /* Maximum buffer size allowed */
+ __u32 max_size;
+
+ /* Maximum number of linear buffers handled by demux */
+ __u32 max_buffer_num;
+
+ /* Feature support bitmap as detailed below */
+ __u32 flags;
+
+/* Buffer must be allocated as physically contiguous memory */
+#define DMX_BUFFER_CONTIGUOUS_MEM 0x1
+
+/* If the filter's data is decrypted, the buffer should be a secured one */
+#define DMX_BUFFER_SECURED_IF_DECRYPTED 0x2
+
+/* Buffer can be allocated externally */
+#define DMX_BUFFER_EXTERNAL_SUPPORT 0x4
+
+/* Buffer can be allocated internally */
+#define DMX_BUFFER_INTERNAL_SUPPORT 0x8
+
+/* Filter output can be output to a linear buffer group */
+#define DMX_BUFFER_LINEAR_GROUP_SUPPORT 0x10
+
+/* Buffer may be allocated as cached buffer */
+#define DMX_BUFFER_CACHED 0x20
+};
+
+/* Out-of-band (OOB) command */
+struct dmx_oob_command {
+ enum dmx_oob_cmd type;
+
+ union {
+ struct dmx_marker_event_info marker;
+ } params;
};
typedef struct dmx_caps {
__u32 caps;
+
+/* Indicates whether the demux supports playback from memory in pull mode */
+#define DMX_CAP_PULL_MODE 0x01
+
+/* Indicates whether the demux supports indexing of a recorded video stream */
+#define DMX_CAP_VIDEO_INDEXING 0x02
+
+/* Indicates whether the demux supports sending data directly to a video decoder */
+#define DMX_CAP_VIDEO_DECODER_DATA 0x04
+
+/* Indicates whether the demux supports sending data directly to an audio decoder */
+#define DMX_CAP_AUDIO_DECODER_DATA 0x08
+
+/* Indicates whether the demux supports sending data directly to a subtitle decoder */
+#define DMX_CAP_SUBTITLE_DECODER_DATA 0x10
+
+/* Indicates whether TS insertion is supported */
+#define DMX_CAP_TS_INSERTION 0x20
+
+/* Indicates whether playback from secured input is supported */
+#define DMX_CAP_SECURED_INPUT_PLAYBACK 0x40
+
+/* Indicates whether automatic buffer flush upon overflow is allowed */
+#define DMX_CAP_AUTO_BUFFER_FLUSH 0x80
+
+ /* Number of decoders demux can output data to */
int num_decoders;
+
+ /* Number of demux devices */
+ int num_demux_devices;
+
+ /* Max number of PID filters */
+ int num_pid_filters;
+
+ /* Max number of section filters */
+ int num_section_filters;
+
+ /*
+	 * Max number of section filters using the same PID;
+	 * 0 if not supported
+ */
+ int num_section_filters_per_pid;
+
+ /*
+ * Length of section filter, not including section
+ * length field (2 bytes).
+ */
+ int section_filter_length;
+
+	/* Max number of demod-based inputs */
+ int num_demod_inputs;
+
+	/* Max number of memory-based inputs */
+ int num_memory_inputs;
+
+	/* Overall bitrate from all inputs concurrently, in Mbit/sec */
+ int max_bitrate;
+
+	/* Max bitrate from a single demod input, in Mbit/sec */
+ int demod_input_max_bitrate;
+
+	/* Max bitrate from a single memory input, in Mbit/sec */
+ int memory_input_max_bitrate;
+
+ /* Max number of supported cipher operations per PID */
+ int num_cipher_ops;
+
+ /* Max possible value of STC reported by demux, in 27MHz */
+ __u64 max_stc;
+
+ /*
+ * For indexing support (DMX_CAP_VIDEO_INDEXING capability) this is
+	 * the max number of video PIDs that can be indexed for a single
+	 * recording filter. A value of 0 means there is no limit.
+ */
+ int recording_max_video_pids_indexed;
+
+ struct dmx_buffer_requirement section;
+
+ /* For PES not sent to decoder */
+ struct dmx_buffer_requirement pes;
+
+ /* For PES sent to decoder */
+ struct dmx_buffer_requirement decoder;
+
+ /* Recording buffer for recording of 188 bytes packets */
+ struct dmx_buffer_requirement recording_188_tsp;
+
+ /* Recording buffer for recording of 192 bytes packets */
+ struct dmx_buffer_requirement recording_192_tsp;
+
+ /* DVR input buffer for playback of 188 bytes packets */
+ struct dmx_buffer_requirement playback_188_tsp;
+
+ /* DVR input buffer for playback of 192 bytes packets */
+ struct dmx_buffer_requirement playback_192_tsp;
} dmx_caps_t;
typedef enum dmx_source {
@@ -134,12 +620,229 @@ typedef enum dmx_source {
DMX_SOURCE_DVR3
} dmx_source_t;
+enum dmx_tsp_format_t {
+ DMX_TSP_FORMAT_188 = 0,
+ DMX_TSP_FORMAT_192_TAIL,
+ DMX_TSP_FORMAT_192_HEAD,
+ DMX_TSP_FORMAT_204,
+};
+
+enum dmx_playback_mode_t {
+ /*
+	 * In push mode, if one of the output buffers
+	 * is full, that buffer overflows and the demux
+	 * continues processing the incoming stream.
+	 * This is the default mode. When playing from the
+	 * frontend, this is the only mode that is allowed.
+ */
+ DMX_PB_MODE_PUSH = 0,
+
+ /*
+	 * In pull mode, if one of the output buffers
+	 * is full, the demux stalls waiting for free space;
+	 * this causes the DVR input buffer fullness
+	 * to accumulate.
+	 * This mode is possible only when playing
+	 * from the DVR.
+ */
+ DMX_PB_MODE_PULL,
+};
+
struct dmx_stc {
unsigned int num; /* input : which STC? 0..N */
unsigned int base; /* output: divisor for stc to get 90 kHz clock */
__u64 stc; /* output: stc in 'base'*90 kHz units */
};
+enum dmx_buffer_mode {
+ /*
+ * demux buffers are allocated internally
+ * by the demux driver. This is the default mode.
+ * DMX_SET_BUFFER_SIZE can be used to set the size of
+ * this buffer.
+ */
+ DMX_BUFFER_MODE_INTERNAL,
+
+ /*
+ * demux buffers are allocated externally and provided
+ * to demux through DMX_SET_BUFFER.
+	 * When this mode is used, DMX_SET_BUFFER_SIZE and
+ * mmap are prohibited.
+ */
+ DMX_BUFFER_MODE_EXTERNAL,
+};
+
+struct dmx_buffer {
+ unsigned int size;
+ int handle;
+
+ /*
+	 * The following indication is relevant only when setting the
+	 * DVR input buffer. It indicates whether the input buffer
+	 * being set is a secured one or not. Secured (locked) buffers
+	 * are required for playback from a secured input; in that case
+	 * the write() syscall is not allowed.
+ */
+ int is_protected;
+};
+
+struct dmx_decoder_buffers {
+ /*
+ * Specify if linear buffer support is requested. If set, buffers_num
+ * must be greater than 1
+ */
+ int is_linear;
+
+ /*
+	 * Specify the number of external buffers allocated by the user.
+	 * A value of 0 means internal buffer allocation is requested
+ */
+ __u32 buffers_num;
+
+ /* Specify buffer size, either external or internal */
+ __u32 buffers_size;
+
+ /* Array of externally allocated buffer handles */
+ int handles[DMX_MAX_DECODER_BUFFER_NUM];
+};
+
+struct dmx_secure_mode {
+ /*
+ * Specifies whether the filter is secure or not.
+ * Filter should be set as secured if the filter's data *may* include
+ * encrypted data that would require decryption configured through
+ * DMX_SET_CIPHER ioctl. The setting may be done while
+ * filter is in idle state only.
+ */
+ int is_secured;
+};
+
+struct dmx_cipher_operation {
+ /* Indication whether the operation is encryption or decryption */
+ int encrypt;
+
+ /* The ID of the key used for decryption or encryption */
+ __u32 key_ladder_id;
+};
+
+#define DMX_MAX_CIPHER_OPERATIONS_COUNT 5
+struct dmx_cipher_operations {
+ /*
+ * The PID to perform the cipher operations on.
+	 * In the case of a recording filter, multiple PIDs may exist in the
+	 * same filter through the DMX_ADD_PID ioctl, and each may have
+	 * different cipher operations.
+ */
+ __u16 pid;
+
+ /* Total number of operations */
+ __u8 operations_count;
+
+ /*
+ * Cipher operation to perform on the given PID.
+ * The operations are performed in the order they are given.
+ */
+ struct dmx_cipher_operation operations[DMX_MAX_CIPHER_OPERATIONS_COUNT];
+};
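A sketch describing a single decrypt operation for one PID through the DMX_SET_CIPHER ioctl defined later in this patch; the key-ladder id is a platform-specific assumption:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

static int set_pid_decryption(int fd, __u16 pid, __u32 key_ladder_id)
{
	struct dmx_cipher_operations ops;

	memset(&ops, 0, sizeof(ops));
	ops.pid = pid;
	ops.operations_count = 1;
	ops.operations[0].encrypt = 0;	/* decryption */
	ops.operations[0].key_ladder_id = key_ladder_id;

	return ioctl(fd, DMX_SET_CIPHER, &ops);
}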
+
+struct dmx_events_mask {
+ /*
+ * Bitmask of events to be disabled (dmx_event).
+ * Disabled events will not be notified to the user.
+ * By default all events are enabled except for
+ * DMX_EVENT_NEW_ES_DATA.
+ * Overflow event can't be disabled.
+ */
+ __u32 disable_mask;
+
+ /*
+ * Bitmask of events that will not wake-up the user
+ * when user calls poll with POLLPRI flag.
+ * Events that are used as wake-up source should not be
+ * disabled in disable_mask or they would not be used
+ * as a wake-up source.
+ * By default all enabled events are set as wake-up events.
+ * Overflow event can't be disabled as a wake-up source.
+ */
+ __u32 no_wakeup_mask;
+
+ /*
+ * Number of ready wake-up events which will trigger
+ * a wake-up when user calls poll with POLLPRI flag.
+ * Default is set to 1.
+ */
+ __u32 wakeup_threshold;
+};
+
+struct dmx_indexing_params {
+ /*
+	 * PID to index. In the case of a recording filter, multiple PIDs
+	 * may exist in the same filter through the DMX_ADD_PID ioctl.
+ * It is assumed that the PID was already added using DMX_ADD_PID
+ * or an error will be reported.
+ */
+ __u16 pid;
+
+ /* enable or disable indexing, default is disabled */
+ int enable;
+
+ /* combination of DMX_IDX_* bits */
+ __u64 types;
+};
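A sketch enabling IDR-frame indexing, assuming the PID was already added with DMX_ADD_PID as the comment above requires:

#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

static int enable_idr_indexing(int fd, __u16 pid)
{
	struct dmx_indexing_params idx = {
		.pid = pid,
		.enable = 1,
		.types = DMX_IDX_H264_IDR_START | DMX_IDX_H264_IDR_END,
	};

	return ioctl(fd, DMX_SET_INDEXING_PARAMS, &idx);
}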
+
+struct dmx_set_ts_insertion {
+ /*
+ * Unique identifier managed by the caller.
+ * This identifier can be used later to remove the
+ * insertion using DMX_ABORT_TS_INSERTION ioctl.
+ */
+ __u32 identifier;
+
+ /*
+	 * Repetition time in msec; the minimum allowed value is 25 msec.
+	 * A repetition time of 0 means a one-shot insertion.
+	 * Insertion timing is based on the wall clock.
+ */
+ __u32 repetition_time;
+
+ /*
+ * TS packets buffer to be inserted.
+ * The buffer is inserted as-is to the recording buffer
+ * without any modification.
+	 * It is advised to set the discontinuity flag in the very
+ * first TS packet in the buffer.
+ */
+ const __u8 *ts_packets;
+
+ /*
+ * Size in bytes of the TS packets buffer to be inserted.
+	 * Should be a multiple of 188 or 192 bytes,
+	 * depending on the recording filter output format.
+ */
+ size_t size;
+};
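A minimal sketch of a one-shot insertion (repetition_time of 0, per the comment above); caller-managed identifier and packet buffer are assumed:

#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

static int insert_ts_once(int fd, const __u8 *pkts, size_t len, __u32 id)
{
	struct dmx_set_ts_insertion ins = {
		.identifier = id,
		.repetition_time = 0,	/* one-shot */
		.ts_packets = pkts,
		.size = len,		/* multiple of 188 or 192 bytes */
	};

	return ioctl(fd, DMX_SET_TS_INSERTION, &ins);
}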
+
+struct dmx_abort_ts_insertion {
+ /*
+ * Identifier of the insertion buffer previously set
+ * using DMX_SET_TS_INSERTION.
+ */
+ __u32 identifier;
+};
+
+struct dmx_scrambling_bits {
+ /*
+	 * The PID whose scrambling bits value is returned.
+	 * In the case of a recording filter, multiple PIDs may exist in the
+	 * same filter through the DMX_ADD_PID ioctl, and each may have a
+	 * different scrambling bits status.
+ */
+ __u16 pid;
+
+ /* Current value of scrambling bits: 0, 1, 2 or 3 */
+ __u8 value;
+};
+
#define DMX_START _IO('o', 41)
#define DMX_STOP _IO('o', 42)
#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params)
@@ -151,5 +854,27 @@ struct dmx_stc {
#define DMX_GET_STC _IOWR('o', 50, struct dmx_stc)
#define DMX_ADD_PID _IOW('o', 51, __u16)
#define DMX_REMOVE_PID _IOW('o', 52, __u16)
+#define DMX_SET_TS_PACKET_FORMAT _IOW('o', 53, enum dmx_tsp_format_t)
+#define DMX_SET_TS_OUT_FORMAT _IOW('o', 54, enum dmx_tsp_format_t)
+#define DMX_SET_DECODER_BUFFER_SIZE _IO('o', 55)
+#define DMX_GET_BUFFER_STATUS _IOR('o', 56, struct dmx_buffer_status)
+#define DMX_RELEASE_DATA _IO('o', 57)
+#define DMX_FEED_DATA _IO('o', 58)
+#define DMX_SET_PLAYBACK_MODE _IOW('o', 59, enum dmx_playback_mode_t)
+#define DMX_GET_EVENT _IOR('o', 60, struct dmx_filter_event)
+#define DMX_SET_BUFFER_MODE _IOW('o', 61, enum dmx_buffer_mode)
+#define DMX_SET_BUFFER _IOW('o', 62, struct dmx_buffer)
+#define DMX_SET_DECODER_BUFFER _IOW('o', 63, struct dmx_decoder_buffers)
+#define DMX_REUSE_DECODER_BUFFER _IO('o', 64)
+#define DMX_SET_SECURE_MODE _IOW('o', 65, struct dmx_secure_mode)
+#define DMX_SET_EVENTS_MASK _IOW('o', 66, struct dmx_events_mask)
+#define DMX_GET_EVENTS_MASK _IOR('o', 67, struct dmx_events_mask)
+#define DMX_PUSH_OOB_COMMAND _IOW('o', 68, struct dmx_oob_command)
+#define DMX_SET_INDEXING_PARAMS _IOW('o', 69, struct dmx_indexing_params)
+#define DMX_SET_TS_INSERTION _IOW('o', 70, struct dmx_set_ts_insertion)
+#define DMX_ABORT_TS_INSERTION _IOW('o', 71, struct dmx_abort_ts_insertion)
+#define DMX_GET_SCRAMBLING_BITS _IOWR('o', 72, struct dmx_scrambling_bits)
+#define DMX_SET_CIPHER _IOW('o', 73, struct dmx_cipher_operations)
+#define DMX_FLUSH_BUFFER _IO('o', 74)
#endif /* _UAPI_DVBDMX_H_ */
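Taken together, a hedged usage sketch for the new DMX_GET_SCRAMBLING_BITS ioctl; the interpretation of the values follows the MPEG-TS scrambling-control field (0 means clear), which is an assumption rather than something the header states:

#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

static int pid_is_scrambled(int fd, __u16 pid)
{
	struct dmx_scrambling_bits sb = { .pid = pid };

	if (ioctl(fd, DMX_GET_SCRAMBLING_BITS, &sb) < 0)
		return -1;
	return sb.value != 0;	/* non-zero: not in the clear */
}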
diff --git a/include/uapi/linux/msm_vidc_dec.h b/include/uapi/linux/msm_vidc_dec.h
index f502c81665a4..48ce8e929fbf 100644
--- a/include/uapi/linux/msm_vidc_dec.h
+++ b/include/uapi/linux/msm_vidc_dec.h
@@ -486,10 +486,14 @@ enum vdec_interlaced_format {
VDEC_InterlaceInterleaveFrameBottomFieldFirst = 0x4
};
+#define VDEC_YUV_FORMAT_NV12_TP10_UBWC \
+ VDEC_YUV_FORMAT_NV12_TP10_UBWC
+
enum vdec_output_fromat {
VDEC_YUV_FORMAT_NV12 = 0x1,
VDEC_YUV_FORMAT_TILE_4x2 = 0x2,
- VDEC_YUV_FORMAT_NV12_UBWC = 0x3
+ VDEC_YUV_FORMAT_NV12_UBWC = 0x3,
+ VDEC_YUV_FORMAT_NV12_TP10_UBWC = 0x4
};
enum vdec_output_order {
diff --git a/include/uapi/media/msm_cam_sensor.h b/include/uapi/media/msm_cam_sensor.h
index 2c7ada5d02cf..172545d34b7d 100644
--- a/include/uapi/media/msm_cam_sensor.h
+++ b/include/uapi/media/msm_cam_sensor.h
@@ -86,6 +86,8 @@ enum sensor_sub_module_t {
SUB_MODULE_CSIPHY_3D,
SUB_MODULE_OIS,
SUB_MODULE_EXT,
+ SUB_MODULE_IR_LED,
+ SUB_MODULE_IR_CUT,
SUB_MODULE_MAX,
};
@@ -289,6 +291,16 @@ struct msm_eeprom_info_t {
struct msm_eeprom_memory_map_array *mem_map_array;
};
+struct msm_ir_led_cfg_data_t {
+ enum msm_ir_led_cfg_type_t cfg_type;
+ int32_t pwm_duty_on_ns;
+ int32_t pwm_period_ns;
+};
+
+struct msm_ir_cut_cfg_data_t {
+ enum msm_ir_cut_cfg_type_t cfg_type;
+};
+
struct msm_eeprom_cfg_data {
enum eeprom_cfg_type_t cfgtype;
uint8_t is_supported;
@@ -598,5 +610,11 @@ struct sensor_init_cfg_data {
#define VIDIOC_MSM_FLASH_QUERY_DATA \
_IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_flash_query_data_t)
+#define VIDIOC_MSM_IR_LED_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_led_cfg_data_t)
+
+#define VIDIOC_MSM_IR_CUT_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_cut_cfg_data_t)
+
#endif
diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h
index 01e52b6f7b44..ad0825e33217 100644
--- a/include/uapi/media/msm_camsensor_sdk.h
+++ b/include/uapi/media/msm_camsensor_sdk.h
@@ -114,6 +114,15 @@ enum msm_sensor_power_seq_gpio_t {
SENSOR_GPIO_MAX,
};
+enum msm_ir_cut_filter_gpio_t {
+ IR_CUT_FILTER_GPIO_P = 0,
+ IR_CUT_FILTER_GPIO_M,
+ IR_CUT_FILTER_GPIO_MAX,
+};
+#define IR_CUT_FILTER_GPIO_P IR_CUT_FILTER_GPIO_P
+#define IR_CUT_FILTER_GPIO_M IR_CUT_FILTER_GPIO_M
+#define IR_CUT_FILTER_GPIO_MAX IR_CUT_FILTER_GPIO_MAX
+
enum msm_camera_vreg_name_t {
CAM_VDIG,
CAM_VIO,
@@ -182,6 +191,28 @@ enum msm_flash_cfg_type_t {
CFG_FLASH_HIGH,
};
+enum msm_ir_led_cfg_type_t {
+ CFG_IR_LED_INIT = 0,
+ CFG_IR_LED_RELEASE,
+ CFG_IR_LED_OFF,
+ CFG_IR_LED_ON,
+};
+#define CFG_IR_LED_INIT CFG_IR_LED_INIT
+#define CFG_IR_LED_RELEASE CFG_IR_LED_RELEASE
+#define CFG_IR_LED_OFF CFG_IR_LED_OFF
+#define CFG_IR_LED_ON CFG_IR_LED_ON
+
+enum msm_ir_cut_cfg_type_t {
+ CFG_IR_CUT_INIT = 0,
+ CFG_IR_CUT_RELEASE,
+ CFG_IR_CUT_OFF,
+ CFG_IR_CUT_ON,
+};
+#define CFG_IR_CUT_INIT CFG_IR_CUT_INIT
+#define CFG_IR_CUT_RELEASE CFG_IR_CUT_RELEASE
+#define CFG_IR_CUT_OFF CFG_IR_CUT_OFF
+#define CFG_IR_CUT_ON CFG_IR_CUT_ON
+
enum msm_sensor_output_format_t {
MSM_SENSOR_BAYER,
MSM_SENSOR_YCBCR,
diff --git a/include/uapi/media/msmb_camera.h b/include/uapi/media/msmb_camera.h
index fe70daa772df..071331ef6882 100644
--- a/include/uapi/media/msmb_camera.h
+++ b/include/uapi/media/msmb_camera.h
@@ -48,7 +48,9 @@
#define MSM_CAMERA_SUBDEV_SENSOR_INIT 14
#define MSM_CAMERA_SUBDEV_OIS 15
#define MSM_CAMERA_SUBDEV_FLASH 16
-#define MSM_CAMERA_SUBDEV_EXT 17
+#define MSM_CAMERA_SUBDEV_IR_LED 17
+#define MSM_CAMERA_SUBDEV_IR_CUT 18
+#define MSM_CAMERA_SUBDEV_EXT 19
#define MSM_MAX_CAMERA_SENSORS 5
diff --git a/include/uapi/sound/wcd-dsp-glink.h b/include/uapi/sound/wcd-dsp-glink.h
index db92e6b41340..39d128d370a0 100644
--- a/include/uapi/sound/wcd-dsp-glink.h
+++ b/include/uapi/sound/wcd-dsp-glink.h
@@ -8,7 +8,9 @@
enum {
WDSP_REG_PKT = 1,
WDSP_CMD_PKT,
+ WDSP_READY_PKT,
};
+#define WDSP_READY_PKT WDSP_READY_PKT
/*
* struct wdsp_reg_pkt - Glink channel information structure format
diff --git a/init/Kconfig b/init/Kconfig
index 9ad1ae9d9da8..eb9e1a0aa688 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1002,6 +1002,23 @@ config CGROUP_CPUACCT
config PAGE_COUNTER
bool
+config CGROUP_SCHEDTUNE
+ bool "CFS tasks boosting cgroup subsystem (EXPERIMENTAL)"
+ depends on SCHED_TUNE
+ help
+ This option provides the "schedtune" controller which improves the
+ flexibility of the task boosting mechanism by introducing the support
+ to define "per task" boost values.
+
+ This new controller:
+	  1. allows only a two-layer hierarchy, where the root defines the
+	     system-wide boost value and each of its direct children defines
+	     a different "class of tasks" to be boosted with a different value
+	  2. supports up to 16 different task classes, each of which can be
+	     configured with a different boost value
+
+ Say N if unsure.
+
config MEMCG
bool "Memory Resource Controller for Control Groups"
select PAGE_COUNTER
@@ -1170,6 +1187,16 @@ config SCHED_HMP_CSTATE_AWARE
with CPUs C-state. If this is enabled, scheduler places tasks
onto the shallowest C-state CPU among the most power efficient CPUs.
+config SCHED_CORE_CTL
+ bool "QTI Core Control"
+ depends on SMP
+ help
+	  This option enables the core control functionality in
+	  the scheduler. Core control automatically offlines and
+	  onlines cores based on CPU load and utilization.
+
+ If unsure, say N here.
+
config CHECKPOINT_RESTORE
bool "Checkpoint/restore support" if EXPERT
select PROC_CHILDREN
@@ -1254,6 +1281,32 @@ config SCHED_AUTOGROUP
desktop applications. Task group autogeneration is currently based
upon task session.
+config SCHED_TUNE
+ bool "Boosting for CFS tasks (EXPERIMENTAL)"
+ help
+ This option enables the system-wide support for task boosting.
+ When this support is enabled a new sysctl interface is exposed to
+ userspace via:
+ /proc/sys/kernel/sched_cfs_boost
+	  which allows setting a system-wide boost value in the range [0..100].
+
+	  The current boosting strategy is implemented in such a way that:
+	  - a 0% boost value operates in "standard" mode, scheduling all
+	    tasks at the minimum capacity required by their workload demand
+	  - a 100% boost value pushes task performance to the maximum,
+	    regardless of the incurred energy consumption
+
+	  A boost value between these two boundaries is used to bias the
+	  power/performance trade-off: the higher the boost value, the more
+	  the scheduler is biased toward performance instead of energy
+	  efficiency.
+
+ Since this support exposes a single system-wide knob, the specified
+ boost value is applied to all (CFS) tasks in the system.
+
+ If unsure, say N.
+
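A user-space sketch of the knob this help text describes; the sysctl path comes straight from the text above, everything else is illustrative:

#include <stdio.h>

/* Write a boost percentage in [0..100] to /proc/sys/kernel/sched_cfs_boost. */
static int set_cfs_boost(int percent)
{
	FILE *f = fopen("/proc/sys/kernel/sched_cfs_boost", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", percent);
	return fclose(f);
}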
config SYSFS_DEPRECATED
bool "Enable deprecated sysfs features to support old userspace tools"
depends on SYSFS
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ad4a12371069..cc6c7d0a6758 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5376,6 +5376,12 @@ int __init cgroup_init(void)
BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
+ /*
+ * The latency of the synchronize_sched() is too high for cgroups,
+ * avoid it at the cost of forcing all readers into the slow path.
+ */
+ rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
+
mutex_lock(&cgroup_mutex);
/* Add init_css_set to the hash table */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1cfd381642da..3c97f5b88a07 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -768,6 +768,10 @@ static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
+static DECLARE_BITMAP(cpu_isolated_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_isolated_mask = to_cpumask(cpu_isolated_bits);
+EXPORT_SYMBOL(cpu_isolated_mask);
+
void set_cpu_possible(unsigned int cpu, bool possible)
{
if (possible)
@@ -802,6 +806,14 @@ void set_cpu_active(unsigned int cpu, bool active)
cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}
+void set_cpu_isolated(unsigned int cpu, bool isolated)
+{
+ if (isolated)
+ cpumask_set_cpu(cpu, to_cpumask(cpu_isolated_bits));
+ else
+ cpumask_clear_cpu(cpu, to_cpumask(cpu_isolated_bits));
+}
+
void init_cpu_present(const struct cpumask *src)
{
cpumask_copy(to_cpumask(cpu_present_bits), src);
@@ -817,6 +829,11 @@ void init_cpu_online(const struct cpumask *src)
cpumask_copy(to_cpumask(cpu_online_bits), src);
}
+void init_cpu_isolated(const struct cpumask *src)
+{
+ cpumask_copy(to_cpumask(cpu_isolated_bits), src);
+}
+
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
void idle_notifier_register(struct notifier_block *n)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d6ec580584b6..5beb88f11671 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3386,7 +3386,8 @@ static int perf_event_read(struct perf_event *event, bool group)
* If event is enabled and currently active on a CPU, update the
* value in the event structure:
*/
- if (event->state == PERF_EVENT_STATE_ACTIVE) {
+ if (event->state == PERF_EVENT_STATE_ACTIVE &&
+ !cpu_isolated(event->oncpu)) {
struct perf_read_data data = {
.event = event,
.group = group,
diff --git a/kernel/fork.c b/kernel/fork.c
index c9eb86b646ab..e89d0bae6f20 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1375,7 +1375,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->real_start_time = ktime_get_boot_ns();
p->io_context = NULL;
p->audit_context = NULL;
- threadgroup_change_begin(current);
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
@@ -1527,6 +1526,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
INIT_LIST_HEAD(&p->thread_group);
p->task_works = NULL;
+ threadgroup_change_begin(current);
/*
* Ensure that the cgroup subsystem policies allow the new process to be
* forked. It should be noted the the new process's css_set can be changed
@@ -1627,6 +1627,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
bad_fork_cancel_cgroup:
cgroup_cancel_fork(p, cgrp_ss_priv);
bad_fork_free_pid:
+ threadgroup_change_end(current);
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
@@ -1657,7 +1658,6 @@ bad_fork_cleanup_policy:
mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
- threadgroup_change_end(current);
delayacct_tsk_free(p);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 011f8c4c63da..dac3724e4c1e 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
+#include <linux/cpumask.h>
#include "internals.h"
@@ -20,6 +21,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
const struct cpumask *affinity = d->common->affinity;
struct irq_chip *c;
bool ret = false;
+ struct cpumask available_cpus;
/*
* If this is a per-CPU interrupt, or the affinity does not
@@ -29,8 +31,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
!cpumask_test_cpu(smp_processor_id(), affinity))
return false;
+ cpumask_copy(&available_cpus, affinity);
+ cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask);
+ affinity = &available_cpus;
+
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
+ cpumask_andnot(&available_cpus, cpu_online_mask,
+ cpu_isolated_mask);
+ if (cpumask_empty(affinity))
+ affinity = cpu_online_mask;
ret = true;
}
@@ -69,6 +78,9 @@ void irq_migrate_all_off_this_cpu(void)
bool affinity_broken;
desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
+
raw_spin_lock(&desc->lock);
affinity_broken = migrate_one_irq(desc);
raw_spin_unlock(&desc->lock);
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index f231e0bb311c..ce182599cf2e 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -8,151 +8,186 @@
#include <linux/sched.h>
#include <linux/errno.h>
-int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
+int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
const char *name, struct lock_class_key *rwsem_key)
{
- brw->fast_read_ctr = alloc_percpu(int);
- if (unlikely(!brw->fast_read_ctr))
+ sem->read_count = alloc_percpu(int);
+ if (unlikely(!sem->read_count))
return -ENOMEM;
/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
- __init_rwsem(&brw->rw_sem, name, rwsem_key);
- rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
- atomic_set(&brw->slow_read_ctr, 0);
- init_waitqueue_head(&brw->write_waitq);
+ rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
+ __init_rwsem(&sem->rw_sem, name, rwsem_key);
+ init_waitqueue_head(&sem->writer);
+ sem->readers_block = 0;
return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
-void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
+void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
/*
* XXX: temporary kludge. The error path in alloc_super()
* assumes that percpu_free_rwsem() is safe after kzalloc().
*/
- if (!brw->fast_read_ctr)
+ if (!sem->read_count)
return;
- rcu_sync_dtor(&brw->rss);
- free_percpu(brw->fast_read_ctr);
- brw->fast_read_ctr = NULL; /* catch use after free bugs */
+ rcu_sync_dtor(&sem->rss);
+ free_percpu(sem->read_count);
+ sem->read_count = NULL; /* catch use after free bugs */
}
+EXPORT_SYMBOL_GPL(percpu_free_rwsem);
-/*
- * This is the fast-path for down_read/up_read. If it succeeds we rely
- * on the barriers provided by rcu_sync_enter/exit; see the comments in
- * percpu_down_write() and percpu_up_write().
- *
- * If this helper fails the callers rely on the normal rw_semaphore and
- * atomic_dec_and_test(), so in this case we have the necessary barriers.
- */
-static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
+int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
{
- bool success;
+ /*
+ * Due to having preemption disabled the decrement happens on
+ * the same CPU as the increment, avoiding the
+ * increment-on-one-CPU-and-decrement-on-another problem.
+ *
+ * If the reader misses the writer's assignment of readers_block, then
+ * the writer is guaranteed to see the reader's increment.
+ *
+ * Conversely, any readers that increment their sem->read_count after
+ * the writer looks are guaranteed to see the readers_block value,
+ * which in turn means that they are guaranteed to immediately
+ * decrement their sem->read_count, so that it doesn't matter that the
+ * writer missed them.
+ */
- preempt_disable();
- success = rcu_sync_is_idle(&brw->rss);
- if (likely(success))
- __this_cpu_add(*brw->fast_read_ctr, val);
- preempt_enable();
+ smp_mb(); /* A matches D */
- return success;
-}
+ /*
+ * If !readers_block the critical section starts here, matched by the
+ * release in percpu_up_write().
+ */
+ if (likely(!smp_load_acquire(&sem->readers_block)))
+ return 1;
-/*
- * Like the normal down_read() this is not recursive, the writer can
- * come after the first percpu_down_read() and create the deadlock.
- *
- * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
- * percpu_up_read() does rwsem_release(). This pairs with the usage
- * of ->rw_sem in percpu_down/up_write().
- */
-void percpu_down_read(struct percpu_rw_semaphore *brw)
-{
- might_sleep();
- rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+ /*
+ * Per the above comment; we still have preemption disabled and
+ * will thus decrement on the same CPU as we incremented.
+ */
+ __percpu_up_read(sem);
- if (likely(update_fast_ctr(brw, +1)))
- return;
+ if (try)
+ return 0;
- /* Avoid rwsem_acquire_read() and rwsem_release() */
- __down_read(&brw->rw_sem);
- atomic_inc(&brw->slow_read_ctr);
- __up_read(&brw->rw_sem);
-}
-EXPORT_SYMBOL_GPL(percpu_down_read);
+ /*
+ * We either call schedule() in the wait, or we'll fall through
+ * and reschedule on the preempt_enable() in percpu_down_read().
+ */
+ preempt_enable_no_resched();
-int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
-{
- if (unlikely(!update_fast_ctr(brw, +1))) {
- if (!__down_read_trylock(&brw->rw_sem))
- return 0;
- atomic_inc(&brw->slow_read_ctr);
- __up_read(&brw->rw_sem);
- }
-
- rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
+ /*
+	 * Avoid lockdep for the down/up_read(); we already have them.
+ */
+ __down_read(&sem->rw_sem);
+ this_cpu_inc(*sem->read_count);
+ __up_read(&sem->rw_sem);
+
+ preempt_disable();
return 1;
}
+EXPORT_SYMBOL_GPL(__percpu_down_read);
-void percpu_up_read(struct percpu_rw_semaphore *brw)
+void __percpu_up_read(struct percpu_rw_semaphore *sem)
{
- rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);
-
- if (likely(update_fast_ctr(brw, -1)))
- return;
+ smp_mb(); /* B matches C */
+ /*
+ * In other words, if they see our decrement (presumably to aggregate
+ * zero, as that is the only time it matters) they will also see our
+ * critical section.
+ */
+ __this_cpu_dec(*sem->read_count);
- /* false-positive is possible but harmless */
- if (atomic_dec_and_test(&brw->slow_read_ctr))
- wake_up_all(&brw->write_waitq);
+ /* Prod writer to recheck readers_active */
+ wake_up(&sem->writer);
}
-EXPORT_SYMBOL_GPL(percpu_up_read);
+EXPORT_SYMBOL_GPL(__percpu_up_read);
+
+#define per_cpu_sum(var) \
+({ \
+ typeof(var) __sum = 0; \
+ int cpu; \
+ compiletime_assert_atomic_type(__sum); \
+ for_each_possible_cpu(cpu) \
+ __sum += per_cpu(var, cpu); \
+ __sum; \
+})
-static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
+/*
+ * Return true if the modular sum of the sem->read_count per-CPU variable is
+ * zero. If this sum is zero, then it is stable due to the fact that if any
+ * newly arriving readers increment a given counter, they will immediately
+ * decrement that same counter.
+ */
+static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
- unsigned int sum = 0;
- int cpu;
+ if (per_cpu_sum(*sem->read_count) != 0)
+ return false;
+
+ /*
+	 * If we observed the decrement, ensure we see the entire critical
+ * section.
+ */
- for_each_possible_cpu(cpu) {
- sum += per_cpu(*brw->fast_read_ctr, cpu);
- per_cpu(*brw->fast_read_ctr, cpu) = 0;
- }
+ smp_mb(); /* C matches B */
- return sum;
+ return true;
}
-void percpu_down_write(struct percpu_rw_semaphore *brw)
+void percpu_down_write(struct percpu_rw_semaphore *sem)
{
+ /* Notify readers to take the slow path. */
+ rcu_sync_enter(&sem->rss);
+
+ down_write(&sem->rw_sem);
+
/*
- * Make rcu_sync_is_idle() == F and thus disable the fast-path in
- * percpu_down_read() and percpu_up_read(), and wait for gp pass.
- *
- * The latter synchronises us with the preceding readers which used
- * the fast-past, so we can not miss the result of __this_cpu_add()
- * or anything else inside their criticial sections.
+ * Notify new readers to block; up until now, and thus throughout the
+ * longish rcu_sync_enter() above, new readers could still come in.
*/
- rcu_sync_enter(&brw->rss);
+ WRITE_ONCE(sem->readers_block, 1);
- /* exclude other writers, and block the new readers completely */
- down_write(&brw->rw_sem);
+ smp_mb(); /* D matches A */
- /* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
- atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);
+ /*
+	 * If they don't see our write of readers_block, then we are
+ * guaranteed to see their sem->read_count increment, and therefore
+ * will wait for them.
+ */
- /* wait for all readers to complete their percpu_up_read() */
- wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
+ /* Wait for all now active readers to complete. */
+ wait_event(sem->writer, readers_active_check(sem));
}
EXPORT_SYMBOL_GPL(percpu_down_write);
-void percpu_up_write(struct percpu_rw_semaphore *brw)
+void percpu_up_write(struct percpu_rw_semaphore *sem)
{
- /* release the lock, but the readers can't use the fast-path */
- up_write(&brw->rw_sem);
/*
- * Enable the fast-path in percpu_down_read() and percpu_up_read()
- * but only after another gp pass; this adds the necessary barrier
- * to ensure the reader can't miss the changes done by us.
+ * Signal the writer is done, no fast path yet.
+ *
+ * One reason that we cannot just immediately flip to readers_fast is
+ * that new readers might fail to see the results of this writer's
+ * critical section.
+ *
+ * Therefore we force it through the slow path which guarantees an
+ * acquire and thereby guarantees the critical section's consistency.
+ */
+ smp_store_release(&sem->readers_block, 0);
+
+ /*
+ * Release the write lock, this will allow readers back in the game.
+ */
+ up_write(&sem->rw_sem);
+
+ /*
+ * Once this completes (at least one RCU-sched grace period hence) the
+ * reader fast path will be available again. Safe to use outside the
+	 * exclusive write lock because it's counting.
*/
- rcu_sync_exit(&brw->rss);
+ rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
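The caller-facing API is unchanged by this rework; a minimal kernel-side usage sketch, with illustrative names:

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore my_sem;	/* percpu_init_rwsem(&my_sem) at init */

static void my_reader(void)
{
	percpu_down_read(&my_sem);	/* per-CPU fast path unless a writer is pending */
	/* read-side critical section */
	percpu_up_read(&my_sem);
}

static void my_writer(void)
{
	percpu_down_write(&my_sem);	/* forces readers slow, waits for active ones */
	/* exclusive critical section */
	percpu_up_write(&my_sem);
}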
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 8ecc7b3f7dd9..69c32c42080f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -45,6 +45,7 @@
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
+#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <linux/export.h>
@@ -447,6 +448,9 @@ EXPORT_SYMBOL_GPL(pm_qos_request);
int pm_qos_request_for_cpu(int pm_qos_class, int cpu)
{
+ if (cpu_isolated(cpu))
+ return INT_MAX;
+
return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu];
}
EXPORT_SYMBOL(pm_qos_request_for_cpu);
@@ -469,6 +473,9 @@ int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask)
val = c->default_value;
for_each_cpu(cpu, mask) {
+ if (cpu_isolated(cpu))
+ continue;
+
switch (c->type) {
case PM_QOS_MIN:
if (c->target_per_cpu[cpu] < val)
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index be922c9f3d37..e358313a0d6c 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -83,6 +83,18 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
}
/**
+ * rcu_sync_enter_start() - Force readers onto the slow path
+ *
+ * Must be called after rcu_sync_init() and before first use.
+ *
+ * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
+ * pairs turn into NO-OPs.
+ */
+void rcu_sync_enter_start(struct rcu_sync *rsp)
+{
+ rsp->gp_count++;
+ rsp->gp_state = GP_PASSED;
+}
+
+/**
* rcu_sync_enter() - Force readers onto slowpath
* @rsp: Pointer to rcu_sync structure to use for synchronization
*
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 1f159743ebfc..7d0d34c53e08 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -19,4 +19,6 @@ obj-$(CONFIG_SCHED_HMP) += hmp.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
+obj-$(CONFIG_SCHED_TUNE) += tune.o
obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7474463b9835..024fb1007c78 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>
+#include <linux/irq.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -84,6 +85,7 @@
#endif
#include "sched.h"
+#include "core_ctl.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"
@@ -1229,6 +1231,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
struct rq *rq;
unsigned int dest_cpu;
int ret = 0;
+ cpumask_t allowed_mask;
rq = task_rq_lock(p, &flags);
@@ -1244,16 +1247,22 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
if (cpumask_equal(&p->cpus_allowed, new_mask))
goto out;
- dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+ cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
+
+ dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask);
if (dest_cpu >= nr_cpu_ids) {
- ret = -EINVAL;
- goto out;
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+ if (dest_cpu >= nr_cpu_ids) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cpumask_copy(&allowed_mask, new_mask);
}
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
+ if (cpumask_test_cpu(task_cpu(p), &allowed_mask))
goto out;
if (task_running(rq, p) || p->state == TASK_WAKING) {
@@ -1577,12 +1586,13 @@ EXPORT_SYMBOL_GPL(kick_process);
/*
* ->cpus_allowed is protected by both rq->lock and p->pi_lock
*/
-static int select_fallback_rq(int cpu, struct task_struct *p)
+static int select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso)
{
int nid = cpu_to_node(cpu);
const struct cpumask *nodemask = NULL;
- enum { cpuset, possible, fail } state = cpuset;
+ enum { cpuset, possible, fail, bug } state = cpuset;
int dest_cpu;
+ int isolated_candidate = -1;
/*
* If the node that the cpu is on has been offlined, cpu_to_node()
@@ -1598,6 +1608,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
continue;
if (!cpu_active(dest_cpu))
continue;
+ if (cpu_isolated(dest_cpu))
+ continue;
if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
return dest_cpu;
}
@@ -1610,6 +1622,16 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
continue;
if (!cpu_active(dest_cpu))
continue;
+ if (cpu_isolated(dest_cpu)) {
+ if (allow_iso)
+ isolated_candidate = dest_cpu;
+ continue;
+ }
+ goto out;
+ }
+
+ if (isolated_candidate != -1) {
+ dest_cpu = isolated_candidate;
goto out;
}
@@ -1628,6 +1650,11 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
break;
case fail:
+ allow_iso = true;
+ state = bug;
+ break;
+
+ case bug:
BUG();
break;
}
@@ -1655,6 +1682,8 @@ out:
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
+ bool allow_isolated = (p->flags & PF_KTHREAD);
+
lockdep_assert_held(&p->pi_lock);
if (p->nr_cpus_allowed > 1)
@@ -1671,8 +1700,9 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
* not worry about this generic constraint ]
*/
if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
- !cpu_online(cpu)))
- cpu = select_fallback_rq(task_cpu(p), p);
+ !cpu_online(cpu)) ||
+ (cpu_isolated(cpu) && !allow_isolated))
+ cpu = select_fallback_rq(task_cpu(p), p, allow_isolated);
return cpu;
}
@@ -2956,7 +2986,7 @@ void sched_exec(void)
if (dest_cpu == smp_processor_id())
goto unlock;
- if (likely(cpu_active(dest_cpu))) {
+ if (likely(cpu_active(dest_cpu) && likely(!cpu_isolated(dest_cpu)))) {
struct migration_arg arg = { p, dest_cpu };
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
@@ -3066,6 +3096,8 @@ void scheduler_tick(void)
if (curr->sched_class == &fair_sched_class)
check_for_migration(rq, curr);
+
+ core_ctl_check(wallclock);
}
#ifdef CONFIG_NO_HZ_FULL
@@ -3197,7 +3229,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
static inline void schedule_debug(struct task_struct *prev)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
- BUG_ON(task_stack_end_corrupted(prev));
+ if (task_stack_end_corrupted(prev))
+ panic("corrupted stack end detected inside scheduler\n");
#endif
if (unlikely(in_atomic_preempt_off())) {
@@ -3357,16 +3390,17 @@ static void __sched notrace __schedule(bool preempt)
update_rq_clock(rq);
next = pick_next_task(rq, prev);
- wallclock = sched_ktime_clock();
- update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
- update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
rq->clock_skip_update = 0;
BUG_ON(task_cpu(next) != cpu_of(rq));
+ wallclock = sched_ktime_clock();
if (likely(prev != next)) {
+ update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
+ update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+
rq->nr_switches++;
rq->curr = next;
++*switch_count;
@@ -3377,6 +3411,7 @@ static void __sched notrace __schedule(bool preempt)
rq = context_switch(rq, prev, next); /* unlocks the rq */
cpu = cpu_of(rq);
} else {
+ update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
lockdep_unpin_lock(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
}
@@ -5414,18 +5449,22 @@ static struct task_struct fake_task = {
};
/*
- * Migrate all tasks from the rq, sleeping tasks will be migrated by
- * try_to_wake_up()->select_task_rq().
+ * Migrate all tasks (excluding pinned ones, if the migrate_pinned_tasks
+ * argument says so) from the rq; sleeping tasks will be migrated by
+ * try_to_wake_up()->select_task_rq().
*
 * Called with rq->lock held even though we're in stop_machine() and
* there's no concurrency possible, we hold the required locks anyway
* because of lock validation efforts.
*/
-static void migrate_tasks(struct rq *dead_rq)
+static void migrate_tasks(struct rq *dead_rq, bool migrate_pinned_tasks)
{
struct rq *rq = dead_rq;
struct task_struct *next, *stop = rq->stop;
int dest_cpu;
+ unsigned int num_pinned_kthreads = 1; /* this thread */
+ cpumask_t avail_cpus;
+
+ cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask);
/*
* Fudge the rq selection such that the below task selection loop
@@ -5447,10 +5486,12 @@ static void migrate_tasks(struct rq *dead_rq)
for (;;) {
/*
- * There's this thread running, bail when that's the only
- * remaining thread.
+		 * There's this thread running plus pinned threads; bail
+		 * when those are the only remaining threads.
*/
- if (rq->nr_running == 1)
+ if ((migrate_pinned_tasks && rq->nr_running == 1) ||
+ (!migrate_pinned_tasks &&
+ rq->nr_running == num_pinned_kthreads))
break;
/*
@@ -5461,6 +5502,13 @@ static void migrate_tasks(struct rq *dead_rq)
BUG_ON(!next);
next->sched_class->put_prev_task(rq, next);
+ if (!migrate_pinned_tasks && next->flags & PF_KTHREAD &&
+ !cpumask_intersects(&avail_cpus, &next->cpus_allowed)) {
+ lockdep_unpin_lock(&rq->lock);
+ num_pinned_kthreads += 1;
+ continue;
+ }
+
/*
* Rules for changing task_struct::cpus_allowed are holding
* both pi_lock and rq->lock, such that holding either
@@ -5486,7 +5534,7 @@ static void migrate_tasks(struct rq *dead_rq)
}
/* Find suitable destination for @next, with force if needed. */
- dest_cpu = select_fallback_rq(dead_rq->cpu, next);
+ dest_cpu = select_fallback_rq(dead_rq->cpu, next, false);
rq = __migrate_task(rq, next, dest_cpu);
if (rq != dead_rq) {
@@ -5502,6 +5550,222 @@ static void migrate_tasks(struct rq *dead_rq)
rq->stop = stop;
}
+
+static void set_rq_online(struct rq *rq);
+static void set_rq_offline(struct rq *rq);
+
+int do_isolation_work_cpu_stop(void *data)
+{
+ unsigned long flags;
+ unsigned int cpu = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+
+ watchdog_disable(cpu);
+
+ irq_migrate_all_off_this_cpu();
+
+ sched_ttwu_pending();
+ /* Update our root-domain */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+
+ if (rq->rd) {
+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+ set_rq_offline(rq);
+ }
+
+ migrate_tasks(rq, false);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ /*
+ * We might have been in tickless state. Clear NOHZ flags to avoid
+ * us being kicked for helping out with balancing
+ */
+ nohz_balance_clear_nohz_mask(cpu);
+ return 0;
+}
+
+int do_unisolation_work_cpu_stop(void *data)
+{
+ watchdog_enable(smp_processor_id());
+ return 0;
+}
+
+static void init_sched_groups_capacity(int cpu, struct sched_domain *sd);
+
+static void sched_update_group_capacities(int cpu)
+{
+ struct sched_domain *sd;
+
+ mutex_lock(&sched_domains_mutex);
+ rcu_read_lock();
+
+ for_each_domain(cpu, sd) {
+ int balance_cpu = group_balance_cpu(sd->groups);
+
+ init_sched_groups_capacity(cpu, sd);
+ /*
+		 * Need to ensure this is also called for the
+		 * balancing CPU.
+ */
+ if (cpu != balance_cpu)
+ init_sched_groups_capacity(balance_cpu, sd);
+ }
+
+ rcu_read_unlock();
+ mutex_unlock(&sched_domains_mutex);
+}
+
+static unsigned int cpu_isolation_vote[NR_CPUS];
+
+int sched_isolate_count(const cpumask_t *mask, bool include_offline)
+{
+ cpumask_t count_mask = CPU_MASK_NONE;
+
+ if (include_offline) {
+ cpumask_complement(&count_mask, cpu_online_mask);
+ cpumask_or(&count_mask, &count_mask, cpu_isolated_mask);
+ cpumask_and(&count_mask, &count_mask, mask);
+ } else {
+ cpumask_and(&count_mask, mask, cpu_isolated_mask);
+ }
+
+ return cpumask_weight(&count_mask);
+}
+
+/*
+ * 1) CPU is isolated and cpu is offlined:
+ * Unisolate the core.
+ * 2) CPU is not isolated and CPU is offlined:
+ * No action taken.
+ * 3) CPU is offline and request to isolate
+ * Request ignored.
+ * 4) CPU is offline and isolated:
+ * Not a possible state.
+ * 5) CPU is online and request to isolate
+ * Normal case: Isolate the CPU
+ * 6) CPU is not isolated and comes back online
+ * Nothing to do
+ *
+ * Note: The client calling sched_isolate_cpu() is responsible for ONLY
+ * calling sched_unisolate_cpu() on a CPU that the client previously isolated.
+ * Client is also responsible for unisolating when a core goes offline
+ * (after CPU is marked offline).
+ */
+int sched_isolate_cpu(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ cpumask_t avail_cpus;
+ int ret_code = 0;
+ u64 start_time;
+
+ if (trace_sched_isolate_enabled())
+ start_time = sched_clock();
+
+ lock_device_hotplug();
+
+ cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask);
+
+ /* We cannot isolate ALL cpus in the system */
+ if (cpumask_weight(&avail_cpus) == 1) {
+ ret_code = -EINVAL;
+ goto out;
+ }
+
+ if (!cpu_online(cpu)) {
+ ret_code = -EINVAL;
+ goto out;
+ }
+
+ if (++cpu_isolation_vote[cpu] > 1)
+ goto out;
+
+ set_cpu_isolated(cpu, true);
+ cpumask_clear_cpu(cpu, &avail_cpus);
+
+ /* Migrate timers */
+ smp_call_function_any(&avail_cpus, hrtimer_quiesce_cpu, &cpu, 1);
+ smp_call_function_any(&avail_cpus, timer_quiesce_cpu, &cpu, 1);
+
+ migrate_sync_cpu(cpu, cpumask_first(&avail_cpus));
+ stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
+
+ clear_hmp_request(cpu);
+ calc_load_migrate(rq);
+ update_max_interval();
+ sched_update_group_capacities(cpu);
+
+out:
+ unlock_device_hotplug();
+ trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0],
+ start_time, 1);
+ return ret_code;
+}
+
+/*
+ * Note: The client calling sched_isolate_cpu() is responsible for ONLY
+ * calling sched_unisolate_cpu() on a CPU that the client previously isolated.
+ * Client is also responsible for unisolating when a core goes offline
+ * (after CPU is marked offline).
+ */
+int sched_unisolate_cpu_unlocked(int cpu)
+{
+ int ret_code = 0;
+ struct rq *rq = cpu_rq(cpu);
+ u64 start_time;
+
+ if (trace_sched_isolate_enabled())
+ start_time = sched_clock();
+
+ lock_device_hotplug_assert();
+
+ if (!cpu_isolation_vote[cpu]) {
+ ret_code = -EINVAL;
+ goto out;
+ }
+
+ if (--cpu_isolation_vote[cpu])
+ goto out;
+
+ if (cpu_online(cpu)) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ rq->age_stamp = sched_clock_cpu(cpu);
+ if (rq->rd) {
+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+ set_rq_online(rq);
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+
+ set_cpu_isolated(cpu, false);
+ update_max_interval();
+ sched_update_group_capacities(cpu);
+
+ if (cpu_online(cpu)) {
+ stop_cpus(cpumask_of(cpu), do_unisolation_work_cpu_stop, 0);
+
+ /* Kick CPU to immediately do load balancing */
+ if (!test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+ smp_send_reschedule(cpu);
+ }
+
+out:
+ trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0],
+ start_time, 0);
+ return ret_code;
+}
+
+int sched_unisolate_cpu(int cpu)
+{
+ int ret_code;
+
+ lock_device_hotplug();
+ ret_code = sched_unisolate_cpu_unlocked(cpu);
+ unlock_device_hotplug();
+ return ret_code;
+}
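A hedged client-side sketch of the contract spelled out in the notes above; the caller and the comments on error causes are illustrative:

#include <linux/sched.h>

/* Votes are reference counted: each successful sched_isolate_cpu() must
 * be paired with sched_unisolate_cpu() by the same client. */
static int quiesce_then_restore(int cpu)
{
	int ret = sched_isolate_cpu(cpu);

	if (ret)
		return ret;	/* e.g. -EINVAL: CPU offline or last available CPU */

	/* the CPU now runs no IRQs and no non-pinned tasks */

	return sched_unisolate_cpu(cpu);
}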
+
#endif /* CONFIG_HOTPLUG_CPU */
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -5748,13 +6012,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
sched_ttwu_pending();
/* Update our root-domain */
raw_spin_lock_irqsave(&rq->lock, flags);
- migrate_sync_cpu(cpu);
+ migrate_sync_cpu(cpu, smp_processor_id());
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
- migrate_tasks(rq);
+ migrate_tasks(rq, true);
BUG_ON(rq->nr_running != 1); /* the migration thread */
raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
@@ -6509,11 +6773,14 @@ build_sched_groups(struct sched_domain *sd, int cpu)
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
struct sched_group *sg = sd->groups;
+ cpumask_t avail_mask;
WARN_ON(!sg);
do {
- sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+ cpumask_andnot(&avail_mask, sched_group_cpus(sg),
+ cpu_isolated_mask);
+ sg->group_weight = cpumask_weight(&avail_mask);
sg = sg->next;
} while (sg != sd->groups);
@@ -7547,6 +7814,7 @@ void __init sched_init_smp(void)
hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
update_cluster_topology();
+ init_sched_hmp_boost_policy();
init_hrtick();
@@ -7595,9 +7863,8 @@ void __init sched_init(void)
BUG_ON(num_possible_cpus() > BITS_PER_LONG);
-#ifdef CONFIG_SCHED_HMP
+ sched_hmp_parse_dt();
init_clusters();
-#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
@@ -7972,7 +8239,7 @@ void set_curr_task(int cpu, struct task_struct *p)
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);
-static void free_sched_group(struct task_group *tg)
+static void sched_free_group(struct task_group *tg)
{
free_fair_sched_group(tg);
free_rt_sched_group(tg);
@@ -7998,7 +8265,7 @@ struct task_group *sched_create_group(struct task_group *parent)
return tg;
err:
- free_sched_group(tg);
+ sched_free_group(tg);
return ERR_PTR(-ENOMEM);
}
@@ -8018,27 +8285,24 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
}
/* rcu callback to free various structures associated with a task group */
-static void free_sched_group_rcu(struct rcu_head *rhp)
+static void sched_free_group_rcu(struct rcu_head *rhp)
{
/* now it should be safe to free those cfs_rqs */
- free_sched_group(container_of(rhp, struct task_group, rcu));
+ sched_free_group(container_of(rhp, struct task_group, rcu));
}
-/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
/* wait for possible concurrent references to cfs_rqs complete */
- call_rcu(&tg->rcu, free_sched_group_rcu);
+ call_rcu(&tg->rcu, sched_free_group_rcu);
}
void sched_offline_group(struct task_group *tg)
{
unsigned long flags;
- int i;
/* end participation in shares distribution */
- for_each_possible_cpu(i)
- unregister_fair_sched_group(tg, i);
+ unregister_fair_sched_group(tg);
spin_lock_irqsave(&task_group_lock, flags);
list_del_rcu(&tg->list);
@@ -8489,31 +8753,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
+ sched_online_group(tg, parent);
+
return &tg->css;
}
-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
- struct task_group *parent = css_tg(css->parent);
- if (parent)
- sched_online_group(tg, parent);
- return 0;
+ sched_offline_group(tg);
}
static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
- sched_destroy_group(tg);
-}
-
-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
-{
- struct task_group *tg = css_tg(css);
-
- sched_offline_group(tg);
+ /*
+ * Relies on the RCU grace period between css_released() and this.
+ */
+ sched_free_group(tg);
}
static void cpu_cgroup_fork(struct task_struct *task, void *private)
@@ -8880,9 +9139,8 @@ static struct cftype cpu_files[] = {
struct cgroup_subsys cpu_cgrp_subsys = {
.css_alloc = cpu_cgroup_css_alloc,
+ .css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
- .css_online = cpu_cgroup_css_online,
- .css_offline = cpu_cgroup_css_offline,
.fork = cpu_cgroup_fork,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
new file mode 100644
index 000000000000..d81886da7ca2
--- /dev/null
+++ b/kernel/sched/core_ctl.c
@@ -0,0 +1,1055 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+
+#include <trace/events/sched.h>
+
+#define MAX_CPUS_PER_CLUSTER 4
+#define MAX_CLUSTERS 2
+
+struct cluster_data {
+ bool inited;
+ unsigned int min_cpus;
+ unsigned int max_cpus;
+ unsigned int offline_delay_ms;
+ unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
+ unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
+ unsigned int active_cpus;
+ unsigned int num_cpus;
+ cpumask_t cpu_mask;
+ unsigned int need_cpus;
+ unsigned int task_thres;
+ s64 last_isolate_ts;
+ struct list_head lru;
+ bool pending;
+ spinlock_t pending_lock;
+ bool is_big_cluster;
+ int nrrun;
+ bool nrrun_changed;
+ struct task_struct *core_ctl_thread;
+ unsigned int first_cpu;
+ bool boost;
+ struct kobject kobj;
+};
+
+struct cpu_data {
+ bool online;
+ bool is_busy;
+ unsigned int busy;
+ unsigned int cpu;
+ bool not_preferred;
+ struct cluster_data *cluster;
+ struct list_head sib;
+ bool isolated_by_us;
+};
+
+static DEFINE_PER_CPU(struct cpu_data, cpu_state);
+static struct cluster_data cluster_state[MAX_CLUSTERS];
+static unsigned int num_clusters;
+
+#define for_each_cluster(cluster, idx) \
+ for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
+ (idx)++, (cluster) = &cluster_state[idx])
+
+static DEFINE_SPINLOCK(state_lock);
+static void apply_need(struct cluster_data *state);
+static void wake_up_core_ctl_thread(struct cluster_data *state);
+static bool initialized;
+
+static unsigned int get_active_cpu_count(const struct cluster_data *cluster);
+
+/* ========================= sysfs interface =========================== */
+
+static ssize_t store_min_cpus(struct cluster_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ state->min_cpus = min(val, state->max_cpus);
+ wake_up_core_ctl_thread(state);
+
+ return count;
+}
+
+static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
+}
+
+static ssize_t store_max_cpus(struct cluster_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ val = min(val, state->num_cpus);
+ state->max_cpus = val;
+ state->min_cpus = min(state->min_cpus, state->max_cpus);
+ wake_up_core_ctl_thread(state);
+
+ return count;
+}
+
+static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
+}
+
+static ssize_t store_offline_delay_ms(struct cluster_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ state->offline_delay_ms = val;
+ apply_need(state);
+
+ return count;
+}
+
+static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
+}
+
+static ssize_t store_task_thres(struct cluster_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ if (val < state->num_cpus)
+ return -EINVAL;
+
+ state->task_thres = val;
+ apply_need(state);
+
+ return count;
+}
+
+static ssize_t show_offline_delay_ms(const struct cluster_data *state,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
+}
+
+static ssize_t store_busy_up_thres(struct cluster_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val[MAX_CPUS_PER_CLUSTER];
+ int ret, i;
+
+ ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+ if (ret != 1 && ret != state->num_cpus)
+ return -EINVAL;
+
+ if (ret == 1) {
+ for (i = 0; i < state->num_cpus; i++)
+ state->busy_up_thres[i] = val[0];
+ } else {
+ for (i = 0; i < state->num_cpus; i++)
+ state->busy_up_thres[i] = val[i];
+ }
+ apply_need(state);
+ return count;
+}
+
+static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
+{
+ int i, count = 0;
+
+ for (i = 0; i < state->num_cpus; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
+ state->busy_up_thres[i]);
+
+ count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+ return count;
+}
+
+static ssize_t store_busy_down_thres(struct cluster_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val[MAX_CPUS_PER_CLUSTER];
+ int ret, i;
+
+ ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+ if (ret != 1 && ret != state->num_cpus)
+ return -EINVAL;
+
+ if (ret == 1) {
+ for (i = 0; i < state->num_cpus; i++)
+ state->busy_down_thres[i] = val[0];
+ } else {
+ for (i = 0; i < state->num_cpus; i++)
+ state->busy_down_thres[i] = val[i];
+ }
+ apply_need(state);
+ return count;
+}
+
+static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
+{
+ int i, count = 0;
+
+ for (i = 0; i < state->num_cpus; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
+ state->busy_down_thres[i]);
+
+ count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+ return count;
+}
+
+static ssize_t store_is_big_cluster(struct cluster_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ state->is_big_cluster = val ? 1 : 0;
+ return count;
+}
+
+static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
+}
+
+static ssize_t show_cpus(const struct cluster_data *state, char *buf)
+{
+ struct cpu_data *c;
+ ssize_t count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ list_for_each_entry(c, &state->lru, sib) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "CPU%u (%s)\n", c->cpu,
+ c->online ? "Online" : "Offline");
+ }
+ spin_unlock_irqrestore(&state_lock, flags);
+ return count;
+}
+
+static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
+}
+
+static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
+}
+
+static ssize_t show_global_state(const struct cluster_data *state, char *buf)
+{
+ struct cpu_data *c;
+ struct cluster_data *cluster;
+ ssize_t count = 0;
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ c = &per_cpu(cpu_state, cpu);
+ if (!c->cluster)
+ continue;
+
+ cluster = c->cluster;
+ if (!cluster || !cluster->inited)
+ continue;
+
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "CPU%u\n", cpu);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tCPU: %u\n", c->cpu);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tOnline: %u\n", c->online);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tActive: %u\n",
+ !cpu_isolated(c->cpu));
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tFirst CPU: %u\n",
+ cluster->first_cpu);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tBusy%%: %u\n", c->busy);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tIs busy: %u\n", c->is_busy);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tNr running: %u\n", cluster->nrrun);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tActive CPUs: %u\n", get_active_cpu_count(cluster));
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tNeed CPUs: %u\n", cluster->need_cpus);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tBoost: %u\n", (unsigned int) cluster->boost);
+ }
+
+ return count;
+}
+
+static ssize_t store_not_preferred(struct cluster_data *state,
+ const char *buf, size_t count)
+{
+ struct cpu_data *c;
+ unsigned int i;
+ unsigned int val[MAX_CPUS_PER_CLUSTER];
+ unsigned long flags;
+ int ret;
+
+ ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+ if (ret != 1 && ret != state->num_cpus)
+ return -EINVAL;
+
+ i = 0;
+ spin_lock_irqsave(&state_lock, flags);
+ list_for_each_entry(c, &state->lru, sib)
+ c->not_preferred = val[i++];
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return count;
+}
+
+static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
+{
+ struct cpu_data *c;
+ ssize_t count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ list_for_each_entry(c, &state->lru, sib)
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tCPU:%d %u\n", c->cpu, c->not_preferred);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return count;
+}
+
+
+struct core_ctl_attr {
+ struct attribute attr;
+ ssize_t (*show)(const struct cluster_data *, char *);
+ ssize_t (*store)(struct cluster_data *, const char *, size_t count);
+};
+
+#define core_ctl_attr_ro(_name) \
+static struct core_ctl_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define core_ctl_attr_rw(_name) \
+static struct core_ctl_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+core_ctl_attr_rw(min_cpus);
+core_ctl_attr_rw(max_cpus);
+core_ctl_attr_rw(offline_delay_ms);
+core_ctl_attr_rw(busy_up_thres);
+core_ctl_attr_rw(busy_down_thres);
+core_ctl_attr_rw(task_thres);
+core_ctl_attr_rw(is_big_cluster);
+core_ctl_attr_ro(cpus);
+core_ctl_attr_ro(need_cpus);
+core_ctl_attr_ro(active_cpus);
+core_ctl_attr_ro(global_state);
+core_ctl_attr_rw(not_preferred);
+
+static struct attribute *default_attrs[] = {
+ &min_cpus.attr,
+ &max_cpus.attr,
+ &offline_delay_ms.attr,
+ &busy_up_thres.attr,
+ &busy_down_thres.attr,
+ &task_thres.attr,
+ &is_big_cluster.attr,
+ &cpus.attr,
+ &need_cpus.attr,
+ &active_cpus.attr,
+ &global_state.attr,
+ &not_preferred.attr,
+ NULL
+};
+
+#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
+#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct cluster_data *data = to_cluster_data(kobj);
+ struct core_ctl_attr *cattr = to_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (cattr->show)
+ ret = cattr->show(data, buf);
+
+ return ret;
+}
+
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cluster_data *data = to_cluster_data(kobj);
+ struct core_ctl_attr *cattr = to_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (cattr->store)
+ ret = cattr->store(data, buf, count);
+
+ return ret;
+}
+
+static const struct sysfs_ops sysfs_ops = {
+ .show = show,
+ .store = store,
+};
+
+static struct kobj_type ktype_core_ctl = {
+ .sysfs_ops = &sysfs_ops,
+ .default_attrs = default_attrs,
+};
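
Every per-cluster attribute defined above funnels through this single
show()/store() pair, which recovers the enclosing cluster_data from the
embedded kobject via container_of. A minimal userspace sketch of that
dispatch pattern, with simplified stand-in types (assumption: nothing here is
kernel code, it only mirrors the idiom):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute { const char *name; };

struct cluster { unsigned int min_cpus; struct attribute kobj_attr; };

static void show(struct attribute *attr)
{
	/* Recover the enclosing struct from the embedded member. */
	struct cluster *c = container_of(attr, struct cluster, kobj_attr);

	printf("%s = %u\n", attr->name, c->min_cpus);
}

int main(void)
{
	struct cluster c = { .min_cpus = 2, .kobj_attr = { "min_cpus" } };

	show(&c.kobj_attr);	/* prints "min_cpus = 2" */
	return 0;
}
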
+
+/* ==================== runqueue based core count =================== */
+
+#define RQ_AVG_TOLERANCE 2
+#define RQ_AVG_DEFAULT_MS 20
+#define NR_RUNNING_TOLERANCE 5
+static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS;
+
+static s64 rq_avg_timestamp_ms;
+
+static void update_running_avg(bool trigger_update)
+{
+ int avg, iowait_avg, big_avg, old_nrrun;
+ s64 now;
+ unsigned long flags;
+ struct cluster_data *cluster;
+ unsigned int index = 0;
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ now = ktime_to_ms(ktime_get());
+ if (now - rq_avg_timestamp_ms < rq_avg_period_ms - RQ_AVG_TOLERANCE) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ return;
+ }
+ rq_avg_timestamp_ms = now;
+ sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ /*
+ * Round up to the next integer if the average number of running
+ * tasks is within NR_RUNNING_TOLERANCE/100 of the next integer.
+ * If normal rounding up were used, a transient task could trigger
+ * an online event; by the time the core is onlined, the task would
+ * have finished.
+ * Rounding to the closest integer suffers the same problem because
+ * the scheduler might only provide running stats per jiffy, and a
+ * transient task could skew the number for one jiffy. If core
+ * control samples every 2 jiffies, it will observe an additional
+ * 0.5 running average, which rounds up to 1 task.
+ */
+ avg = (avg + NR_RUNNING_TOLERANCE) / 100;
+ big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;
+
+ for_each_cluster(cluster, index) {
+ if (!cluster->inited)
+ continue;
+ old_nrrun = cluster->nrrun;
+ /*
+ * The big cluster only needs to take care of big tasks, but if
+ * there are not enough big cores, big tasks may need to run on
+ * the little cluster as well. Thus the little cluster's runqueue
+ * stat has to use the overall runqueue average, or derive how many
+ * big tasks would have to run on little. The latter is not feasible
+ * because core control reacts much more slowly than the scheduler
+ * and cannot predict its behavior.
+ */
+ cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
+ if (cluster->nrrun != old_nrrun) {
+ if (trigger_update)
+ apply_need(cluster);
+ else
+ cluster->nrrun_changed = true;
+ }
+ }
+ return;
+}
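
The arithmetic above assumes sched_get_nr_running_avg() reports averages
scaled by 100 (so 95 means 0.95 tasks), an assumption inferred from the
division by 100 rather than stated by the patch. Adding NR_RUNNING_TOLERANCE
before the integer division then rounds up exactly the values within 0.05 of
the next integer. A standalone check:

#include <stdio.h>

#define NR_RUNNING_TOLERANCE 5

int main(void)
{
	/* Averages scaled by 100: 94 == 0.94 tasks, 195 == 1.95 tasks. */
	int samples[] = { 94, 95, 100, 194, 195 };
	int i;

	for (i = 0; i < 5; i++)
		printf("avg=%3d -> %d cpu(s)\n", samples[i],
		       (samples[i] + NR_RUNNING_TOLERANCE) / 100);
	/* Prints 0, 1, 1, 1, 2: only values within 0.05 of the next
	 * integer get rounded up. */
	return 0;
}
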
+
+/* adjust needed CPUs based on current runqueue information */
+static unsigned int apply_task_need(const struct cluster_data *cluster,
+ unsigned int new_need)
+{
+ /* unisolate all cores if there are enough tasks */
+ if (cluster->nrrun >= cluster->task_thres)
+ return cluster->num_cpus;
+
+ /* only unisolate more cores if there are tasks to run */
+ if (cluster->nrrun > new_need)
+ return new_need + 1;
+
+ return new_need;
+}
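
In short: once the runqueue average reaches task_thres every core in the
cluster is brought back, and otherwise at most one core beyond the busy-based
estimate is added while tasks are waiting. A standalone copy of the rules with
example numbers (assumption: a 4-CPU cluster with task_thres == 8):

#include <stdio.h>

/* Mirror of apply_task_need() above with the cluster fields passed in
 * as plain parameters (illustrative only). */
static unsigned int task_need(unsigned int nrrun, unsigned int task_thres,
			      unsigned int num_cpus, unsigned int new_need)
{
	if (nrrun >= task_thres)
		return num_cpus;	/* enough tasks: unisolate all cores */
	if (nrrun > new_need)
		return new_need + 1;	/* one extra core for waiting tasks */
	return new_need;
}

int main(void)
{
	printf("%u\n", task_need(9, 8, 4, 2));	/* 4: over the threshold */
	printf("%u\n", task_need(3, 8, 4, 2));	/* 3: one extra core */
	printf("%u\n", task_need(2, 8, 4, 2));	/* 2: no change */
	return 0;
}
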
+
+/* ======================= load based core count ====================== */
+
+static unsigned int apply_limits(const struct cluster_data *cluster,
+ unsigned int need_cpus)
+{
+ return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
+}
+
+static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
+{
+ return cluster->num_cpus -
+ sched_isolate_count(&cluster->cpu_mask, true);
+}
+
+static bool is_active(const struct cpu_data *state)
+{
+ return state->online && !cpu_isolated(state->cpu);
+}
+
+static bool adjustment_possible(const struct cluster_data *cluster,
+ unsigned int need)
+{
+ return (need < cluster->active_cpus || (need > cluster->active_cpus &&
+ sched_isolate_count(&cluster->cpu_mask, false)));
+}
+
+static bool eval_need(struct cluster_data *cluster)
+{
+ unsigned long flags;
+ struct cpu_data *c;
+ unsigned int need_cpus = 0, last_need, thres_idx;
+ int ret = 0;
+ bool need_flag = false;
+ unsigned int active_cpus;
+ unsigned int new_need;
+
+ if (unlikely(!cluster->inited))
+ return 0;
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ if (cluster->boost) {
+ need_cpus = cluster->max_cpus;
+ } else {
+ active_cpus = get_active_cpu_count(cluster);
+ thres_idx = active_cpus ? active_cpus - 1 : 0;
+ list_for_each_entry(c, &cluster->lru, sib) {
+ if (c->busy >= cluster->busy_up_thres[thres_idx])
+ c->is_busy = true;
+ else if (c->busy < cluster->busy_down_thres[thres_idx])
+ c->is_busy = false;
+ need_cpus += c->is_busy;
+ }
+ need_cpus = apply_task_need(cluster, need_cpus);
+ }
+ new_need = apply_limits(cluster, need_cpus);
+ need_flag = adjustment_possible(cluster, new_need);
+
+ last_need = cluster->need_cpus;
+ cluster->need_cpus = new_need;
+
+ if (!need_flag) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ return 0;
+ }
+
+ if (need_cpus > cluster->active_cpus) {
+ ret = 1;
+ } else if (need_cpus < cluster->active_cpus) {
+ s64 now = ktime_to_ms(ktime_get());
+ s64 elapsed = now - cluster->last_isolate_ts;
+
+ ret = elapsed >= cluster->offline_delay_ms;
+ }
+
+ trace_core_ctl_eval_need(cluster->first_cpu, last_need, need_cpus,
+ ret && need_flag);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return ret && need_flag;
+}
+
+static void apply_need(struct cluster_data *cluster)
+{
+ if (eval_need(cluster))
+ wake_up_core_ctl_thread(cluster);
+}
+
+static int core_ctl_set_busy(unsigned int cpu, unsigned int busy)
+{
+ struct cpu_data *c = &per_cpu(cpu_state, cpu);
+ struct cluster_data *cluster = c->cluster;
+ unsigned int old_is_busy = c->is_busy;
+
+ if (!cluster || !cluster->inited)
+ return 0;
+
+ update_running_avg(false);
+ if (c->busy == busy && !cluster->nrrun_changed)
+ return 0;
+ c->busy = busy;
+ cluster->nrrun_changed = false;
+
+ apply_need(cluster);
+ trace_core_ctl_set_busy(cpu, busy, old_is_busy, c->is_busy);
+ return 0;
+}
+
+/* ========================= core count enforcement ==================== */
+
+static void wake_up_core_ctl_thread(struct cluster_data *cluster)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cluster->pending_lock, flags);
+ cluster->pending = true;
+ spin_unlock_irqrestore(&cluster->pending_lock, flags);
+
+ wake_up_process_no_notif(cluster->core_ctl_thread);
+}
+
+static u64 core_ctl_check_timestamp;
+static u64 core_ctl_check_interval;
+
+static bool do_check(u64 wallclock)
+{
+ bool do_check = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ if ((wallclock - core_ctl_check_timestamp) >= core_ctl_check_interval) {
+ core_ctl_check_timestamp = wallclock;
+ do_check = true;
+ }
+ spin_unlock_irqrestore(&state_lock, flags);
+ return do_check;
+}
+
+void core_ctl_set_boost(bool boost)
+{
+ unsigned int index = 0;
+ struct cluster_data *cluster;
+
+ for_each_cluster(cluster, index) {
+ if (cluster->is_big_cluster && cluster->boost != boost) {
+ cluster->boost = boost;
+ apply_need(cluster);
+ }
+ }
+}
+
+void core_ctl_check(u64 wallclock)
+{
+ if (unlikely(!initialized))
+ return;
+
+ if (do_check(wallclock)) {
+ unsigned int index = 0;
+ struct cluster_data *cluster;
+
+ update_running_avg(true);
+
+ for_each_cluster(cluster, index) {
+ if (eval_need(cluster))
+ wake_up_core_ctl_thread(cluster);
+ }
+ }
+}
+
+static void move_cpu_lru(struct cpu_data *cpu_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ list_del(&cpu_data->sib);
+ list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
+ spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
+{
+ struct cpu_data *c, *tmp;
+
+ list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
+ if (!is_active(c))
+ continue;
+ if (cluster->active_cpus == need)
+ break;
+ /* Don't offline busy CPUs. */
+ if (c->is_busy)
+ continue;
+
+ pr_debug("Trying to isolate CPU%u\n", c->cpu);
+ if (!sched_isolate_cpu(c->cpu)) {
+ c->isolated_by_us = true;
+ move_cpu_lru(c);
+ cluster->last_isolate_ts = ktime_to_ms(ktime_get());
+ } else {
+ pr_debug("Unable to isolate CPU%u\n", c->cpu);
+ }
+ cluster->active_cpus = get_active_cpu_count(cluster);
+ }
+
+ /*
+ * If the number of active CPUs is within the limits, then
+ * don't force isolation of any busy CPUs.
+ */
+ if (cluster->active_cpus <= cluster->max_cpus)
+ return;
+
+ list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
+ if (!is_active(c))
+ continue;
+ if (cluster->active_cpus <= cluster->max_cpus)
+ break;
+
+ pr_debug("Trying to isolate CPU%u\n", c->cpu);
+ if (!sched_isolate_cpu(c->cpu)) {
+ c->isolated_by_us = true;
+ move_cpu_lru(c);
+ cluster->last_isolate_ts = ktime_to_ms(ktime_get());
+ } else {
+ pr_debug("Unable to isolate CPU%u\n", c->cpu);
+ }
+ cluster->active_cpus = get_active_cpu_count(cluster);
+ }
+}
+
+static void __try_to_unisolate(struct cluster_data *cluster,
+ unsigned int need, bool force)
+{
+ struct cpu_data *c, *tmp;
+
+ list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
+ if (!c->isolated_by_us)
+ continue;
+ if ((c->online && !cpu_isolated(c->cpu)) ||
+ (!force && c->not_preferred))
+ continue;
+ if (cluster->active_cpus == need)
+ break;
+
+ pr_debug("Trying to unisolate CPU%u\n", c->cpu);
+ if (!sched_unisolate_cpu(c->cpu)) {
+ c->isolated_by_us = false;
+ move_cpu_lru(c);
+ } else {
+ pr_debug("Unable to unisolate CPU%u\n", c->cpu);
+ }
+ cluster->active_cpus = get_active_cpu_count(cluster);
+ }
+}
+
+static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
+{
+ bool force_use_non_preferred = false;
+
+ __try_to_unisolate(cluster, need, force_use_non_preferred);
+
+ if (cluster->active_cpus == need)
+ return;
+
+ force_use_non_preferred = true;
+ __try_to_unisolate(cluster, need, force_use_non_preferred);
+}
+
+static void __ref do_core_ctl(struct cluster_data *cluster)
+{
+ unsigned int need;
+
+ need = apply_limits(cluster, cluster->need_cpus);
+
+ if (adjustment_possible(cluster, need)) {
+ pr_debug("Trying to adjust group %u from %u to %u\n",
+ cluster->first_cpu, cluster->active_cpus, need);
+
+ if (cluster->active_cpus > need)
+ try_to_isolate(cluster, need);
+ else if (cluster->active_cpus < need)
+ try_to_unisolate(cluster, need);
+ }
+}
+
+static int __ref try_core_ctl(void *data)
+{
+ struct cluster_data *cluster = data;
+ unsigned long flags;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&cluster->pending_lock, flags);
+ if (!cluster->pending) {
+ spin_unlock_irqrestore(&cluster->pending_lock, flags);
+ schedule();
+ if (kthread_should_stop())
+ break;
+ spin_lock_irqsave(&cluster->pending_lock, flags);
+ }
+ set_current_state(TASK_RUNNING);
+ cluster->pending = false;
+ spin_unlock_irqrestore(&cluster->pending_lock, flags);
+
+ do_core_ctl(cluster);
+ }
+
+ return 0;
+}
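
try_core_ctl() is the classic race-free kthread wait idiom: the task state is
set to TASK_INTERRUPTIBLE before the pending flag is checked under
pending_lock, so a wakeup cannot slip in between the check and schedule().
Sketched as an ordering diagram (illustrative comment only, not part of the
patch):

/*
 * waker                            core_ctl thread
 * -----                            ---------------
 *                                  set_current_state(TASK_INTERRUPTIBLE)
 * spin_lock(&pending_lock)         spin_lock(&pending_lock)
 * pending = true                   if (!pending) { unlock; schedule(); }
 * spin_unlock(&pending_lock)
 * wake_up_process(thread)
 *
 * Because pending is published under the lock before the wakeup, the
 * thread either sees pending == true and skips schedule(), or it sleeps
 * and the subsequent wakeup makes it runnable again.
 */
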
+
+static int __ref cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ uint32_t cpu = (uintptr_t)hcpu;
+ struct cpu_data *state = &per_cpu(cpu_state, cpu);
+ struct cluster_data *cluster = state->cluster;
+ unsigned int need;
+ int ret = NOTIFY_OK;
+
+ /* Don't affect suspend resume */
+ if (action & CPU_TASKS_FROZEN)
+ return NOTIFY_OK;
+
+ if (unlikely(!cluster || !cluster->inited))
+ return NOTIFY_OK;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+
+ /* If online state of CPU somehow got out of sync, fix it. */
+ if (state->online) {
+ state->online = false;
+ cluster->active_cpus = get_active_cpu_count(cluster);
+ pr_warn("CPU%d offline when state is online\n", cpu);
+ }
+ break;
+
+ case CPU_ONLINE:
+
+ state->online = true;
+ cluster->active_cpus = get_active_cpu_count(cluster);
+
+ /*
+ * Moving to the end of the list should only happen in
+ * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an
+ * infinite list traversal when thermal (or other entities)
+ * reject attempts to online CPUs.
+ */
+ move_cpu_lru(state);
+ break;
+
+ case CPU_DEAD:
+ /*
+ * We don't want to have a CPU both offline and isolated.
+ * So unisolate a CPU that went down if it was isolated by us.
+ */
+ if (state->isolated_by_us) {
+ sched_unisolate_cpu_unlocked(cpu);
+ state->isolated_by_us = false;
+ }
+
+ /* Move a CPU to the end of the LRU when it goes offline. */
+ move_cpu_lru(state);
+
+ /* Fall through */
+
+ case CPU_UP_CANCELED:
+
+ /* If online state of CPU somehow got out of sync, fix it. */
+ if (!state->online)
+ pr_warn("CPU%d online when state is offline\n", cpu);
+
+ state->online = false;
+ state->busy = 0;
+ cluster->active_cpus = get_active_cpu_count(cluster);
+ break;
+ }
+
+ need = apply_limits(cluster, cluster->need_cpus);
+ if (adjustment_possible(cluster, need))
+ wake_up_core_ctl_thread(cluster);
+
+ return ret;
+}
+
+static struct notifier_block __refdata cpu_notifier = {
+ .notifier_call = cpu_callback,
+};
+
+/* ============================ init code ============================== */
+
+static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_clusters; ++i) {
+ if (cluster_state[i].first_cpu == first_cpu)
+ return &cluster_state[i];
+ }
+
+ return NULL;
+}
+
+static int cluster_init(const struct cpumask *mask)
+{
+ struct device *dev;
+ unsigned int first_cpu = cpumask_first(mask);
+ struct cluster_data *cluster;
+ struct cpu_data *state;
+ unsigned int cpu;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+ if (find_cluster_by_first_cpu(first_cpu))
+ return 0;
+
+ dev = get_cpu_device(first_cpu);
+ if (!dev)
+ return -ENODEV;
+
+ pr_info("Creating CPU group %d\n", first_cpu);
+
+ if (num_clusters == MAX_CLUSTERS) {
+ pr_err("Unsupported number of clusters. Only %u supported\n",
+ MAX_CLUSTERS);
+ return -EINVAL;
+ }
+ cluster = &cluster_state[num_clusters];
+ ++num_clusters;
+
+ cpumask_copy(&cluster->cpu_mask, mask);
+ cluster->num_cpus = cpumask_weight(mask);
+ if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
+ pr_err("HW configuration not supported\n");
+ return -EINVAL;
+ }
+ cluster->first_cpu = first_cpu;
+ cluster->min_cpus = 1;
+ cluster->max_cpus = cluster->num_cpus;
+ cluster->need_cpus = cluster->num_cpus;
+ cluster->offline_delay_ms = 100;
+ cluster->task_thres = UINT_MAX;
+ cluster->nrrun = cluster->num_cpus;
+ INIT_LIST_HEAD(&cluster->lru);
+ spin_lock_init(&cluster->pending_lock);
+
+ for_each_cpu(cpu, mask) {
+ pr_info("Init CPU%u state\n", cpu);
+
+ state = &per_cpu(cpu_state, cpu);
+ state->cluster = cluster;
+ state->cpu = cpu;
+ if (cpu_online(cpu))
+ state->online = true;
+ list_add_tail(&state->sib, &cluster->lru);
+ }
+ cluster->active_cpus = get_active_cpu_count(cluster);
+
+ cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
+ "core_ctl/%d", first_cpu);
+ if (IS_ERR(cluster->core_ctl_thread))
+ return PTR_ERR(cluster->core_ctl_thread);
+
+ sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
+ &param);
+
+ cluster->inited = true;
+
+ kobject_init(&cluster->kobj, &ktype_core_ctl);
+ return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
+}
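
cluster_init() registers the kobject under the CPU device of the cluster's
first CPU, so the attributes defined earlier appear in sysfs. A hypothetical
userspace probe (the path is inferred from kobject_add(..., &dev->kobj,
"core_ctl") with first_cpu == 0; adjust for the cluster's actual first CPU):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/core_ctl/global_state",
			"r");

	if (!f) {
		perror("core_ctl");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* dump the per-CPU state report */
	fclose(f);
	return 0;
}
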
+
+static int cpufreq_policy_cb(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_policy *policy = data;
+ int ret;
+
+ switch (val) {
+ case CPUFREQ_CREATE_POLICY:
+ ret = cluster_init(policy->related_cpus);
+ if (ret)
+ pr_warn("unable to create core ctl group: %d\n", ret);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_pol_nb = {
+ .notifier_call = cpufreq_policy_cb,
+};
+
+static int cpufreq_gov_cb(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_govinfo *info = data;
+
+ switch (val) {
+ case CPUFREQ_LOAD_CHANGE:
+ core_ctl_set_busy(info->cpu, info->load);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_gov_nb = {
+ .notifier_call = cpufreq_gov_cb,
+};
+
+static int __init core_ctl_init(void)
+{
+ unsigned int cpu;
+
+ core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
+ * NSEC_PER_MSEC;
+
+ register_cpu_notifier(&cpu_notifier);
+ cpufreq_register_notifier(&cpufreq_pol_nb, CPUFREQ_POLICY_NOTIFIER);
+ cpufreq_register_notifier(&cpufreq_gov_nb, CPUFREQ_GOVINFO_NOTIFIER);
+
+ lock_device_hotplug();
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy;
+ int ret;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (policy) {
+ ret = cluster_init(policy->related_cpus);
+ if (ret)
+ pr_warn("unable to create core ctl group: %d\n"
+ , ret);
+ cpufreq_cpu_put(policy);
+ }
+ }
+ unlock_device_hotplug();
+ initialized = true;
+ return 0;
+}
+
+late_initcall(core_ctl_init);
diff --git a/kernel/sched/core_ctl.h b/kernel/sched/core_ctl.h
new file mode 100644
index 000000000000..3b0c12acb9c0
--- /dev/null
+++ b/kernel/sched/core_ctl.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CORE_CTL_H
+#define __CORE_CTL_H
+
+#ifdef CONFIG_SCHED_CORE_CTL
+void core_ctl_check(u64 wallclock);
+void core_ctl_set_boost(bool boost);
+#else
+static inline void core_ctl_check(u64 wallclock) {}
+static inline void core_ctl_set_boost(bool boost) {}
+#endif
+#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e893b0fcac6b..6362b864e2b1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2590,6 +2590,7 @@ static u32 __compute_runnable_contrib(u64 n)
#define SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER 0x80
#define SBC_FLAG_CSTATE_LOAD 0x100
#define SBC_FLAG_BEST_SIBLING 0x200
+#define SBC_FLAG_WAKER_CPU 0x400
/* Cluster selection flag */
#define SBC_FLAG_COLOC_CLUSTER 0x10000
@@ -2618,7 +2619,7 @@ struct cluster_cpu_stats {
int best_idle_cpu, least_loaded_cpu;
int best_capacity_cpu, best_cpu, best_sibling_cpu;
int min_cost, best_sibling_cpu_cost;
- int best_cpu_cstate;
+ int best_cpu_wakeup_latency;
u64 min_load, best_load, best_sibling_cpu_load;
s64 highest_spare_capacity;
};
@@ -2826,19 +2827,19 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
struct cpu_select_env *env, int cpu_cost)
{
- int cpu_cstate;
+ int wakeup_latency;
int prev_cpu = env->prev_cpu;
- cpu_cstate = cpu_rq(cpu)->cstate;
+ wakeup_latency = cpu_rq(cpu)->wakeup_latency;
if (env->need_idle) {
stats->min_cost = cpu_cost;
if (idle_cpu(cpu)) {
- if (cpu_cstate < stats->best_cpu_cstate ||
- (cpu_cstate == stats->best_cpu_cstate &&
- cpu == prev_cpu)) {
+ if (wakeup_latency < stats->best_cpu_wakeup_latency ||
+ (wakeup_latency == stats->best_cpu_wakeup_latency &&
+ cpu == prev_cpu)) {
stats->best_idle_cpu = cpu;
- stats->best_cpu_cstate = cpu_cstate;
+ stats->best_cpu_wakeup_latency = wakeup_latency;
}
} else {
if (env->cpu_load < stats->min_load ||
@@ -2854,7 +2855,7 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
if (cpu_cost < stats->min_cost) {
stats->min_cost = cpu_cost;
- stats->best_cpu_cstate = cpu_cstate;
+ stats->best_cpu_wakeup_latency = wakeup_latency;
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_CPU_COST;
@@ -2863,11 +2864,11 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
/* CPU cost is the same. Start breaking the tie by C-state */
- if (cpu_cstate > stats->best_cpu_cstate)
+ if (wakeup_latency > stats->best_cpu_wakeup_latency)
return;
- if (cpu_cstate < stats->best_cpu_cstate) {
- stats->best_cpu_cstate = cpu_cstate;
+ if (wakeup_latency < stats->best_cpu_wakeup_latency) {
+ stats->best_cpu_wakeup_latency = wakeup_latency;
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
@@ -2882,8 +2883,8 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
}
if (stats->best_cpu != prev_cpu &&
- ((cpu_cstate == 0 && env->cpu_load < stats->best_load) ||
- (cpu_cstate > 0 && env->cpu_load > stats->best_load))) {
+ ((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
+ (wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
@@ -2941,6 +2942,8 @@ static void find_best_cpu_in_cluster(struct sched_cluster *c,
struct cpumask search_cpus;
cpumask_and(&search_cpus, tsk_cpus_allowed(env->p), &c->cpus);
+ cpumask_andnot(&search_cpus, &search_cpus, cpu_isolated_mask);
+
if (env->ignore_prev_cpu)
cpumask_clear_cpu(env->prev_cpu, &search_cpus);
@@ -2976,7 +2979,7 @@ static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
stats->highest_spare_capacity = 0;
stats->least_loaded_cpu = -1;
- stats->best_cpu_cstate = INT_MAX;
+ stats->best_cpu_wakeup_latency = INT_MAX;
/* No need to initialize stats->best_load */
}
@@ -3009,7 +3012,8 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
prev_cpu = env->prev_cpu;
if (!cpumask_test_cpu(prev_cpu, tsk_cpus_allowed(task)) ||
- unlikely(!cpu_active(prev_cpu)))
+ unlikely(!cpu_active(prev_cpu)) ||
+ cpu_isolated(prev_cpu))
return false;
if (task->ravg.mark_start - task->last_cpu_selected_ts >=
@@ -3057,6 +3061,15 @@ wake_to_waker_cluster(struct cpu_select_env *env)
task_load(env->p) < sched_small_wakee_task_load;
}
+static inline bool
+bias_to_waker_cpu(struct task_struct *p, int cpu)
+{
+ return sysctl_sched_prefer_sync_wakee_to_waker &&
+ cpu_rq(cpu)->nr_running == 1 &&
+ cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
+ cpu_active(cpu) && !cpu_isolated(cpu);
+}
+
static inline int
cluster_allowed(struct task_struct *p, struct sched_cluster *cluster)
{
@@ -3077,6 +3090,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
struct cluster_cpu_stats stats;
struct related_thread_group *grp;
unsigned int sbc_flag = 0;
+ int cpu = raw_smp_processor_id();
struct cpu_select_env env = {
.p = p,
@@ -3108,14 +3122,20 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
else
env.rtg = grp;
} else {
- cluster = cpu_rq(smp_processor_id())->cluster;
- if (wake_to_waker_cluster(&env) &&
- cluster_allowed(p, cluster)) {
- env.need_waker_cluster = 1;
- bitmap_zero(env.candidate_list, NR_CPUS);
- __set_bit(cluster->id, env.candidate_list);
- env.sbc_best_cluster_flag = SBC_FLAG_WAKER_CLUSTER;
-
+ cluster = cpu_rq(cpu)->cluster;
+ if (wake_to_waker_cluster(&env)) {
+ if (bias_to_waker_cpu(p, cpu)) {
+ target = cpu;
+ sbc_flag = SBC_FLAG_WAKER_CLUSTER |
+ SBC_FLAG_WAKER_CPU;
+ goto out;
+ } else if (cluster_allowed(p, cluster)) {
+ env.need_waker_cluster = 1;
+ bitmap_zero(env.candidate_list, NR_CPUS);
+ __set_bit(cluster->id, env.candidate_list);
+ env.sbc_best_cluster_flag =
+ SBC_FLAG_WAKER_CLUSTER;
+ }
} else if (bias_to_prev_cpu(&env, &stats)) {
sbc_flag = SBC_FLAG_PREV_CPU;
goto out;
@@ -7200,6 +7220,10 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
.avg_load = 0UL,
.sum_nr_running = 0,
.group_type = group_other,
+#ifdef CONFIG_SCHED_HMP
+ .sum_nr_big_tasks = 0UL,
+ .group_cpu_load = 0ULL,
+#endif
},
};
}
@@ -7226,9 +7250,7 @@ bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
local_pwr_cost = cpu_max_power_cost(local_cpu);
busiest_pwr_cost = cpu_max_power_cost(busiest_cpu);
- if (local_capacity < busiest_capacity ||
- (local_capacity == busiest_capacity &&
- local_pwr_cost <= busiest_pwr_cost))
+ if (local_pwr_cost <= busiest_pwr_cost)
return 0;
if (local_capacity > busiest_capacity &&
@@ -7354,6 +7376,8 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
struct sched_group_capacity *sgc;
struct rq *rq = cpu_rq(cpu);
+ if (cpumask_test_cpu(cpu, cpu_isolated_mask))
+ continue;
/*
* build_sched_domains() -> init_sched_groups_capacity()
* gets here before we've attached the domains to the
@@ -7381,7 +7405,11 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
group = child->groups;
do {
- capacity += group->sgc->capacity;
+ cpumask_t *cpus = sched_group_cpus(group);
+
+ /* Revisit this later. This won't work for MT domain */
+ if (!cpu_isolated(cpumask_first(cpus)))
+ capacity += group->sgc->capacity;
group = group->next;
} while (group != child->groups);
}
@@ -7521,6 +7549,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
power_cost(i, 0),
cpu_temp(i));
+ if (cpu_isolated(i))
+ continue;
+
/* Bias balancing toward cpus of our domain */
if (local_group)
load = target_load(i, load_idx);
@@ -7548,17 +7579,27 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->idle_cpus++;
}
- /* Adjust by relative CPU capacity of the group */
- sgs->group_capacity = group->sgc->capacity;
- sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
+ /* Isolated CPU has no weight */
+ if (!group->group_weight) {
+ sgs->group_capacity = 0;
+ sgs->avg_load = 0;
+ sgs->group_no_capacity = 1;
+ sgs->group_type = group_other;
+ sgs->group_weight = group->group_weight;
+ } else {
+ /* Adjust by relative CPU capacity of the group */
+ sgs->group_capacity = group->sgc->capacity;
+ sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) /
+ sgs->group_capacity;
- if (sgs->sum_nr_running)
- sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
+ sgs->group_weight = group->group_weight;
- sgs->group_weight = group->group_weight;
+ sgs->group_no_capacity = group_is_overloaded(env, sgs);
+ sgs->group_type = group_classify(group, sgs, env);
+ }
- sgs->group_no_capacity = group_is_overloaded(env, sgs);
- sgs->group_type = group_classify(group, sgs, env);
+ if (sgs->sum_nr_running)
+ sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
}
#ifdef CONFIG_SCHED_HMP
@@ -8601,6 +8642,9 @@ static int idle_balance(struct rq *this_rq)
int pulled_task = 0;
u64 curr_cost = 0;
+ if (cpu_isolated(this_cpu))
+ return 0;
+
idle_enter_fair(this_rq);
/*
@@ -8843,9 +8887,6 @@ static inline int find_new_hmp_ilb(int type)
for_each_cpu_and(ilb, nohz.idle_cpus_mask,
sched_domain_span(sd)) {
if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT ||
- (hmp_capable() &&
- cpu_max_possible_capacity(ilb) <=
- cpu_max_possible_capacity(call_cpu)) ||
cpu_max_power_cost(ilb) <=
cpu_max_power_cost(call_cpu))) {
rcu_read_unlock();
@@ -8908,16 +8949,21 @@ static void nohz_balancer_kick(int type)
return;
}
+void nohz_balance_clear_nohz_mask(int cpu)
+{
+ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
+ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+ atomic_dec(&nohz.nr_cpus);
+ }
+}
+
static inline void nohz_balance_exit_idle(int cpu)
{
if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
/*
* Completely isolated CPUs don't ever set, so we must test.
*/
- if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
- atomic_dec(&nohz.nr_cpus);
- }
+ nohz_balance_clear_nohz_mask(cpu);
clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
}
}
@@ -8974,7 +9020,7 @@ void nohz_balance_enter_idle(int cpu)
/*
* If we're a completely isolated CPU, we don't play.
*/
- if (on_null_domain(cpu_rq(cpu)))
+ if (on_null_domain(cpu_rq(cpu)) || cpu_isolated(cpu))
return;
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
@@ -9003,7 +9049,13 @@ static DEFINE_SPINLOCK(balancing);
*/
void update_max_interval(void)
{
- max_load_balance_interval = HZ*num_online_cpus()/10;
+ cpumask_t avail_mask;
+ unsigned int available_cpus;
+
+ cpumask_andnot(&avail_mask, cpu_online_mask, cpu_isolated_mask);
+ available_cpus = cpumask_weight(&avail_mask);
+
+ max_load_balance_interval = HZ*available_cpus/10;
}
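
With isolation in the picture, the rebalance interval now scales with the
CPUs that can actually take part in load balancing. A quick arithmetic check
(the HZ and CPU counts are assumed example values):

#include <stdio.h>

int main(void)
{
	unsigned int hz = 250, online = 8, isolated = 2;
	unsigned int available = online - isolated;

	/* 150 jiffies here, versus 200 if isolated CPUs still counted. */
	printf("max_load_balance_interval = %u jiffies\n",
	       hz * available / 10);
	return 0;
}
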
/*
@@ -9188,8 +9240,7 @@ static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
if (!sysctl_sched_restrict_cluster_spill || sched_boost())
return 1;
- if (hmp_capable() && cpu_max_possible_capacity(cpu) ==
- max_possible_capacity)
+ if (cpu_max_power_cost(cpu) == max_power_cost)
return 1;
rcu_read_lock();
@@ -9342,8 +9393,10 @@ void trigger_load_balance(struct rq *rq)
{
int type = NOHZ_KICK_ANY;
- /* Don't need to rebalance while attached to NULL domain */
- if (unlikely(on_null_domain(rq)))
+ /* No need to rebalance while attached to a NULL domain
+ * or while the CPU is isolated.
+ */
+ if (unlikely(on_null_domain(rq)) || cpu_isolated(cpu_of(rq)))
return;
if (time_after_eq(jiffies, rq->next_balance))
@@ -9600,11 +9653,8 @@ void free_fair_sched_group(struct task_group *tg)
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
- if (tg->se) {
- if (tg->se[i])
- remove_entity_load_avg(tg->se[i]);
+ if (tg->se)
kfree(tg->se[i]);
- }
}
kfree(tg->cfs_rq);
@@ -9652,21 +9702,29 @@ err:
return 0;
}
-void unregister_fair_sched_group(struct task_group *tg, int cpu)
+void unregister_fair_sched_group(struct task_group *tg)
{
- struct rq *rq = cpu_rq(cpu);
unsigned long flags;
+ struct rq *rq;
+ int cpu;
- /*
- * Only empty task groups can be destroyed; so we can speculatively
- * check on_list without danger of it being re-added.
- */
- if (!tg->cfs_rq[cpu]->on_list)
- return;
+ for_each_possible_cpu(cpu) {
+ if (tg->se[cpu])
+ remove_entity_load_avg(tg->se[cpu]);
- raw_spin_lock_irqsave(&rq->lock, flags);
- list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ /*
+ * Only empty task groups can be destroyed; so we can speculatively
+ * check on_list without danger of it being re-added.
+ */
+ if (!tg->cfs_rq[cpu]->on_list)
+ continue;
+
+ rq = cpu_rq(cpu);
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
@@ -9748,7 +9806,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
return 1;
}
-void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+void unregister_fair_sched_group(struct task_group *tg) { }
#endif /* CONFIG_FAIR_GROUP_SCHED */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 5002619961ce..3d5de8ba70a2 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -17,11 +17,15 @@
#include <linux/cpufreq.h>
#include <linux/list_sort.h>
#include <linux/syscore_ops.h>
+#include <linux/of.h>
#include "sched.h"
+#include "core_ctl.h"
#include <trace/events/sched.h>
+#define CSTATE_LATENCY_GRANULARITY_SHIFT (6)
+
const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
"TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
"IRQ_UPDATE"};
@@ -97,7 +101,10 @@ sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
rq->cstate = cstate; /* C1, C2 etc */
rq->wakeup_energy = wakeup_energy;
- rq->wakeup_latency = wakeup_latency;
+ /* disregard small latency delta (64 us). */
+ rq->wakeup_latency = ((wakeup_latency >>
+ CSTATE_LATENCY_GRANULARITY_SHIFT) <<
+ CSTATE_LATENCY_GRANULARITY_SHIFT);
}
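
Shifting right and then left by CSTATE_LATENCY_GRANULARITY_SHIFT clears the
low six bits, quantizing latencies into 64 us steps so that near-equal
C-states compare equal in the wakeup-latency tie-breaks in fair.c. A
standalone check of the masking:

#include <stdio.h>

#define CSTATE_LATENCY_GRANULARITY_SHIFT (6)

int main(void)
{
	int lat[] = { 10, 63, 64, 100, 500 };
	int i;

	for (i = 0; i < 5; i++)
		printf("%3d us -> %3d us\n", lat[i],
		       (lat[i] >> CSTATE_LATENCY_GRANULARITY_SHIFT) <<
		       CSTATE_LATENCY_GRANULARITY_SHIFT);
	/* 10 -> 0, 63 -> 0, 64 -> 64, 100 -> 64, 500 -> 448 */
	return 0;
}
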
/*
@@ -224,6 +231,52 @@ fail:
return ret;
}
+/*
+ * CPUs of the same microarchitecture can have slight differences in
+ * efficiency due to other factors such as cache size. The BOOST_ON_BIG
+ * policy may not be optimal for such systems. The required boost policy
+ * can be specified via the device tree to handle this.
+ */
+static int __read_mostly sched_boost_policy = SCHED_BOOST_NONE;
+
+/*
+ * This should be called after clusters are populated and
+ * the respective efficiency values are initialized.
+ */
+void init_sched_hmp_boost_policy(void)
+{
+ /*
+ * Initialize the boost type here if it is not passed from
+ * device tree.
+ */
+ if (sched_boost_policy == SCHED_BOOST_NONE) {
+ if (max_possible_efficiency != min_possible_efficiency)
+ sched_boost_policy = SCHED_BOOST_ON_BIG;
+ else
+ sched_boost_policy = SCHED_BOOST_ON_ALL;
+ }
+}
+
+void sched_hmp_parse_dt(void)
+{
+ struct device_node *sn;
+ const char *boost_policy;
+
+ if (!sched_enable_hmp)
+ return;
+
+ sn = of_find_node_by_path("/sched-hmp");
+ if (!sn)
+ return;
+
+ if (!of_property_read_string(sn, "boost-policy", &boost_policy)) {
+ if (!strcmp(boost_policy, "boost-on-big"))
+ sched_boost_policy = SCHED_BOOST_ON_BIG;
+ else if (!strcmp(boost_policy, "boost-on-all"))
+ sched_boost_policy = SCHED_BOOST_ON_ALL;
+ }
+}
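
sched_hmp_parse_dt() looks up a /sched-hmp node and reads its boost-policy
string; "boost-on-big" and "boost-on-all" are the only values it recognizes.
A hypothetical device-tree fragment exercising it (node path and property
name come from the code above; the chosen value is just an example):

/ {
	sched-hmp {
		boost-policy = "boost-on-big";
	};
};
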
+
unsigned int max_possible_efficiency = 1;
unsigned int min_possible_efficiency = UINT_MAX;
@@ -356,6 +409,8 @@ DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
struct sched_cluster *sched_cluster[NR_CPUS];
int num_clusters;
+unsigned int max_power_cost = 1;
+
struct sched_cluster init_cluster = {
.list = LIST_HEAD_INIT(init_cluster.list),
.id = 0,
@@ -465,6 +520,7 @@ static void sort_clusters(void)
{
struct sched_cluster *cluster;
struct list_head new_head;
+ unsigned int tmp_max = 1;
INIT_LIST_HEAD(&new_head);
@@ -473,7 +529,11 @@ static void sort_clusters(void)
max_task_load());
cluster->min_power_cost = power_cost(cluster_first_cpu(cluster),
0);
+
+ if (cluster->max_power_cost > tmp_max)
+ tmp_max = cluster->max_power_cost;
}
+ max_power_cost = tmp_max;
move_list(&new_head, &cluster_head, true);
@@ -889,6 +949,13 @@ unsigned int __read_mostly sched_spill_load;
unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;
/*
+ * Prefer the waker CPU for a sync wakee task if that CPU has only one
+ * runnable task. This eliminates the LPM exit latency associated with
+ * the idle CPUs in the waker cluster.
+ */
+unsigned int __read_mostly sysctl_sched_prefer_sync_wakee_to_waker;
+
+/*
* Tasks whose bandwidth consumption on a cpu is more than
* sched_upmigrate are considered "big" tasks. Big tasks will be
* considered for "up" migration, i.e migrating to a cpu with better
@@ -1090,6 +1157,8 @@ int sched_set_boost(int enable)
if (!old_refcount && boost_refcount)
boost_kick_cpus();
+ if (boost_refcount <= 1)
+ core_ctl_set_boost(boost_refcount == 1);
trace_sched_set_boost(boost_refcount);
spin_unlock_irqrestore(&boost_lock, flags);
@@ -1152,12 +1221,9 @@ int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
enum sched_boost_type sched_boost_type(void)
{
- if (sched_boost()) {
- if (min_possible_efficiency != max_possible_efficiency)
- return SCHED_BOOST_ON_BIG;
- else
- return SCHED_BOOST_ON_ALL;
- }
+ if (sched_boost())
+ return sched_boost_policy;
+
return SCHED_BOOST_NONE;
}
@@ -1499,28 +1565,10 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
if (write && (old_val == *data))
goto done;
- /*
- * Special handling for sched_freq_aggregate_threshold_pct
- * which can be greater than 100. Use 1000 as an upper bound
- * value which works for all practical use cases.
- */
- if (data == &sysctl_sched_freq_aggregate_threshold_pct) {
- if (*data > 1000) {
- *data = old_val;
- ret = -EINVAL;
- goto done;
- }
- } else if (data != &sysctl_sched_select_prev_cpu_us) {
- /*
- * all tunables other than sched_select_prev_cpu_us are
- * in percentage.
- */
- if (sysctl_sched_downmigrate_pct >
- sysctl_sched_upmigrate_pct || *data > 100) {
- *data = old_val;
- ret = -EINVAL;
- goto done;
- }
+ if (sysctl_sched_downmigrate_pct > sysctl_sched_upmigrate_pct) {
+ *data = old_val;
+ ret = -EINVAL;
+ goto done;
}
/*
@@ -2698,7 +2746,8 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
u64 wallclock, u64 irqtime)
{
- if (!rq->window_start || sched_disable_window_stats)
+ if (!rq->window_start || sched_disable_window_stats ||
+ p->ravg.mark_start == wallclock)
return;
lockdep_assert_held(&rq->lock);
@@ -2828,10 +2877,10 @@ void set_window_start(struct rq *rq)
rq->curr->ravg.mark_start = rq->window_start;
}
-void migrate_sync_cpu(int cpu)
+void migrate_sync_cpu(int cpu, int new_cpu)
{
if (cpu == sync_cpu)
- sync_cpu = smp_processor_id();
+ sync_cpu = new_cpu;
}
static void reset_all_task_stats(void)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index cfec881491ef..ba4403e910d8 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -265,8 +265,12 @@ static void pull_rt_task(struct rq *this_rq);
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
- /* Try to pull RT tasks here if we lower this rq's prio */
- return rq->rt.highest_prio.curr > prev->prio;
+ /*
+ * Try to pull RT tasks here if we lower this rq's prio and the CPU is
+ * not isolated.
+ */
+ return rq->rt.highest_prio.curr > prev->prio &&
+ !cpu_isolated(cpu_of(rq));
}
static inline int rt_overloaded(struct rq *rq)
@@ -1694,6 +1698,8 @@ static int find_lowest_rq_hmp(struct task_struct *task)
for_each_sched_cluster(cluster) {
cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask);
+ cpumask_andnot(&candidate_mask, &candidate_mask,
+ cpu_isolated_mask);
if (cpumask_empty(&candidate_mask))
continue;
@@ -2282,7 +2288,8 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
* we may need to handle the pulling of RT tasks
* now.
*/
- if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
+ if (!task_on_rq_queued(p) || rq->rt.rt_nr_running ||
+ cpu_isolated(cpu_of(rq)))
return;
queue_pull_task(rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ec7721112b05..27b28369440d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -313,7 +313,7 @@ extern int tg_nop(struct task_group *tg, void *data);
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
-extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu,
struct sched_entity *parent);
@@ -1039,6 +1039,7 @@ extern unsigned int min_capacity;
extern unsigned int max_load_scale_factor;
extern unsigned int max_possible_capacity;
extern unsigned int min_max_possible_capacity;
+extern unsigned int max_power_cost;
extern unsigned int sched_upmigrate;
extern unsigned int sched_downmigrate;
extern unsigned int sched_init_task_load_windows;
@@ -1069,7 +1070,7 @@ extern void clear_boost_kick(int cpu);
extern void clear_hmp_request(int cpu);
extern void mark_task_starting(struct task_struct *p);
extern void set_window_start(struct rq *rq);
-extern void migrate_sync_cpu(int cpu);
+extern void migrate_sync_cpu(int cpu, int new_cpu);
extern void update_cluster_topology(void);
extern void set_task_last_wake(struct task_struct *p, u64 wallclock);
extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
@@ -1398,6 +1399,8 @@ extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft);
extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
struct cftype *cft, u64 upmigrate_discourage);
+extern void sched_hmp_parse_dt(void);
+extern void init_sched_hmp_boost_policy(void);
#else /* CONFIG_SCHED_HMP */
@@ -1424,7 +1427,8 @@ static inline void clear_boost_kick(int cpu) { }
static inline void clear_hmp_request(int cpu) { }
static inline void mark_task_starting(struct task_struct *p) { }
static inline void set_window_start(struct rq *rq) { }
-static inline void migrate_sync_cpu(int cpu) { }
+static inline void migrate_sync_cpu(int cpu, int new_cpu) {}
+static inline void init_clusters(void) {}
static inline void update_cluster_topology(void) { }
static inline void set_task_last_wake(struct task_struct *p, u64 wallclock) { }
static inline void set_task_last_switch_out(struct task_struct *p,
@@ -1587,6 +1591,8 @@ static inline void post_big_task_count_change(void) { }
static inline void set_hmp_defaults(void) { }
static inline void clear_reserved(int cpu) { }
+static inline void sched_hmp_parse_dt(void) {}
+static inline void init_sched_hmp_boost_policy(void) {}
#define trace_sched_cpu_load(...)
#define trace_sched_cpu_load_lb(...)
@@ -1953,6 +1959,7 @@ extern const struct sched_class idle_sched_class;
extern void update_group_capacity(struct sched_domain *sd, int cpu);
extern void trigger_load_balance(struct rq *rq);
+extern void nohz_balance_clear_nohz_mask(int cpu);
extern void idle_enter_fair(struct rq *this_rq);
extern void idle_exit_fair(struct rq *this_rq);
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index c70e0466c36c..29d8a26a78ed 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -60,17 +60,17 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
curr_time = sched_clock();
+ diff = curr_time - per_cpu(last_time, cpu);
+ BUG_ON((s64)diff < 0);
+
tmp_avg += per_cpu(nr_prod_sum, cpu);
- tmp_avg += per_cpu(nr, cpu) *
- (curr_time - per_cpu(last_time, cpu));
+ tmp_avg += per_cpu(nr, cpu) * diff;
tmp_big_avg += per_cpu(nr_big_prod_sum, cpu);
- tmp_big_avg += nr_eligible_big_tasks(cpu) *
- (curr_time - per_cpu(last_time, cpu));
+ tmp_big_avg += nr_eligible_big_tasks(cpu) * diff;
tmp_iowait += per_cpu(iowait_prod_sum, cpu);
- tmp_iowait += nr_iowait_cpu(cpu) *
- (curr_time - per_cpu(last_time, cpu));
+ tmp_iowait += nr_iowait_cpu(cpu) * diff;
per_cpu(last_time, cpu) = curr_time;
@@ -107,14 +107,15 @@ EXPORT_SYMBOL(sched_get_nr_running_avg);
*/
void sched_update_nr_prod(int cpu, long delta, bool inc)
{
- int diff;
- s64 curr_time;
+ u64 diff;
+ u64 curr_time;
unsigned long flags, nr_running;
spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
nr_running = per_cpu(nr, cpu);
curr_time = sched_clock();
diff = curr_time - per_cpu(last_time, cpu);
+ BUG_ON((s64)diff < 0);
per_cpu(last_time, cpu) = curr_time;
per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta);
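
The new BUG_ON relies on unsigned wraparound: if sched_clock() ever ran
backwards, curr_time - last_time would wrap to a huge u64 with the top bit
set, so the cast to s64 makes the regression trip loudly instead of silently
inflating the averages. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t last = 1000, curr = 900;	/* clock went backwards */
	uint64_t diff = curr - last;		/* wraps to 2^64 - 100 */

	printf("diff = %llu, negative as s64? %s\n",
	       (unsigned long long)diff,
	       ((int64_t)diff < 0) ? "yes" : "no");	/* yes */
	return 0;
}
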
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
new file mode 100644
index 000000000000..4f8182302e5e
--- /dev/null
+++ b/kernel/sched/tune.c
@@ -0,0 +1,241 @@
+#include <linux/cgroup.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+
+#include "sched.h"
+
+unsigned int sysctl_sched_cfs_boost __read_mostly;
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+
+/*
+ * EAS scheduler tunables for task groups.
+ */
+
+/* SchedTune tunables for a group of tasks */
+struct schedtune {
+ /* SchedTune CGroup subsystem */
+ struct cgroup_subsys_state css;
+
+ /* Boost group allocated ID */
+ int idx;
+
+ /* Boost value for tasks on that SchedTune CGroup */
+ int boost;
+
+};
+
+static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
+{
+ return container_of(css, struct schedtune, css);
+}
+
+static inline struct schedtune *task_schedtune(struct task_struct *tsk)
+{
+ return css_st(task_css(tsk, schedtune_cgrp_id));
+}
+
+static inline struct schedtune *parent_st(struct schedtune *st)
+{
+ return css_st(st->css.parent);
+}
+
+/*
+ * SchedTune root control group
+ * The root control group is used to define a system-wide boost tuning,
+ * which is applied to all tasks in the system.
+ * Task-specific boost tuning can be specified by creating and
+ * configuring a child control group under the root one.
+ * By default, system-wide boosting is disabled, i.e. no boosting is
+ * applied to tasks which are not in a child control group.
+ */
+static struct schedtune
+root_schedtune = {
+ .boost = 0,
+};
+
+/*
+ * Maximum number of boost groups to support
+ * When per-task boosting is used we still allow only a limited number
+ * of boost groups, for two main reasons:
+ * 1. on a real system we usually have only a few classes of workloads
+ * that make sense to boost with different values (e.g. background vs
+ * foreground tasks, interactive vs low-priority tasks)
+ * 2. a limited number allows for a simpler and more memory/time-efficient
+ * implementation, especially for the computation of the per-CPU boost
+ * value
+ */
+#define BOOSTGROUPS_COUNT 5
+
+/* Array of configured boostgroups */
+static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
+ &root_schedtune,
+ NULL,
+};
+
+/* SchedTune boost groups
+ * Keep track of all the boost groups which impact a CPU, for example when a
+ * CPU has two RUNNABLE tasks belonging to two different boost groups and thus
+ * likely with different boost values.
+ * Since on each system we expect only a limited number of boost groups, here
+ * we use a simple array to keep track of the metrics required to compute the
+ * maximum per-CPU boosting value.
+ */
+struct boost_groups {
+ /* Maximum boost value for all RUNNABLE tasks on a CPU */
+ unsigned boost_max;
+ struct {
+ /* The boost for tasks on that boost group */
+ unsigned boost;
+ /* Count of RUNNABLE tasks on that boost group */
+ unsigned tasks;
+ } group[BOOSTGROUPS_COUNT];
+};
+
+/* Boost groups affecting each CPU in the system */
+DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);
+
+static u64
+boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ struct schedtune *st = css_st(css);
+
+ return st->boost;
+}
+
+static int
+boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 boost)
+{
+ struct schedtune *st = css_st(css);
+
+ if (boost < 0 || boost > 100)
+ return -EINVAL;
+
+ st->boost = boost;
+ if (css == &root_schedtune.css)
+ sysctl_sched_cfs_boost = boost;
+
+ return 0;
+}
+
+static struct cftype files[] = {
+ {
+ .name = "boost",
+ .read_u64 = boost_read,
+ .write_u64 = boost_write,
+ },
+ { } /* terminate */
+};
+
+static int
+schedtune_boostgroup_init(struct schedtune *st)
+{
+ /* Keep track of allocated boost groups */
+ allocated_group[st->idx] = st;
+
+ return 0;
+}
+
+static int
+schedtune_init(void)
+{
+ struct boost_groups *bg;
+ int cpu;
+
+ /* Initialize the per CPU boost groups */
+ for_each_possible_cpu(cpu) {
+ bg = &per_cpu(cpu_boost_groups, cpu);
+ memset(bg, 0, sizeof(struct boost_groups));
+ }
+
+ pr_info(" schedtune configured to support %d boost groups\n",
+ BOOSTGROUPS_COUNT);
+ return 0;
+}
+
+static struct cgroup_subsys_state *
+schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ struct schedtune *st;
+ int idx;
+
+ if (!parent_css) {
+ schedtune_init();
+ return &root_schedtune.css;
+ }
+
+ /* Allow only single-level hierarchies */
+ if (parent_css != &root_schedtune.css) {
+ pr_err("Nested SchedTune boosting groups not allowed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Allow only a limited number of boosting groups */
+ for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
+ if (!allocated_group[idx])
+ break;
+ if (idx == BOOSTGROUPS_COUNT) {
+ pr_err("Trying to create more than %d SchedTune boosting groups\n",
+ BOOSTGROUPS_COUNT);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto out;
+
+ /* Initialize per-CPU boost group support */
+ st->idx = idx;
+ if (schedtune_boostgroup_init(st))
+ goto release;
+
+ return &st->css;
+
+release:
+ kfree(st);
+out:
+ return ERR_PTR(-ENOMEM);
+}
+
+static void
+schedtune_boostgroup_release(struct schedtune *st)
+{
+ /* Keep track of allocated boost groups */
+ allocated_group[st->idx] = NULL;
+}
+
+static void
+schedtune_css_free(struct cgroup_subsys_state *css)
+{
+ struct schedtune *st = css_st(css);
+
+ schedtune_boostgroup_release(st);
+ kfree(st);
+}
+
+struct cgroup_subsys schedtune_cgrp_subsys = {
+ .css_alloc = schedtune_css_alloc,
+ .css_free = schedtune_css_free,
+ .legacy_cftypes = files,
+ .early_init = 1,
+ .allow_attach = subsys_cgroup_allow_attach,
+};
+
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
+int
+sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write)
+ return ret;
+
+ return 0;
+}
+
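
The boost-group bookkeeping above stops short of the accounting that consumes it. As a rough sketch (not part of this patch; the enqueue/dequeue hooks land separately), the per-CPU maximum boost can be recomputed by scanning the group array and skipping groups with no RUNNABLE tasks. schedtune_cpu_update() is a hypothetical name used here for illustration:

static void schedtune_cpu_update(int cpu)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	unsigned boost_max = 0;
	int idx;

	for (idx = 0; idx < BOOSTGROUPS_COUNT; ++idx) {
		/* Groups with no RUNNABLE tasks do not contribute */
		if (bg->group[idx].tasks == 0)
			continue;
		boost_max = max(boost_max, bg->group[idx].boost);
	}
	bg->boost_max = boost_max;
}
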
diff --git a/kernel/smp.c b/kernel/smp.c
index abdc48cd79a3..b2ec21c5c9d6 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -766,8 +766,8 @@ void wake_up_all_idle_cpus(void)
for_each_online_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
-
- wake_up_if_idle(cpu);
+ if (!cpu_isolated(cpu))
+ wake_up_if_idle(cpu);
}
preempt_enable();
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 07fef40d1274..587dbe09c47d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -130,6 +130,9 @@ static int one_hundred = 100;
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
+#ifdef CONFIG_SCHED_HMP
+static int one_thousand = 1000;
+#endif
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
@@ -336,6 +339,8 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
+ .extra1 = &zero,
+ .extra2 = &one_hundred,
},
{
.procname = "sched_spill_nr_run",
@@ -351,6 +356,8 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
+ .extra1 = &zero,
+ .extra2 = &one_hundred,
},
{
.procname = "sched_downmigrate",
@@ -358,6 +365,8 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
+ .extra1 = &zero,
+ .extra2 = &one_hundred,
},
{
.procname = "sched_init_task_load",
@@ -365,6 +374,8 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
+ .extra1 = &zero,
+ .extra2 = &one_hundred,
},
{
.procname = "sched_select_prev_cpu_us",
@@ -372,6 +383,7 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
+ .extra1 = &zero,
},
{
.procname = "sched_enable_colocation",
@@ -397,6 +409,8 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
+ .extra1 = &zero,
+ .extra2 = &one_hundred,
},
{
.procname = "sched_big_waker_task_load",
@@ -404,6 +418,17 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
+ .extra1 = &zero,
+ .extra2 = &one_hundred,
+ },
+ {
+ .procname = "sched_prefer_sync_wakee_to_waker",
+ .data = &sysctl_sched_prefer_sync_wakee_to_waker,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
},
{
.procname = "sched_enable_thread_grouping",
@@ -440,6 +465,13 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
+ .extra1 = &zero,
+ /*
+ * Special handling for sched_freq_aggregate_threshold_pct
+ * which can be greater than 100. Use 1000 as an upper bound
+ * value which works for all practical use cases.
+ */
+ .extra2 = &one_thousand,
},
{
.procname = "sched_boost",
@@ -599,6 +631,21 @@ static struct ctl_table kern_table[] = {
.extra1 = &one,
},
#endif
+#ifdef CONFIG_SCHED_TUNE
+ {
+ .procname = "sched_cfs_boost",
+ .data = &sysctl_sched_cfs_boost,
+ .maxlen = sizeof(sysctl_sched_cfs_boost),
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+ .mode = 0444,
+#else
+ .mode = 0644,
+#endif
+ .proc_handler = &sysctl_sched_cfs_boost_handler,
+ .extra1 = &zero,
+ .extra2 = &one_hundred,
+ },
+#endif
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
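
The sysctl hunks above all follow the same clamping pattern: attach extra1/extra2 bounds and let proc_dointvec_minmax() (or a handler wrapping it, like sysctl_sched_cfs_boost_handler) reject out-of-range writes. A minimal self-contained sketch with an illustrative variable and procname:

static int example_pct;
static int example_zero;
static int example_one_hundred = 100;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_pct",
		.data		= &example_pct,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &example_zero,	/* lower bound */
		.extra2		= &example_one_hundred,	/* upper bound */
	},
	{ }	/* terminate */
};
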
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index fa909f9fd559..9e1349fc5bbe 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -880,7 +880,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
base->cpu_base->active_bases |= 1 << base->index;
- timer->state = HRTIMER_STATE_ENQUEUED;
+ timer->state |= HRTIMER_STATE_ENQUEUED;
return timerqueue_add(&base->active, &timer->node);
}
@@ -900,11 +900,9 @@ static void __remove_hrtimer(struct hrtimer *timer,
u8 newstate, int reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
- u8 state = timer->state;
- timer->state = newstate;
- if (!(state & HRTIMER_STATE_ENQUEUED))
- return;
+ if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+ goto out;
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
@@ -921,6 +919,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (reprogram && timer == cpu_base->next_timer)
hrtimer_force_reprogram(cpu_base, 1);
#endif
+
+out:
+ /*
+ * We need to preserve PINNED state here, otherwise we may end up
+ * migrating pinned hrtimers as well.
+ */
+ timer->state = newstate | (timer->state & HRTIMER_STATE_PINNED);
}
/*
@@ -949,6 +954,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
state = HRTIMER_STATE_INACTIVE;
__remove_hrtimer(timer, base, state, reprogram);
+ timer->state &= ~HRTIMER_STATE_PINNED;
return 1;
}
return 0;
@@ -1002,6 +1008,10 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
timer_stats_hrtimer_set_start_info(timer);
+ /* Update pinned state */
+ timer->state &= ~HRTIMER_STATE_PINNED;
+ timer->state |= (!!(mode & HRTIMER_MODE_PINNED)) << HRTIMER_PINNED_SHIFT;
+
leftmost = enqueue_hrtimer(timer, new_base);
if (!leftmost)
goto unlock;
@@ -1176,8 +1186,8 @@ bool hrtimer_active(const struct hrtimer *timer)
cpu_base = READ_ONCE(timer->base->cpu_base);
seq = raw_read_seqcount_begin(&cpu_base->seq);
- if (timer->state != HRTIMER_STATE_INACTIVE ||
- cpu_base->running == timer)
+ if (((timer->state & ~HRTIMER_STATE_PINNED) !=
+ HRTIMER_STATE_INACTIVE) || cpu_base->running == timer)
return true;
} while (read_seqcount_retry(&cpu_base->seq, seq) ||
@@ -1614,17 +1624,37 @@ static void init_hrtimers_cpu(int cpu)
hrtimer_init_hres(cpu_base);
}
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- struct hrtimer_clock_base *new_base)
+#if defined(CONFIG_HOTPLUG_CPU)
+static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
+ struct hrtimer_cpu_base *new_base,
+ unsigned int i,
+ bool wait,
+ bool remove_pinned)
{
struct hrtimer *timer;
struct timerqueue_node *node;
+ struct timerqueue_head pinned;
+ int is_pinned;
+ struct hrtimer_clock_base *old_c_base = &old_base->clock_base[i];
+ struct hrtimer_clock_base *new_c_base = &new_base->clock_base[i];
+
+ timerqueue_init_head(&pinned);
- while ((node = timerqueue_getnext(&old_base->active))) {
+ while ((node = timerqueue_getnext(&old_c_base->active))) {
timer = container_of(node, struct hrtimer, node);
- BUG_ON(hrtimer_callback_running(timer));
+ if (wait) {
+ /* Ensure timers are done running before continuing */
+ while (hrtimer_callback_running(timer)) {
+ raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
+ cpu_relax();
+ raw_spin_lock(&new_base->lock);
+ raw_spin_lock_nested(&old_base->lock,
+ SINGLE_DEPTH_NESTING);
+ }
+ } else {
+ BUG_ON(hrtimer_callback_running(timer));
+ }
debug_deactivate(timer);
/*
@@ -1632,8 +1662,15 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
* timer could be seen as !active and just vanish away
* under us on another CPU
*/
- __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
- timer->base = new_base;
+ __remove_hrtimer(timer, old_c_base, HRTIMER_STATE_ENQUEUED, 0);
+
+ is_pinned = timer->state & HRTIMER_STATE_PINNED;
+ if (!remove_pinned && is_pinned) {
+ timerqueue_add(&pinned, &timer->node);
+ continue;
+ }
+
+ timer->base = new_c_base;
/*
* Enqueue the timers on the new cpu. This does not
* reprogram the event device in case the timer
@@ -1642,19 +1679,25 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
* sort out already expired timers and reprogram the
* event device.
*/
- enqueue_hrtimer(timer, new_base);
+ enqueue_hrtimer(timer, new_c_base);
+ }
+
+ /* Re-queue pinned timers for the non-hotplug use case */
+ while ((node = timerqueue_getnext(&pinned))) {
+ timer = container_of(node, struct hrtimer, node);
+
+ timerqueue_del(&pinned, &timer->node);
+ enqueue_hrtimer(timer, old_c_base);
}
}
-static void migrate_hrtimers(int scpu)
+static void __migrate_hrtimers(int scpu, bool wait, bool remove_pinned)
{
struct hrtimer_cpu_base *old_base, *new_base;
+ unsigned long flags;
int i;
- BUG_ON(cpu_online(scpu));
- tick_cancel_sched_timer(scpu);
-
- local_irq_disable();
+ local_irq_save(flags);
old_base = &per_cpu(hrtimer_bases, scpu);
new_base = this_cpu_ptr(&hrtimer_bases);
/*
@@ -1665,8 +1708,8 @@ static void migrate_hrtimers(int scpu)
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- migrate_hrtimer_list(&old_base->clock_base[i],
- &new_base->clock_base[i]);
+ migrate_hrtimer_list(old_base, new_base, i, wait,
+ remove_pinned);
}
raw_spin_unlock(&old_base->lock);
@@ -1674,7 +1717,20 @@ static void migrate_hrtimers(int scpu)
/* Check, if we got expired work to do */
__hrtimer_peek_ahead_timers();
- local_irq_enable();
+ local_irq_restore(flags);
+}
+
+static void migrate_hrtimers(int scpu)
+{
+ BUG_ON(cpu_online(scpu));
+ tick_cancel_sched_timer(scpu);
+
+ __migrate_hrtimers(scpu, false, true);
+}
+
+void hrtimer_quiesce_cpu(void *cpup)
+{
+ __migrate_hrtimers(*(int *)cpup, true, false);
}
#endif /* CONFIG_HOTPLUG_CPU */
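
The hrtimer changes fold a PINNED flag into timer->state and take care to preserve it across removal. A minimal sketch of the bit handling, assuming HRTIMER_STATE_PINNED and HRTIMER_PINNED_SHIFT come from the matching header change (not shown in this diff):

static inline void hrtimer_mark_pinned(struct hrtimer *timer, bool pinned)
{
	/* clear, then conditionally set, the PINNED bit */
	timer->state &= ~HRTIMER_STATE_PINNED;
	timer->state |= (u8)pinned << HRTIMER_PINNED_SHIFT;
}

static inline bool hrtimer_is_pinned(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_PINNED;
}

This mirrors the two lines added to hrtimer_start_range_ns() above.
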
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 51896272fcde..0efb3916f5a4 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1620,56 +1620,86 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
-#ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
+#if defined(CONFIG_HOTPLUG_CPU)
+static void migrate_timer_list(struct tvec_base *new_base,
+ struct hlist_head *head, bool remove_pinned)
{
struct timer_list *timer;
int cpu = new_base->cpu;
+ struct hlist_node *n;
+ int is_pinned;
- while (!hlist_empty(head)) {
- timer = hlist_entry(head->first, struct timer_list, entry);
- /* We ignore the accounting on the dying cpu */
- detach_timer(timer, false);
+ hlist_for_each_entry_safe(timer, n, head, entry) {
+ is_pinned = timer->flags & TIMER_PINNED_ON_CPU;
+ if (!remove_pinned && is_pinned)
+ continue;
+
+ detach_if_pending(timer, get_timer_base(timer->flags), false);
timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
internal_add_timer(new_base, timer);
}
}
-static void migrate_timers(int cpu)
+static void __migrate_timers(int cpu, bool wait, bool remove_pinned)
{
struct tvec_base *old_base;
struct tvec_base *new_base;
+ unsigned long flags;
int i;
- BUG_ON(cpu_online(cpu));
old_base = per_cpu_ptr(&tvec_bases, cpu);
new_base = get_cpu_ptr(&tvec_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- spin_lock_irq(&new_base->lock);
+ spin_lock_irqsave(&new_base->lock, flags);
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
- BUG_ON(old_base->running_timer);
+ if (wait) {
+ /* Ensure timers are done running before continuing */
+ while (old_base->running_timer) {
+ spin_unlock(&old_base->lock);
+ spin_unlock_irqrestore(&new_base->lock, flags);
+ cpu_relax();
+ spin_lock_irqsave(&new_base->lock, flags);
+ spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ }
+ } else {
+ BUG_ON(old_base->running_timer);
+ }
for (i = 0; i < TVR_SIZE; i++)
- migrate_timer_list(new_base, old_base->tv1.vec + i);
+ migrate_timer_list(new_base, old_base->tv1.vec + i,
+ remove_pinned);
for (i = 0; i < TVN_SIZE; i++) {
- migrate_timer_list(new_base, old_base->tv2.vec + i);
- migrate_timer_list(new_base, old_base->tv3.vec + i);
- migrate_timer_list(new_base, old_base->tv4.vec + i);
- migrate_timer_list(new_base, old_base->tv5.vec + i);
+ migrate_timer_list(new_base, old_base->tv2.vec + i,
+ remove_pinned);
+ migrate_timer_list(new_base, old_base->tv3.vec + i,
+ remove_pinned);
+ migrate_timer_list(new_base, old_base->tv4.vec + i,
+ remove_pinned);
+ migrate_timer_list(new_base, old_base->tv5.vec + i,
+ remove_pinned);
}
- old_base->active_timers = 0;
- old_base->all_timers = 0;
-
spin_unlock(&old_base->lock);
- spin_unlock_irq(&new_base->lock);
+ spin_unlock_irqrestore(&new_base->lock, flags);
put_cpu_ptr(&tvec_bases);
}
+/* Migrate timers from 'cpu' to this_cpu */
+static void migrate_timers(int cpu)
+{
+ BUG_ON(cpu_online(cpu));
+ __migrate_timers(cpu, false, true);
+}
+
+void timer_quiesce_cpu(void *cpup)
+{
+ __migrate_timers(*(int *)cpup, true, false);
+}
+
static int timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
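
Both __migrate_timers() and migrate_hrtimer_list() use the same wait discipline when wait is true: drop both base locks before spinning, so the running callback (which may need the old base lock) can make progress. A generic sketch of that pattern, with placeholder lock and predicate names:

static void wait_for_running_timer(spinlock_t *new_lock, spinlock_t *old_lock,
				   bool (*still_running)(void))
{
	while (still_running()) {
		spin_unlock(old_lock);
		spin_unlock(new_lock);
		cpu_relax();
		spin_lock(new_lock);
		spin_lock_nested(old_lock, SINGLE_DEPTH_NESTING);
	}
}

Reacquiring new before old, with SINGLE_DEPTH_NESTING on the second lock, matches the acquisition order used by the callers above, so lockdep stays happy.
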
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index 9270e1ac6460..49fa2e6eea98 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -15,5 +15,3 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
-EXPORT_TRACEPOINT_SYMBOL(core_ctl_set_busy);
-EXPORT_TRACEPOINT_SYMBOL(core_ctl_eval_need);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 029da92fb712..7f21591c8ec5 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -13,6 +13,7 @@
#include <linux/mm.h>
#include <linux/cpu.h>
+#include <linux/device.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -95,6 +96,7 @@ static u64 __read_mostly sample_period;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
+static DEFINE_PER_CPU(unsigned int, watchdog_en);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
@@ -586,9 +588,17 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
sched_setscheduler(current, policy, &param);
}
-static void watchdog_enable(unsigned int cpu)
+/* Must be called with hotplug lock (lock_device_hotplug()) held. */
+void watchdog_enable(unsigned int cpu)
{
struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+ unsigned int *enabled = raw_cpu_ptr(&watchdog_en);
+
+ lock_device_hotplug_assert();
+
+ if (*enabled)
+ return;
+ *enabled = 1;
/* kick off the timer for the hardlockup detector */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -606,9 +616,17 @@ static void watchdog_enable(unsigned int cpu)
__touch_watchdog();
}
-static void watchdog_disable(unsigned int cpu)
+/* Must be called with hotplug lock (lock_device_hotplug()) held. */
+void watchdog_disable(unsigned int cpu)
{
struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+ unsigned int *enabled = raw_cpu_ptr(&watchdog_en);
+
+ lock_device_hotplug_assert();
+
+ if (!*enabled)
+ return;
+ *enabled = 0;
watchdog_set_prio(SCHED_NORMAL, 0);
hrtimer_cancel(hrtimer);
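
The watchdog_en flag turns watchdog_enable()/watchdog_disable() into idempotent operations, which matters once CPU isolation paths can call them in addition to hotplug. A stripped-down sketch of the guard, with illustrative names:

static DEFINE_PER_CPU(unsigned int, feature_en);

void feature_enable(unsigned int cpu)
{
	unsigned int *enabled = per_cpu_ptr(&feature_en, cpu);

	if (*enabled)	/* already enabled: second call is a no-op */
		return;
	*enabled = 1;
	/* ... the actual per-CPU enable work goes here ... */
}
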
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 36ea0d54e05b..902657d4cac5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1020,7 +1020,7 @@ choice
config DEBUG_SPINLOCK_BITE_ON_BUG
bool "Cause a Watchdog Bite on Spinlock bug"
- depends on MSM_WATCHDOG_V2
+ depends on QCOM_WATCHDOG_V2
help
On a spinlock bug, cause a watchdog bite so that we can get the precise
state of the system captured at the time of spin dump. This is mutually
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 2b3f46c049d4..554522934c44 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -74,7 +74,7 @@ next_tag:
/* Extract a tag from the data */
tag = data[dp++];
- if (tag == 0) {
+ if (tag == ASN1_EOC) {
/* It appears to be an EOC. */
if (data[dp++] != 0)
goto invalid_eoc;
@@ -96,10 +96,8 @@ next_tag:
/* Extract the length */
len = data[dp++];
- if (len <= 0x7f) {
- dp += len;
- goto next_tag;
- }
+ if (len <= 0x7f)
+ goto check_length;
if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
/* Indefinite length */
@@ -110,14 +108,18 @@ next_tag:
}
n = len - 0x80;
- if (unlikely(n > sizeof(size_t) - 1))
+ if (unlikely(n > sizeof(len) - 1))
goto length_too_long;
if (unlikely(n > datalen - dp))
goto data_overrun_error;
- for (len = 0; n > 0; n--) {
+ len = 0;
+ for (; n > 0; n--) {
len <<= 8;
len |= data[dp++];
}
+check_length:
+ if (len > datalen - dp)
+ goto data_overrun_error;
dp += len;
goto next_tag;
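
The asn1_decoder change routes the short-form case through the same bounds check as the long-form case, so a definite length can never advance dp past datalen. A self-contained sketch of the checked length parse (indefinite length omitted; assumes the caller guarantees at least one readable byte at *dpp):

static int ber_skip_value(const unsigned char *data, size_t datalen,
			  size_t *dpp)
{
	size_t dp = *dpp, len, n;

	len = data[dp++];
	if (len > 0x7f) {
		/* long form: low 7 bits give the number of length octets */
		n = len - 0x80;
		if (n > sizeof(len) - 1 || n > datalen - dp)
			return -EBADMSG;
		for (len = 0; n > 0; n--) {
			len <<= 8;
			len |= data[dp++];
		}
	}
	if (len > datalen - dp)	/* the overrun check this patch adds */
		return -EBADMSG;
	*dpp = dp + len;
	return 0;
}
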
diff --git a/mm/vmstat.c b/mm/vmstat.c
index ca75eeecbad1..77b8eabd5446 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1390,7 +1390,7 @@ static cpumask_var_t cpu_stat_off;
static void vmstat_update(struct work_struct *w)
{
- if (refresh_cpu_vm_stats(true)) {
+ if (refresh_cpu_vm_stats(true) && !cpu_isolated(smp_processor_id())) {
/*
* Counters were updated so we expect more updates
* to occur in the future. Keep on running the
@@ -1402,7 +1402,8 @@ static void vmstat_update(struct work_struct *w)
} else {
/*
* We did not update any counters so the app may be in
- * a mode where it does not cause counter updates.
+ * a mode where it does not cause counter updates or the cpu
+ * was isolated.
* We may be uselessly running vmstat_update.
* Defer the checking for differentials to the
* shepherd thread on a different processor.
@@ -1469,7 +1470,7 @@ static void vmstat_shepherd(struct work_struct *w)
get_online_cpus();
/* Check processors whose vmstat worker threads have been disabled */
for_each_cpu(cpu, cpu_stat_off)
- if (need_update(cpu) &&
+ if (!cpu_isolated(cpu) && need_update(cpu) &&
cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
queue_delayed_work_on(cpu, vmstat_wq,
diff --git a/net/core/dev.c b/net/core/dev.c
index a299c3956daa..a4c647893e52 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3544,9 +3544,6 @@ static int netif_rx_internal(struct sk_buff *skb)
trace_netif_rx(skb);
#ifdef CONFIG_RPS
- WARN_ONCE(skb_cloned(skb), "Cloned packet from dev %s\n",
- skb->dev->name);
-
if (static_key_false(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
diff --git a/net/ipc_router/ipc_router_socket.c b/net/ipc_router/ipc_router_socket.c
index b05ab20a6f13..b15356ae26fc 100644
--- a/net/ipc_router/ipc_router_socket.c
+++ b/net/ipc_router/ipc_router_socket.c
@@ -541,10 +541,18 @@ static unsigned int msm_ipc_router_poll(struct file *file,
static int msm_ipc_router_close(struct socket *sock)
{
struct sock *sk = sock->sk;
- struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+ struct msm_ipc_port *port_ptr;
int ret;
+ if (!sk)
+ return -EINVAL;
+
lock_sock(sk);
+ port_ptr = msm_ipc_sk_port(sk);
+ if (!port_ptr) {
+ release_sock(sk);
+ return -EINVAL;
+ }
ret = msm_ipc_router_close_port(port_ptr);
msm_ipc_unload_default_node(msm_ipc_sk(sk)->default_node_vote_info);
release_sock(sk);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 11dccba474b7..36a30fab8625 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -359,11 +359,12 @@ unsigned int arpt_do_table(struct sk_buff *skb,
}
/* All zeroes == unconditional rule. */
-static inline bool unconditional(const struct arpt_arp *arp)
+static inline bool unconditional(const struct arpt_entry *e)
{
static const struct arpt_arp uncond;
- return memcmp(arp, &uncond, sizeof(uncond)) == 0;
+ return e->target_offset == sizeof(struct arpt_entry) &&
+ memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}
/* Figures out from what hook each rule can be called: returns 0 if
@@ -402,11 +403,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
/* Unconditional return/END. */
- if ((e->target_offset == sizeof(struct arpt_entry) &&
+ if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
- t->verdict < 0 && unconditional(&e->arp)) ||
- visited) {
+ t->verdict < 0) || visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
@@ -474,14 +474,12 @@ next:
return 1;
}
-static inline int check_entry(const struct arpt_entry *e, const char *name)
+static inline int check_entry(const struct arpt_entry *e)
{
const struct xt_entry_target *t;
- if (!arp_checkentry(&e->arp)) {
- duprintf("arp_tables: arp check failed %p %s.\n", e, name);
+ if (!arp_checkentry(&e->arp))
return -EINVAL;
- }
if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
return -EINVAL;
@@ -522,10 +520,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
struct xt_target *target;
int ret;
- ret = check_entry(e, name);
- if (ret)
- return ret;
-
e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
return -ENOMEM;
@@ -557,7 +551,7 @@ static bool check_underflow(const struct arpt_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;
- if (!unconditional(&e->arp))
+ if (!unconditional(e))
return false;
t = arpt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -576,9 +570,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
+ int err;
if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -590,6 +586,10 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
return -EINVAL;
}
+ err = check_entry(e);
+ if (err)
+ return err;
+
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -598,9 +598,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -1233,7 +1233,8 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1246,7 +1247,7 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
}
/* For purposes of check_entry casting the compat entry is fine */
- ret = check_entry((struct arpt_entry *)e, name);
+ ret = check_entry((struct arpt_entry *)e);
if (ret)
return ret;
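
The same hardening recurs in ip_tables and ip6_tables below: unconditional() now checks the target_offset as well as the match payload, so a rule with extra matches appended can no longer pass as unconditional. A generic sketch with illustrative struct names:

struct example_match { unsigned char bytes[16]; };

struct example_entry {
	struct example_match match;	/* all zeroes == matches everything */
	unsigned int target_offset;	/* where the target record begins */
};

static bool entry_is_unconditional(const struct example_entry *e)
{
	static const struct example_match uncond;

	/* target must directly follow the header AND match must be empty */
	return e->target_offset == sizeof(struct example_entry) &&
	       memcmp(&e->match, &uncond, sizeof(uncond)) == 0;
}
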
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index b99affad6ba1..99d46b0a4ead 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ipt_ip *ip)
+static inline bool unconditional(const struct ipt_entry *e)
{
static const struct ipt_ip uncond;
- return memcmp(ip, &uncond, sizeof(uncond)) == 0;
+ return e->target_offset == sizeof(struct ipt_entry) &&
+ memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}
@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
} else if (s == e) {
(*rulenum)++;
- if (s->target_offset == sizeof(struct ipt_entry) &&
+ if (unconditional(s) &&
strcmp(t->target.u.kernel.target->name,
XT_STANDARD_TARGET) == 0 &&
- t->verdict < 0 &&
- unconditional(&s->ip)) {
+ t->verdict < 0) {
/* Tail of chains: STANDARD target (return/policy) */
*comment = *chainname == hookname
? comments[NF_IP_TRACE_COMMENT_POLICY]
@@ -476,11 +476,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
/* Unconditional return/END. */
- if ((e->target_offset == sizeof(struct ipt_entry) &&
+ if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
- t->verdict < 0 && unconditional(&e->ip)) ||
- visited) {
+ t->verdict < 0) || visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
@@ -569,14 +568,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
}
static int
-check_entry(const struct ipt_entry *e, const char *name)
+check_entry(const struct ipt_entry *e)
{
const struct xt_entry_target *t;
- if (!ip_checkentry(&e->ip)) {
- duprintf("ip check failed %p %s.\n", e, name);
+ if (!ip_checkentry(&e->ip))
return -EINVAL;
- }
if (e->target_offset + sizeof(struct xt_entry_target) >
e->next_offset)
@@ -666,10 +663,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
- ret = check_entry(e, name);
- if (ret)
- return ret;
-
e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
return -ENOMEM;
@@ -721,7 +714,7 @@ static bool check_underflow(const struct ipt_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;
- if (!unconditional(&e->ip))
+ if (!unconditional(e))
return false;
t = ipt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -741,9 +734,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
+ int err;
if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -755,6 +750,10 @@ check_entry_size_and_hooks(struct ipt_entry *e,
return -EINVAL;
}
+ err = check_entry(e);
+ if (err)
+ return err;
+
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -763,9 +762,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -1493,7 +1492,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1506,7 +1506,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
}
/* For purposes of check_entry casting the compat entry is fine */
- ret = check_entry((struct ipt_entry *)e, name);
+ ret = check_entry((struct ipt_entry *)e);
if (ret)
return ret;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b8f7e621e16e..32027efa5033 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,7 +89,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
@@ -3428,7 +3428,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
static u32 challenge_timestamp;
static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
- u32 now;
+ u32 count, now;
/* First check our per-socket dupack rate limit. */
if (tcp_oow_rate_limited(sock_net(sk), skb,
@@ -3436,13 +3436,18 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
&tp->last_oow_ack_time))
return;
- /* Then check the check host-wide RFC 5961 rate limit. */
+ /* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
if (now != challenge_timestamp) {
+ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
challenge_timestamp = now;
- challenge_count = 0;
+ WRITE_ONCE(challenge_count, half +
+ prandom_u32_max(sysctl_tcp_challenge_ack_limit));
}
- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ count = READ_ONCE(challenge_count);
+ if (count > 0) {
+ WRITE_ONCE(challenge_count, count - 1);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
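
The challenge-ACK change replaces a deterministic per-second counter with a randomized budget, closing the side channel described in CVE-2016-5696, where an off-path attacker could probe the exact global count. A sketch of the resulting rate limiter (prandom_u32_max() is the kernel helper used above; locking and jiffies handling omitted):

static u32 challenge_timestamp;
static u32 challenge_count;

static bool challenge_ack_allowed(u32 now_sec, u32 limit)
{
	if (now_sec != challenge_timestamp) {
		u32 half = (limit + 1) >> 1;

		challenge_timestamp = now_sec;
		/* budget is uniform in [limit/2, limit*3/2) each second */
		challenge_count = half + prandom_u32_max(limit);
	}
	if (challenge_count > 0) {
		challenge_count--;
		return true;
	}
	return false;
}
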
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 99425cf2819b..6198807e06f4 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset)
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
+static inline bool unconditional(const struct ip6t_entry *e)
{
static const struct ip6t_ip6 uncond;
- return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
+ return e->target_offset == sizeof(struct ip6t_entry) &&
+ memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
}
static inline const struct xt_entry_target *
@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
} else if (s == e) {
(*rulenum)++;
- if (s->target_offset == sizeof(struct ip6t_entry) &&
+ if (unconditional(s) &&
strcmp(t->target.u.kernel.target->name,
XT_STANDARD_TARGET) == 0 &&
- t->verdict < 0 &&
- unconditional(&s->ipv6)) {
+ t->verdict < 0) {
/* Tail of chains: STANDARD target (return/policy) */
*comment = *chainname == hookname
? comments[NF_IP6_TRACE_COMMENT_POLICY]
@@ -488,11 +488,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
/* Unconditional return/END. */
- if ((e->target_offset == sizeof(struct ip6t_entry) &&
+ if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
- t->verdict < 0 &&
- unconditional(&e->ipv6)) || visited) {
+ t->verdict < 0) || visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
@@ -581,14 +580,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
}
static int
-check_entry(const struct ip6t_entry *e, const char *name)
+check_entry(const struct ip6t_entry *e)
{
const struct xt_entry_target *t;
- if (!ip6_checkentry(&e->ipv6)) {
- duprintf("ip_tables: ip check failed %p %s.\n", e, name);
+ if (!ip6_checkentry(&e->ipv6))
return -EINVAL;
- }
if (e->target_offset + sizeof(struct xt_entry_target) >
e->next_offset)
@@ -679,10 +676,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
- ret = check_entry(e, name);
- if (ret)
- return ret;
-
e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
return -ENOMEM;
@@ -733,7 +726,7 @@ static bool check_underflow(const struct ip6t_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;
- if (!unconditional(&e->ipv6))
+ if (!unconditional(e))
return false;
t = ip6t_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -753,9 +746,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
+ int err;
if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
- (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -767,6 +762,10 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
return -EINVAL;
}
+ err = check_entry(e);
+ if (err)
+ return err;
+
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -775,9 +774,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -1505,7 +1504,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
- (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1518,7 +1518,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
}
/* For purposes of check_entry casting the compat entry is fine */
- ret = check_entry((struct ip6t_entry *)e, name);
+ ret = check_entry((struct ip6t_entry *)e);
if (ret)
return ret;
diff --git a/net/rmnet_data/rmnet_data_vnd.c b/net/rmnet_data/rmnet_data_vnd.c
index 6d6893c7d99d..4e3a205551e0 100644
--- a/net/rmnet_data/rmnet_data_vnd.c
+++ b/net/rmnet_data/rmnet_data_vnd.c
@@ -928,7 +928,7 @@ int rmnet_vnd_add_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow)
list_add(&(itm->list), &(dev_conf->flow_head));
write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
- LOGD("Created flow mapping [%s][0x%08X][0x%08X]@%p",
+ LOGD("Created flow mapping [%s][0x%08X][0x%08X]@%pK",
dev->name, itm->map_flow_id, itm->tc_flow_id[0], itm);
return RMNET_CONFIG_OK;
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 3e47e0641780..23b7c76ff2d8 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -416,7 +416,7 @@ country EE: DFS-ETSI
(57240 - 65880 @ 2160), (40), NO-OUTDOOR
country EG: DFS-ETSI
- (2402 - 2482 @ 40), (20)
+ (2402 - 2482 @ 20), (20)
(5170 - 5250 @ 20), (23)
(5250 - 5330 @ 20), (23), DFS
diff --git a/security/keys/key.c b/security/keys/key.c
index ab7997ded725..534808915371 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -578,7 +578,7 @@ int key_reject_and_link(struct key *key,
mutex_unlock(&key_construction_mutex);
- if (keyring)
+ if (keyring && link_ret == 0)
__key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index f237a2188fe1..f580a1048d65 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -136,7 +136,9 @@ snd-soc-wcd9335-objs := wcd9335.o
snd-soc-wcd934x-objs := wcd934x.o
snd-soc-wcd9xxx-objs := wcd9xxx-resmgr.o wcd9xxx-mbhc.o wcd9xxx-common.o wcdcal-hwdep.o
snd-soc-wcd9xxx-v2-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o
-audio-ext-clock-objs := audio-ext-clk.o
+ifeq ($(CONFIG_COMMON_CLK_MSM), y)
+ audio-ext-clock-objs := audio-ext-clk.o
+endif
snd-soc-wcd-cpe-objs := wcd_cpe_services.o wcd_cpe_core.o
snd-soc-wsa881x-objs := wsa881x.o wsa881x-tables.o wsa881x-regmap.o wsa881x-temp-sensor.o
snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o
@@ -344,7 +346,9 @@ obj-$(CONFIG_SND_SOC_WCD9320) += snd-soc-wcd9320.o
obj-$(CONFIG_SND_SOC_WCD9330) += snd-soc-wcd9330.o
obj-$(CONFIG_SND_SOC_WCD9335) += snd-soc-wcd9335.o
obj-$(CONFIG_SND_SOC_WCD934X) += wcd934x/
-obj-$(CONFIG_AUDIO_EXT_CLK) += audio-ext-clock.o
+ifeq ($(CONFIG_COMMON_CLK_MSM), y)
+ obj-$(CONFIG_AUDIO_EXT_CLK) += audio-ext-clock.o
+endif
obj-$(CONFIG_SND_SOC_WCD9XXX) += snd-soc-wcd9xxx.o
obj-$(CONFIG_SND_SOC_WCD9XXX_V2) += snd-soc-wcd9xxx-v2.o
obj-$(CONFIG_SND_SOC_WCD_CPE) += snd-soc-wcd-cpe.o
diff --git a/sound/soc/codecs/msm_hdmi_codec_rx.c b/sound/soc/codecs/msm_hdmi_codec_rx.c
index dee66f231ceb..7d649ba2b505 100644
--- a/sound/soc/codecs/msm_hdmi_codec_rx.c
+++ b/sound/soc/codecs/msm_hdmi_codec_rx.c
@@ -318,8 +318,9 @@ static void msm_ext_disp_audio_codec_rx_dai_shutdown(
struct msm_ext_disp_audio_codec_rx_data *codec_data =
dev_get_drvdata(dai->codec->dev);
- if (!codec_data || !codec_data->ext_disp_ops.cable_status) {
- dev_err(dai->dev, "%s: codec data or cable_status is null\n",
+ if (!codec_data || !codec_data->ext_disp_ops.teardown_done ||
+ !codec_data->ext_disp_ops.cable_status) {
+ dev_err(dai->dev, "%s: codec data or teardown_done or cable_status is null\n",
__func__);
return;
}
@@ -332,6 +333,8 @@ static void msm_ext_disp_audio_codec_rx_dai_shutdown(
__func__);
}
+ codec_data->ext_disp_ops.teardown_done(
+ codec_data->ext_disp_core_pdev);
return;
}
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index 69246ac9cc87..d9d413f0a80a 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -16,6 +16,8 @@
#include <linux/stringify.h>
#include <linux/of.h>
#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/ramdump.h>
#include <sound/wcd-dsp-mgr.h>
#include "wcd-dsp-utils.h"
@@ -75,6 +77,32 @@ static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type);
#define WDSP_STATUS_IS_SET(wdsp, state) (wdsp->status & state)
+/* SSR-related status macros */
+#define WDSP_SSR_STATUS_WDSP_READY BIT(0)
+#define WDSP_SSR_STATUS_CDC_READY BIT(1)
+#define WDSP_SSR_STATUS_READY \
+ (WDSP_SSR_STATUS_WDSP_READY | WDSP_SSR_STATUS_CDC_READY)
+#define WDSP_SSR_READY_WAIT_TIMEOUT (10 * HZ)
+
+enum wdsp_ssr_type {
+
+ /* Init value, indicates there is no SSR in progress */
+ WDSP_SSR_TYPE_NO_SSR = 0,
+
+ /*
+ * Indicates WDSP crashed. The manager driver internally
+ * decides when to perform WDSP restart based on the
+ * users of wdsp. Hence there is no explicit WDSP_UP.
+ */
+ WDSP_SSR_TYPE_WDSP_DOWN,
+
+ /* Indicates codec hardware is down */
+ WDSP_SSR_TYPE_CDC_DOWN,
+
+ /* Indicates codec hardware is up, trigger to restart WDSP */
+ WDSP_SSR_TYPE_CDC_UP,
+};
+
struct wdsp_cmpnt {
/* OF node of the phandle */
@@ -96,6 +124,21 @@ struct wdsp_cmpnt {
struct wdsp_cmpnt_ops *ops;
};
+struct wdsp_ramdump_data {
+
+ /* Ramdump device */
+ void *rd_dev;
+
+ /* DMA address of the dump */
+ dma_addr_t rd_addr;
+
+ /* Virtual address of the dump */
+ void *rd_v_addr;
+
+ /* Data provided through error interrupt */
+ struct wdsp_err_signal_arg err_data;
+};
+
struct wdsp_mgr_priv {
/* Manager driver's struct device pointer */
@@ -130,8 +173,35 @@ struct wdsp_mgr_priv {
/* Lock for serializing ops called by components */
struct mutex api_mutex;
+
+ struct wdsp_ramdump_data dump_data;
+
+ /* SSR related */
+ enum wdsp_ssr_type ssr_type;
+ struct mutex ssr_mutex;
+ struct work_struct ssr_work;
+ u16 ready_status;
+ struct completion ready_compl;
};
+static char *wdsp_get_ssr_type_string(enum wdsp_ssr_type type)
+{
+ switch (type) {
+ case WDSP_SSR_TYPE_NO_SSR:
+ return "NO_SSR";
+ case WDSP_SSR_TYPE_WDSP_DOWN:
+ return "WDSP_DOWN";
+ case WDSP_SSR_TYPE_CDC_DOWN:
+ return "CDC_DOWN";
+ case WDSP_SSR_TYPE_CDC_UP:
+ return "CDC_UP";
+ default:
+ pr_err("%s: Invalid ssr_type %d\n",
+ __func__, type);
+ return "Invalid";
+ }
+}
+
static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type type)
{
switch (type) {
@@ -148,6 +218,26 @@ static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type type)
}
}
+static void __wdsp_clr_ready_locked(struct wdsp_mgr_priv *wdsp,
+ u16 value)
+{
+ wdsp->ready_status &= ~(value);
+ WDSP_DBG(wdsp, "ready_status = 0x%x", wdsp->ready_status);
+}
+
+static void __wdsp_set_ready_locked(struct wdsp_mgr_priv *wdsp,
+ u16 value, bool mark_complete)
+{
+ wdsp->ready_status |= value;
+ WDSP_DBG(wdsp, "ready_status = 0x%x", wdsp->ready_status);
+
+ if (mark_complete &&
+ wdsp->ready_status == WDSP_SSR_STATUS_READY) {
+ WDSP_DBG(wdsp, "marking ready completion");
+ complete(&wdsp->ready_compl);
+ }
+}
+
static void wdsp_broadcast_event_upseq(struct wdsp_mgr_priv *wdsp,
enum wdsp_event_type event,
void *data)
@@ -199,6 +289,18 @@ static int wdsp_unicast_event(struct wdsp_mgr_priv *wdsp,
return ret;
}
+static void wdsp_deinit_components(struct wdsp_mgr_priv *wdsp)
+{
+ struct wdsp_cmpnt *cmpnt;
+ int i;
+
+ for (i = WDSP_CMPNT_TYPE_MAX - 1; i >= 0; i--) {
+ cmpnt = WDSP_GET_COMPONENT(wdsp, i);
+ if (cmpnt && cmpnt->ops && cmpnt->ops->deinit)
+ cmpnt->ops->deinit(cmpnt->cdev, cmpnt->priv_data);
+ }
+}
+
static int wdsp_init_components(struct wdsp_mgr_priv *wdsp)
{
struct wdsp_cmpnt *cmpnt;
@@ -230,6 +332,8 @@ static int wdsp_init_components(struct wdsp_mgr_priv *wdsp)
cmpnt->ops->deinit(cmpnt->cdev,
cmpnt->priv_data);
}
+ } else {
+ wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_INIT, NULL);
}
return ret;
@@ -272,6 +376,7 @@ static int wdsp_download_segments(struct wdsp_mgr_priv *wdsp,
struct wdsp_cmpnt *ctl;
struct wdsp_img_segment *seg = NULL;
enum wdsp_event_type pre, post;
+ long status;
int ret;
ctl = WDSP_GET_COMPONENT(wdsp, WDSP_CMPNT_CONTROL);
@@ -279,9 +384,11 @@ static int wdsp_download_segments(struct wdsp_mgr_priv *wdsp,
if (type == WDSP_ELF_FLAG_RE) {
pre = WDSP_EVENT_PRE_DLOAD_CODE;
post = WDSP_EVENT_POST_DLOAD_CODE;
+ status = WDSP_STATUS_CODE_DLOADED;
} else if (type == WDSP_ELF_FLAG_WRITE) {
pre = WDSP_EVENT_PRE_DLOAD_DATA;
post = WDSP_EVENT_POST_DLOAD_DATA;
+ status = WDSP_STATUS_DATA_DLOADED;
} else {
WDSP_ERR(wdsp, "Invalid type %u", type);
return -EINVAL;
@@ -312,6 +419,8 @@ static int wdsp_download_segments(struct wdsp_mgr_priv *wdsp,
}
}
+ WDSP_SET_STATUS(wdsp, status);
+
/* Notify all components that image is downloaded */
wdsp_broadcast_event_downseq(wdsp, post, NULL);
@@ -321,42 +430,47 @@ done:
return ret;
}
-static void wdsp_load_fw_image(struct work_struct *work)
+static int wdsp_init_and_dload_code_sections(struct wdsp_mgr_priv *wdsp)
{
- struct wdsp_mgr_priv *wdsp;
- struct wdsp_cmpnt *cmpnt;
- int ret, idx;
-
- wdsp = container_of(work, struct wdsp_mgr_priv, load_fw_work);
- if (!wdsp) {
- pr_err("%s: Invalid private_data\n", __func__);
- goto done;
- }
+ int ret;
+ bool is_initialized;
- /* Initialize the components first */
- ret = wdsp_init_components(wdsp);
- if (IS_ERR_VALUE(ret))
- goto done;
+ is_initialized = WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_INITIALIZED);
- /* Set init done status */
- WDSP_SET_STATUS(wdsp, WDSP_STATUS_INITIALIZED);
+ if (!is_initialized) {
+ /* Components are not initialized yet, initialize them */
+ ret = wdsp_init_components(wdsp);
+ if (IS_ERR_VALUE(ret)) {
+ WDSP_ERR(wdsp, "INIT failed, err = %d", ret);
+ goto done;
+ }
+ WDSP_SET_STATUS(wdsp, WDSP_STATUS_INITIALIZED);
+ }
/* Download the read-execute sections of image */
ret = wdsp_download_segments(wdsp, WDSP_ELF_FLAG_RE);
if (IS_ERR_VALUE(ret)) {
WDSP_ERR(wdsp, "Error %d to download code sections", ret);
- for (idx = 0; idx < WDSP_CMPNT_TYPE_MAX; idx++) {
- cmpnt = WDSP_GET_COMPONENT(wdsp, idx);
- if (cmpnt->ops && cmpnt->ops->deinit)
- cmpnt->ops->deinit(cmpnt->cdev,
- cmpnt->priv_data);
- }
- WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_INITIALIZED);
+ goto done;
}
-
- WDSP_SET_STATUS(wdsp, WDSP_STATUS_CODE_DLOADED);
done:
- return;
+ return ret;
+}
+
+static void wdsp_load_fw_image(struct work_struct *work)
+{
+ struct wdsp_mgr_priv *wdsp;
+ int ret;
+
+ wdsp = container_of(work, struct wdsp_mgr_priv, load_fw_work);
+ if (!wdsp) {
+ pr_err("%s: Invalid private_data\n", __func__);
+ return;
+ }
+
+ ret = wdsp_init_and_dload_code_sections(wdsp);
+ if (IS_ERR_VALUE(ret))
+ WDSP_ERR(wdsp, "dload code sections failed, err = %d", ret);
}
static int wdsp_enable_dsp(struct wdsp_mgr_priv *wdsp)
@@ -377,8 +491,6 @@ static int wdsp_enable_dsp(struct wdsp_mgr_priv *wdsp)
goto done;
}
- WDSP_SET_STATUS(wdsp, WDSP_STATUS_DATA_DLOADED);
-
wdsp_broadcast_event_upseq(wdsp, WDSP_EVENT_PRE_BOOTUP, NULL);
ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL,
@@ -399,6 +511,21 @@ static int wdsp_disable_dsp(struct wdsp_mgr_priv *wdsp)
{
int ret;
+ WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
+
+ /*
+ * If a disable request arrives while SSR is in progress, set the SSR
+ * ready status to indicate WDSP is now ready. Ignore the disable
+ * event here and let the SSR handler complete the shutdown.
+ */
+ if (wdsp->ssr_type != WDSP_SSR_TYPE_NO_SSR) {
+ __wdsp_set_ready_locked(wdsp, WDSP_SSR_STATUS_WDSP_READY, true);
+ WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+ return 0;
+ }
+
+ WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+
/* Make sure wdsp is in good state */
if (!WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) {
WDSP_ERR(wdsp, "wdsp in invalid state 0x%x", wdsp->status);
@@ -478,8 +605,204 @@ static struct device *wdsp_get_dev_for_cmpnt(struct device *wdsp_dev,
return cmpnt->cdev;
}
-static int wdsp_intr_handler(struct device *wdsp_dev,
- enum wdsp_intr intr)
+static void wdsp_collect_ramdumps(struct wdsp_mgr_priv *wdsp)
+{
+ struct wdsp_img_section img_section;
+ struct wdsp_err_signal_arg *data = &wdsp->dump_data.err_data;
+ struct ramdump_segment rd_seg;
+ int ret = 0;
+
+ if (wdsp->ssr_type != WDSP_SSR_TYPE_WDSP_DOWN ||
+ !data->mem_dumps_enabled) {
+ WDSP_DBG(wdsp, "cannot dump memory, ssr_type %s, dumps %s",
+ wdsp_get_ssr_type_string(wdsp->ssr_type),
+ !(data->mem_dumps_enabled) ? "disabled" : "enabled");
+ goto done;
+ }
+
+ if (data->dump_size == 0 ||
+ data->remote_start_addr < wdsp->base_addr) {
+ WDSP_ERR(wdsp, "Invalid start addr 0x%x or dump_size 0x%zx",
+ data->remote_start_addr, data->dump_size);
+ goto done;
+ }
+
+ if (!wdsp->dump_data.rd_dev) {
+ WDSP_ERR(wdsp, "Ramdump device is not setup");
+ goto done;
+ }
+
+ WDSP_DBG(wdsp, "base_addr 0x%x, dump_start_addr 0x%x, dump_size 0x%zx",
+ wdsp->base_addr, data->remote_start_addr, data->dump_size);
+
+ /* Allocate memory for dumps */
+ wdsp->dump_data.rd_v_addr = dma_alloc_coherent(wdsp->mdev,
+ data->dump_size,
+ &wdsp->dump_data.rd_addr,
+ GFP_KERNEL);
+ if (!wdsp->dump_data.rd_v_addr)
+ goto done;
+
+ img_section.addr = data->remote_start_addr - wdsp->base_addr;
+ img_section.size = data->dump_size;
+ img_section.data = wdsp->dump_data.rd_v_addr;
+
+ ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_TRANSPORT,
+ WDSP_EVENT_READ_SECTION,
+ &img_section);
+ if (IS_ERR_VALUE(ret)) {
+ WDSP_ERR(wdsp, "Failed to read dumps, size 0x%zx at addr 0x%x",
+ img_section.size, img_section.addr);
+ goto err_read_dumps;
+ }
+
+ rd_seg.address = (unsigned long) wdsp->dump_data.rd_v_addr;
+ rd_seg.size = img_section.size;
+ rd_seg.v_address = wdsp->dump_data.rd_v_addr;
+
+ ret = do_ramdump(wdsp->dump_data.rd_dev, &rd_seg, 1);
+ if (IS_ERR_VALUE(ret))
+ WDSP_ERR(wdsp, "do_ramdump failed with error %d", ret);
+
+err_read_dumps:
+ dma_free_coherent(wdsp->mdev, data->dump_size,
+ wdsp->dump_data.rd_v_addr, wdsp->dump_data.rd_addr);
+done:
+ return;
+}
+
+static void wdsp_ssr_work_fn(struct work_struct *work)
+{
+ struct wdsp_mgr_priv *wdsp;
+ int ret;
+
+ wdsp = container_of(work, struct wdsp_mgr_priv, ssr_work);
+ if (!wdsp) {
+ pr_err("%s: Invalid private_data\n", __func__);
+ return;
+ }
+
+ WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
+
+ /* Issue ramdumps and shutdown only if DSP is currently booted */
+ if (WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) {
+ wdsp_collect_ramdumps(wdsp);
+ ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL,
+ WDSP_EVENT_DO_SHUTDOWN, NULL);
+ if (IS_ERR_VALUE(ret))
+ WDSP_ERR(wdsp, "Failed WDSP shutdown, err = %d", ret);
+
+ wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_SHUTDOWN,
+ NULL);
+ WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_BOOTED);
+ }
+
+ WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+ ret = wait_for_completion_timeout(&wdsp->ready_compl,
+ WDSP_SSR_READY_WAIT_TIMEOUT);
+ WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
+ if (ret == 0) {
+ WDSP_ERR(wdsp, "wait_for_ready timed out, status = 0x%x",
+ wdsp->ready_status);
+ goto done;
+ }
+
+ /* Data sections are to be re-downloaded on every WDSP boot */
+ WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_DATA_DLOADED);
+
+ /*
+ * Even though the code sections could possibly be retained across
+ * a DSP crash, go ahead and re-download them anyway to avoid any
+ * memory corruption from the previous crash.
+ */
+ WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_CODE_DLOADED);
+
+ /* If codec went down, then all components must be re-initialized */
+ if (wdsp->ssr_type == WDSP_SSR_TYPE_CDC_DOWN) {
+ wdsp_deinit_components(wdsp);
+ WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_INITIALIZED);
+ }
+
+ ret = wdsp_init_and_dload_code_sections(wdsp);
+ if (IS_ERR_VALUE(ret)) {
+ WDSP_ERR(wdsp, "Failed to dload code sections err = %d",
+ ret);
+ goto done;
+ }
+
+ /* SSR handling is finished, mark SSR type as NO_SSR */
+ wdsp->ssr_type = WDSP_SSR_TYPE_NO_SSR;
+done:
+ WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+}
+
+static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg,
+ enum wdsp_ssr_type ssr_type)
+{
+ enum wdsp_ssr_type current_ssr_type;
+ struct wdsp_err_signal_arg *err_data;
+
+ WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
+
+ current_ssr_type = wdsp->ssr_type;
+ WDSP_DBG(wdsp, "Current ssr_type %s, handling ssr_type %s",
+ wdsp_get_ssr_type_string(current_ssr_type),
+ wdsp_get_ssr_type_string(ssr_type));
+ wdsp->ssr_type = ssr_type;
+
+ if (arg) {
+ err_data = (struct wdsp_err_signal_arg *) arg;
+ memcpy(&wdsp->dump_data.err_data, err_data,
+ sizeof(*err_data));
+ } else {
+ memset(&wdsp->dump_data.err_data, 0,
+ sizeof(wdsp->dump_data.err_data));
+ }
+
+ switch (ssr_type) {
+
+ case WDSP_SSR_TYPE_WDSP_DOWN:
+ __wdsp_clr_ready_locked(wdsp, WDSP_SSR_STATUS_WDSP_READY);
+ wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_PRE_SHUTDOWN,
+ NULL);
+ schedule_work(&wdsp->ssr_work);
+ break;
+
+ case WDSP_SSR_TYPE_CDC_DOWN:
+ __wdsp_clr_ready_locked(wdsp, WDSP_SSR_STATUS_CDC_READY);
+ /*
+ * If DSP is booted when CDC_DOWN is received, it needs
+ * to be shut down.
+ */
+ if (WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) {
+ __wdsp_clr_ready_locked(wdsp,
+ WDSP_SSR_STATUS_WDSP_READY);
+ wdsp_broadcast_event_downseq(wdsp,
+ WDSP_EVENT_PRE_SHUTDOWN,
+ NULL);
+ }
+
+ schedule_work(&wdsp->ssr_work);
+ break;
+
+ case WDSP_SSR_TYPE_CDC_UP:
+ __wdsp_set_ready_locked(wdsp, WDSP_SSR_STATUS_CDC_READY, true);
+ break;
+
+ default:
+ WDSP_ERR(wdsp, "undefined ssr_type %d\n", ssr_type);
+ /* Revert back the ssr_type for undefined events */
+ wdsp->ssr_type = current_ssr_type;
+ break;
+ }
+
+ WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
+
+ return 0;
+}
+
+static int wdsp_signal_handler(struct device *wdsp_dev,
+ enum wdsp_signal signal, void *arg)
{
struct wdsp_mgr_priv *wdsp;
int ret;
@@ -490,19 +813,30 @@ static int wdsp_intr_handler(struct device *wdsp_dev,
wdsp = dev_get_drvdata(wdsp_dev);
WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->api_mutex);
- switch (intr) {
+ WDSP_DBG(wdsp, "Raised signal %d", signal);
+
+ switch (signal) {
case WDSP_IPC1_INTR:
ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_IPC,
WDSP_EVENT_IPC1_INTR, NULL);
break;
+ case WDSP_ERR_INTR:
+ ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_WDSP_DOWN);
+ break;
+ case WDSP_CDC_DOWN_SIGNAL:
+ ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_CDC_DOWN);
+ break;
+ case WDSP_CDC_UP_SIGNAL:
+ ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_CDC_UP);
+ break;
default:
ret = -EINVAL;
break;
}
if (IS_ERR_VALUE(ret))
- WDSP_ERR(wdsp, "handling intr %d failed with error %d",
- intr, ret);
+ WDSP_ERR(wdsp, "handling signal %d failed with error %d",
+ signal, ret);
WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->api_mutex);
return ret;
@@ -558,7 +892,7 @@ static int wdsp_resume(struct device *wdsp_dev)
static struct wdsp_mgr_ops wdsp_ops = {
.register_cmpnt_ops = wdsp_register_cmpnt_ops,
.get_dev_for_cmpnt = wdsp_get_dev_for_cmpnt,
- .intr_handler = wdsp_intr_handler,
+ .signal_handler = wdsp_signal_handler,
.vote_for_dsp = wdsp_vote_for_dsp,
.suspend = wdsp_suspend,
.resume = wdsp_resume,
@@ -585,6 +919,11 @@ static int wdsp_mgr_bind(struct device *dev)
wdsp->ops = &wdsp_ops;
+ /* Setup ramdump device */
+ wdsp->dump_data.rd_dev = create_ramdump_device("wdsp", dev);
+ if (!wdsp->dump_data.rd_dev)
+ dev_info(dev, "%s: create_ramdump_device failed\n", __func__);
+
ret = component_bind_all(dev, wdsp->ops);
if (IS_ERR_VALUE(ret))
WDSP_ERR(wdsp, "component_bind_all failed %d\n", ret);
@@ -616,6 +955,11 @@ static void wdsp_mgr_unbind(struct device *dev)
component_unbind_all(dev, wdsp->ops);
+ if (wdsp->dump_data.rd_dev) {
+ destroy_ramdump_device(wdsp->dump_data.rd_dev);
+ wdsp->dump_data.rd_dev = NULL;
+ }
+
/* Clear all status bits */
wdsp->status = 0x00;
@@ -746,6 +1090,12 @@ static int wdsp_mgr_probe(struct platform_device *pdev)
INIT_WORK(&wdsp->load_fw_work, wdsp_load_fw_image);
INIT_LIST_HEAD(wdsp->seg_list);
mutex_init(&wdsp->api_mutex);
+ mutex_init(&wdsp->ssr_mutex);
+ wdsp->ssr_type = WDSP_SSR_TYPE_NO_SSR;
+ wdsp->ready_status = WDSP_SSR_STATUS_READY;
+ INIT_WORK(&wdsp->ssr_work, wdsp_ssr_work_fn);
+ init_completion(&wdsp->ready_compl);
+ arch_setup_dma_ops(wdsp->mdev, 0, 0, NULL, 0);
dev_set_drvdata(mdev, wdsp);
ret = component_master_add_with_match(mdev, &wdsp_master_ops,
@@ -759,6 +1109,7 @@ static int wdsp_mgr_probe(struct platform_device *pdev)
err_master_add:
mutex_destroy(&wdsp->api_mutex);
+ mutex_destroy(&wdsp->ssr_mutex);
err_dt_parse:
devm_kfree(mdev, wdsp->seg_list);
devm_kfree(mdev, wdsp);
@@ -775,6 +1126,7 @@ static int wdsp_mgr_remove(struct platform_device *pdev)
component_master_del(mdev, &wdsp_master_ops);
mutex_destroy(&wdsp->api_mutex);
+ mutex_destroy(&wdsp->ssr_mutex);
devm_kfree(mdev, wdsp->seg_list);
devm_kfree(mdev, wdsp);
dev_set_drvdata(mdev, NULL);
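
Restart after SSR is gated on a two-bit handshake: the WDSP users and the codec each set their ready bit, and only the transition to both-bits-set completes ready_compl. A minimal sketch of that gate (names mirror the driver, but this block is illustrative and assumes ssr_mutex is held by the caller):

#define READY_WDSP	BIT(0)
#define READY_CDC	BIT(1)
#define READY_ALL	(READY_WDSP | READY_CDC)

static void mark_ready(struct wdsp_mgr_priv *wdsp, u16 bit)
{
	wdsp->ready_status |= bit;
	if (wdsp->ready_status == READY_ALL)
		complete(&wdsp->ready_compl);	/* unblocks wdsp_ssr_work_fn() */
}
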
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index 5b6af14e1d94..3cbc1e7821cf 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -38,7 +38,7 @@
#define WCD_MBHC_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
SND_JACK_BTN_2 | SND_JACK_BTN_3 | \
SND_JACK_BTN_4 | SND_JACK_BTN_5 )
-#define OCP_ATTEMPT 1
+#define OCP_ATTEMPT 20
#define HS_DETECT_PLUG_TIME_MS (3 * 1000)
#define SPECIAL_HS_DETECT_TIME_MS (2 * 1000)
#define MBHC_BUTTON_PRESS_THRESHOLD_MIN 250
@@ -226,6 +226,10 @@ static const char *wcd_mbhc_get_event_string(int event)
return WCD_MBHC_STRINGIFY(WCD_EVENT_POST_DAPM_MICBIAS_2_OFF);
case WCD_EVENT_PRE_DAPM_MICBIAS_2_OFF:
return WCD_MBHC_STRINGIFY(WCD_EVENT_PRE_DAPM_MICBIAS_2_OFF);
+ case WCD_EVENT_OCP_OFF:
+ return WCD_MBHC_STRINGIFY(WCD_EVENT_OCP_OFF);
+ case WCD_EVENT_OCP_ON:
+ return WCD_MBHC_STRINGIFY(WCD_EVENT_OCP_ON);
case WCD_EVENT_INVALID:
default:
return WCD_MBHC_STRINGIFY(WCD_EVENT_INVALID);
@@ -394,6 +398,16 @@ out_micb_en:
/* Disable micbias, enable pullup & cs */
wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_PULLUP);
break;
+ case WCD_EVENT_OCP_OFF:
+ mbhc->mbhc_cb->irq_control(mbhc->codec,
+ mbhc->intr_ids->hph_left_ocp,
+ false);
+ break;
+ case WCD_EVENT_OCP_ON:
+ mbhc->mbhc_cb->irq_control(mbhc->codec,
+ mbhc->intr_ids->hph_left_ocp,
+ true);
+ break;
default:
break;
}
@@ -461,6 +475,7 @@ static void wcd_mbhc_clr_and_turnon_hph_padac(struct wcd_mbhc *mbhc)
&mbhc->hph_pa_dac_state)) {
pr_debug("%s: HPHR clear flag and enable PA\n", __func__);
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHR_PA_EN, 1);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHR_OCP_DET_EN, 1);
pa_turned_on = true;
}
mutex_unlock(&mbhc->hphr_pa_lock);
@@ -469,6 +484,7 @@ static void wcd_mbhc_clr_and_turnon_hph_padac(struct wcd_mbhc *mbhc)
&mbhc->hph_pa_dac_state)) {
pr_debug("%s: HPHL clear flag and enable PA\n", __func__);
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHL_PA_EN, 1);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHL_OCP_DET_EN, 1);
pa_turned_on = true;
}
mutex_unlock(&mbhc->hphl_pa_lock);
@@ -502,6 +518,8 @@ static void wcd_mbhc_set_and_turnoff_hph_padac(struct wcd_mbhc *mbhc)
pr_debug("%s PA is on, setting PA_OFF_ACK\n", __func__);
set_bit(WCD_MBHC_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
set_bit(WCD_MBHC_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHL_OCP_DET_EN, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPHR_OCP_DET_EN, 0);
} else {
pr_debug("%s PA is off\n", __func__);
}
@@ -2014,13 +2032,24 @@ exit:
static irqreturn_t wcd_mbhc_hphl_ocp_irq(int irq, void *data)
{
struct wcd_mbhc *mbhc = data;
+ int val;
pr_debug("%s: received HPHL OCP irq\n", __func__);
if (mbhc) {
- if ((mbhc->hphlocp_cnt < OCP_ATTEMPT) &&
- (!mbhc->hphrocp_cnt)) {
- pr_debug("%s: retry\n", __func__);
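+ /*
+ * If the registers had to be recovered, and the HPHR OCP status
+ * is also set, flag the HPHR OCP as pending so its handler can
+ * consume and ignore the spurious interrupt.
+ */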
+ if (mbhc->mbhc_cb->hph_register_recovery) {
+ if (mbhc->mbhc_cb->hph_register_recovery(mbhc)) {
+ WCD_MBHC_REG_READ(WCD_MBHC_HPHR_OCP_STATUS,
+ val);
+ if ((val != -EINVAL) && val)
+ mbhc->is_hph_ocp_pending = true;
+ goto done;
+ }
+ }
+
+ if (mbhc->hphlocp_cnt < OCP_ATTEMPT) {
mbhc->hphlocp_cnt++;
+ pr_debug("%s: retry, hphlocp_cnt: %d\n", __func__,
+ mbhc->hphlocp_cnt);
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_OCP_FSM_EN, 0);
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_OCP_FSM_EN, 1);
} else {
@@ -2035,6 +2064,7 @@ static irqreturn_t wcd_mbhc_hphl_ocp_irq(int irq, void *data)
} else {
pr_err("%s: Bad wcd9xxx_spmi private data\n", __func__);
}
+done:
return IRQ_HANDLED;
}
@@ -2043,10 +2073,26 @@ static irqreturn_t wcd_mbhc_hphr_ocp_irq(int irq, void *data)
struct wcd_mbhc *mbhc = data;
pr_debug("%s: received HPHR OCP irq\n", __func__);
- if ((mbhc->hphrocp_cnt < OCP_ATTEMPT) &&
- (!mbhc->hphlocp_cnt)) {
- pr_debug("%s: retry\n", __func__);
+
+ if (!mbhc) {
+ pr_err("%s: Bad mbhc private data\n", __func__);
+ goto done;
+ }
+
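+ /* HPHL handler flagged this HPHR OCP as spurious; consume and ignore it */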
+ if (mbhc->is_hph_ocp_pending) {
+ mbhc->is_hph_ocp_pending = false;
+ goto done;
+ }
+
+ if (mbhc->mbhc_cb->hph_register_recovery) {
+ if (mbhc->mbhc_cb->hph_register_recovery(mbhc))
+ /* registers were corrupted and have been recovered; skip retry */
+ goto done;
+ }
+ if (mbhc->hphrocp_cnt < OCP_ATTEMPT) {
mbhc->hphrocp_cnt++;
+ pr_debug("%s: retry, hphrocp_cnt: %d\n", __func__,
+ mbhc->hphrocp_cnt);
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_OCP_FSM_EN, 0);
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_OCP_FSM_EN, 1);
} else {
@@ -2057,6 +2103,7 @@ static irqreturn_t wcd_mbhc_hphr_ocp_irq(int irq, void *data)
wcd_mbhc_jack_report(mbhc, &mbhc->headset_jack,
mbhc->hph_status, WCD_MBHC_JACK_MASK);
}
+done:
return IRQ_HANDLED;
}
@@ -2271,7 +2318,7 @@ int wcd_mbhc_start(struct wcd_mbhc *mbhc,
schedule_delayed_work(&mbhc->mbhc_firmware_dwork,
usecs_to_jiffies(FW_READ_TIMEOUT));
else
- pr_err("%s: Skipping to read mbhc fw, 0x%p %p\n",
+ pr_err("%s: Skipping to read mbhc fw, 0x%pK %pK\n",
__func__, mbhc->mbhc_fw, mbhc->mbhc_cal);
}
pr_debug("%s: leave %d\n", __func__, rc);
diff --git a/sound/soc/codecs/wcd-mbhc-v2.h b/sound/soc/codecs/wcd-mbhc-v2.h
index ab42b3bb6e7d..676ec342a30a 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.h
+++ b/sound/soc/codecs/wcd-mbhc-v2.h
@@ -66,6 +66,10 @@ enum wcd_mbhc_register_function {
WCD_MBHC_ANC_DET_EN,
WCD_MBHC_FSM_STATUS,
WCD_MBHC_MUX_CTL,
+ WCD_MBHC_HPHL_OCP_DET_EN,
+ WCD_MBHC_HPHR_OCP_DET_EN,
+ WCD_MBHC_HPHL_OCP_STATUS,
+ WCD_MBHC_HPHR_OCP_STATUS,
WCD_MBHC_REG_FUNC_MAX,
};
@@ -127,6 +131,8 @@ enum wcd_notify_event {
WCD_EVENT_POST_HPHR_PA_OFF,
WCD_EVENT_PRE_HPHL_PA_OFF,
WCD_EVENT_PRE_HPHR_PA_OFF,
+ WCD_EVENT_OCP_OFF,
+ WCD_EVENT_OCP_ON,
WCD_EVENT_LAST,
};
@@ -322,7 +328,9 @@ do { \
mbhc->wcd_mbhc_regs[function].reg)) & \
(mbhc->wcd_mbhc_regs[function].mask)) >> \
(mbhc->wcd_mbhc_regs[function].offset)); \
- } \
+ } else { \
+ val = -EINVAL; \
+ } \
} while (0)
struct wcd_mbhc_cb {
@@ -365,6 +373,7 @@ struct wcd_mbhc_cb {
void (*mbhc_gnd_det_ctrl)(struct snd_soc_codec *, bool);
void (*hph_pull_down_ctrl)(struct snd_soc_codec *, bool);
void (*mbhc_moisture_config)(struct wcd_mbhc *);
+ bool (*hph_register_recovery)(struct wcd_mbhc *);
};
struct wcd_mbhc {
@@ -430,6 +439,7 @@ struct wcd_mbhc {
struct mutex hphr_pa_lock;
unsigned long intr_status;
+ bool is_hph_ocp_pending;
};
#define WCD_MBHC_CAL_SIZE(buttons, rload) ( \
sizeof(struct wcd_mbhc_general_cfg) + \
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index 3049d87c6c05..0a9c283c250c 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -319,7 +319,7 @@ static int wcd_spi_transfer_split(struct spi_device *spi,
u32 addr = data_msg->remote_addr;
u8 *data = data_msg->data;
int remain_size = data_msg->len;
- int to_xfer, loop_cnt, ret;
+ int to_xfer, loop_cnt, ret = 0;
/* Perform single writes until multi word alignment is met */
loop_cnt = 1;
@@ -631,6 +631,14 @@ static int wcd_spi_init(struct spi_device *spi)
if (IS_ERR_VALUE(ret))
goto err_wr_en;
+ /*
+ * If spi_init is called after component deinit, the hardware
+ * register state may also have been reset. Sync the regcache
+ * here so the hardware is updated to reflect the cache.
+ */
+ regcache_sync(wcd_spi->regmap);
+
regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
0x0F3D0800);
@@ -639,12 +647,10 @@ static int wcd_spi_init(struct spi_device *spi)
WCD_SPI_SLAVE_TRNS_LEN,
0xFFFF0000,
(WCD_SPI_RW_MULTI_MAX_LEN / 4) << 16);
-done:
- return ret;
-
err_wr_en:
wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
WCD_SPI_CLK_FLAG_IMMEDIATE);
+done:
return ret;
}
@@ -813,17 +819,39 @@ static int wdsp_spi_dload_section(struct spi_device *spi,
return ret;
}
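+/* Read back an image section from WDSP remote memory over SPI (e.g. for memory dump collection) */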
+static int wdsp_spi_read_section(struct spi_device *spi, void *data)
+{
+ struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+ struct wdsp_img_section *sec = data;
+ struct wcd_spi_msg msg;
+ int ret;
+
+ msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
+ msg.data = sec->data;
+ msg.len = sec->size;
+
+ dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
+ __func__, msg.remote_addr, msg.len);
+
+ ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
+ if (IS_ERR_VALUE(ret))
+ dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
+ __func__, msg.remote_addr, msg.len);
+ return ret;
+}
+
static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
enum wdsp_event_type event,
void *data)
{
struct spi_device *spi = to_spi_device(dev);
- int ret;
+ int ret = 0;
dev_dbg(&spi->dev, "%s: event type %d\n",
__func__, event);
switch (event) {
+ case WDSP_EVENT_PRE_DLOAD_CODE:
case WDSP_EVENT_PRE_DLOAD_DATA:
ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
WCD_SPI_CLK_FLAG_IMMEDIATE);
@@ -846,6 +874,11 @@ static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
case WDSP_EVENT_DLOAD_SECTION:
ret = wdsp_spi_dload_section(spi, data);
break;
+
+ case WDSP_EVENT_READ_SECTION:
+ ret = wdsp_spi_read_section(spi, data);
+ break;
+
default:
dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
__func__, event);
@@ -1068,46 +1101,12 @@ static struct regmap_config wcd_spi_regmap_cfg = {
static int wdsp_spi_init(struct device *dev, void *priv_data)
{
struct spi_device *spi = to_spi_device(dev);
- struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
int ret;
- wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
- wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
-
- wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
- &spi->dev, &wcd_spi_regmap_cfg);
- if (IS_ERR(wcd_spi->regmap)) {
- ret = PTR_ERR(wcd_spi->regmap);
- dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
- __func__, ret);
- goto err_regmap;
- }
-
- if (wcd_spi_debugfs_init(spi))
- dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
-
- spi_message_init(&wcd_spi->msg1);
- spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
-
- spi_message_init(&wcd_spi->msg2);
- spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
- spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
-
ret = wcd_spi_init(spi);
- if (IS_ERR_VALUE(ret)) {
+ if (IS_ERR_VALUE(ret))
dev_err(&spi->dev, "%s: Init failed, err = %d\n",
__func__, ret);
- goto err_init;
- }
-
- return 0;
-
-err_init:
- spi_transfer_del(&wcd_spi->xfer1);
- spi_transfer_del(&wcd_spi->xfer2[0]);
- spi_transfer_del(&wcd_spi->xfer2[1]);
-
-err_regmap:
return ret;
}
@@ -1116,9 +1115,11 @@ static int wdsp_spi_deinit(struct device *dev, void *priv_data)
struct spi_device *spi = to_spi_device(dev);
struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
- spi_transfer_del(&wcd_spi->xfer1);
- spi_transfer_del(&wcd_spi->xfer2[0]);
- spi_transfer_del(&wcd_spi->xfer2[1]);
+ /*
+ * Deinit implies the hardware has been reset. Mark the regcache
+ * dirty here so the next init syncs the cache to the hardware.
+ */
+ regcache_mark_dirty(wcd_spi->regmap);
return 0;
}
@@ -1145,9 +1146,34 @@ static int wcd_spi_component_bind(struct device *dev,
ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
wcd_spi,
&wdsp_spi_ops);
- if (ret)
+ if (ret) {
dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
__func__, ret);
+ goto done;
+ }
+
+ wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
+ wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
+
+ wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
+ &spi->dev, &wcd_spi_regmap_cfg);
+ if (IS_ERR(wcd_spi->regmap)) {
+ ret = PTR_ERR(wcd_spi->regmap);
+ dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ if (wcd_spi_debugfs_init(spi))
+ dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
+
+ spi_message_init(&wcd_spi->msg1);
+ spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
+
+ spi_message_init(&wcd_spi->msg2);
+ spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
+ spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
+done:
return ret;
}
@@ -1160,6 +1186,10 @@ static void wcd_spi_component_unbind(struct device *dev,
wcd_spi->m_dev = NULL;
wcd_spi->m_ops = NULL;
+
+ spi_transfer_del(&wcd_spi->xfer1);
+ spi_transfer_del(&wcd_spi->xfer2[0]);
+ spi_transfer_del(&wcd_spi->xfer2[1]);
}
static const struct component_ops wcd_spi_component_ops = {
diff --git a/sound/soc/codecs/wcd9330.c b/sound/soc/codecs/wcd9330.c
index a8d6e0fa4732..fa396aa55ac9 100644
--- a/sound/soc/codecs/wcd9330.c
+++ b/sound/soc/codecs/wcd9330.c
@@ -5474,7 +5474,7 @@ static int tomtom_set_channel_map(struct snd_soc_dai *dai,
struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec);
struct wcd9xxx *core = dev_get_drvdata(dai->codec->dev->parent);
if (!tx_slot || !rx_slot) {
- pr_err("%s: Invalid tx_slot=%p, rx_slot=%p\n",
+ pr_err("%s: Invalid tx_slot=%pK, rx_slot=%pK\n",
__func__, tx_slot, rx_slot);
return -EINVAL;
}
@@ -5519,7 +5519,7 @@ static int tomtom_get_channel_map(struct snd_soc_dai *dai,
case AIF2_PB:
case AIF3_PB:
if (!rx_slot || !rx_num) {
- pr_err("%s: Invalid rx_slot %p or rx_num %p\n",
+ pr_err("%s: Invalid rx_slot %pK or rx_num %pK\n",
__func__, rx_slot, rx_num);
return -EINVAL;
}
@@ -5538,7 +5538,7 @@ static int tomtom_get_channel_map(struct snd_soc_dai *dai,
case AIF4_VIFEED:
case AIF4_MAD_TX:
if (!tx_slot || !tx_num) {
- pr_err("%s: Invalid tx_slot %p or tx_num %p\n",
+ pr_err("%s: Invalid tx_slot %pK or tx_num %pK\n",
__func__, tx_slot, tx_num);
return -EINVAL;
}
@@ -8228,7 +8228,7 @@ static void tomtom_compute_impedance(struct wcd9xxx_mbhc *mbhc, s16 *l, s16 *r,
struct tomtom_priv *tomtom;
if (!mbhc) {
- pr_err("%s: Invalid parameters mbhc = %p\n",
+ pr_err("%s: Invalid parameters mbhc = %pK\n",
__func__, mbhc);
return;
}
@@ -8287,7 +8287,7 @@ static void tomtom_zdet_error_approx(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
const int shift = TOMTOM_ZDET_ERROR_APPROX_SHIFT;
if (!zl || !zr || !mbhc) {
- pr_err("%s: Invalid parameters zl = %p zr = %p, mbhc = %p\n",
+ pr_err("%s: Invalid parameters zl = %pK zr = %pK, mbhc = %pK\n",
__func__, zl, zr, mbhc);
return;
}
@@ -8602,7 +8602,7 @@ static int tomtom_codec_fll_enable(struct snd_soc_codec *codec,
struct wcd9xxx *wcd9xxx;
if (!codec || !codec->control_data) {
- pr_err("%s: Invalid codec handle, %p\n",
+ pr_err("%s: Invalid codec handle, %pK\n",
__func__, codec);
return -EINVAL;
}
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 9acab619ec8e..46b8e7f72eb8 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -832,6 +832,7 @@ struct tasha_priv {
enum wcd9335_codec_event);
int spkr_gain_offset;
int spkr_mode;
+ int ear_spkr_gain;
struct hpf_work tx_hpf_work[TASHA_NUM_DECIMATORS];
struct tx_mute_work tx_mute_dwork[TASHA_NUM_DECIMATORS];
struct mutex codec_mutex;
@@ -5112,6 +5113,58 @@ static int tasha_codec_enable_swr(struct snd_soc_dapm_widget *w,
return 0;
}
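+/* Compensate RX7 digital volume for the compander gain when EAR SPKR gain is set */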
+static int tasha_codec_config_ear_spkr_gain(struct snd_soc_codec *codec,
+ int event, int gain_reg)
+{
+ int comp_gain_offset, val;
+ struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+ switch (tasha->spkr_mode) {
+ /* Compander gain in SPKR_MODE1 case is 12 dB */
+ case SPKR_MODE_1:
+ comp_gain_offset = -12;
+ break;
+ /* Default case compander gain is 15 dB */
+ default:
+ comp_gain_offset = -15;
+ break;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* Apply ear spkr gain only if compander is enabled */
+ if (tasha->comp_enabled[COMPANDER_7] &&
+ (gain_reg == WCD9335_CDC_RX7_RX_VOL_CTL ||
+ gain_reg == WCD9335_CDC_RX7_RX_VOL_MIX_CTL) &&
+ (tasha->ear_spkr_gain != 0)) {
+ /* Example: val = -12 + 5 - 1 = -8 for 4 dB of gain */
+ val = comp_gain_offset + tasha->ear_spkr_gain - 1;
+ snd_soc_write(codec, gain_reg, val);
+
+ dev_dbg(codec->dev, "%s: RX7 Volume %d dB\n",
+ __func__, val);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /*
+ * Reset RX7 volume to 0 dB if compander is enabled and
+ * ear_spkr_gain is non-zero.
+ */
+ if (tasha->comp_enabled[COMPANDER_7] &&
+ (gain_reg == WCD9335_CDC_RX7_RX_VOL_CTL ||
+ gain_reg == WCD9335_CDC_RX7_RX_VOL_MIX_CTL) &&
+ (tasha->ear_spkr_gain != 0)) {
+ snd_soc_write(codec, gain_reg, 0x0);
+
+ dev_dbg(codec->dev, "%s: Reset RX7 Volume to 0 dB\n",
+ __func__);
+ }
+ break;
+ }
+
+ return 0;
+}
+
static int tasha_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -5179,6 +5232,7 @@ static int tasha_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
val = snd_soc_read(codec, gain_reg);
val += offset_val;
snd_soc_write(codec, gain_reg, val);
+ tasha_codec_config_ear_spkr_gain(codec, event, gain_reg);
break;
case SND_SOC_DAPM_POST_PMD:
if ((tasha->spkr_gain_offset == RX_GAIN_OFFSET_M1P5_DB) &&
@@ -5201,6 +5255,7 @@ static int tasha_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
val += offset_val;
snd_soc_write(codec, gain_reg, val);
}
+ tasha_codec_config_ear_spkr_gain(codec, event, gain_reg);
break;
};
@@ -5408,6 +5463,7 @@ static int tasha_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
val = snd_soc_read(codec, gain_reg);
val += offset_val;
snd_soc_write(codec, gain_reg, val);
+ tasha_codec_config_ear_spkr_gain(codec, event, gain_reg);
break;
case SND_SOC_DAPM_POST_PMD:
tasha_config_compander(codec, w->shift, event);
@@ -5432,6 +5488,7 @@ static int tasha_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
val += offset_val;
snd_soc_write(codec, gain_reg, val);
}
+ tasha_codec_config_ear_spkr_gain(codec, event, gain_reg);
break;
};
@@ -8605,6 +8662,34 @@ static int tasha_ear_pa_gain_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int tasha_ear_spkr_pa_gain_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = tasha->ear_spkr_gain;
+
+ dev_dbg(codec->dev, "%s: ear_spkr_gain = %ld\n", __func__,
+ ucontrol->value.integer.value[0]);
+
+ return 0;
+}
+
+static int tasha_ear_spkr_pa_gain_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+
+ tasha->ear_spkr_gain = ucontrol->value.integer.value[0];
+
+ return 0;
+}
+
static int tasha_config_compander(struct snd_soc_codec *codec, int interp_n,
int event)
{
@@ -8896,14 +8981,26 @@ static const char * const tasha_ear_pa_gain_text[] = {
"G_0_DB", "G_M2P5_DB", "UNDEFINED", "G_M12_DB"
};
+static const char * const tasha_ear_spkr_pa_gain_text[] = {
+ "G_DEFAULT", "G_0_DB", "G_1_DB", "G_2_DB", "G_3_DB", "G_4_DB",
+ "G_5_DB", "G_6_DB"
+};
+
static const struct soc_enum tasha_ear_pa_gain_enum =
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tasha_ear_pa_gain_text),
tasha_ear_pa_gain_text);
+static const struct soc_enum tasha_ear_spkr_pa_gain_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tasha_ear_spkr_pa_gain_text),
+ tasha_ear_spkr_pa_gain_text);
+
static const struct snd_kcontrol_new tasha_analog_gain_controls[] = {
SOC_ENUM_EXT("EAR PA Gain", tasha_ear_pa_gain_enum,
tasha_ear_pa_gain_get, tasha_ear_pa_gain_put),
+ SOC_ENUM_EXT("EAR SPKR PA Gain", tasha_ear_spkr_pa_gain_enum,
+ tasha_ear_spkr_pa_gain_get, tasha_ear_spkr_pa_gain_put),
+
SOC_SINGLE_TLV("HPHL Volume", WCD9335_HPH_L_EN, 0, 20, 1,
line_gain),
SOC_SINGLE_TLV("HPHR Volume", WCD9335_HPH_R_EN, 0, 20, 1,
@@ -10983,7 +11080,7 @@ static int tasha_get_channel_map(struct snd_soc_dai *dai,
case AIF4_PB:
case AIF_MIX1_PB:
if (!rx_slot || !rx_num) {
- pr_err("%s: Invalid rx_slot %p or rx_num %p\n",
+ pr_err("%s: Invalid rx_slot %pK or rx_num %pK\n",
__func__, rx_slot, rx_num);
return -EINVAL;
}
@@ -11002,7 +11099,7 @@ static int tasha_get_channel_map(struct snd_soc_dai *dai,
case AIF4_MAD_TX:
case AIF4_VIFEED:
if (!tx_slot || !tx_num) {
- pr_err("%s: Invalid tx_slot %p or tx_num %p\n",
+ pr_err("%s: Invalid tx_slot %pK or tx_num %pK\n",
__func__, tx_slot, tx_num);
return -EINVAL;
}
@@ -11040,7 +11137,7 @@ static int tasha_set_channel_map(struct snd_soc_dai *dai,
core = dev_get_drvdata(dai->codec->dev->parent);
if (!tx_slot || !rx_slot) {
- pr_err("%s: Invalid tx_slot=%p, rx_slot=%p\n",
+ pr_err("%s: Invalid tx_slot=%pK, rx_slot=%pK\n",
__func__, tx_slot, rx_slot);
return -EINVAL;
}
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.c b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
index 246b3bfab876..55072466af55 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
@@ -378,6 +378,14 @@ static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
int interp_idx;
u8 pcm_rate_val;
+ if (!dsd_conf) {
+ dev_err(codec->dev, "%s: null dsd_config pointer\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(codec->dev, "%s: DSD%d, event: %d\n", __func__,
+ w->shift, event);
+
if (w->shift == DSD0) {
/* Read out select */
if (snd_soc_read(codec, WCD934X_CDC_DSD0_CFG0) & 0x02)
@@ -422,10 +430,6 @@ static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
snd_soc_write(codec, WCD934X_CDC_DSD0_CFG1,
dsd_conf->volume[DSD0]);
- if (clk_users > 1)
- snd_soc_update_bits(codec,
- WCD934X_CDC_DSD0_CFG2,
- 0x04, 0x00);
} else if (w->shift == DSD1) {
snd_soc_update_bits(codec, WCD934X_CDC_DSD1_PATH_CTL,
0x02, 0x02);
@@ -436,26 +440,35 @@ static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
/* Apply Gain */
snd_soc_write(codec, WCD934X_CDC_DSD1_CFG1,
dsd_conf->volume[DSD1]);
+ }
+ /* 10msec sleep required after DSD clock is set */
+ usleep_range(10000, 10100);
- if (clk_users > 1)
+ if (clk_users > 1) {
+ snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
+ 0x02, 0x02);
+ if (w->shift == DSD0)
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_DSD0_CFG2,
+ 0x04, 0x00);
+ if (w->shift == DSD1)
snd_soc_update_bits(codec,
WCD934X_CDC_DSD1_CFG2,
0x04, 0x00);
}
- /* 10msec sleep required after DSD clock is set */
- usleep_range(10000, 10100);
break;
case SND_SOC_DAPM_POST_PMD:
if (w->shift == DSD0) {
- snd_soc_update_bits(codec, WCD934X_CDC_DSD0_PATH_CTL,
- 0x01, 0x00);
snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2,
0x04, 0x04);
- } else if (w->shift == DSD1) {
- snd_soc_update_bits(codec, WCD934X_CDC_DSD1_PATH_CTL,
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD0_PATH_CTL,
0x01, 0x00);
+ } else if (w->shift == DSD1) {
snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2,
0x04, 0x04);
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD1_PATH_CTL,
+ 0x01, 0x00);
}
tavil_codec_enable_interp_clk(codec, event, interp_idx);
@@ -466,6 +479,7 @@ static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL,
0x01, 0x00);
tavil_dsd_data_pull(codec, 0x03, 0x04, false);
+ tavil_dsd_reset(dsd_conf);
}
break;
}
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index 225b3a755f66..8d2247176607 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -28,19 +28,22 @@
#define WCD_MEM_ENABLE_MAX_RETRIES 20
#define WCD_DSP_BOOT_TIMEOUT_MS 3000
#define WCD_SYSFS_ENTRY_MAX_LEN 8
-
-#define WCD_CNTL_MUTEX_LOCK(codec, lock) \
-{ \
- dev_dbg(codec->dev, "mutex_lock(%s)\n", \
- __func__); \
- mutex_lock(&lock); \
+#define WCD_PROCFS_ENTRY_MAX_LEN 16
+#define WCD_934X_RAMDUMP_START_ADDR 0x20100000
+#define WCD_934X_RAMDUMP_SIZE ((1024 * 1024) - 128)
+
+#define WCD_CNTL_MUTEX_LOCK(codec, lock) \
+{ \
+ dev_dbg(codec->dev, "%s: mutex_lock(%s)\n", \
+ __func__, __stringify_1(lock)); \
+ mutex_lock(&lock); \
}
-#define WCD_CNTL_MUTEX_UNLOCK(codec, lock) \
-{ \
- dev_dbg(codec->dev, "mutex_unlock(%s)\n",\
- __func__); \
- mutex_unlock(&lock); \
+#define WCD_CNTL_MUTEX_UNLOCK(codec, lock) \
+{ \
+ dev_dbg(codec->dev, "%s: mutex_unlock(%s)\n", \
+ __func__, __stringify_1(lock)); \
+ mutex_unlock(&lock); \
}
struct wcd_cntl_attribute {
@@ -147,6 +150,97 @@ static struct kobj_type wcd_cntl_ktype = {
.sysfs_ops = &wcd_cntl_sysfs_ops,
};
+static void wcd_cntl_change_online_state(struct wcd_dsp_cntl *cntl,
+ u8 online)
+{
+ struct wdsp_ssr_entry *ssr_entry = &cntl->ssr_entry;
+ unsigned long ret;
+
+ WCD_CNTL_MUTEX_LOCK(cntl->codec, cntl->ssr_mutex);
+ ssr_entry->offline = !online;
+ /* Make sure the write is complete */
+ wmb();
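+ /* Flag the state change atomically so poll() reports it exactly once */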
+ ret = xchg(&ssr_entry->offline_change, 1);
+ wake_up_interruptible(&ssr_entry->offline_poll_wait);
+ dev_dbg(cntl->codec->dev,
+ "%s: requested %u, offline %u offline_change %u, ret = %ld\n",
+ __func__, online, ssr_entry->offline,
+ ssr_entry->offline_change, ret);
+ WCD_CNTL_MUTEX_UNLOCK(cntl->codec, cntl->ssr_mutex);
+}
+
+static ssize_t wdsp_ssr_entry_read(struct snd_info_entry *entry,
+ void *file_priv_data, struct file *file,
+ char __user *buf, size_t count, loff_t pos)
+{
+ int len = 0;
+ char buffer[WCD_PROCFS_ENTRY_MAX_LEN];
+ struct wcd_dsp_cntl *cntl;
+ struct wdsp_ssr_entry *ssr_entry;
+ ssize_t ret;
+ u8 offline;
+
+ cntl = (struct wcd_dsp_cntl *) entry->private_data;
+ if (!cntl) {
+ pr_err("%s: Invalid private data for SSR procfs entry\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ssr_entry = &cntl->ssr_entry;
+
+ WCD_CNTL_MUTEX_LOCK(cntl->codec, cntl->ssr_mutex);
+ offline = ssr_entry->offline;
+ /* Make sure the read is complete */
+ rmb();
+ dev_dbg(cntl->codec->dev, "%s: offline = %s\n", __func__,
+ offline ? "true" : "false");
+ len = snprintf(buffer, sizeof(buffer), "%s\n",
+ offline ? "OFFLINE" : "ONLINE");
+ ret = simple_read_from_buffer(buf, count, &pos, buffer, len);
+ WCD_CNTL_MUTEX_UNLOCK(cntl->codec, cntl->ssr_mutex);
+
+ return ret;
+}
+
+static unsigned int wdsp_ssr_entry_poll(struct snd_info_entry *entry,
+ void *private_data, struct file *file,
+ poll_table *wait)
+{
+ struct wcd_dsp_cntl *cntl;
+ struct wdsp_ssr_entry *ssr_entry;
+ unsigned int ret = 0;
+
+ if (!entry || !entry->private_data) {
+ pr_err("%s: %s is NULL\n", __func__,
+ (!entry) ? "entry" : "private_data");
+ return -EINVAL;
+ }
+
+ cntl = (struct wcd_dsp_cntl *) entry->private_data;
+ ssr_entry = &cntl->ssr_entry;
+
+ dev_dbg(cntl->codec->dev, "%s: Poll wait, offline = %u\n",
+ __func__, ssr_entry->offline);
+ poll_wait(file, &ssr_entry->offline_poll_wait, wait);
+ dev_dbg(cntl->codec->dev, "%s: Woken up Poll wait, offline = %u\n",
+ __func__, ssr_entry->offline);
+
+ WCD_CNTL_MUTEX_LOCK(cntl->codec, cntl->ssr_mutex);
+ if (xchg(&ssr_entry->offline_change, 0))
+ ret = POLLIN | POLLPRI | POLLRDNORM;
+ dev_dbg(cntl->codec->dev, "%s: ret (%d) from poll_wait\n",
+ __func__, ret);
+ WCD_CNTL_MUTEX_UNLOCK(cntl->codec, cntl->ssr_mutex);
+
+ return ret;
+}
+
+static struct snd_info_entry_ops wdsp_ssr_entry_ops = {
+ .read = wdsp_ssr_entry_read,
+ .poll = wdsp_ssr_entry_poll,
+};
+
static int wcd_cntl_cpe_fll_calibrate(struct wcd_dsp_cntl *cntl)
{
struct snd_soc_codec *codec = cntl->codec;
@@ -429,7 +523,9 @@ static int wcd_cntl_enable_memory(struct wcd_dsp_cntl *cntl)
ARRAY_SIZE(mem_enable_values),
mem_enable_values);
- snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0x05);
+ /* Make sure Deep sleep of memories is enabled for all banks */
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0xFF);
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1, 0x0F);
done:
return ret;
}
@@ -439,6 +535,7 @@ static void wcd_cntl_disable_memory(struct wcd_dsp_cntl *cntl)
struct snd_soc_codec *codec = cntl->codec;
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0xFF);
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1, 0x0F);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3, 0xFF);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2, 0xFF);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN, 0x07);
@@ -552,8 +649,9 @@ static irqreturn_t wcd_cntl_ipc_irq(int irq, void *data)
complete(&cntl->boot_complete);
if (cntl->m_dev && cntl->m_ops &&
- cntl->m_ops->intr_handler)
- ret = cntl->m_ops->intr_handler(cntl->m_dev, WDSP_IPC1_INTR);
+ cntl->m_ops->signal_handler)
+ ret = cntl->m_ops->signal_handler(cntl->m_dev, WDSP_IPC1_INTR,
+ NULL);
else
ret = -EINVAL;
@@ -568,8 +666,10 @@ static irqreturn_t wcd_cntl_err_irq(int irq, void *data)
{
struct wcd_dsp_cntl *cntl = data;
struct snd_soc_codec *codec = cntl->codec;
+ struct wdsp_err_signal_arg arg;
u16 status = 0;
u8 reg_val;
+ int ret = 0;
reg_val = snd_soc_read(codec, WCD934X_CPE_SS_SS_ERROR_INT_STATUS_0A);
status = status | reg_val;
@@ -580,6 +680,23 @@ static irqreturn_t wcd_cntl_err_irq(int irq, void *data)
dev_info(codec->dev, "%s: error interrupt status = 0x%x\n",
__func__, status);
+ if ((status & cntl->irqs.fatal_irqs) &&
+ (cntl->m_dev && cntl->m_ops && cntl->m_ops->signal_handler)) {
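+ /* Pass dump parameters so the manager driver can collect WDSP memory */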
+ arg.mem_dumps_enabled = cntl->ramdump_enable;
+ arg.remote_start_addr = WCD_934X_RAMDUMP_START_ADDR;
+ arg.dump_size = WCD_934X_RAMDUMP_SIZE;
+ ret = cntl->m_ops->signal_handler(cntl->m_dev, WDSP_ERR_INTR,
+ &arg);
+ if (IS_ERR_VALUE(ret))
+ dev_err(cntl->codec->dev,
+ "%s: Failed to handle fatal irq 0x%x\n",
+ __func__, status & cntl->irqs.fatal_irqs);
+ wcd_cntl_change_online_state(cntl, 0);
+ } else {
+ dev_err(cntl->codec->dev,
+ "%s: no fatal irq or invalid signal_handler\n",
+ __func__);
+ }
+
return IRQ_HANDLED;
}
@@ -591,10 +708,15 @@ static int wcd_control_handler(struct device *dev, void *priv_data,
int ret = 0;
switch (event) {
+ case WDSP_EVENT_POST_INIT:
case WDSP_EVENT_POST_DLOAD_CODE:
case WDSP_EVENT_DLOAD_FAILED:
case WDSP_EVENT_POST_SHUTDOWN:
+ if (event == WDSP_EVENT_POST_DLOAD_CODE)
+ /* Mark DSP online since code download is complete */
+ wcd_cntl_change_online_state(cntl, 1);
+
/* Disable CPAR */
wcd_cntl_cpar_ctrl(cntl, false);
/* Disable all the clocks */
@@ -605,12 +727,8 @@ static int wcd_control_handler(struct device *dev, void *priv_data,
__func__, ret);
break;
- case WDSP_EVENT_PRE_DLOAD_CODE:
-
- wcd_cntl_enable_memory(cntl);
- break;
-
case WDSP_EVENT_PRE_DLOAD_DATA:
+ case WDSP_EVENT_PRE_DLOAD_CODE:
/* Enable all the clocks */
ret = wcd_cntl_clocks_enable(cntl);
@@ -623,6 +741,9 @@ static int wcd_control_handler(struct device *dev, void *priv_data,
/* Enable CPAR */
wcd_cntl_cpar_ctrl(cntl, true);
+
+ if (event == WDSP_EVENT_PRE_DLOAD_CODE)
+ wcd_cntl_enable_memory(cntl);
break;
case WDSP_EVENT_DO_BOOT:
@@ -697,6 +818,8 @@ static void wcd_cntl_debugfs_init(char *dir, struct wcd_dsp_cntl *cntl)
debugfs_create_u32("debug_mode", S_IRUGO | S_IWUSR,
cntl->entry, &cntl->debug_mode);
+ debugfs_create_bool("ramdump_enable", S_IRUGO | S_IWUSR,
+ cntl->entry, &cntl->ramdump_enable);
done:
return;
}
@@ -713,8 +836,7 @@ static int wcd_control_init(struct device *dev, void *priv_data)
struct snd_soc_codec *codec = cntl->codec;
struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
- char wcd_cntl_dir_name[WCD_CNTL_DIR_NAME_LEN_MAX];
- int ret, ret1;
+ int ret;
bool err_irq_requested = false;
ret = wcd9xxx_request_irq(core_res,
@@ -756,25 +878,8 @@ static int wcd_control_init(struct device *dev, void *priv_data)
}
wcd_cntl_cpar_ctrl(cntl, true);
- snprintf(wcd_cntl_dir_name, WCD_CNTL_DIR_NAME_LEN_MAX,
- "%s%d", "wdsp", cntl->dsp_instance);
- wcd_cntl_debugfs_init(wcd_cntl_dir_name, cntl);
- ret = wcd_cntl_sysfs_init(wcd_cntl_dir_name, cntl);
- if (IS_ERR_VALUE(ret)) {
- dev_err(codec->dev,
- "%s: Failed to init sysfs %d\n",
- __func__, ret);
- goto err_sysfs_init;
- }
-
return 0;
-err_sysfs_init:
- wcd_cntl_cpar_ctrl(cntl, false);
- ret1 = wcd_cntl_clocks_disable(cntl);
- if (IS_ERR_VALUE(ret1))
- dev_err(codec->dev, "%s: Failed to disable clocks, err = %d\n",
- __func__, ret1);
err_clk_enable:
/* Mask all error interrupts */
snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_MASK_0A, 0xFF);
@@ -796,12 +901,6 @@ static int wcd_control_deinit(struct device *dev, void *priv_data)
struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
- /* Remove the sysfs entries */
- wcd_cntl_sysfs_remove(cntl);
-
- /* Remove the debugfs entries */
- wcd_cntl_debugfs_remove(cntl);
-
wcd_cntl_clocks_disable(cntl);
wcd_cntl_cpar_ctrl(cntl, false);
@@ -827,6 +926,11 @@ static int wcd_ctrl_component_bind(struct device *dev,
void *data)
{
struct wcd_dsp_cntl *cntl;
+ struct snd_soc_codec *codec;
+ struct snd_card *card;
+ struct snd_info_entry *entry;
+ char proc_name[WCD_PROCFS_ENTRY_MAX_LEN];
+ char wcd_cntl_dir_name[WCD_CNTL_DIR_NAME_LEN_MAX];
int ret = 0;
if (!dev || !master || !data) {
@@ -844,13 +948,58 @@ static int wcd_ctrl_component_bind(struct device *dev,
cntl->m_dev = master;
cntl->m_ops = data;
- if (cntl->m_ops->register_cmpnt_ops)
- ret = cntl->m_ops->register_cmpnt_ops(master, dev, cntl,
- &control_ops);
+ if (!cntl->m_ops->register_cmpnt_ops) {
+ dev_err(dev, "%s: invalid master callback register_cmpnt_ops\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
- if (ret)
+ ret = cntl->m_ops->register_cmpnt_ops(master, dev, cntl, &control_ops);
+ if (ret) {
dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
__func__, ret);
+ goto done;
+ }
+
+ snprintf(wcd_cntl_dir_name, WCD_CNTL_DIR_NAME_LEN_MAX,
+ "%s%d", "wdsp", cntl->dsp_instance);
+ ret = wcd_cntl_sysfs_init(wcd_cntl_dir_name, cntl);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(dev, "%s: sysfs_init failed, err = %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ wcd_cntl_debugfs_init(wcd_cntl_dir_name, cntl);
+
+ codec = cntl->codec;
+ card = codec->component.card->snd_card;
+ snprintf(proc_name, WCD_PROCFS_ENTRY_MAX_LEN, "%s%d%s", "cpe",
+ cntl->dsp_instance, "_state");
+ entry = snd_info_create_card_entry(card, proc_name, card->proc_root);
+ if (!entry) {
+ /* Do not treat this as a fatal error */
+ dev_err(dev, "%s: Failed to create procfs entry %s\n",
+ __func__, proc_name);
+ goto done;
+ }
+
+ cntl->ssr_entry.entry = entry;
+ cntl->ssr_entry.offline = 1;
+ entry->size = WCD_PROCFS_ENTRY_MAX_LEN;
+ entry->content = SNDRV_INFO_CONTENT_DATA;
+ entry->c.ops = &wdsp_ssr_entry_ops;
+ entry->private_data = cntl;
+ ret = snd_info_register(entry);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(dev, "%s: Failed to register entry %s, err = %d\n",
+ __func__, proc_name, ret);
+ snd_info_free_entry(entry);
+ /* Let bind still happen even if creating the entry failed */
+ ret = 0;
+ }
+done:
return ret;
}
@@ -874,6 +1023,13 @@ static void wcd_ctrl_component_unbind(struct device *dev,
cntl->m_dev = NULL;
cntl->m_ops = NULL;
+
+ /* Remove the sysfs entries */
+ wcd_cntl_sysfs_remove(cntl);
+
+ /* Remove the debugfs entries */
+ wcd_cntl_debugfs_remove(cntl);
+
}
static const struct component_ops wcd_ctrl_component_ops = {
@@ -882,6 +1038,60 @@ static const struct component_ops wcd_ctrl_component_ops = {
};
/*
+ * wcd_dsp_ssr_event: handle the SSR event raised by caller.
+ * @cntl: Handle to the wcd_dsp_cntl structure
+ * @event: The SSR event to be handled
+ *
+ * Notifies the manager driver about the SSR event.
+ * Returns 0 on success and negative error code on error.
+ */
+int wcd_dsp_ssr_event(struct wcd_dsp_cntl *cntl, enum cdc_ssr_event event)
+{
+ int ret = 0;
+
+ if (!cntl) {
+ pr_err("%s: Invalid handle to control\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!cntl->m_dev || !cntl->m_ops || !cntl->m_ops->signal_handler) {
+ dev_err(cntl->codec->dev,
+ "%s: Invalid signal_handler callback\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case WCD_CDC_DOWN_EVENT:
+ ret = cntl->m_ops->signal_handler(cntl->m_dev,
+ WDSP_CDC_DOWN_SIGNAL,
+ NULL);
+ if (IS_ERR_VALUE(ret))
+ dev_err(cntl->codec->dev,
+ "%s: WDSP_CDC_DOWN_SIGNAL failed, err = %d\n",
+ __func__, ret);
+ wcd_cntl_change_online_state(cntl, 0);
+ break;
+ case WCD_CDC_UP_EVENT:
+ ret = cntl->m_ops->signal_handler(cntl->m_dev,
+ WDSP_CDC_UP_SIGNAL,
+ NULL);
+ if (IS_ERR_VALUE(ret))
+ dev_err(cntl->codec->dev,
+ "%s: WDSP_CDC_UP_SIGNAL failed, err = %d\n",
+ __func__, ret);
+ break;
+ default:
+ dev_err(cntl->codec->dev, "%s: Invalid event %d\n",
+ __func__, event);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(wcd_dsp_ssr_event);
+
+/*
* wcd_dsp_cntl_init: Initialize the wcd-dsp control
* @codec: pointer to the codec handle
* @params: Parameters required to initialize wcd-dsp control
@@ -929,6 +1139,8 @@ void wcd_dsp_cntl_init(struct snd_soc_codec *codec,
memcpy(&control->irqs, &params->irqs, sizeof(control->irqs));
init_completion(&control->boot_complete);
mutex_init(&control->clk_mutex);
+ mutex_init(&control->ssr_mutex);
+ init_waitqueue_head(&control->ssr_entry.offline_poll_wait);
/*
* The default state of WDSP is in SVS mode.
@@ -981,6 +1193,7 @@ void wcd_dsp_cntl_deinit(struct wcd_dsp_cntl **cntl)
component_del(codec->dev, &wcd_ctrl_component_ops);
mutex_destroy(&control->clk_mutex);
+ mutex_destroy(&control->ssr_mutex);
kfree(*cntl);
*cntl = NULL;
}
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h
index 3d6db776a0b5..83c59ed7b676 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h
@@ -17,6 +17,11 @@
#include <sound/soc.h>
#include <sound/wcd-dsp-mgr.h>
+enum cdc_ssr_event {
+ WCD_CDC_DOWN_EVENT,
+ WCD_CDC_UP_EVENT,
+};
+
struct wcd_dsp_cdc_cb {
/* Callback to enable codec clock */
int (*cdc_clk_en)(struct snd_soc_codec *, bool);
@@ -54,6 +59,13 @@ struct wcd_dsp_params {
u32 dsp_instance;
};
+struct wdsp_ssr_entry {
+ u8 offline;
+ u8 offline_change;
+ wait_queue_head_t offline_poll_wait;
+ struct snd_info_entry *entry;
+};
+
struct wcd_dsp_cntl {
/* Handle to codec */
struct snd_soc_codec *codec;
@@ -77,6 +89,7 @@ struct wcd_dsp_cntl {
/* Debugfs related */
struct dentry *entry;
u32 debug_mode;
+ bool ramdump_enable;
/* WDSP manager drivers data */
struct device *m_dev;
@@ -88,11 +101,15 @@ struct wcd_dsp_cntl {
/* Keep track of WDSP boot status */
bool is_wdsp_booted;
+
+ /* SSR related */
+ struct wdsp_ssr_entry ssr_entry;
+ struct mutex ssr_mutex;
};
void wcd_dsp_cntl_init(struct snd_soc_codec *codec,
struct wcd_dsp_params *params,
struct wcd_dsp_cntl **cntl);
void wcd_dsp_cntl_deinit(struct wcd_dsp_cntl **cntl);
-
+int wcd_dsp_ssr_event(struct wcd_dsp_cntl *cntl, enum cdc_ssr_event event);
#endif /* end __WCD_DSP_CONTROL_H__ */
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
index 20f3043656e5..b3a30eb10b92 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
@@ -118,6 +118,14 @@ static struct wcd_mbhc_register
WCD934X_MBHC_STATUS_SPARE_1, 0x01, 0, 0),
WCD_MBHC_REGISTER("WCD_MBHC_MUX_CTL",
WCD934X_MBHC_NEW_CTL_2, 0x70, 4, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_HPHL_OCP_DET_EN",
+ WCD934X_HPH_L_TEST, 0x01, 0, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_HPHR_OCP_DET_EN",
+ WCD934X_HPH_R_TEST, 0x01, 0, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_HPHL_OCP_STATUS",
+ WCD934X_INTR_PIN1_STATUS0, 0x04, 2, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_HPHR_OCP_STATUS",
+ WCD934X_INTR_PIN1_STATUS0, 0x08, 3, 0),
};
static const struct wcd_mbhc_intr intr_ids = {
@@ -778,6 +786,26 @@ static void tavil_mbhc_moisture_config(struct wcd_mbhc *mbhc)
0x0C, TAVIL_MBHC_MOISTURE_RREF << 2);
}
+static bool tavil_hph_register_recovery(struct wcd_mbhc *mbhc)
+{
+ struct snd_soc_codec *codec = mbhc->codec;
+ struct wcd934x_mbhc *wcd934x_mbhc = tavil_soc_get_mbhc(codec);
+
+ if (!wcd934x_mbhc)
+ return false;
+
+ wcd934x_mbhc->is_hph_recover = false;
+ snd_soc_dapm_force_enable_pin(snd_soc_codec_get_dapm(codec),
+ "RESET_HPH_REGISTERS");
+ snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+
+ snd_soc_dapm_disable_pin(snd_soc_codec_get_dapm(codec),
+ "RESET_HPH_REGISTERS");
+ snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
+
+ return wcd934x_mbhc->is_hph_recover;
+}
+
static const struct wcd_mbhc_cb mbhc_cb = {
.request_irq = tavil_mbhc_request_irq,
.irq_control = tavil_mbhc_irq_control,
@@ -800,6 +828,7 @@ static const struct wcd_mbhc_cb mbhc_cb = {
.mbhc_gnd_det_ctrl = tavil_mbhc_gnd_det_ctrl,
.hph_pull_down_ctrl = tavil_mbhc_hph_pull_down_ctrl,
.mbhc_moisture_config = tavil_mbhc_moisture_config,
+ .hph_register_recovery = tavil_hph_register_recovery,
};
static struct regulator *tavil_codec_find_ondemand_regulator(
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
index b747b120b605..120a7b0f8177 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
@@ -32,6 +32,7 @@ struct wcd934x_mbhc {
struct wcd9xxx *wcd9xxx;
struct fw_info *fw_data;
bool mbhc_started;
+ bool is_hph_recover;
};
extern int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
diff --git a/sound/soc/codecs/wcd934x/wcd934x-routing.h b/sound/soc/codecs/wcd934x/wcd934x-routing.h
index ac3031ffe615..940fdf89d361 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-routing.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-routing.h
@@ -21,6 +21,7 @@ const struct snd_soc_dapm_route tavil_slim_audio_map[] = {
{"AIF1 CAP", NULL, "AIF1_CAP Mixer"},
{"AIF2 CAP", NULL, "AIF2_CAP Mixer"},
{"AIF3 CAP", NULL, "AIF3_CAP Mixer"},
+ {"AIF4 MAD", NULL, "AIF4_MAD Mixer"},
/* Virtual input widget Mixer */
{"AIF1_CAP Mixer", "SLIM TX0", "SLIM TX0"},
@@ -65,6 +66,8 @@ const struct snd_soc_dapm_route tavil_slim_audio_map[] = {
{"AIF3_CAP Mixer", "SLIM TX11", "SLIM TX11"},
{"AIF3_CAP Mixer", "SLIM TX13", "SLIM TX13"},
+ {"AIF4_MAD Mixer", "SLIM TX13", "SLIM TX13"},
+
{"SLIM RX0 MUX", "AIF1_PB", "AIF1 PB"},
{"SLIM RX1 MUX", "AIF1_PB", "AIF1 PB"},
{"SLIM RX2 MUX", "AIF1_PB", "AIF1 PB"},
@@ -121,6 +124,7 @@ const struct snd_soc_dapm_route tavil_audio_map[] = {
{"MAD_INP MUX", "MAD", "MAD_SEL MUX"},
{"MAD_INP MUX", "DEC1", "ADC MUX1"},
+ {"MAD_BROADCAST", "Switch", "MAD_INP MUX"},
{"MAD_CPE1", "Switch", "MAD_INP MUX"},
{"MAD_CPE2", "Switch", "MAD_INP MUX"},
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 28914ed3f937..9e18c17d6f1c 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -123,6 +123,9 @@ static const struct snd_kcontrol_new name##_mux = \
#define WCD934X_DEC_PWR_LVL_DF 0x00
#define WCD934X_STRING_LEN 100
+#define WCD934X_DIG_CORE_REG_MIN WCD934X_CDC_ANC0_CLK_RESET_CTL
+#define WCD934X_DIG_CORE_REG_MAX 0xFFF
+
#define WCD934X_MAX_MICBIAS 4
#define DAPM_MICBIAS1_STANDALONE "MIC BIAS1 Standalone"
#define DAPM_MICBIAS2_STANDALONE "MIC BIAS2 Standalone"
@@ -141,6 +144,32 @@ static const struct snd_kcontrol_new name##_mux = \
#define TAVIL_VERSION_ENTRY_SIZE 17
+#define WCD934X_DIG_CORE_COLLAPSE_TIMER_MS (5 * 1000)
+
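+/* Digital core power gating states */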
+enum {
+ POWER_COLLAPSE,
+ POWER_RESUME,
+};
+
+static int dig_core_collapse_enable = 1;
+module_param(dig_core_collapse_enable, int,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(dig_core_collapse_enable, "enable/disable power gating");
+
+/* dig_core_collapse timer in seconds */
+static int dig_core_collapse_timer = (WCD934X_DIG_CORE_COLLAPSE_TIMER_MS/1000);
+module_param(dig_core_collapse_timer, int,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(dig_core_collapse_timer, "timer for power gating");
+
+#define TAVIL_HPH_REG_RANGE_1 (WCD934X_HPH_R_DAC_CTL - WCD934X_HPH_CNP_EN + 1)
+#define TAVIL_HPH_REG_RANGE_2 (WCD934X_HPH_NEW_ANA_HPH3 -\
+ WCD934X_HPH_NEW_ANA_HPH2 + 1)
+#define TAVIL_HPH_REG_RANGE_3 (WCD934X_HPH_NEW_INT_PA_RDAC_MISC3 -\
+ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL + 1)
+#define TAVIL_HPH_TOTAL_REG (TAVIL_HPH_REG_RANGE_1 + TAVIL_HPH_REG_RANGE_2 +\
+ TAVIL_HPH_REG_RANGE_3)
+
enum {
VI_SENSE_1,
VI_SENSE_2,
@@ -157,6 +186,7 @@ enum {
AIF3_CAP,
AIF4_PB,
AIF4_VIFEED,
+ AIF4_MAD_TX,
NUM_CODEC_DAIS,
};
@@ -272,13 +302,13 @@ static const struct wcd9xxx_ch tavil_tx_chs[WCD934X_TX_MAX] = {
};
static const u32 vport_slim_check_table[NUM_CODEC_DAIS] = {
- 0, /* AIF1_PB */
- BIT(AIF2_CAP) | BIT(AIF3_CAP), /* AIF1_CAP */
- 0, /* AIF2_PB */
- BIT(AIF1_CAP) | BIT(AIF3_CAP), /* AIF2_CAP */
- 0, /* AIF3_PB */
- BIT(AIF1_CAP) | BIT(AIF2_CAP), /* AIF3_CAP */
- 0, /* AIF4_PB */
+ 0, /* AIF1_PB */
+ BIT(AIF2_CAP) | BIT(AIF3_CAP) | BIT(AIF4_MAD_TX), /* AIF1_CAP */
+ 0, /* AIF2_PB */
+ BIT(AIF1_CAP) | BIT(AIF3_CAP) | BIT(AIF4_MAD_TX), /* AIF2_CAP */
+ 0, /* AIF3_PB */
+ BIT(AIF1_CAP) | BIT(AIF2_CAP) | BIT(AIF4_MAD_TX), /* AIF3_CAP */
+ 0, /* AIF4_PB */
};
/* Codec supports 2 IIR filters */
@@ -418,6 +448,30 @@ static struct afe_param_cdc_reg_cfg audio_reg_cfg[] = {
WCD934X_CDC_ANC0_FF_A_GAIN_CTL),
AANC_GAIN_CONTROL, 0xFF, WCD934X_REG_BITS, 0
},
+ {
+ 1,
+ (WCD934X_REGISTER_START_OFFSET +
+ SB_PGD_TX_PORT_MULTI_CHANNEL_0(0)),
+ SB_PGD_TX_PORTn_MULTI_CHNL_0, 0xFF, WCD934X_REG_BITS, 0x4
+ },
+ {
+ 1,
+ (WCD934X_REGISTER_START_OFFSET +
+ SB_PGD_TX_PORT_MULTI_CHANNEL_1(0)),
+ SB_PGD_TX_PORTn_MULTI_CHNL_1, 0xFF, WCD934X_REG_BITS, 0x4
+ },
+ {
+ 1,
+ (WCD934X_REGISTER_START_OFFSET +
+ SB_PGD_RX_PORT_MULTI_CHANNEL_0(0x180, 0)),
+ SB_PGD_RX_PORTn_MULTI_CHNL_0, 0xFF, WCD934X_REG_BITS, 0x4
+ },
+ {
+ 1,
+ (WCD934X_REGISTER_START_OFFSET +
+ SB_PGD_RX_PORT_MULTI_CHANNEL_0(0x181, 0)),
+ SB_PGD_RX_PORTn_MULTI_CHNL_1, 0xFF, WCD934X_REG_BITS, 0x4
+ },
};
static struct afe_param_cdc_reg_cfg_data tavil_audio_reg_cfg = {
@@ -530,6 +584,9 @@ struct tavil_priv {
struct wcd934x_swr swr;
struct mutex micb_lock;
+ struct delayed_work power_gate_work;
+ struct mutex power_lock;
+
struct clk *wcd_ext_clk;
/* mbhc module */
@@ -563,6 +620,8 @@ struct tavil_priv {
int main_clk_users[WCD934X_NUM_INTERPOLATORS];
struct tavil_dsd_config *dsd_config;
struct tavil_idle_detect_config idle_det_cfg;
+
+ int power_active_ref;
};
static const struct tavil_reg_mask_val tavil_spkr_default[] = {
@@ -1033,17 +1092,15 @@ static int tavil_codec_enable_anc(struct snd_soc_dapm_widget *w,
snd_soc_write(codec, reg, (val & mask));
}
+ /* Rate converter clk enable and set bypass mode */
+ snd_soc_update_bits(codec, WCD934X_CDC_ANC0_RC_COMMON_CTL,
+ 0x05, 0x05);
if (!hwdep_cal)
release_firmware(fw);
break;
- case SND_SOC_DAPM_POST_PMU:
- /* Remove ANC Rx from reset */
- snd_soc_update_bits(codec, WCD934X_CDC_ANC0_CLK_RESET_CTL,
- 0x08, 0x00);
- snd_soc_update_bits(codec, WCD934X_CDC_ANC1_CLK_RESET_CTL,
- 0x08, 0x00);
- break;
case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, WCD934X_CDC_ANC0_RC_COMMON_CTL,
+ 0x05, 0x00);
if (!strcmp(w->name, "ANC EAR PA") ||
!strcmp(w->name, "ANC SPK1 PA")) {
snd_soc_update_bits(codec, WCD934X_CDC_ANC0_MODE_1_CTL,
@@ -1215,6 +1272,8 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol,
return 0;
}
break;
+ case AIF4_MAD_TX:
+ break;
default:
dev_err(codec->dev, "Unknown AIF %d\n", dai_id);
mutex_unlock(&tavil_p->codec_mutex);
@@ -1404,6 +1463,34 @@ static int tavil_codec_enable_slim_chmask(struct wcd9xxx_codec_dai_data *dai,
return ret;
}
+static void tavil_codec_mute_dsd(struct snd_soc_codec *codec,
+ struct list_head *ch_list)
+{
+ u8 dsd0_in;
+ u8 dsd1_in;
+ struct wcd9xxx_ch *ch;
+
+ /* Read DSD Input Ports */
+ dsd0_in = (snd_soc_read(codec, WCD934X_CDC_DSD0_CFG0) & 0x3C) >> 2;
+ dsd1_in = (snd_soc_read(codec, WCD934X_CDC_DSD1_CFG0) & 0x3C) >> 2;
+
+ if ((dsd0_in == 0) && (dsd1_in == 0))
+ return;
+
+ /*
+ * Check if the ports being disabled are connected to DSD inputs.
+ * If so, enable DSD mute to prevent DC from entering the DSD filter.
+ */
+ list_for_each_entry(ch, ch_list, list) {
+ if (ch->port == (dsd0_in + WCD934X_RX_PORT_START_NUMBER - 1))
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2,
+ 0x04, 0x04);
+ if (ch->port == (dsd1_in + WCD934X_RX_PORT_START_NUMBER - 1))
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2,
+ 0x04, 0x04);
+ }
+}
+
static int tavil_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
@@ -1413,6 +1500,7 @@ static int tavil_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
int ret = 0;
struct wcd9xxx_codec_dai_data *dai;
+ struct tavil_dsd_config *dsd_conf = tavil_p->dsd_config;
core = dev_get_drvdata(codec->dev->parent);
@@ -1435,6 +1523,9 @@ static int tavil_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
&dai->grph);
break;
case SND_SOC_DAPM_POST_PMD:
+ if (dsd_conf)
+ tavil_codec_mute_dsd(codec, &dai->wcd9xxx_ch_list);
+
ret = wcd9xxx_disconnect_port(core, &dai->wcd9xxx_ch_list,
dai->grph);
dev_dbg(codec->dev, "%s: Disconnect RX port, ret = %d\n",
@@ -1771,6 +1862,12 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
0x06, (0x03 << 1));
set_bit(HPH_PA_DELAY, &tavil->status_mask);
+ if (dsd_conf &&
+ (snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01)) {
+ /* Set regulator mode to AB if DSD is enabled */
+ snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
+ 0x02, 0x02);
+ }
break;
case SND_SOC_DAPM_POST_PMU:
/*
@@ -1781,6 +1878,9 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
usleep_range(7000, 7100);
clear_bit(HPH_PA_DELAY, &tavil->status_mask);
}
+
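+ /* Enable HPHR OCP detection now that the PA is enabled */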
+ snd_soc_update_bits(codec, WCD934X_HPH_R_TEST, 0x01, 0x01);
+
/* Remove mute */
snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_CTL,
0x10, 0x00);
@@ -1797,28 +1897,33 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
WCD934X_CDC_RX2_RX_PATH_MIX_CTL,
0x10, 0x00);
if (dsd_conf &&
- (snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01)) {
- /* Set regulator mode to AB if DSD is enabled */
- snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
- 0x02, 0x02);
+ (snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2,
0x04, 0x00);
- }
tavil_codec_override(codec, tavil->hph_mode, event);
break;
case SND_SOC_DAPM_PRE_PMD:
+ blocking_notifier_call_chain(&tavil->mbhc->notifier,
+ WCD_EVENT_PRE_HPHR_PA_OFF,
+ &tavil->mbhc->wcd_mbhc);
/* Enable DSD Mute before PA disable */
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2,
0x04, 0x04);
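+ /* Disable HPHR OCP detection and mute RX2 before the PA turns off */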
+ snd_soc_update_bits(codec, WCD934X_HPH_R_TEST, 0x01, 0x00);
+ snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_CTL,
+ 0x10, 0x10);
break;
case SND_SOC_DAPM_POST_PMD:
+ /* 5ms sleep is required after PA disable */
+ usleep_range(5000, 5100);
tavil_codec_override(codec, tavil->hph_mode, event);
+ blocking_notifier_call_chain(&tavil->mbhc->notifier,
+ WCD_EVENT_POST_HPHR_PA_OFF,
+ &tavil->mbhc->wcd_mbhc);
snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
0x06, 0x0);
- /* 5ms sleep is required after PA disable */
- usleep_range(5000, 5100);
break;
};
@@ -1840,6 +1945,12 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
0x06, (0x03 << 1));
set_bit(HPH_PA_DELAY, &tavil->status_mask);
+ if (dsd_conf &&
+ (snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) {
+ /* Set regulator mode to AB if DSD is enabled */
+ snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
+ 0x02, 0x02);
+ }
break;
case SND_SOC_DAPM_POST_PMU:
/*
@@ -1850,6 +1961,7 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
usleep_range(7000, 7100);
clear_bit(HPH_PA_DELAY, &tavil->status_mask);
}
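+ /* Enable HPHL OCP detection now that the PA is enabled */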
+ snd_soc_update_bits(codec, WCD934X_HPH_L_TEST, 0x01, 0x01);
/* Remove Mute on primary path */
snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CTL,
0x10, 0x00);
@@ -1866,28 +1978,34 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
WCD934X_CDC_RX1_RX_PATH_MIX_CTL,
0x10, 0x00);
if (dsd_conf &&
- (snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) {
- /* Set regulator mode to AB if DSD is enabled */
- snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
- 0x02, 0x02);
+ (snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01))
snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2,
0x04, 0x00);
- }
tavil_codec_override(codec, tavil->hph_mode, event);
break;
case SND_SOC_DAPM_PRE_PMD:
+ blocking_notifier_call_chain(&tavil->mbhc->notifier,
+ WCD_EVENT_PRE_HPHL_PA_OFF,
+ &tavil->mbhc->wcd_mbhc);
/* Enable DSD Mute before PA disable */
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01))
snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2,
0x04, 0x04);
+
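+ /* Disable HPHL OCP detection and mute RX1 before the PA turns off */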
+ snd_soc_update_bits(codec, WCD934X_HPH_L_TEST, 0x01, 0x00);
+ snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CTL,
+ 0x10, 0x10);
break;
case SND_SOC_DAPM_POST_PMD:
+ /* 5ms sleep is required after PA disable */
+ usleep_range(5000, 5100);
tavil_codec_override(codec, tavil->hph_mode, event);
+ blocking_notifier_call_chain(&tavil->mbhc->notifier,
+ WCD_EVENT_POST_HPHL_PA_OFF,
+ &tavil->mbhc->wcd_mbhc);
snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
0x06, 0x0);
- /* 5ms sleep is required after PA disable */
- usleep_range(5000, 5100);
break;
};
@@ -2359,7 +2477,7 @@ done:
return ret;
}
-static int tavil_codec_enable_mad(struct snd_soc_codec *codec, bool enable)
+static int __tavil_codec_enable_mad(struct snd_soc_codec *codec, bool enable)
{
int rc = 0;
@@ -2404,6 +2522,29 @@ done:
return rc;
}
+static int tavil_codec_ape_enable_mad(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+ int rc = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x40, 0x40);
+ rc = __tavil_codec_enable_mad(codec, true);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x40, 0x00);
+ __tavil_codec_enable_mad(codec, false);
+ break;
+ }
+
+ dev_dbg(tavil->dev, "%s: event = %d\n", __func__, event);
+ return rc;
+}
+
static int tavil_codec_cpe_mad_ctl(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -2418,7 +2559,7 @@ static int tavil_codec_cpe_mad_ctl(struct snd_soc_dapm_widget *w,
goto done;
snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x20, 0x20);
- rc = tavil_codec_enable_mad(codec, true);
+ rc = __tavil_codec_enable_mad(codec, true);
if (IS_ERR_VALUE(rc)) {
tavil->mad_switch_cnt--;
goto done;
@@ -2431,7 +2572,7 @@ static int tavil_codec_cpe_mad_ctl(struct snd_soc_dapm_widget *w,
goto done;
snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x20, 0x00);
- tavil_codec_enable_mad(codec, false);
+ __tavil_codec_enable_mad(codec, false);
break;
}
done:
@@ -4060,13 +4201,12 @@ int tavil_codec_enable_standalone_micbias(struct snd_soc_codec *codec,
}
if (enable)
- rc = snd_soc_dapm_force_enable_pin_unlocked(
+ rc = snd_soc_dapm_force_enable_pin(
snd_soc_codec_get_dapm(codec),
micb_names[micb_index]);
else
- rc = snd_soc_dapm_disable_pin_unlocked(
- snd_soc_codec_get_dapm(codec),
- micb_names[micb_index]);
+ rc = snd_soc_dapm_disable_pin(snd_soc_codec_get_dapm(codec),
+ micb_names[micb_index]);
if (!rc)
snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
@@ -4110,6 +4250,244 @@ static int tavil_codec_enable_micbias(struct snd_soc_dapm_widget *w,
return __tavil_codec_enable_micbias(w, event);
}
+
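+/* Hardware default values used to restore the HPH register block */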
+static const struct reg_sequence tavil_hph_reset_tbl[] = {
+ { WCD934X_HPH_CNP_EN, 0x80 },
+ { WCD934X_HPH_CNP_WG_CTL, 0x9A },
+ { WCD934X_HPH_CNP_WG_TIME, 0x14 },
+ { WCD934X_HPH_OCP_CTL, 0x28 },
+ { WCD934X_HPH_AUTO_CHOP, 0x16 },
+ { WCD934X_HPH_CHOP_CTL, 0x83 },
+ { WCD934X_HPH_PA_CTL1, 0x46 },
+ { WCD934X_HPH_PA_CTL2, 0x50 },
+ { WCD934X_HPH_L_EN, 0x80 },
+ { WCD934X_HPH_L_TEST, 0xE0 },
+ { WCD934X_HPH_L_ATEST, 0x50 },
+ { WCD934X_HPH_R_EN, 0x80 },
+ { WCD934X_HPH_R_TEST, 0xE0 },
+ { WCD934X_HPH_R_ATEST, 0x54 },
+ { WCD934X_HPH_RDAC_CLK_CTL1, 0x99 },
+ { WCD934X_HPH_RDAC_CLK_CTL2, 0x9B },
+ { WCD934X_HPH_RDAC_LDO_CTL, 0x33 },
+ { WCD934X_HPH_RDAC_CHOP_CLK_LP_CTL, 0x00 },
+ { WCD934X_HPH_REFBUFF_UHQA_CTL, 0xA8 },
+ { WCD934X_HPH_REFBUFF_LP_CTL, 0x0A },
+ { WCD934X_HPH_L_DAC_CTL, 0x00 },
+ { WCD934X_HPH_R_DAC_CTL, 0x00 },
+ { WCD934X_HPH_NEW_ANA_HPH2, 0x00 },
+ { WCD934X_HPH_NEW_ANA_HPH3, 0x00 },
+ { WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0x00 },
+ { WCD934X_HPH_NEW_INT_RDAC_HD2_CTL, 0xA0 },
+ { WCD934X_HPH_NEW_INT_RDAC_VREF_CTL, 0x10 },
+ { WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL, 0x00 },
+ { WCD934X_HPH_NEW_INT_RDAC_MISC1, 0x00 },
+ { WCD934X_HPH_NEW_INT_PA_MISC1, 0x22 },
+ { WCD934X_HPH_NEW_INT_PA_MISC2, 0x00 },
+ { WCD934X_HPH_NEW_INT_PA_RDAC_MISC, 0x00 },
+ { WCD934X_HPH_NEW_INT_HPH_TIMER1, 0xFE },
+ { WCD934X_HPH_NEW_INT_HPH_TIMER2, 0x02 },
+ { WCD934X_HPH_NEW_INT_HPH_TIMER3, 0x4E },
+ { WCD934X_HPH_NEW_INT_HPH_TIMER4, 0x54 },
+ { WCD934X_HPH_NEW_INT_PA_RDAC_MISC2, 0x00 },
+ { WCD934X_HPH_NEW_INT_PA_RDAC_MISC3, 0x00 },
+};
+
+static const struct tavil_reg_mask_val tavil_pa_disable[] = {
+ { WCD934X_CDC_RX1_RX_PATH_CTL, 0x30, 0x10 }, /* RX1 mute enable */
+ { WCD934X_CDC_RX2_RX_PATH_CTL, 0x30, 0x10 }, /* RX2 mute enable */
+ { WCD934X_HPH_CNP_WG_CTL, 0x80, 0x00 }, /* GM3 boost disable */
+ { WCD934X_ANA_HPH, 0x80, 0x00 }, /* HPHL PA disable */
+ { WCD934X_ANA_HPH, 0x40, 0x00 }, /* HPHR PA disable */
+ { WCD934X_ANA_HPH, 0x20, 0x00 }, /* HPHL REF disable */
+ { WCD934X_ANA_HPH, 0x10, 0x00 }, /* HPHR REF disable */
+};
+
+static const struct tavil_reg_mask_val tavil_ocp_en_seq[] = {
+ { WCD934X_RX_OCP_CTL, 0x0F, 0x01 }, /* OCP number of attempts is 1 */
+ { WCD934X_HPH_OCP_CTL, 0xFA, 0x3A }, /* OCP current limit */
+ { WCD934X_HPH_L_TEST, 0x01, 0x01 }, /* Enable HPHL OCP */
+ { WCD934X_HPH_R_TEST, 0x01, 0x01 }, /* Enable HPHR OCP */
+};
+
+static const struct tavil_reg_mask_val tavil_ocp_en_seq_1[] = {
+ { WCD934X_RX_OCP_CTL, 0x0F, 0x01 }, /* OCP number of attempts is 1 */
+ { WCD934X_HPH_OCP_CTL, 0xFA, 0x3A }, /* OCP current limit */
+};
+
+/* LO-HIFI */
+static const struct tavil_reg_mask_val tavil_pre_pa_en_lohifi[] = {
+ { WCD934X_HPH_NEW_INT_HPH_TIMER1, 0x02, 0x00 },
+ { WCD934X_HPH_NEW_INT_PA_MISC2, 0x20, 0x20 },
+ { WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0xf0, 0x40 },
+ { WCD934X_HPH_CNP_WG_CTL, 0x80, 0x00 },
+ { WCD934X_RX_BIAS_HPH_LOWPOWER, 0xf0, 0xc0 },
+ { WCD934X_HPH_PA_CTL1, 0x0e, 0x02 },
+ { WCD934X_HPH_REFBUFF_LP_CTL, 0x06, 0x06 },
+};
+
+static const struct tavil_reg_mask_val tavil_pre_pa_en[] = {
+ { WCD934X_HPH_NEW_INT_HPH_TIMER1, 0x02, 0x00 },
+ { WCD934X_HPH_NEW_INT_PA_MISC2, 0x20, 0x0 },
+ { WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0xf0, 0x40 },
+ { WCD934X_HPH_CNP_WG_CTL, 0x80, 0x00 },
+ { WCD934X_RX_BIAS_HPH_LOWPOWER, 0xf0, 0x80 },
+ { WCD934X_HPH_PA_CTL1, 0x0e, 0x06 },
+ { WCD934X_HPH_REFBUFF_LP_CTL, 0x06, 0x06 },
+};
+
+static const struct tavil_reg_mask_val tavil_post_pa_en[] = {
+ { WCD934X_HPH_L_TEST, 0x01, 0x01 }, /* Enable HPHL OCP */
+ { WCD934X_HPH_R_TEST, 0x01, 0x01 }, /* Enable HPHR OCP */
+ { WCD934X_CDC_RX1_RX_PATH_CTL, 0x30, 0x20 }, /* RX1 mute disable */
+ { WCD934X_CDC_RX2_RX_PATH_CTL, 0x30, 0x20 }, /* RX2 mute disable */
+ { WCD934X_HPH_CNP_WG_CTL, 0x80, 0x80 }, /* GM3 boost enable */
+ { WCD934X_HPH_NEW_INT_HPH_TIMER1, 0x02, 0x02 },
+};
+
+static void tavil_codec_hph_reg_range_read(struct regmap *map, u8 *buf)
+{
+ regmap_bulk_read(map, WCD934X_HPH_CNP_EN, buf, TAVIL_HPH_REG_RANGE_1);
+ regmap_bulk_read(map, WCD934X_HPH_NEW_ANA_HPH2,
+ buf + TAVIL_HPH_REG_RANGE_1, TAVIL_HPH_REG_RANGE_2);
+ regmap_bulk_read(map, WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+ buf + TAVIL_HPH_REG_RANGE_1 + TAVIL_HPH_REG_RANGE_2,
+ TAVIL_HPH_REG_RANGE_3);
+}
+
+static void tavil_codec_hph_reg_recover(struct tavil_priv *tavil,
+ struct regmap *map, int pa_status)
+{
+ int i;
+
+ blocking_notifier_call_chain(&tavil->mbhc->notifier,
+ WCD_EVENT_OCP_OFF,
+ &tavil->mbhc->wcd_mbhc);
+
+ if (pa_status & 0xC0)
+ goto pa_en_restore;
+
+ dev_dbg(tavil->dev, "%s: HPH PA in disabled state (0x%x)\n",
+ __func__, pa_status);
+
+ regmap_write_bits(map, WCD934X_CDC_RX1_RX_PATH_CTL, 0x10, 0x10);
+ regmap_write_bits(map, WCD934X_CDC_RX2_RX_PATH_CTL, 0x10, 0x10);
+ regmap_write_bits(map, WCD934X_ANA_HPH, 0xC0, 0x00);
+ regmap_write_bits(map, WCD934X_ANA_HPH, 0x30, 0x00);
+ regmap_write_bits(map, WCD934X_CDC_RX1_RX_PATH_CTL, 0x10, 0x00);
+ regmap_write_bits(map, WCD934X_CDC_RX2_RX_PATH_CTL, 0x10, 0x00);
+
+ /* Restore to HW defaults */
+ regmap_multi_reg_write(map, tavil_hph_reset_tbl,
+ ARRAY_SIZE(tavil_hph_reset_tbl));
+
+ for (i = 0; i < ARRAY_SIZE(tavil_ocp_en_seq); i++)
+ regmap_write_bits(map, tavil_ocp_en_seq[i].reg,
+ tavil_ocp_en_seq[i].mask,
+ tavil_ocp_en_seq[i].val);
+ goto end;
+
+pa_en_restore:
+ dev_dbg(tavil->dev, "%s: HPH PA in enabled state (0x%x)\n",
+ __func__, pa_status);
+
+ /* Disable PA and other registers before restoring */
+ for (i = 0; i < ARRAY_SIZE(tavil_pa_disable); i++)
+ regmap_write_bits(map, tavil_pa_disable[i].reg,
+ tavil_pa_disable[i].mask,
+ tavil_pa_disable[i].val);
+
+ regmap_multi_reg_write(map, tavil_hph_reset_tbl,
+ ARRAY_SIZE(tavil_hph_reset_tbl));
+
+ for (i = 0; i < ARRAY_SIZE(tavil_ocp_en_seq_1); i++)
+ regmap_write_bits(map, tavil_ocp_en_seq_1[i].reg,
+ tavil_ocp_en_seq_1[i].mask,
+ tavil_ocp_en_seq_1[i].val);
+
+ if (tavil->hph_mode == CLS_H_LOHIFI) {
+ for (i = 0; i < ARRAY_SIZE(tavil_pre_pa_en_lohifi); i++)
+ regmap_write_bits(map,
+ tavil_pre_pa_en_lohifi[i].reg,
+ tavil_pre_pa_en_lohifi[i].mask,
+ tavil_pre_pa_en_lohifi[i].val);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(tavil_pre_pa_en); i++)
+ regmap_write_bits(map, tavil_pre_pa_en[i].reg,
+ tavil_pre_pa_en[i].mask,
+ tavil_pre_pa_en[i].val);
+ }
+ regmap_write_bits(map, WCD934X_ANA_HPH, 0x0C, pa_status & 0x0C);
+ regmap_write_bits(map, WCD934X_ANA_HPH, 0x30, 0x30);
+ /* wait for 100usec after HPH DAC is enabled */
+ usleep_range(100, 110);
+ regmap_write(map, WCD934X_ANA_HPH, pa_status);
+ /* Sleep for 7msec after PA is enabled */
+ usleep_range(7000, 7100);
+
+ for (i = 0; i < ARRAY_SIZE(tavil_post_pa_en); i++)
+ regmap_write_bits(map, tavil_post_pa_en[i].reg,
+ tavil_post_pa_en[i].mask,
+ tavil_post_pa_en[i].val);
+
+end:
+ tavil->mbhc->is_hph_recover = true;
+ blocking_notifier_call_chain(
+ &tavil->mbhc->notifier,
+ WCD_EVENT_OCP_ON,
+ &tavil->mbhc->wcd_mbhc);
+}
+
+static int tavil_codec_reset_hph_registers(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+ struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
+ u8 cache_val[TAVIL_HPH_TOTAL_REG];
+ u8 hw_val[TAVIL_HPH_TOTAL_REG];
+ unsigned int pa_status;
+ int ret;
+
+ dev_dbg(wcd9xxx->dev, "%s: event: %d\n", __func__, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ memset(cache_val, 0, TAVIL_HPH_TOTAL_REG);
+ memset(hw_val, 0, TAVIL_HPH_TOTAL_REG);
+
+ regmap_read(wcd9xxx->regmap, WCD934X_ANA_HPH, &pa_status);
+
+ tavil_codec_hph_reg_range_read(wcd9xxx->regmap, cache_val);
+
+ /* Read register values from HW directly */
+ regcache_cache_bypass(wcd9xxx->regmap, true);
+ tavil_codec_hph_reg_range_read(wcd9xxx->regmap, hw_val);
+ regcache_cache_bypass(wcd9xxx->regmap, false);
+
+ /* Compare the cached and HW register values to detect corruption */
+ ret = memcmp(cache_val, hw_val, TAVIL_HPH_TOTAL_REG);
+
+ /* A non-zero memcmp result means cache and HW differ (corruption) */
+ if (ret) {
+ dev_dbg(codec->dev, "%s: cache and hw reg are not same\n",
+ __func__);
+ tavil_codec_hph_reg_recover(tavil, wcd9xxx->regmap,
+ pa_status);
+ } else {
+ dev_dbg(codec->dev, "%s: cache and hw reg are same\n",
+ __func__);
+ tavil->mbhc->is_hph_recover = false;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
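
The SND_SOC_DAPM_PRE_PMU event reaches this callback through the RESET_HPH_REGISTERS supply widget added further below; for the check to run before the headphone PAs power up, the supply would be routed to them along these lines (a sketch, with widget names assumed from the WCD934x DAPM graph rather than taken from this patch):

static const struct snd_soc_dapm_route hph_reset_routes[] = {
	{ "HPHL PA", NULL, "RESET_HPH_REGISTERS" },
	{ "HPHR PA", NULL, "RESET_HPH_REGISTERS" },
};
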
+
static int tavil_iir_enable_audio_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -4870,6 +5248,10 @@ static const struct snd_kcontrol_new tavil_snd_controls[] = {
-84, 40, digital_gain),
SOC_SINGLE_SX_TLV("DEC4 Volume", WCD934X_CDC_TX4_TX_VOL_CTL, 0,
-84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("DEC5 Volume", WCD934X_CDC_TX5_TX_VOL_CTL, 0,
+ -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("DEC6 Volume", WCD934X_CDC_TX6_TX_VOL_CTL, 0,
+ -84, 40, digital_gain),
SOC_SINGLE_SX_TLV("DEC7 Volume", WCD934X_CDC_TX7_TX_VOL_CTL, 0,
-84, 40, digital_gain),
SOC_SINGLE_SX_TLV("DEC8 Volume", WCD934X_CDC_TX8_TX_VOL_CTL, 0,
@@ -5433,6 +5815,11 @@ static const struct snd_kcontrol_new aif3_cap_mixer[] = {
slim_tx_mixer_get, slim_tx_mixer_put),
};
+static const struct snd_kcontrol_new aif4_mad_mixer[] = {
+ SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, WCD934X_TX13, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
WCD_DAPM_ENUM_EXT(slim_rx0, SND_SOC_NOPM, 0, slim_rx_mux_text,
slim_rx_mux_get, slim_rx_mux_put);
WCD_DAPM_ENUM_EXT(slim_rx1, SND_SOC_NOPM, 0, slim_rx_mux_text,
@@ -5754,6 +6141,9 @@ static const struct snd_kcontrol_new mad_cpe1_switch =
static const struct snd_kcontrol_new mad_cpe2_switch =
SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+static const struct snd_kcontrol_new mad_brdcst_switch =
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
static const struct snd_kcontrol_new adc_us_mux0_switch =
SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0);
@@ -6140,6 +6530,14 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
tavil_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ /*
+ * Not a real supply widget; it is only used to recover the HPH
+ * registers and is not connected to any other widget.
+ */
+ SND_SOC_DAPM_SUPPLY("RESET_HPH_REGISTERS", SND_SOC_NOPM,
+ 0, 0, tavil_codec_reset_hph_registers,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS1_STANDALONE, SND_SOC_NOPM, 0, 0,
tavil_codec_force_enable_micbias,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
@@ -6168,10 +6566,16 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
aif2_cap_mixer, ARRAY_SIZE(aif2_cap_mixer)),
SND_SOC_DAPM_MIXER("AIF3_CAP Mixer", SND_SOC_NOPM, AIF3_CAP, 0,
aif3_cap_mixer, ARRAY_SIZE(aif3_cap_mixer)),
+ SND_SOC_DAPM_MIXER("AIF4_MAD Mixer", SND_SOC_NOPM, AIF4_MAD_TX, 0,
+ aif4_mad_mixer, ARRAY_SIZE(aif4_mad_mixer)),
SND_SOC_DAPM_AIF_OUT_E("AIF4 VI", "VIfeed", 0, SND_SOC_NOPM,
AIF4_VIFEED, 0, tavil_codec_enable_slimvi_feedback,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT("AIF4 MAD", "AIF4 MAD TX", 0,
+ SND_SOC_NOPM, 0, 0),
+
SND_SOC_DAPM_MIXER("AIF4_VI Mixer", SND_SOC_NOPM, AIF4_VIFEED, 0,
aif4_vi_mixer, ARRAY_SIZE(aif4_vi_mixer)),
SND_SOC_DAPM_INPUT("VIINPUT"),
@@ -6306,6 +6710,10 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
WCD_DAPM_MUX("MAD_SEL MUX", 0, mad_sel),
WCD_DAPM_MUX("MAD_INP MUX", 0, mad_inp_mux),
+ SND_SOC_DAPM_SWITCH_E("MAD_BROADCAST", SND_SOC_NOPM, 0, 0,
+ &mad_brdcst_switch, tavil_codec_ape_enable_mad,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
SND_SOC_DAPM_SWITCH_E("MAD_CPE1", SND_SOC_NOPM, 0, 0,
&mad_cpe1_switch, tavil_codec_cpe_mad_ctl,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
@@ -6462,6 +6870,7 @@ static int tavil_get_channel_map(struct snd_soc_dai *dai,
case AIF1_CAP:
case AIF2_CAP:
case AIF3_CAP:
+ case AIF4_MAD_TX:
case AIF4_VIFEED:
if (!tx_slot || !tx_num) {
dev_err(tavil->dev, "%s: Invalid tx_slot 0x%pK or tx_num 0x%pK\n",
@@ -6500,6 +6909,7 @@ static int tavil_set_channel_map(struct snd_soc_dai *dai,
{
struct tavil_priv *tavil;
struct wcd9xxx *core;
+ struct wcd9xxx_codec_dai_data *dai_data = NULL;
tavil = snd_soc_codec_get_drvdata(dai->codec);
core = dev_get_drvdata(dai->codec->dev->parent);
@@ -6514,6 +6924,12 @@ static int tavil_set_channel_map(struct snd_soc_dai *dai,
wcd9xxx_init_slimslave(core, core->slim->laddr,
tx_num, tx_slot, rx_num, rx_slot);
+ /* Reserve TX13 for MAD data channel */
+ dai_data = &tavil->dai[AIF4_MAD_TX];
+ if (dai_data)
+ list_add_tail(&core->tx_chs[WCD934X_TX13].list,
+ &dai_data->wcd9xxx_ch_list);
+
return 0;
}
@@ -6844,7 +7260,7 @@ static int tavil_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct tavil_priv *tavil = snd_soc_codec_get_drvdata(dai->codec);
- int ret;
+ int ret = 0;
dev_dbg(tavil->dev, "%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n",
__func__, dai->name, dai->id, params_rate(params),
@@ -6874,7 +7290,9 @@ static int tavil_hw_params(struct snd_pcm_substream *substream,
tavil->dai[dai->id].rate = params_rate(params);
break;
case SNDRV_PCM_STREAM_CAPTURE:
- ret = tavil_set_decimator_rate(dai, params_rate(params));
+ if (dai->id != AIF4_MAD_TX)
+ ret = tavil_set_decimator_rate(dai,
+ params_rate(params));
if (ret) {
dev_err(tavil->dev, "%s: cannot set TX Decimator rate: %d\n",
__func__, ret);
@@ -7031,8 +7449,152 @@ static struct snd_soc_dai_driver tavil_dai[] = {
},
.ops = &tavil_vi_dai_ops,
},
+ {
+ .name = "tavil_mad1",
+ .id = AIF4_MAD_TX,
+ .capture = {
+ .stream_name = "AIF4 MAD TX",
+ .rates = SNDRV_PCM_RATE_16000,
+ .formats = WCD934X_FORMATS_S16_LE,
+ .rate_min = 16000,
+ .rate_max = 16000,
+ .channels_min = 1,
+ .channels_max = 1,
+ },
+ .ops = &tavil_dai_ops,
+ },
};
+static void tavil_codec_power_gate_digital_core(struct tavil_priv *tavil)
+{
+ struct snd_soc_codec *codec = tavil->codec;
+
+ if (!codec)
+ return;
+
+ mutex_lock(&tavil->power_lock);
+ dev_dbg(codec->dev, "%s: Entering power gating function, %d\n",
+ __func__, tavil->power_active_ref);
+
+ if (tavil->power_active_ref > 0)
+ goto exit;
+
+ wcd9xxx_set_power_state(tavil->wcd9xxx,
+ WCD_REGION_POWER_COLLAPSE_BEGIN,
+ WCD9XXX_DIG_CORE_REGION_1);
+ snd_soc_update_bits(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x04, 0x04);
+ snd_soc_update_bits(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x01, 0x00);
+ snd_soc_update_bits(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x02, 0x00);
+ wcd9xxx_set_power_state(tavil->wcd9xxx, WCD_REGION_POWER_DOWN,
+ WCD9XXX_DIG_CORE_REGION_1);
+exit:
+ dev_dbg(codec->dev, "%s: Exiting power gating function, %d\n",
+ __func__, tavil->power_active_ref);
+ mutex_unlock(&tavil->power_lock);
+}
+
+static void tavil_codec_power_gate_work(struct work_struct *work)
+{
+ struct tavil_priv *tavil;
+ struct delayed_work *dwork;
+ struct snd_soc_codec *codec;
+
+ dwork = to_delayed_work(work);
+ tavil = container_of(dwork, struct tavil_priv, power_gate_work);
+ codec = tavil->codec;
+
+ if (!codec)
+ return;
+
+ tavil_codec_power_gate_digital_core(tavil);
+}
+
+/* called under power_lock acquisition */
+static int tavil_dig_core_remove_power_collapse(struct snd_soc_codec *codec)
+{
+ struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+
+ snd_soc_write(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
+ snd_soc_write(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
+ snd_soc_write(codec, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
+ snd_soc_update_bits(codec, WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x00);
+ snd_soc_update_bits(codec, WCD934X_CODEC_RPM_RST_CTL, 0x02, 0x02);
+
+ wcd9xxx_set_power_state(tavil->wcd9xxx,
+ WCD_REGION_POWER_COLLAPSE_REMOVE,
+ WCD9XXX_DIG_CORE_REGION_1);
+ regcache_mark_dirty(codec->component.regmap);
+ regcache_sync_region(codec->component.regmap,
+ WCD934X_DIG_CORE_REG_MIN,
+ WCD934X_DIG_CORE_REG_MAX);
+
+ return 0;
+}
+
+static int tavil_dig_core_power_collapse(struct tavil_priv *tavil,
+ int req_state)
+{
+ struct snd_soc_codec *codec;
+ int cur_state;
+
+ /* Exit if feature is disabled */
+ if (!dig_core_collapse_enable)
+ return 0;
+
+ mutex_lock(&tavil->power_lock);
+ if (req_state == POWER_COLLAPSE)
+ tavil->power_active_ref--;
+ else if (req_state == POWER_RESUME)
+ tavil->power_active_ref++;
+ else
+ goto unlock_mutex;
+
+ if (tavil->power_active_ref < 0) {
+ dev_dbg(tavil->dev, "%s: power_active_ref is negative\n",
+ __func__);
+ goto unlock_mutex;
+ }
+
+ codec = tavil->codec;
+ if (!codec)
+ goto unlock_mutex;
+
+ if (req_state == POWER_COLLAPSE) {
+ if (tavil->power_active_ref == 0) {
+ schedule_delayed_work(&tavil->power_gate_work,
+ msecs_to_jiffies(dig_core_collapse_timer * 1000));
+ }
+ } else if (req_state == POWER_RESUME) {
+ if (tavil->power_active_ref == 1) {
+ /*
+ * At this point, there can be two cases:
+ * 1. Core already in power collapse state
+ * 2. Timer kicked in and still did not expire or
+ * waiting for the power_lock
+ */
+ cur_state = wcd9xxx_get_current_power_state(
+ tavil->wcd9xxx,
+ WCD9XXX_DIG_CORE_REGION_1);
+ if (cur_state == WCD_REGION_POWER_DOWN) {
+ tavil_dig_core_remove_power_collapse(codec);
+ } else {
+ mutex_unlock(&tavil->power_lock);
+ cancel_delayed_work_sync(
+ &tavil->power_gate_work);
+ mutex_lock(&tavil->power_lock);
+ }
+ }
+ }
+
+unlock_mutex:
+ mutex_unlock(&tavil->power_lock);
+
+ return 0;
+}
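
The refcount contract: every POWER_RESUME must eventually be paired with a POWER_COLLAPSE, and the digital core is gated (after dig_core_collapse_timer seconds) only once power_active_ref drops back to zero. The MCLK path in a later hunk follows exactly this pairing:

/* Caller pattern, as used by __tavil_cdc_mclk_enable_locked() below */
tavil_dig_core_power_collapse(tavil, POWER_RESUME);
/* ... digital core in use ... */
tavil_dig_core_power_collapse(tavil, POWER_COLLAPSE);
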
+
static int tavil_cdc_req_mclk_enable(struct tavil_priv *tavil,
bool enable)
{
@@ -7074,15 +7636,15 @@ static int __tavil_cdc_mclk_enable_locked(struct tavil_priv *tavil,
dev_dbg(tavil->dev, "%s: mclk_enable = %u\n", __func__, enable);
if (enable) {
+ tavil_dig_core_power_collapse(tavil, POWER_RESUME);
tavil_vote_svs(tavil, true);
ret = tavil_cdc_req_mclk_enable(tavil, true);
if (ret)
goto done;
-
- set_bit(AUDIO_NOMINAL, &tavil->status_mask);
} else {
tavil_cdc_req_mclk_enable(tavil, false);
tavil_vote_svs(tavil, false);
+ tavil_dig_core_power_collapse(tavil, POWER_COLLAPSE);
}
done:
@@ -7328,6 +7890,10 @@ static const struct tavil_reg_mask_val tavil_codec_reg_defaults[] = {
{WCD934X_CDC_TX6_TX_PATH_CFG1, 0x01, 0x00},
{WCD934X_CDC_TX7_TX_PATH_CFG1, 0x01, 0x00},
{WCD934X_CDC_TX8_TX_PATH_CFG1, 0x01, 0x00},
+ {WCD934X_RX_OCP_CTL, 0x0F, 0x01}, /* OCP number of attempts is 1 */
+ {WCD934X_HPH_OCP_CTL, 0xFF, 0x3A}, /* OCP current limit */
+ {WCD934X_HPH_L_TEST, 0x01, 0x01},
+ {WCD934X_HPH_R_TEST, 0x01, 0x01},
};
static const struct tavil_reg_mask_val tavil_codec_reg_init_common_val[] = {
@@ -7933,6 +8499,15 @@ static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
snd_soc_dapm_enable_pin(dapm, "ANC SPK1 PA");
mutex_unlock(&tavil->codec_mutex);
+ snd_soc_dapm_ignore_suspend(dapm, "AIF1 Playback");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF1 Capture");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF2 Playback");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF2 Capture");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF3 Playback");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF3 Capture");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF4 Playback");
+ snd_soc_dapm_ignore_suspend(dapm, "VIfeed");
+
snd_soc_dapm_sync(dapm);
tavil_wdsp_initialize(codec);
@@ -8007,6 +8582,9 @@ static int tavil_suspend(struct device *dev)
return -EINVAL;
}
dev_dbg(dev, "%s: system suspend\n", __func__);
+ if (delayed_work_pending(&tavil->power_gate_work) &&
+ cancel_delayed_work_sync(&tavil->power_gate_work))
+ tavil_codec_power_gate_digital_core(tavil);
return 0;
}
@@ -8508,6 +9086,7 @@ static int tavil_probe(struct platform_device *pdev)
struct tavil_priv *tavil;
struct clk *wcd_ext_clk;
struct wcd9xxx_resmgr_v2 *resmgr;
+ struct wcd9xxx_power_region *cdc_pwr;
tavil = devm_kzalloc(&pdev->dev, sizeof(struct tavil_priv),
GFP_KERNEL);
@@ -8518,6 +9097,8 @@ static int tavil_probe(struct platform_device *pdev)
tavil->wcd9xxx = dev_get_drvdata(pdev->dev.parent);
tavil->dev = &pdev->dev;
+ INIT_DELAYED_WORK(&tavil->power_gate_work, tavil_codec_power_gate_work);
+ mutex_init(&tavil->power_lock);
INIT_WORK(&tavil->tavil_add_child_devices_work,
tavil_add_child_devices);
mutex_init(&tavil->micb_lock);
@@ -8534,6 +9115,18 @@ static int tavil_probe(struct platform_device *pdev)
*/
tavil->svs_ref_cnt = 1;
+ cdc_pwr = devm_kzalloc(&pdev->dev, sizeof(struct wcd9xxx_power_region),
+ GFP_KERNEL);
+ if (!cdc_pwr) {
+ ret = -ENOMEM;
+ goto err_resmgr;
+ }
+ tavil->wcd9xxx->wcd9xxx_pwr[WCD9XXX_DIG_CORE_REGION_1] = cdc_pwr;
+ cdc_pwr->pwr_collapse_reg_min = WCD934X_DIG_CORE_REG_MIN;
+ cdc_pwr->pwr_collapse_reg_max = WCD934X_DIG_CORE_REG_MAX;
+ wcd9xxx_set_power_state(tavil->wcd9xxx,
+ WCD_REGION_POWER_COLLAPSE_REMOVE,
+ WCD9XXX_DIG_CORE_REGION_1);
/*
* Init resource manager so that if child nodes such as SoundWire
* requests for clock, resource manager can honor the request
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index 52ca82fba8e9..2012e4617ee1 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -4675,7 +4675,7 @@ int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc,
schedule_delayed_work(&mbhc->mbhc_firmware_dwork,
usecs_to_jiffies(FW_READ_TIMEOUT));
else
- pr_debug("%s: Skipping to read mbhc fw, 0x%p %p\n",
+ pr_debug("%s: Skipping to read mbhc fw, 0x%pK %pK\n",
__func__, mbhc->mbhc_fw, mbhc->mbhc_cal);
}
@@ -5073,7 +5073,7 @@ static int wcd9xxx_remeasure_z_values(struct wcd9xxx_mbhc *mbhc,
right = !!(r);
dev_dbg(codec->dev, "%s: Remeasuring impedance values\n", __func__);
- dev_dbg(codec->dev, "%s: l: %p, r: %p, left=%d, right=%d\n", __func__,
+ dev_dbg(codec->dev, "%s: l: %pK, r: %pK, left=%d, right=%d\n", __func__,
l, r, left, right);
/* Remeasure V2 values */
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index 29718a8d7c04..39ca965e791e 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -307,7 +307,7 @@ static int wcd_resmgr_disable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr)
WCD9335_ANA_CLK_TOP,
0x04, 0x00);
wcd_resmgr_codec_reg_update_bits(resmgr,
- WCD934X_CLK_SYS_MCLK_PRG, 0x01, 0x0);
+ WCD934X_CLK_SYS_MCLK_PRG, 0x81, 0x00);
resmgr->clk_type = WCD_CLK_OFF;
}
diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c
index e9f167fa643b..3aa9ac8d40b6 100644
--- a/sound/soc/codecs/wcd_cpe_core.c
+++ b/sound/soc/codecs/wcd_cpe_core.c
@@ -473,7 +473,7 @@ static int wcd_cpe_load_fw(struct wcd_cpe_core *core,
bool load_segment;
if (!core || !core->cpe_handle) {
- pr_err("%s: Error CPE core %p\n", __func__,
+ pr_err("%s: Error CPE core %pK\n", __func__,
core);
return -EINVAL;
}
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index 46a073bac2e9..d7f4044b71ee 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -917,7 +917,7 @@ int wsa881x_set_channel_map(struct snd_soc_codec *codec, u8 *port, u8 num_port,
if (!port || !ch_mask || !ch_rate ||
(num_port > WSA881X_MAX_SWR_PORTS)) {
dev_err(codec->dev,
- "%s: Invalid port=%p, ch_mask=%p, ch_rate=%p\n",
+ "%s: Invalid port=%pK, ch_mask=%pK, ch_rate=%pK\n",
__func__, port, ch_mask, ch_rate);
return -EINVAL;
}
@@ -1305,7 +1305,8 @@ static int wsa881x_swr_down(struct swr_device *pdev)
dev_err(&pdev->dev, "%s: wsa881x is NULL\n", __func__);
return -EINVAL;
}
- cancel_delayed_work_sync(&wsa881x->ocp_ctl_work);
+ if (delayed_work_pending(&wsa881x->ocp_ctl_work))
+ cancel_delayed_work_sync(&wsa881x->ocp_ctl_work);
ret = wsa881x_gpio_ctrl(wsa881x, false);
if (ret)
dev_err(&pdev->dev, "%s: Failed to disable gpio\n", __func__);
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index e740a24704b7..64a1fa76604d 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -79,6 +79,15 @@ config QTI_PP
tuning parameters of various modules such as equalizer,
customized mixing.
+config QTI_PP_AUDIOSPHERE
+ bool "Enable QTI AUDIOSPHERE PP"
+ depends on SND_SOC_MSM_QDSP6V2_INTF
+ help
+ Add support for QTI audio sphere post processing.
+ This option configures the post-processing parameters
+ on the DSP, including sending the tuning parameters
+ of the audio sphere module.
+
config SND_SOC_CPE
tristate "CPE drivers"
depends on SND_SOC_WCD_CPE
@@ -104,6 +113,7 @@ config SND_SOC_MSM8996
select SND_SOC_MSM_HDMI_CODEC_RX
select DTS_SRS_TM
select QTI_PP
+ select QTI_PP_AUDIOSPHERE
select SND_SOC_CPE
select MSM_ULTRASOUND
select DOLBY_DS2
@@ -125,6 +135,9 @@ config SND_SOC_MSMCOBALT
select SND_SOC_MSM_HOSTLESS_PCM
select SND_DYNAMIC_MINORS
select MSM_QDSP6_APRV2_GLINK
+ select MSM_QDSP6_SSR
+ select MSM_QDSP6_PDR
+ select MSM_QDSP6_NOTIFIER
select MSM_QDSP6V2_CODECS
select SND_SOC_WCD9335
select SND_SOC_WCD934X
diff --git a/sound/soc/msm/msm-cpe-lsm.c b/sound/soc/msm/msm-cpe-lsm.c
index 8270cfb98de8..ef4c9b01d91e 100644
--- a/sound/soc/msm/msm-cpe-lsm.c
+++ b/sound/soc/msm/msm-cpe-lsm.c
@@ -496,7 +496,7 @@ static int msm_cpe_lab_buf_alloc(struct snd_pcm_substream *substream,
pcm_buf[count].mem = pcm_buf[0].mem + (count * bufsz);
pcm_buf[count].phys = pcm_buf[0].phys + (count * bufsz);
dev_dbg(rtd->dev,
- "%s: pcm_buf[%d].mem %p pcm_buf[%d].phys %pa\n",
+ "%s: pcm_buf[%d].mem %pK pcm_buf[%d].phys %pK\n",
__func__, count,
(void *)pcm_buf[count].mem,
count, &(pcm_buf[count].phys));
@@ -722,7 +722,7 @@ static int msm_cpe_lab_thread(void *data)
cur_buf = &lab_d->pcm_buf[buf_count % prd_cnt];
next_buf = &lab_d->pcm_buf[(buf_count + 2) % prd_cnt];
dev_dbg(rtd->dev,
- "%s: Cur buf.mem = %p Next Buf.mem = %p\n"
+ "%s: Cur buf.mem = %pK Next Buf.mem = %pK\n"
" buf count = 0x%x\n", __func__,
cur_buf->mem, next_buf->mem, buf_count);
} else {
@@ -1544,7 +1544,7 @@ static int msm_cpe_lsm_lab_start(struct snd_pcm_substream *substream,
int rc;
if (!substream || !substream->private_data) {
- pr_err("%s: invalid substream (%p)\n",
+ pr_err("%s: invalid substream (%pK)\n",
__func__, substream);
return -EINVAL;
}
@@ -1634,7 +1634,7 @@ static bool msm_cpe_lsm_is_valid_stream(struct snd_pcm_substream *substream,
struct wcd_cpe_lsm_ops *lsm_ops;
if (!substream || !substream->private_data) {
- pr_err("%s: invalid substream (%p)\n",
+ pr_err("%s: invalid substream (%pK)\n",
func, substream);
return false;
}
@@ -2075,7 +2075,7 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
struct wcd_cpe_lsm_ops *lsm_ops;
if (!substream || !substream->private_data) {
- pr_err("%s: invalid substream (%p)\n",
+ pr_err("%s: invalid substream (%pK)\n",
__func__, substream);
return -EINVAL;
}
@@ -2347,7 +2347,7 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
struct wcd_cpe_lsm_ops *lsm_ops;
if (!substream || !substream->private_data) {
- pr_err("%s: invalid substream (%p)\n",
+ pr_err("%s: invalid substream (%pK)\n",
__func__, substream);
return -EINVAL;
}
@@ -2997,7 +2997,7 @@ static int msm_cpe_lsm_copy(struct snd_pcm_substream *substream, int a,
if (lab_d->buf_idx >= (lsm_d->hw_params.period_count))
lab_d->buf_idx = 0;
pcm_buf = (lab_d->pcm_buf[lab_d->buf_idx].mem);
- pr_debug("%s: Buf IDX = 0x%x pcm_buf %p\n",
+ pr_debug("%s: Buf IDX = 0x%x pcm_buf %pK\n",
__func__, lab_d->buf_idx, pcm_buf);
if (pcm_buf) {
if (copy_to_user(buf, pcm_buf, fbytes)) {
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 4cb62a6b3e7d..ee9dcacdd5c9 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -96,7 +96,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -108,8 +109,9 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.rates = (SNDRV_PCM_RATE_8000_384000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE|
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 4,
.rate_min = 8000,
@@ -127,7 +129,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -140,7 +143,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -210,7 +214,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 6,
.rate_min = 8000,
@@ -222,7 +227,9 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.rates = (SNDRV_PCM_RATE_8000_384000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE),
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -240,7 +247,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -259,7 +267,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -271,8 +280,9 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.rates = (SNDRV_PCM_RATE_8000_48000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE|
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -290,7 +300,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -303,7 +314,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -321,7 +333,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -340,7 +353,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -353,7 +367,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -2220,7 +2235,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -2239,7 +2255,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -2258,7 +2275,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -2277,7 +2295,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -2296,7 +2315,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -2315,7 +2335,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
@@ -2334,7 +2355,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
diff --git a/sound/soc/msm/msmcobalt.c b/sound/soc/msm/msmcobalt.c
index 17e78b25c6ca..a2bd3be62175 100644
--- a/sound/soc/msm/msmcobalt.c
+++ b/sound/soc/msm/msmcobalt.c
@@ -33,6 +33,7 @@
#include <sound/pcm_params.h>
#include <sound/info.h>
#include <device_event.h>
+#include <linux/qdsp6v2/audio_notifier.h>
#include "qdsp6v2/msm-pcm-routing-v2.h"
#include "../codecs/wcd9335.h"
#include "../codecs/wcd934x/wcd934x.h"
@@ -70,6 +71,9 @@
#define WCN_CDC_SLIM_RX_CH_MAX 2
#define WCN_CDC_SLIM_TX_CH_MAX 3
+#define TDM_CHANNEL_MAX 8
+#define TDM_SLOT_OFFSET_MAX 8
+
enum {
SLIM_RX_0 = 0,
SLIM_RX_1,
@@ -169,6 +173,131 @@ struct msm_asoc_wcd93xx_codec {
void (*mbhc_hs_detect_exit)(struct snd_soc_codec *codec);
};
+enum {
+ TDM_0 = 0,
+ TDM_1,
+ TDM_2,
+ TDM_3,
+ TDM_4,
+ TDM_5,
+ TDM_6,
+ TDM_7,
+ TDM_PORT_MAX,
+};
+
+enum {
+ TDM_PRI = 0,
+ TDM_SEC,
+ TDM_TERT,
+ TDM_QUAT,
+ TDM_INTERFACE_MAX,
+};
+
+struct tdm_port {
+ u32 mode;
+ u32 channel;
+};
+
+/* TDM default config */
+static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
+ { /* PRI TDM */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */
+ },
+ { /* SEC TDM */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */
+ },
+ { /* TERT TDM */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */
+ },
+ { /* QUAT TDM */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */
+ }
+};
+
+/* TDM default config */
+static struct dev_config tdm_tx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
+ { /* PRI TDM */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */
+ },
+ { /* SEC TDM */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */
+ },
+ { /* TERT TDM */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */
+ },
+ { /* QUAT TDM */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */
+ {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */
+ }
+};
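
Both tables are arrays of struct dev_config, which is defined outside this hunk; from the initializers and the field accesses below, its shape is presumably:

/* Assumed layout of struct dev_config (field types are a guess) */
struct dev_config {
	u32 sample_rate;
	u32 bit_format;
	u32 channels;
};
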
+
+/* TDM default slot offsets; currently only TDM_RX_0 and TDM_TX_0 are supported */
+static unsigned int tdm_slot_offset[TDM_PORT_MAX][TDM_SLOT_OFFSET_MAX] = {
+ {0, 4, 8, 12, 16, 20, 24, 28}, /* TX_0 | RX_0 */
+ {AFE_SLOT_MAPPING_OFFSET_INVALID}, /* TX_1 | RX_1 */
+ {AFE_SLOT_MAPPING_OFFSET_INVALID}, /* TX_2 | RX_2 */
+ {AFE_SLOT_MAPPING_OFFSET_INVALID}, /* TX_3 | RX_3 */
+ {AFE_SLOT_MAPPING_OFFSET_INVALID}, /* TX_4 | RX_4 */
+ {AFE_SLOT_MAPPING_OFFSET_INVALID}, /* TX_5 | RX_5 */
+ {AFE_SLOT_MAPPING_OFFSET_INVALID}, /* TX_6 | RX_6 */
+ {AFE_SLOT_MAPPING_OFFSET_INVALID}, /* TX_7 | RX_7 */
+};
+
/* Default configuration of slimbus channels */
static struct dev_config slim_rx_cfg[] = {
[SLIM_RX_0] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
@@ -253,7 +382,8 @@ static const char *const slim_tx_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven",
"Eight"};
static const char *const vi_feed_ch_text[] = {"One", "Two"};
-static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE"};
+static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
+ "S32_LE"};
static char const *ext_disp_bit_format_text[] = {"S16_LE", "S24_LE"};
static char const *slim_sample_rate_text[] = {"KHZ_8", "KHZ_16",
"KHZ_32", "KHZ_44P1", "KHZ_48",
@@ -268,9 +398,15 @@ static char const *ch_text[] = {"Two", "Three", "Four", "Five",
static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
"KHZ_16", "KHZ_22P05",
"KHZ_32", "KHZ_44P1", "KHZ_48",
- "KHZ_96", "KHZ_192"};
+ "KHZ_96", "KHZ_192", "KHZ_384"};
static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
"KHZ_192"};
+static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
+ "Five", "Six", "Seven", "Eight"};
+static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
+static char const *tdm_sample_rate_text[] = {"KHZ_8", "KHZ_16", "KHZ_32",
+ "KHZ_44P1", "KHZ_48", "KHZ_96",
+ "KHZ_192", "KHZ_352P8", "KHZ_384"};
static const char *const auxpcm_rate_text[] = {"KHZ_8", "KHZ_16"};
static char const *mi2s_rate_text[] = {"KHZ_8", "KHZ_16",
"KHZ_32", "KHZ_44P1", "KHZ_48",
@@ -307,6 +443,12 @@ static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_sample_rate, usb_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_sample_rate, usb_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_sample_rate,
ext_disp_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_chs, tdm_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_format, tdm_bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_sample_rate, tdm_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_rx_chs, tdm_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_rx_format, tdm_bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tdm_rx_sample_rate, tdm_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(prim_aux_pcm_rx_sample_rate, auxpcm_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(sec_aux_pcm_rx_sample_rate, auxpcm_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(tert_aux_pcm_rx_sample_rate, auxpcm_rate_text);
@@ -334,11 +476,11 @@ static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_chs, mi2s_ch_text);
static struct platform_device *spdev;
+static bool is_initial_boot;
static bool codec_reg_done;
static struct snd_soc_aux_dev *msm_aux_dev;
static struct snd_soc_codec_conf *msm_codec_conf;
static struct msm_asoc_wcd93xx_codec msm_codec_fn;
-static void *adsp_state_notifier;
static void *def_tasha_mbhc_cal(void);
static void *def_tavil_mbhc_cal(void);
@@ -510,6 +652,9 @@ static int slim_get_bit_format_val(int bit_format)
int val = 0;
switch (bit_format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ val = 3;
+ break;
case SNDRV_PCM_FORMAT_S24_3LE:
val = 2;
break;
@@ -538,6 +683,9 @@ static int slim_get_bit_format(int val)
case 2:
bit_fmt = SNDRV_PCM_FORMAT_S24_3LE;
break;
+ case 3:
+ bit_fmt = SNDRV_PCM_FORMAT_S32_LE;
+ break;
default:
bit_fmt = SNDRV_PCM_FORMAT_S16_LE;
break;
@@ -875,6 +1023,9 @@ static int usb_audio_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
int sample_rate_val;
switch (usb_rx_cfg.sample_rate) {
+ case SAMPLING_RATE_384KHZ:
+ sample_rate_val = 9;
+ break;
case SAMPLING_RATE_192KHZ:
sample_rate_val = 8;
break;
@@ -915,6 +1066,9 @@ static int usb_audio_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
switch (ucontrol->value.integer.value[0]) {
+ case 9:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_384KHZ;
+ break;
case 8:
usb_rx_cfg.sample_rate = SAMPLING_RATE_192KHZ;
break;
@@ -957,6 +1111,9 @@ static int usb_audio_rx_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
switch (usb_rx_cfg.bit_format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ ucontrol->value.integer.value[0] = 3;
+ break;
case SNDRV_PCM_FORMAT_S24_3LE:
ucontrol->value.integer.value[0] = 2;
break;
@@ -981,6 +1138,9 @@ static int usb_audio_rx_format_put(struct snd_kcontrol *kcontrol,
int rc = 0;
switch (ucontrol->value.integer.value[0]) {
+ case 3:
+ usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S32_LE;
+ break;
case 2:
usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_3LE;
break;
@@ -1023,6 +1183,9 @@ static int usb_audio_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
int sample_rate_val;
switch (usb_tx_cfg.sample_rate) {
+ case SAMPLING_RATE_384KHZ:
+ sample_rate_val = 9;
+ break;
case SAMPLING_RATE_192KHZ:
sample_rate_val = 8;
break;
@@ -1065,6 +1228,9 @@ static int usb_audio_tx_sample_rate_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
switch (ucontrol->value.integer.value[0]) {
+ case 9:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_384KHZ;
+ break;
case 8:
usb_tx_cfg.sample_rate = SAMPLING_RATE_192KHZ;
break;
@@ -1107,6 +1273,9 @@ static int usb_audio_tx_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
switch (usb_tx_cfg.bit_format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ ucontrol->value.integer.value[0] = 3;
+ break;
case SNDRV_PCM_FORMAT_S24_3LE:
ucontrol->value.integer.value[0] = 2;
break;
@@ -1131,6 +1300,9 @@ static int usb_audio_tx_format_put(struct snd_kcontrol *kcontrol,
int rc = 0;
switch (ucontrol->value.integer.value[0]) {
+ case 3:
+ usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S32_LE;
+ break;
case 2:
usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_3LE;
break;
@@ -1327,6 +1499,45 @@ static int proxy_rx_ch_put(struct snd_kcontrol *kcontrol,
return 1;
}
+static int tdm_get_sample_rate(int value)
+{
+ int sample_rate = 0;
+
+ switch (value) {
+ case 0:
+ sample_rate = SAMPLING_RATE_8KHZ;
+ break;
+ case 1:
+ sample_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 2:
+ sample_rate = SAMPLING_RATE_32KHZ;
+ break;
+ case 3:
+ sample_rate = SAMPLING_RATE_44P1KHZ;
+ break;
+ case 4:
+ sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ case 5:
+ sample_rate = SAMPLING_RATE_96KHZ;
+ break;
+ case 6:
+ sample_rate = SAMPLING_RATE_192KHZ;
+ break;
+ case 7:
+ sample_rate = SAMPLING_RATE_352P8KHZ;
+ break;
+ case 8:
+ sample_rate = SAMPLING_RATE_384KHZ;
+ break;
+ default:
+ sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+ return sample_rate;
+}
+
static int aux_pcm_get_sample_rate(int value)
{
int sample_rate;
@@ -1343,6 +1554,45 @@ static int aux_pcm_get_sample_rate(int value)
return sample_rate;
}
+static int tdm_get_sample_rate_val(int sample_rate)
+{
+ int sample_rate_val = 0;
+
+ switch (sample_rate) {
+ case SAMPLING_RATE_8KHZ:
+ sample_rate_val = 0;
+ break;
+ case SAMPLING_RATE_16KHZ:
+ sample_rate_val = 1;
+ break;
+ case SAMPLING_RATE_32KHZ:
+ sample_rate_val = 2;
+ break;
+ case SAMPLING_RATE_44P1KHZ:
+ sample_rate_val = 3;
+ break;
+ case SAMPLING_RATE_48KHZ:
+ sample_rate_val = 4;
+ break;
+ case SAMPLING_RATE_96KHZ:
+ sample_rate_val = 5;
+ break;
+ case SAMPLING_RATE_192KHZ:
+ sample_rate_val = 6;
+ break;
+ case SAMPLING_RATE_352P8KHZ:
+ sample_rate_val = 7;
+ break;
+ case SAMPLING_RATE_384KHZ:
+ sample_rate_val = 8;
+ break;
+ default:
+ sample_rate_val = 4;
+ break;
+ }
+ return sample_rate_val;
+}
+
static int aux_pcm_get_sample_rate_val(int sample_rate)
{
int sample_rate_val;
@@ -1359,6 +1609,361 @@ static int aux_pcm_get_sample_rate_val(int sample_rate)
return sample_rate_val;
}
+static int tdm_get_port_idx(struct snd_kcontrol *kcontrol,
+ struct tdm_port *port)
+{
+ if (port) {
+ if (strnstr(kcontrol->id.name, "PRI",
+ sizeof(kcontrol->id.name))) {
+ port->mode = TDM_PRI;
+ } else if (strnstr(kcontrol->id.name, "SEC",
+ sizeof(kcontrol->id.name))) {
+ port->mode = TDM_SEC;
+ } else if (strnstr(kcontrol->id.name, "TERT",
+ sizeof(kcontrol->id.name))) {
+ port->mode = TDM_TERT;
+ } else if (strnstr(kcontrol->id.name, "QUAT",
+ sizeof(kcontrol->id.name))) {
+ port->mode = TDM_QUAT;
+ } else {
+ pr_err("%s: unsupported mode in: %s",
+ __func__, kcontrol->id.name);
+ return -EINVAL;
+ }
+
+ if (strnstr(kcontrol->id.name, "RX_0",
+ sizeof(kcontrol->id.name)) ||
+ strnstr(kcontrol->id.name, "TX_0",
+ sizeof(kcontrol->id.name))) {
+ port->channel = TDM_0;
+ } else if (strnstr(kcontrol->id.name, "RX_1",
+ sizeof(kcontrol->id.name)) ||
+ strnstr(kcontrol->id.name, "TX_1",
+ sizeof(kcontrol->id.name))) {
+ port->channel = TDM_1;
+ } else if (strnstr(kcontrol->id.name, "RX_2",
+ sizeof(kcontrol->id.name)) ||
+ strnstr(kcontrol->id.name, "TX_2",
+ sizeof(kcontrol->id.name))) {
+ port->channel = TDM_2;
+ } else if (strnstr(kcontrol->id.name, "RX_3",
+ sizeof(kcontrol->id.name)) ||
+ strnstr(kcontrol->id.name, "TX_3",
+ sizeof(kcontrol->id.name))) {
+ port->channel = TDM_3;
+ } else if (strnstr(kcontrol->id.name, "RX_4",
+ sizeof(kcontrol->id.name)) ||
+ strnstr(kcontrol->id.name, "TX_4",
+ sizeof(kcontrol->id.name))) {
+ port->channel = TDM_4;
+ } else if (strnstr(kcontrol->id.name, "RX_5",
+ sizeof(kcontrol->id.name)) ||
+ strnstr(kcontrol->id.name, "TX_5",
+ sizeof(kcontrol->id.name))) {
+ port->channel = TDM_5;
+ } else if (strnstr(kcontrol->id.name, "RX_6",
+ sizeof(kcontrol->id.name)) ||
+ strnstr(kcontrol->id.name, "TX_6",
+ sizeof(kcontrol->id.name))) {
+ port->channel = TDM_6;
+ } else if (strnstr(kcontrol->id.name, "RX_7",
+ sizeof(kcontrol->id.name)) ||
+ strnstr(kcontrol->id.name, "TX_7",
+ sizeof(kcontrol->id.name))) {
+ port->channel = TDM_7;
+ } else {
+ pr_err("%s: unsupported channel in: %s",
+ __func__, kcontrol->id.name);
+ return -EINVAL;
+ }
+ } else
+ return -EINVAL;
+ return 0;
+}
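
The parse is driven entirely by the control name, for example:

/*
 * "TERT_TDM_RX_0 Format" -> port.mode = TDM_TERT, port.channel = TDM_0,
 * so the handlers below index tdm_rx_cfg[TDM_TERT][TDM_0].
 */
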
+
+static int tdm_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ ucontrol->value.enumerated.item[0] = tdm_get_sample_rate_val(
+ tdm_rx_cfg[port.mode][port.channel].sample_rate);
+
+ pr_debug("%s: tdm_rx_sample_rate = %d, item = %d\n", __func__,
+ tdm_rx_cfg[port.mode][port.channel].sample_rate,
+ ucontrol->value.enumerated.item[0]);
+ }
+ return ret;
+}
+
+static int tdm_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ tdm_rx_cfg[port.mode][port.channel].sample_rate =
+ tdm_get_sample_rate(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: tdm_rx_sample_rate = %d, item = %d\n", __func__,
+ tdm_rx_cfg[port.mode][port.channel].sample_rate,
+ ucontrol->value.enumerated.item[0]);
+ }
+ return ret;
+}
+
+static int tdm_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ ucontrol->value.enumerated.item[0] = tdm_get_sample_rate_val(
+ tdm_tx_cfg[port.mode][port.channel].sample_rate);
+
+ pr_debug("%s: tdm_tx_sample_rate = %d, item = %d\n", __func__,
+ tdm_tx_cfg[port.mode][port.channel].sample_rate,
+ ucontrol->value.enumerated.item[0]);
+ }
+ return ret;
+}
+
+static int tdm_tx_sample_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ tdm_tx_cfg[port.mode][port.channel].sample_rate =
+ tdm_get_sample_rate(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: tdm_tx_sample_rate = %d, item = %d\n", __func__,
+ tdm_tx_cfg[port.mode][port.channel].sample_rate,
+ ucontrol->value.enumerated.item[0]);
+ }
+ return ret;
+}
+
+static int tdm_get_format(int value)
+{
+ int format = 0;
+
+ switch (value) {
+ case 0:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ case 1:
+ format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 2:
+ format = SNDRV_PCM_FORMAT_S32_LE;
+ break;
+ default:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ return format;
+}
+
+static int tdm_get_format_val(int format)
+{
+ int value = 0;
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ value = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ value = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ value = 2;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ return value;
+}
+
+static int tdm_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ ucontrol->value.enumerated.item[0] = tdm_get_format_val(
+ tdm_rx_cfg[port.mode][port.channel].bit_format);
+
+ pr_debug("%s: tdm_rx_bit_format = %d, item = %d\n", __func__,
+ tdm_rx_cfg[port.mode][port.channel].bit_format,
+ ucontrol->value.enumerated.item[0]);
+ }
+ return ret;
+}
+
+static int tdm_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ tdm_rx_cfg[port.mode][port.channel].bit_format =
+ tdm_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: tdm_rx_bit_format = %d, item = %d\n", __func__,
+ tdm_rx_cfg[port.mode][port.channel].bit_format,
+ ucontrol->value.enumerated.item[0]);
+ }
+ return ret;
+}
+
+static int tdm_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ ucontrol->value.enumerated.item[0] = tdm_get_format_val(
+ tdm_tx_cfg[port.mode][port.channel].bit_format);
+
+ pr_debug("%s: tdm_tx_bit_format = %d, item = %d\n", __func__,
+ tdm_tx_cfg[port.mode][port.channel].bit_format,
+ ucontrol->value.enumerated.item[0]);
+ }
+ return ret;
+}
+
+static int tdm_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ tdm_tx_cfg[port.mode][port.channel].bit_format =
+ tdm_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: tdm_tx_bit_format = %d, item = %d\n", __func__,
+ tdm_tx_cfg[port.mode][port.channel].bit_format,
+ ucontrol->value.enumerated.item[0]);
+ }
+ return ret;
+}
+
+static int tdm_rx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ ucontrol->value.enumerated.item[0] =
+ tdm_rx_cfg[port.mode][port.channel].channels - 1;
+
+ pr_debug("%s: tdm_rx_ch = %d, item = %d\n", __func__,
+ tdm_rx_cfg[port.mode][port.channel].channels - 1,
+ ucontrol->value.enumerated.item[0]);
+ }
+ return ret;
+}
+
+static int tdm_rx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ tdm_rx_cfg[port.mode][port.channel].channels =
+ ucontrol->value.enumerated.item[0] + 1;
+
+ pr_debug("%s: tdm_rx_ch = %d, item = %d\n", __func__,
+ tdm_rx_cfg[port.mode][port.channel].channels,
+ ucontrol->value.enumerated.item[0] + 1);
+ }
+ return ret;
+}
+
+static int tdm_tx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ ucontrol->value.enumerated.item[0] =
+ tdm_tx_cfg[port.mode][port.channel].channels - 1;
+
+ pr_debug("%s: tdm_tx_ch = %d, item = %d\n", __func__,
+ tdm_tx_cfg[port.mode][port.channel].channels - 1,
+ ucontrol->value.enumerated.item[0]);
+ }
+ return ret;
+}
+
+static int tdm_tx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tdm_port port;
+ int ret = tdm_get_port_idx(kcontrol, &port);
+
+ if (ret) {
+ pr_err("%s: unsupported control: %s",
+ __func__, kcontrol->id.name);
+ } else {
+ tdm_tx_cfg[port.mode][port.channel].channels =
+ ucontrol->value.enumerated.item[0] + 1;
+
+ pr_debug("%s: tdm_tx_ch = %d, item = %d\n", __func__,
+ tdm_tx_cfg[port.mode][port.channel].channels,
+ ucontrol->value.enumerated.item[0] + 1);
+ }
+ return ret;
+}
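
Together these handlers back the TDM mixer controls registered below; a userspace write such as the following ends up in tdm_tx_ch_put():

/*
 * e.g. `amixer cset name='TERT_TDM_TX_0 Channels' Eight` arrives with
 * item[0] == 7 and stores channels = 8 in tdm_tx_cfg[TDM_TERT][TDM_0].
 */
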
+
static int aux_pcm_get_port_idx(struct snd_kcontrol *kcontrol)
{
int idx;
@@ -1757,6 +2362,24 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("Display Port RX SampleRate", ext_disp_rx_sample_rate,
ext_disp_rx_sample_rate_get,
ext_disp_rx_sample_rate_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
+ tdm_rx_sample_rate_get,
+ tdm_rx_sample_rate_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_0 SampleRate", tdm_tx_sample_rate,
+ tdm_tx_sample_rate_get,
+ tdm_tx_sample_rate_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_0 Format", tdm_rx_format,
+ tdm_rx_format_get,
+ tdm_rx_format_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_0 Format", tdm_tx_format,
+ tdm_tx_format_get,
+ tdm_tx_format_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_0 Channels", tdm_rx_chs,
+ tdm_rx_ch_get,
+ tdm_rx_ch_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_0 Channels", tdm_tx_chs,
+ tdm_tx_ch_get,
+ tdm_tx_ch_put),
SOC_ENUM_EXT("PRIM_AUX_PCM_RX SampleRate", prim_aux_pcm_rx_sample_rate,
aux_pcm_rx_sample_rate_get,
aux_pcm_rx_sample_rate_put),
@@ -2094,6 +2717,22 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
rate->min = rate->max = SAMPLING_RATE_48KHZ;
break;
+ case MSM_BACKEND_DAI_TERT_TDM_RX_0:
+ channels->min = channels->max =
+ tdm_rx_cfg[TDM_TERT][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_rx_cfg[TDM_TERT][TDM_0].bit_format);
+ rate->min = rate->max = tdm_rx_cfg[TDM_TERT][TDM_0].sample_rate;
+ break;
+
+ case MSM_BACKEND_DAI_TERT_TDM_TX_0:
+ channels->min = channels->max =
+ tdm_tx_cfg[TDM_TERT][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_tx_cfg[TDM_TERT][TDM_0].bit_format);
+ rate->min = rate->max = tdm_tx_cfg[TDM_TERT][TDM_0].sample_rate;
+ break;
+
case MSM_BACKEND_DAI_AUXPCM_RX:
rate->min = rate->max =
aux_pcm_rx_cfg[PRIM_AUX_PCM].sample_rate;
@@ -2326,49 +2965,65 @@ err_fail:
return ret;
}
-static int msm_adsp_state_callback(struct notifier_block *nb,
- unsigned long value, void *priv)
+static int msmcobalt_notifier_service_cb(struct notifier_block *this,
+ unsigned long opcode, void *ptr)
{
- int ret = NOTIFY_OK;
+ int ret;
struct snd_soc_card *card = NULL;
const char *be_dl_name = LPASS_BE_SLIMBUS_0_RX;
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_codec *codec;
- if (!spdev)
- return -EINVAL;
-
- card = platform_get_drvdata(spdev);
- rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
- if (!rtd) {
- dev_err(card->dev,
- "%s: snd_soc_get_pcm_runtime for %s failed!\n",
- __func__, be_dl_name);
- ret = -EINVAL;
- goto err_pcm_runtime;
- }
+ pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
- codec = rtd->codec;
- if (value == SUBSYS_BEFORE_SHUTDOWN) {
- pr_debug("%s: ADSP is about to shutdown. Clearing AFE config\n",
- __func__);
+ switch (opcode) {
+ case AUDIO_NOTIFIER_SERVICE_DOWN:
+ /*
+ * Use the flag to ignore notifications at initial boot:
+ * msm_adsp_power_up_config() is already called during init,
+ * so there is no need to clear and re-apply the config on
+ * the first boot.
+ */
+ if (is_initial_boot)
+ break;
msm_afe_clear_config();
- } else if (value == SUBSYS_AFTER_POWERUP) {
- ret = msm_adsp_power_up_config(codec);
- if (ret) {
+ break;
+ case AUDIO_NOTIFIER_SERVICE_UP:
+ if (is_initial_boot) {
+ is_initial_boot = false;
+ break;
+ }
+ if (!spdev)
+ return -EINVAL;
+
+ card = platform_get_drvdata(spdev);
+ rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+ if (!rtd) {
+ dev_err(card->dev,
+ "%s: snd_soc_get_pcm_runtime for %s failed!\n",
+ __func__, be_dl_name);
ret = -EINVAL;
- } else {
- pr_debug("%s: ADSP is up\n", __func__);
- ret = NOTIFY_OK;
+ goto done;
}
- }
+ codec = rtd->codec;
-err_pcm_runtime:
- return ret;
+ ret = msm_adsp_power_up_config(codec);
+ if (ret < 0) {
+ dev_err(card->dev,
+ "%s: msm_adsp_power_up_config failed ret = %d!\n",
+ __func__, ret);
+ goto done;
+ }
+ break;
+ default:
+ break;
+ }
+done:
+ return NOTIFY_OK;
}
-static struct notifier_block adsp_state_notifier_block = {
- .notifier_call = msm_adsp_state_callback,
+static struct notifier_block service_nb = {
+ .notifier_call = msmcobalt_notifier_service_cb,
.priority = -INT_MAX,
};
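
The registration of this block is outside the hunks shown here; assuming the audio_notifier API from the header included above, it would plausibly read:

/* Sketch only; error handling as in the rest of the driver */
ret = audio_notifier_register("msmcobalt", AUDIO_NOTIFIER_ADSP_DOMAIN,
			      &service_nb);
if (ret < 0)
	pr_err("%s: audio notifier registration failed, ret = %d\n",
	       __func__, ret);
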
@@ -2486,17 +3141,18 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
goto err_afe_cfg;
}
- if (!strcmp(dev_name(codec_dai->dev), "tasha_codec")) {
- config_data = msm_codec_fn.get_afe_config_fn(codec,
- AFE_AANC_VERSION);
- if (config_data) {
- ret = afe_set_config(AFE_AANC_VERSION, config_data, 0);
- if (ret) {
- pr_err("%s: Failed to set aanc version %d\n",
- __func__, ret);
- goto err_afe_cfg;
- }
+ config_data = msm_codec_fn.get_afe_config_fn(codec,
+ AFE_AANC_VERSION);
+ if (config_data) {
+ ret = afe_set_config(AFE_AANC_VERSION, config_data, 0);
+ if (ret) {
+ pr_err("%s: Failed to set aanc version %d\n",
+ __func__, ret);
+ goto err_afe_cfg;
}
+ }
+
+ if (!strcmp(dev_name(codec_dai->dev), "tasha_codec")) {
config_data = msm_codec_fn.get_afe_config_fn(codec,
AFE_CDC_CLIP_REGISTERS_CONFIG);
if (config_data) {
@@ -2519,14 +3175,7 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
}
}
}
- adsp_state_notifier = subsys_notif_register_notifier("adsp",
- &adsp_state_notifier_block);
- if (!adsp_state_notifier) {
- pr_err("%s: Failed to register adsp state notifier\n",
- __func__);
- ret = -EFAULT;
- goto err_adsp_notify;
- }
+
/*
* Send speaker configuration only for WSA8810.
	 * Default configuration is for WSA8815.
@@ -2575,7 +3224,6 @@ done:
return 0;
err_snd_module:
-err_adsp_notify:
err_afe_cfg:
return ret;
}
@@ -2602,7 +3250,7 @@ static void *def_tasha_mbhc_cal(void)
return NULL;
#define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(tasha_wcd_cal)->X) = (Y))
- S(v_hs_max, 1500);
+ S(v_hs_max, 1600);
#undef S
#define S(X, Y) ((WCD_MBHC_CAL_BTN_DET_PTR(tasha_wcd_cal)->X) = (Y))
S(num_btn, WCD_MBHC_DEF_BUTTONS);
@@ -3166,6 +3814,157 @@ static struct snd_soc_ops msm_aux_pcm_be_ops = {
.shutdown = msm_aux_pcm_snd_shutdown,
};
+static unsigned int tdm_param_set_slot_mask(u16 port_id, int slot_width,
+ int slots)
+{
+ unsigned int slot_mask = 0;
+ int i, j;
+ unsigned int *slot_offset;
+
+ for (i = TDM_0; i < TDM_PORT_MAX; i++) {
+ slot_offset = tdm_slot_offset[i];
+
+ for (j = 0; j < TDM_SLOT_OFFSET_MAX; j++) {
+ if (slot_offset[j] != AFE_SLOT_MAPPING_OFFSET_INVALID)
+ slot_mask |=
+ (1 << ((slot_offset[j] * 8) / slot_width));
+ else
+ break;
+ }
+ }
+
+ return slot_mask;
+}
+
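
A minimal userspace sketch of the offset-to-mask arithmetic above (our own
illustration, not part of the patch; the hypothetical OFFSET_INVALID stands
in for AFE_SLOT_MAPPING_OFFSET_INVALID). With a 32-bit slot width and byte
offsets 0, 4, 8, 12, each valid offset selects bit (offset * 8) / slot_width:

	#include <stdio.h>

	#define OFFSET_INVALID 0xFFFFu

	int main(void)
	{
		unsigned int offsets[] = { 0, 4, 8, 12, OFFSET_INVALID };
		unsigned int slot_width = 32;
		unsigned int mask = 0;
		int i;

		/* Mirror the driver loop: stop at the first invalid offset */
		for (i = 0; offsets[i] != OFFSET_INVALID; i++)
			mask |= 1u << ((offsets[i] * 8) / slot_width);

		printf("slot_mask = 0x%x\n", mask);	/* prints 0xf */
		return 0;
	}
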
+static int msm_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+ int channels, slot_width, slots;
+ unsigned int slot_mask;
+ unsigned int *slot_offset;
+ int offset_channels = 0;
+ int i;
+
+ pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
+
+ channels = params_channels(params);
+ switch (channels) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_S16_LE:
+			/*
+			 * For up to 8 channels, the HW config uses a 32-bit
+			 * slot width so the widest supported stream bit
+			 * width fits (slot_width >= bit_width).
+			 */
+ slot_width = 32;
+ break;
+ default:
+ pr_err("%s: invalid param format 0x%x\n",
+ __func__, params_format(params));
+ return -EINVAL;
+ }
+ slots = 8;
+ slot_mask = tdm_param_set_slot_mask(cpu_dai->id,
+ slot_width,
+ slots);
+ if (!slot_mask) {
+ pr_err("%s: invalid slot_mask 0x%x\n",
+ __func__, slot_mask);
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("%s: invalid param channels %d\n",
+ __func__, channels);
+ return -EINVAL;
+ }
+ /* currently only supporting TDM_RX_0 and TDM_TX_0 */
+ switch (cpu_dai->id) {
+ case AFE_PORT_ID_PRIMARY_TDM_RX:
+ case AFE_PORT_ID_SECONDARY_TDM_RX:
+ case AFE_PORT_ID_TERTIARY_TDM_RX:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX:
+ case AFE_PORT_ID_PRIMARY_TDM_TX:
+ case AFE_PORT_ID_SECONDARY_TDM_TX:
+ case AFE_PORT_ID_TERTIARY_TDM_TX:
+ case AFE_PORT_ID_QUATERNARY_TDM_TX:
+ slot_offset = tdm_slot_offset[TDM_0];
+ break;
+ default:
+ pr_err("%s: dai id 0x%x not supported\n",
+ __func__, cpu_dai->id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TDM_SLOT_OFFSET_MAX; i++) {
+ if (slot_offset[i] != AFE_SLOT_MAPPING_OFFSET_INVALID)
+ offset_channels++;
+ else
+ break;
+ }
+
+ if (offset_channels == 0) {
+ pr_err("%s: slot offset not supported, offset_channels %d\n",
+ __func__, offset_channels);
+ return -EINVAL;
+ }
+
+ if (channels > offset_channels) {
+ pr_err("%s: channels %d exceed offset_channels %d\n",
+ __func__, channels, offset_channels);
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask,
+ slots, slot_width);
+ if (ret < 0) {
+ pr_err("%s: failed to set tdm slot, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+
+ ret = snd_soc_dai_set_channel_map(cpu_dai, 0, NULL,
+ channels, slot_offset);
+ if (ret < 0) {
+ pr_err("%s: failed to set channel map, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+ } else {
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, slot_mask, 0,
+ slots, slot_width);
+ if (ret < 0) {
+ pr_err("%s: failed to set tdm slot, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+
+ ret = snd_soc_dai_set_channel_map(cpu_dai, channels,
+ slot_offset, 0, NULL);
+ if (ret < 0) {
+ pr_err("%s: failed to set channel map, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+ }
+end:
+ return ret;
+}
+
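
A consequence worth spelling out (our arithmetic, not stated in the patch):
with 8 slots of 32 bits each, the TDM frame is 256 bits, so at a 48 kHz frame
rate the bit clock runs at 8 x 32 x 48000 = 12.288 MHz. Note also that the
mask argument order follows snd_soc_dai_set_tdm_slot()'s (tx_mask, rx_mask)
convention from the AFE port's point of view, which is why playback fills
rx_mask and capture fills tx_mask above.
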
static struct snd_soc_ops msm_be_ops = {
.hw_params = msm_snd_hw_params,
};
@@ -3182,6 +3981,10 @@ static struct snd_soc_ops msm_wcn_ops = {
.hw_params = msm_wcn_hw_params,
};
+static struct snd_soc_ops msm_tdm_be_ops = {
+	.hw_params = msm_tdm_snd_hw_params,
+};
+
/* Digital audio interface glue - connects codec <---> CPU */
static struct snd_soc_dai_link msm_common_dai_links[] = {
/* FrontEnd DAI Links */
@@ -3982,6 +4785,34 @@ static struct snd_soc_dai_link msm_common_be_dai_links[] = {
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
+ {
+ .name = LPASS_BE_TERT_TDM_RX_0,
+ .stream_name = "Tertiary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36896",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_TERT_TDM_RX_0,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_TERT_TDM_TX_0,
+ .stream_name = "Tertiary TDM0 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36897",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
};
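
For reference (our reading of the numbers, not stated in the patch): the
.36896 and .36897 device-name suffixes appear to be the decimal forms of the
tertiary TDM AFE port IDs 0x9020 and 0x9021, i.e. the same
AFE_PORT_ID_TERTIARY_TDM_RX/TX values handled by msm_tdm_snd_hw_params()
above.
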
static struct snd_soc_dai_link msm_tasha_be_dai_links[] = {
@@ -4280,6 +5111,21 @@ static struct snd_soc_dai_link msm_tavil_be_dai_links[] = {
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
},
+ /* MAD BE */
+ {
+ .name = LPASS_BE_SLIMBUS_5_TX,
+ .stream_name = "Slimbus5 Capture",
+ .cpu_dai_name = "msm-dai-q6-dev.16395",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "tavil_codec",
+ .codec_dai_name = "tavil_mad1",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_be_ops,
+ .ignore_suspend = 1,
+ },
{
.name = LPASS_BE_SLIMBUS_6_RX,
.stream_name = "Slimbus6 Playback",
@@ -5610,6 +6456,14 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
ret);
i2s_auxpcm_init(pdev);
+
+ is_initial_boot = true;
+ ret = audio_notifier_register("msmcobalt", AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &service_nb);
+ if (ret < 0)
+ pr_err("%s: Audio notifier register failed ret = %d\n",
+ __func__, ret);
+
return 0;
err:
if (pdata->us_euro_gpio > 0) {
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
index 26528e6a2bb8..58a4de5af145 100644
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -1024,6 +1024,7 @@ static int msm_compr_ioctl_shared(struct snd_pcm_substream *substream,
struct snd_dec_ddp *ddp =
&compr->info.codec_param.codec.options.ddp;
uint32_t params_length = 0;
+ memset(params_value, 0, MAX_AC3_PARAM_SIZE);
/* check integer overflow */
if (ddp->params_length > UINT_MAX/sizeof(int)) {
pr_err("%s: Integer overflow ddp->params_length %d\n",
@@ -1064,6 +1065,7 @@ static int msm_compr_ioctl_shared(struct snd_pcm_substream *substream,
struct snd_dec_ddp *ddp =
&compr->info.codec_param.codec.options.ddp;
uint32_t params_length = 0;
+ memset(params_value, 0, MAX_AC3_PARAM_SIZE);
/* check integer overflow */
if (ddp->params_length > UINT_MAX/sizeof(int)) {
pr_err("%s: Integer overflow ddp->params_length %d\n",
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 841bb5bce13f..770bd12eb501 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -709,6 +709,10 @@ static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
}
switch (prtd->codec_param.codec.format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ bit_width = 32;
+ sample_word_size = 32;
+ break;
case SNDRV_PCM_FORMAT_S24_LE:
bit_width = 24;
sample_word_size = 32;
@@ -723,14 +727,16 @@ static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
sample_word_size = 16;
break;
}
- ret = q6asm_media_format_block_pcm_format_support_v3(
+ ret = q6asm_media_format_block_pcm_format_support_v4(
prtd->audio_client,
prtd->sample_rate,
prtd->num_channels,
bit_width, stream_id,
use_default_chmap,
chmap,
- sample_word_size);
+ sample_word_size,
+ ASM_LITTLE_ENDIAN,
+ DEFAULT_QF);
if (ret < 0)
pr_err("%s: CMD Format block failed\n", __func__);
@@ -1010,7 +1016,7 @@ static int msm_compr_configure_dsp(struct snd_compr_stream *cstream)
} else {
pr_debug("%s: stream_id %d bits_per_sample %d\n",
__func__, ac->stream_id, bits_per_sample);
- ret = q6asm_stream_open_write_v3(ac,
+ ret = q6asm_stream_open_write_v4(ac,
prtd->codec, bits_per_sample,
ac->stream_id,
prtd->gapless_state.use_dsp_gapless_mode);
@@ -1942,7 +1948,7 @@ static int msm_compr_trigger(struct snd_compr_stream *cstream, int cmd)
pr_debug("%s: open_write stream_id %d bits_per_sample %d",
__func__, stream_id, bits_per_sample);
- rc = q6asm_stream_open_write_v3(prtd->audio_client,
+ rc = q6asm_stream_open_write_v4(prtd->audio_client,
prtd->codec, bits_per_sample,
stream_id,
prtd->gapless_state.use_dsp_gapless_mode);
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index a89d88eac41e..7eb4a10b83c7 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -173,6 +173,7 @@ struct msm_dai_q6_dai_data {
u32 bitwidth;
u32 cal_mode;
u32 afe_in_channels;
+ u16 afe_in_bitformat;
struct afe_enc_config enc_config;
union afe_port_config port_config;
};
@@ -1417,11 +1418,20 @@ static int msm_dai_q6_prepare(struct snd_pcm_substream *substream,
if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
if (dai_data->enc_config.format != ENC_FMT_NONE) {
+ int bitwidth = 0;
+
+ if (dai_data->afe_in_bitformat ==
+ SNDRV_PCM_FORMAT_S24_LE)
+ bitwidth = 24;
+ else if (dai_data->afe_in_bitformat ==
+ SNDRV_PCM_FORMAT_S16_LE)
+ bitwidth = 16;
pr_debug("%s: calling AFE_PORT_START_V2 with enc_format: %d\n",
__func__, dai_data->enc_config.format);
rc = afe_port_start_v2(dai->id, &dai_data->port_config,
dai_data->rate,
dai_data->afe_in_channels,
+ bitwidth,
&dai_data->enc_config);
if (rc < 0)
pr_err("%s: afe_port_start_v2 failed error: %d\n",
@@ -1607,8 +1617,13 @@ static int msm_dai_q6_usb_audio_hw_params(struct snd_pcm_hw_params *params,
dai_data->port_config.usb_audio.bit_width = 16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_S24_3LE:
dai_data->port_config.usb_audio.bit_width = 24;
break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ dai_data->port_config.usb_audio.bit_width = 32;
+ break;
+
default:
dev_err(dai->dev, "%s: invalid format %d\n",
__func__, params_format(params));
@@ -2140,6 +2155,12 @@ static const struct soc_enum afe_input_chs_enum[] = {
SOC_ENUM_SINGLE_EXT(3, afe_input_chs_text),
};
+static const char *const afe_input_bit_format_text[] = {"S16_LE", "S24_LE"};
+
+static const struct soc_enum afe_input_bit_format_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, afe_input_bit_format_text),
+};
+
static int msm_dai_q6_afe_input_channel_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -2168,6 +2189,58 @@ static int msm_dai_q6_afe_input_channel_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_dai_q6_afe_input_bit_format_get(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+ if (!dai_data) {
+ pr_err("%s: Invalid dai data\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (dai_data->afe_in_bitformat) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: afe input bit format : %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+
+ return 0;
+}
+
+static int msm_dai_q6_afe_input_bit_format_put(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+
+ if (!dai_data) {
+ pr_err("%s: Invalid dai data\n", __func__);
+ return -EINVAL;
+ }
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ dai_data->afe_in_bitformat = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ dai_data->afe_in_bitformat = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: updating afe input bit format : %d\n",
+ __func__, dai_data->afe_in_bitformat);
+
+ return 0;
+}
+
+
static const struct snd_kcontrol_new afe_enc_config_controls[] = {
{
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
@@ -2181,6 +2254,9 @@ static const struct snd_kcontrol_new afe_enc_config_controls[] = {
SOC_ENUM_EXT("AFE Input Channels", afe_input_chs_enum[0],
msm_dai_q6_afe_input_channel_get,
msm_dai_q6_afe_input_channel_put),
+ SOC_ENUM_EXT("AFE Input Bit Format", afe_input_bit_format_enum[0],
+ msm_dai_q6_afe_input_bit_format_get,
+ msm_dai_q6_afe_input_bit_format_put),
};
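
A hypothetical userspace sketch (alsa-lib) of driving the new enum control;
the card name "hw:0" and the enumerated index 1 (== "S24_LE", per
afe_input_bit_format_text above) are assumptions for illustration:

	#include <alsa/asoundlib.h>

	int set_afe_input_bit_format(void)
	{
		snd_ctl_t *ctl;
		snd_ctl_elem_value_t *val;
		int ret;

		ret = snd_ctl_open(&ctl, "hw:0", 0);
		if (ret < 0)
			return ret;

		snd_ctl_elem_value_alloca(&val);
		snd_ctl_elem_value_set_interface(val, SND_CTL_ELEM_IFACE_MIXER);
		snd_ctl_elem_value_set_name(val, "AFE Input Bit Format");
		snd_ctl_elem_value_set_enumerated(val, 0, 1);	/* 1 -> S24_LE */

		ret = snd_ctl_elem_write(ctl, val);
		snd_ctl_close(ctl);
		return ret;
	}
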
static const char * const afe_cal_mode_text[] = {
@@ -2570,11 +2646,12 @@ static struct snd_soc_dai_driver msm_dai_q6_usb_rx_dai = {
SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
- SNDRV_PCM_RATE_192000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_384000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
- .rate_max = 192000,
+ .rate_max = 384000,
.rate_min = 8000,
},
.ops = &msm_dai_q6_ops,
@@ -2591,11 +2668,12 @@ static struct snd_soc_dai_driver msm_dai_q6_usb_tx_dai = {
SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |
- SNDRV_PCM_RATE_192000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_384000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 1,
.channels_max = 8,
- .rate_max = 192000,
+ .rate_max = 384000,
.rate_min = 8000,
},
.ops = &msm_dai_q6_ops,
@@ -5825,11 +5903,6 @@ static int msm_dai_q6_tdm_hw_params(struct snd_pcm_substream *substream,
pr_debug("%s: dev_name: %s\n",
__func__, dev_name(dai->dev));
- if (params_rate(params) != 48000) {
- dev_err(dai->dev, "%s: invalid param rate %d\n",
- __func__, params_rate(params));
- return -EINVAL;
- }
if ((params_channels(params) == 0) ||
(params_channels(params) > 8)) {
dev_err(dai->dev, "%s: invalid param channels %d\n",
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
index 972cacb50f47..1fdb878a1a1f 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
@@ -395,6 +395,30 @@ static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
+static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msm_audio *prtd = runtime->private_data;
+ int dir = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 0 : 1;
+ struct audio_buffer *buf;
+
+ switch (cmd) {
+ case SNDRV_PCM_IOCTL1_RESET:
+ pr_debug("%s: %s SNDRV_PCM_IOCTL1_RESET\n", __func__,
+ dir == 0 ? "P" : "C");
+ buf = q6asm_shared_io_buf(prtd->audio_client, dir);
+
+ if (buf && buf->data)
+ memset(buf->data, 0, buf->actual_size);
+ break;
+ default:
+ break;
+ }
+
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
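
Usage sketch (assumption: an already-opened PCM handle): alsa-lib's
snd_pcm_reset() reaches the driver's .ioctl callback with
SNDRV_PCM_IOCTL1_RESET, so the handler above zeroes the shared DSP buffer
before the stream restarts instead of replaying stale audio:

	#include <alsa/asoundlib.h>

	int reset_stream(snd_pcm_t *pcm)
	{
		/* 0 on success, negative errno on failure */
		return snd_pcm_reset(pcm);
	}
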
static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -728,7 +752,7 @@ static struct snd_pcm_ops msm_pcm_ops = {
.prepare = msm_pcm_prepare,
.copy = msm_pcm_copy,
.hw_params = msm_pcm_hw_params,
- .ioctl = snd_pcm_lib_ioctl,
+ .ioctl = msm_pcm_ioctl,
.trigger = msm_pcm_trigger,
.pointer = msm_pcm_pointer,
.mmap = msm_pcm_mmap,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index 4e3745d4d976..c5baf0e63732 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -57,6 +57,7 @@ struct snd_msm {
#define CMD_EOS_MIN_TIMEOUT_LENGTH 50
#define CMD_EOS_TIMEOUT_MULTIPLIER (HZ * 50)
+#define MAX_PB_COPY_RETRIES 3
static struct snd_pcm_hardware msm_pcm_hardware_capture = {
.info = (SNDRV_PCM_INFO_MMAP |
@@ -66,10 +67,11 @@ static struct snd_pcm_hardware msm_pcm_hardware_capture = {
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
- .rates = SNDRV_PCM_RATE_8000_48000,
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .rates = SNDRV_PCM_RATE_8000_384000,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 384000,
.channels_min = 1,
.channels_max = 4,
.buffer_bytes_max = CAPTURE_MAX_NUM_PERIODS *
@@ -89,10 +91,11 @@ static struct snd_pcm_hardware msm_pcm_hardware_playback = {
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S24_3LE),
- .rates = SNDRV_PCM_RATE_8000_192000,
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .rates = SNDRV_PCM_RATE_8000_384000,
.rate_min = 8000,
- .rate_max = 192000,
+ .rate_max = 384000,
.channels_min = 1,
.channels_max = 8,
.buffer_bytes_max = PLAYBACK_MAX_NUM_PERIODS *
@@ -107,7 +110,7 @@ static struct snd_pcm_hardware msm_pcm_hardware_playback = {
/* Conventional and unconventional sample rates supported */
static unsigned int supported_sample_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
- 88200, 96000, 176400, 192000
+ 88200, 96000, 176400, 192000, 384000
};
static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
@@ -312,6 +315,10 @@ static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
pr_debug("%s: perf: %x\n", __func__, pdata->perf_mode);
switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ bits_per_sample = 32;
+ sample_word_size = 32;
+ break;
case SNDRV_PCM_FORMAT_S24_LE:
bits_per_sample = 24;
sample_word_size = 32;
@@ -327,7 +334,7 @@ static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
break;
}
- ret = q6asm_open_write_v3(prtd->audio_client,
+ ret = q6asm_open_write_v4(prtd->audio_client,
FORMAT_LINEAR_PCM, bits_per_sample);
if (ret < 0) {
@@ -352,11 +359,12 @@ static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
return ret;
}
- ret = q6asm_media_format_block_multi_ch_pcm_v3(
+ ret = q6asm_media_format_block_multi_ch_pcm_v4(
prtd->audio_client, runtime->rate,
runtime->channels, !prtd->set_channel_map,
prtd->channel_map, bits_per_sample,
- sample_word_size);
+ sample_word_size, ASM_LITTLE_ENDIAN,
+ DEFAULT_QF);
if (ret < 0)
pr_info("%s: CMD Format block failed\n", __func__);
@@ -401,6 +409,8 @@ static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
if ((params_format(params) == SNDRV_PCM_FORMAT_S24_LE) ||
(params_format(params) == SNDRV_PCM_FORMAT_S24_3LE))
bits_per_sample = 24;
+ else if (params_format(params) == SNDRV_PCM_FORMAT_S32_LE)
+ bits_per_sample = 32;
/* ULL mode is not supported in capture path */
if (pdata->perf_mode == LEGACY_PCM_MODE)
@@ -412,7 +422,7 @@ static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
__func__, params_channels(params),
prtd->audio_client->perf_mode);
- ret = q6asm_open_read_v3(prtd->audio_client, FORMAT_LINEAR_PCM,
+ ret = q6asm_open_read_v4(prtd->audio_client, FORMAT_LINEAR_PCM,
bits_per_sample);
if (ret < 0) {
pr_err("%s: q6asm_open_read failed\n", __func__);
@@ -458,6 +468,10 @@ static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
return 0;
switch (runtime->format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ bits_per_sample = 32;
+ sample_word_size = 32;
+ break;
case SNDRV_PCM_FORMAT_S24_LE:
bits_per_sample = 24;
sample_word_size = 32;
@@ -476,11 +490,13 @@ static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
pr_debug("%s: Samp_rate = %d Channel = %d bit width = %d, word size = %d\n",
__func__, prtd->samp_rate, prtd->channel_mode,
bits_per_sample, sample_word_size);
- ret = q6asm_enc_cfg_blk_pcm_format_support_v3(prtd->audio_client,
+ ret = q6asm_enc_cfg_blk_pcm_format_support_v4(prtd->audio_client,
prtd->samp_rate,
prtd->channel_mode,
bits_per_sample,
- sample_word_size);
+ sample_word_size,
+ ASM_LITTLE_ENDIAN,
+ DEFAULT_QF);
if (ret < 0)
pr_debug("%s: cmd cfg pcm was block failed", __func__);
@@ -629,6 +645,7 @@ static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
void *data = NULL;
uint32_t idx = 0;
uint32_t size = 0;
+ uint32_t retries = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_audio *prtd = runtime->private_data;
@@ -637,7 +654,7 @@ static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
pr_debug("%s: prtd->out_count = %d\n",
__func__, atomic_read(&prtd->out_count));
- while (fbytes > 0) {
+ while ((fbytes > 0) && (retries < MAX_PB_COPY_RETRIES)) {
if (prtd->reset_event) {
pr_err("%s: In SSR return ENETRESET before wait\n",
__func__);
@@ -666,6 +683,13 @@ static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
data = q6asm_is_cpu_buf_avail(IN, prtd->audio_client, &size,
&idx);
+ if (data == NULL) {
+ retries++;
+ continue;
+ } else {
+ retries = 0;
+ }
+
if (fbytes > size)
xfer = size;
else
@@ -677,6 +701,9 @@ static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
__func__, fbytes, xfer, size);
if (copy_from_user(bufptr, buf, xfer)) {
ret = -EFAULT;
+ pr_err("%s: copy_from_user failed\n",
+ __func__);
+ q6asm_cpu_buf_release(IN, prtd->audio_client);
goto fail;
}
buf += xfer;
@@ -690,6 +717,8 @@ static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
0, 0, NO_TIMESTAMP);
if (ret < 0) {
ret = -EFAULT;
+ q6asm_cpu_buf_release(IN,
+ prtd->audio_client);
goto fail;
}
} else
@@ -698,6 +727,9 @@ static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
}
}
fail:
+ if (retries >= MAX_PB_COPY_RETRIES)
+ ret = -ENOMEM;
+
return ret;
}
@@ -802,6 +834,7 @@ static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
if (copy_to_user(buf, bufptr+offset, xfer)) {
pr_err("Failed to copy buf to user\n");
ret = -EFAULT;
+ q6asm_cpu_buf_release(OUT, prtd->audio_client);
goto fail;
}
fbytes -= xfer;
@@ -817,6 +850,7 @@ static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
if (ret < 0) {
pr_err("q6asm read failed\n");
ret = -EFAULT;
+ q6asm_cpu_buf_release(OUT, prtd->audio_client);
goto fail;
}
} else
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
index 72418ea56bb9..8fe31394eef0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h
@@ -59,11 +59,11 @@ struct msm_audio_in_frame_info {
#define PLAYBACK_MIN_NUM_PERIODS 2
#define PLAYBACK_MAX_NUM_PERIODS 8
-#define PLAYBACK_MAX_PERIOD_SIZE 12288
+#define PLAYBACK_MAX_PERIOD_SIZE 122880
#define PLAYBACK_MIN_PERIOD_SIZE 128
#define CAPTURE_MIN_NUM_PERIODS 2
#define CAPTURE_MAX_NUM_PERIODS 8
-#define CAPTURE_MAX_PERIOD_SIZE 61440
+#define CAPTURE_MAX_PERIOD_SIZE 122880
#define CAPTURE_MIN_PERIOD_SIZE 320
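
A plausible sizing check for the new maximum (an assumption about intent, not
stated in the patch): one 10 ms period of 8-channel, 32-bit audio at the new
384 kHz ceiling is 384000 x 0.010 x 8 x 4 = 122880 bytes, exactly the new
limit for both directions.
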
struct msm_audio {
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 7e3653955a64..547af163c5c0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -177,7 +177,10 @@ static void msm_pcm_routing_cfg_pp(int port_id, int copp_idx, int topology,
break;
case ADM_CMD_COPP_OPEN_TOPOLOGY_ID_AUDIOSPHERE:
pr_debug("%s: TOPOLOGY_ID_AUDIOSPHERE\n", __func__);
- msm_qti_pp_asphere_init(port_id, copp_idx);
+ rc = msm_qti_pp_asphere_init(port_id, copp_idx);
+ if (rc < 0)
+ pr_err("%s: topo_id 0x%x, port %d, copp %d, rc %d\n",
+ __func__, topology, port_id, copp_idx, rc);
break;
default:
/* custom topology specific feature param handlers */
@@ -223,240 +226,245 @@ static void msm_pcm_routing_deinit_pp(int port_id, int topology)
static void msm_pcm_routng_cfg_matrix_map_pp(struct route_payload payload,
int path_type, int perf_mode)
{
- int itr = 0;
+ int itr = 0, rc = 0;
if ((path_type == ADM_PATH_PLAYBACK) &&
(perf_mode == LEGACY_PCM_MODE) &&
is_custom_stereo_on) {
for (itr = 0; itr < payload.num_copps; itr++) {
- if ((payload.port_id[itr] == SLIMBUS_0_RX) ||
- (payload.port_id[itr] == RT_PROXY_PORT_001_RX)) {
- msm_qti_pp_send_stereo_to_custom_stereo_cmd(
- payload.port_id[itr],
- payload.copp_idx[itr],
- payload.session_id,
- Q14_GAIN_ZERO_POINT_FIVE,
- Q14_GAIN_ZERO_POINT_FIVE,
- Q14_GAIN_ZERO_POINT_FIVE,
- Q14_GAIN_ZERO_POINT_FIVE);
+ if ((payload.port_id[itr] != SLIMBUS_0_RX) &&
+ (payload.port_id[itr] != RT_PROXY_PORT_001_RX)) {
+ continue;
}
+
+ rc = msm_qti_pp_send_stereo_to_custom_stereo_cmd(
+ payload.port_id[itr],
+ payload.copp_idx[itr],
+ payload.session_id,
+ Q14_GAIN_ZERO_POINT_FIVE,
+ Q14_GAIN_ZERO_POINT_FIVE,
+ Q14_GAIN_ZERO_POINT_FIVE,
+ Q14_GAIN_ZERO_POINT_FIVE);
+ if (rc < 0)
+ pr_err("%s: err setting custom stereo\n",
+ __func__);
}
}
}
#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
- { PRIMARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
- { PRIMARY_I2S_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
- { SLIMBUS_0_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
- { SLIMBUS_0_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
- { HDMI_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
- { INT_BT_SCO_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
- { INT_BT_SCO_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
- { INT_FM_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
- { INT_FM_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
- { RT_PROXY_PORT_001_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX},
- { RT_PROXY_PORT_001_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX},
- { AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PRIMARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
+ { PRIMARY_I2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
+ { SLIMBUS_0_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
+ { SLIMBUS_0_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
+ { HDMI_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
+ { INT_BT_SCO_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
+ { INT_BT_SCO_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
+ { INT_FM_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
+ { INT_FM_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
+ { RT_PROXY_PORT_001_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX},
+ { RT_PROXY_PORT_001_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX},
+ { AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUXPCM_RX},
- { AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUXPCM_TX},
- { VOICE_PLAYBACK_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_VOICE_PLAYBACK_TX},
- { VOICE2_PLAYBACK_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE2_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_VOICE2_PLAYBACK_TX},
- { VOICE_RECORD_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX},
- { VOICE_RECORD_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX},
- { MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
- { MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
- { SECONDARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
- { SLIMBUS_1_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
- { SLIMBUS_1_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
- { SLIMBUS_2_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
- { SLIMBUS_4_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
- { SLIMBUS_4_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
- { SLIMBUS_3_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
- { SLIMBUS_3_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
- { SLIMBUS_5_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
- { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE_RECORD_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX},
+ { VOICE_RECORD_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX},
+ { MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
+ { MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
+ { SECONDARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
+ { SLIMBUS_1_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
+ { SLIMBUS_1_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
+ { SLIMBUS_2_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
+ { SLIMBUS_4_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
+ { SLIMBUS_4_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
+ { SLIMBUS_3_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
+ { SLIMBUS_3_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
+ { SLIMBUS_5_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
+ { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_MI2S_RX},
- { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_MI2S_TX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_MI2S_RX},
- { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_MI2S_TX},
- { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_MI2S_RX},
- { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_MI2S_TX},
- { AUDIO_PORT_ID_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AUDIO_PORT_ID_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUDIO_I2S_RX},
- { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_AUXPCM_RX},
- { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_AUXPCM_TX},
- { SLIMBUS_6_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
- { SLIMBUS_6_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
- { AFE_PORT_ID_SPDIF_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { SLIMBUS_6_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
+ { SLIMBUS_6_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
+ { AFE_PORT_ID_SPDIF_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
+ { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_RX_SD1},
- { SLIMBUS_5_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
- { AFE_PORT_ID_QUINARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { SLIMBUS_5_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
+ { AFE_PORT_ID_QUINARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUIN_MI2S_RX},
- { AFE_PORT_ID_QUINARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUINARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUIN_MI2S_TX},
- { AFE_PORT_ID_SENARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SENARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SENARY_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_0},
- { AFE_PORT_ID_PRIMARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_0},
- { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_1},
- { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_1},
- { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_2},
- { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_2},
- { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_3},
- { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_3},
- { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_4},
- { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_4},
- { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_5},
- { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_5},
- { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_6},
- { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_6},
- { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_7},
- { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_7},
- { AFE_PORT_ID_SECONDARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_0},
- { AFE_PORT_ID_SECONDARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_0},
- { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_1},
- { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_1},
- { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_2},
- { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_2},
- { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_3},
- { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_3},
- { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_4},
- { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_4},
- { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_5},
- { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_5},
- { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_6},
- { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_6},
- { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_7},
- { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_7},
- { AFE_PORT_ID_TERTIARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_0},
- { AFE_PORT_ID_TERTIARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_0},
- { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_1},
- { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_1},
- { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_2},
- { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_2},
- { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_3},
- { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_3},
- { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_4},
- { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_4},
- { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_5},
- { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_5},
- { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_6},
- { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_6},
- { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_7},
- { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_7},
- { INT_BT_A2DP_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
- { SLIMBUS_7_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
- { SLIMBUS_7_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
- { SLIMBUS_8_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
- { SLIMBUS_8_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
- { AFE_PORT_ID_USB_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_RX},
- { AFE_PORT_ID_USB_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_TX},
- { DISPLAY_PORT_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
- { AFE_PORT_ID_TERTIARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { INT_BT_A2DP_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
+ { SLIMBUS_7_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
+ { SLIMBUS_7_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
+ { SLIMBUS_8_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
+ { SLIMBUS_8_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
+ { AFE_PORT_ID_USB_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_RX},
+ { AFE_PORT_ID_USB_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_TX},
+ { DISPLAY_PORT_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
+ { AFE_PORT_ID_TERTIARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_AUXPCM_RX},
- { AFE_PORT_ID_TERTIARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_AUXPCM_TX},
- { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_AUXPCM_RX},
- { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_AUXPCM_TX},
};
@@ -1998,11 +2006,20 @@ static int msm_routing_slim_0_rx_aanc_mux_put(struct snd_kcontrol *kcontrol,
static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ int idx = 0, shift = 0;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- if (test_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions))
+	idx = mc->shift / (sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+	shift = mc->shift % (sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+
+ if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX) {
+ pr_err("%s: Invalid idx = %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ if (test_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]))
ucontrol->value.integer.value[0] = 1;
else
ucontrol->value.integer.value[0] = 0;
@@ -2016,22 +2033,32 @@ static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
static int msm_routing_put_port_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ int idx = 0, shift = 0;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- pr_debug("%s: reg 0x%x shift 0x%x val %ld\n", __func__, mc->reg,
- mc->shift, ucontrol->value.integer.value[0]);
+	idx = mc->shift / (sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+	shift = mc->shift % (sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+
+ if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX) {
+ pr_err("%s: Invalid idx = %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: reg 0x%x shift 0x%x val %ld idx %d reminder shift %d\n",
+ __func__, mc->reg, mc->shift,
+ ucontrol->value.integer.value[0], idx, shift);
if (ucontrol->value.integer.value[0]) {
afe_loopback(1, msm_bedais[mc->reg].port_id,
msm_bedais[mc->shift].port_id);
- set_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions);
+ set_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]);
} else {
afe_loopback(0, msm_bedais[mc->reg].port_id,
msm_bedais[mc->shift].port_id);
- clear_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions);
+ clear_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]);
}
return 1;
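
A standalone sketch of the idx/shift split (illustration only; assumes 64-bit
words, i.e. sizeof(port_sessions[0]) * 8 == 64). Backend number 70 lands in
word 70 / 64 = 1 at bit 70 % 64 = 6, which is exactly what lets the four-word
array below track up to 256 backends:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t port_sessions[4] = { 0 };
		unsigned int be = 70;
		unsigned int idx = be / 64;	/* word index: 1 */
		unsigned int shift = be % 64;	/* bit in word: 6 */

		port_sessions[idx] |= 1ULL << shift;
		printf("idx=%u shift=%u word=%#llx\n", idx, shift,
		       (unsigned long long)port_sessions[idx]);	/* 0x40 */
		return 0;
	}
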
@@ -8899,6 +8926,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia5 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia6 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia1 Mixer", "QUIN_MI2S_TX", "QUIN_MI2S_TX"},
{"MultiMedia2 Mixer", "QUIN_MI2S_TX", "QUIN_MI2S_TX"},
{"MultiMedia1 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index 6b7f2113e0f6..8e3086849d92 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -355,6 +355,7 @@ enum {
#define ADM_PP_PARAM_MUTE_BIT 1
#define ADM_PP_PARAM_LATENCY_ID 1
#define ADM_PP_PARAM_LATENCY_BIT 2
+#define BE_DAI_PORT_SESSIONS_IDX_MAX 4
struct msm_pcm_routing_evt {
void (*event_func)(enum msm_pcm_routing_event, void *);
@@ -365,10 +366,15 @@ struct msm_pcm_routing_bdai_data {
u16 port_id; /* AFE port ID */
u8 active; /* track if this backend is enabled */
unsigned long fe_sessions; /* Front-end sessions */
- u64 port_sessions; /* track Tx BE ports -> Rx BE
- * number of BE should not exceed
- * the size of this field
- */
+ /*
+ * Track Tx BE ports -> Rx BE ports.
+ * port_sessions[0] used to track BE 0 to BE 63.
+ * port_sessions[1] used to track BE 64 to BE 127.
+ * port_sessions[2] used to track BE 128 to BE 191.
+ * port_sessions[3] used to track BE 192 to BE 255.
+ */
+ u64 port_sessions[BE_DAI_PORT_SESSIONS_IDX_MAX];
+
unsigned int sample_rate;
unsigned int channel;
unsigned int format;
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
index d4d22d3587ba..7c8af09a8793 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
@@ -241,6 +241,7 @@ static int msm_qti_pp_put_eq_band_audio_mixer(struct snd_kcontrol *kcontrol,
return 0;
}
+#ifdef CONFIG_QTI_PP
void msm_qti_pp_send_eq_values(int fedai_id)
{
if (eq_data[fedai_id].enable)
@@ -325,6 +326,7 @@ skip_send_cmd:
kfree(params_value);
return -ENOMEM;
}
+#endif /* CONFIG_QTI_PP */
/* RMS */
static int msm_qti_pp_get_rms_value_control(struct snd_kcontrol *kcontrol,
@@ -682,6 +684,7 @@ static int msm_qti_pp_asphere_send_params(int port_id, int copp_idx, bool force)
return 0;
}
+#if defined(CONFIG_QTI_PP) && defined(CONFIG_QTI_PP_AUDIOSPHERE)
int msm_qti_pp_asphere_init(int port_id, int copp_idx)
{
int index = adm_validate_and_get_port_index(port_id);
@@ -719,6 +722,7 @@ void msm_qti_pp_asphere_deinit(int port_id)
asphere_state.copp_idx[index] = -1;
}
}
+#endif
static int msm_qti_pp_asphere_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -976,6 +980,7 @@ static const struct snd_kcontrol_new asphere_mixer_controls[] = {
0xFFFFFFFF, 0, 2, msm_qti_pp_asphere_get, msm_qti_pp_asphere_set),
};
+#ifdef CONFIG_QTI_PP
void msm_qti_pp_add_controls(struct snd_soc_platform *platform)
{
snd_soc_add_platform_controls(platform, int_fm_vol_mixer_controls,
@@ -1023,3 +1028,4 @@ void msm_qti_pp_add_controls(struct snd_soc_platform *platform)
snd_soc_add_platform_controls(platform, asphere_mixer_controls,
ARRAY_SIZE(asphere_mixer_controls));
}
+#endif /* CONFIG_QTI_PP */
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h
index 5c600f0ae866..f8a1da5e7702 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -15,7 +15,6 @@
#include <sound/soc.h>
#ifdef CONFIG_QTI_PP
-
void msm_qti_pp_send_eq_values(int fedai_id);
int msm_qti_pp_send_stereo_to_custom_stereo_cmd(int port_id, int copp_idx,
unsigned int session_id,
@@ -24,32 +23,22 @@ int msm_qti_pp_send_stereo_to_custom_stereo_cmd(int port_id, int copp_idx,
uint16_t op_FR_ip_FL_weight,
uint16_t op_FR_ip_FR_weight);
void msm_qti_pp_add_controls(struct snd_soc_platform *platform);
+#else /* CONFIG_QTI_PP */
+#define msm_qti_pp_send_eq_values(fedai_id) do {} while (0)
+#define msm_qti_pp_send_stereo_to_custom_stereo_cmd(port_id, copp_idx, \
+ session_id, op_FL_ip_FL_weight, op_FL_ip_FR_weight, \
+ op_FR_ip_FL_weight, op_FR_ip_FR_weight) (0)
+#define msm_qti_pp_add_controls(platform) do {} while (0)
+#endif /* CONFIG_QTI_PP */
+
+#if defined(CONFIG_QTI_PP) && defined(CONFIG_QTI_PP_AUDIOSPHERE)
int msm_qti_pp_asphere_init(int port_id, int copp_idx);
void msm_qti_pp_asphere_deinit(int port_id);
-
#else
-
-void msm_qti_pp_send_eq_values(int fedai_id) { }
-int msm_qti_pp_send_stereo_to_custom_stereo_cmd(int port_id, int copp_idx,
- unsigned int session_id,
- uint16_t op_FL_ip_FL_weight,
- uint16_t op_FL_ip_FR_weight,
- uint16_t op_FR_ip_FL_weight,
- uint16_t op_FR_ip_FR_weight)
-{
- return 0;
-}
-
-void msm_qti_pp_add_controls(struct snd_soc_platform *platform) { }
-
-int msm_qti_pp_asphere_init(int port_id, int copp_idx)
-{
- return 0;
-}
-void msm_qti_pp_asphere_deinit(int port_id) { }
-
+#define msm_qti_pp_asphere_init(port_id, copp_idx) (0)
+#define msm_qti_pp_asphere_deinit(port_id) do {} while (0)
#endif
-#endif
+#endif /* _MSM_QTI_PP_H_ */
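
An equivalent alternative to the stub macros above (a sketch of the design
trade-off, not what the patch does; the _alt names are hypothetical): static
inline stubs also avoid the multiple-definition problem of the old non-static
header definitions, while preserving argument type checking:

	static inline int msm_qti_pp_asphere_init_alt(int port_id, int copp_idx)
	{
		return 0;
	}

	static inline void msm_qti_pp_asphere_deinit_alt(int port_id) { }
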
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index ffa78af72544..e30a4efa6e60 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -2173,69 +2173,74 @@ int adm_arrange_mch_map(struct adm_cmd_device_open_v5 *open, int path,
int channel_mode)
{
int rc = 0, idx;
-
- memset(open->dev_channel_mapping, 0,
- PCM_FORMAT_MAX_NUM_CHANNEL);
-
- if (channel_mode == 1) {
- open->dev_channel_mapping[0] = PCM_CHANNEL_FC;
- } else if (channel_mode == 2) {
- open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
- open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
- } else if (channel_mode == 3) {
- open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
- open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
- open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
- } else if (channel_mode == 4) {
- open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
- open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
- open->dev_channel_mapping[2] = PCM_CHANNEL_LS;
- open->dev_channel_mapping[3] = PCM_CHANNEL_RS;
- } else if (channel_mode == 5) {
- open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
- open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
- open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
- open->dev_channel_mapping[3] = PCM_CHANNEL_LS;
- open->dev_channel_mapping[4] = PCM_CHANNEL_RS;
- } else if (channel_mode == 6) {
- open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
- open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
- open->dev_channel_mapping[2] = PCM_CHANNEL_LFE;
- open->dev_channel_mapping[3] = PCM_CHANNEL_FC;
- open->dev_channel_mapping[4] = PCM_CHANNEL_LS;
- open->dev_channel_mapping[5] = PCM_CHANNEL_RS;
- } else if (channel_mode == 8) {
- open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
- open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
- open->dev_channel_mapping[2] = PCM_CHANNEL_LFE;
- open->dev_channel_mapping[3] = PCM_CHANNEL_FC;
- open->dev_channel_mapping[4] = PCM_CHANNEL_LS;
- open->dev_channel_mapping[5] = PCM_CHANNEL_RS;
- open->dev_channel_mapping[6] = PCM_CHANNEL_LB;
- open->dev_channel_mapping[7] = PCM_CHANNEL_RB;
- } else {
- pr_err("%s: invalid num_chan %d\n", __func__,
- channel_mode);
- rc = -EINVAL;
- goto inval_ch_mod;
- }
-
+ memset(open->dev_channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
switch (path) {
case ADM_PATH_PLAYBACK:
idx = ADM_MCH_MAP_IDX_PLAYBACK;
break;
case ADM_PATH_LIVE_REC:
+ case ADM_PATH_NONLIVE_REC:
idx = ADM_MCH_MAP_IDX_REC;
break;
default:
goto non_mch_path;
- break;
};
-
- if ((open->dev_num_channel > 2) && multi_ch_maps[idx].set_channel_map)
+ if ((open->dev_num_channel > 2) && multi_ch_maps[idx].set_channel_map) {
memcpy(open->dev_channel_mapping,
- multi_ch_maps[idx].channel_mapping,
- PCM_FORMAT_MAX_NUM_CHANNEL);
+ multi_ch_maps[idx].channel_mapping,
+ PCM_FORMAT_MAX_NUM_CHANNEL);
+ } else {
+ if (channel_mode == 1) {
+ open->dev_channel_mapping[0] = PCM_CHANNEL_FC;
+ } else if (channel_mode == 2) {
+ open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ } else if (channel_mode == 3) {
+ open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
+ } else if (channel_mode == 4) {
+ open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ open->dev_channel_mapping[2] = PCM_CHANNEL_LS;
+ open->dev_channel_mapping[3] = PCM_CHANNEL_RS;
+ } else if (channel_mode == 5) {
+ open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
+ open->dev_channel_mapping[3] = PCM_CHANNEL_LS;
+ open->dev_channel_mapping[4] = PCM_CHANNEL_RS;
+ } else if (channel_mode == 6) {
+ open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ open->dev_channel_mapping[2] = PCM_CHANNEL_LFE;
+ open->dev_channel_mapping[3] = PCM_CHANNEL_FC;
+ open->dev_channel_mapping[4] = PCM_CHANNEL_LS;
+ open->dev_channel_mapping[5] = PCM_CHANNEL_RS;
+ } else if (channel_mode == 7) {
+ open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
+ open->dev_channel_mapping[3] = PCM_CHANNEL_LFE;
+ open->dev_channel_mapping[4] = PCM_CHANNEL_LB;
+ open->dev_channel_mapping[5] = PCM_CHANNEL_RB;
+ open->dev_channel_mapping[6] = PCM_CHANNEL_CS;
+ } else if (channel_mode == 8) {
+ open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ open->dev_channel_mapping[2] = PCM_CHANNEL_LFE;
+ open->dev_channel_mapping[3] = PCM_CHANNEL_FC;
+ open->dev_channel_mapping[4] = PCM_CHANNEL_LS;
+ open->dev_channel_mapping[5] = PCM_CHANNEL_RS;
+ open->dev_channel_mapping[6] = PCM_CHANNEL_LB;
+ open->dev_channel_mapping[7] = PCM_CHANNEL_RB;
+ } else {
+ pr_err("%s: invalid num_chan %d\n", __func__,
+ channel_mode);
+ rc = -EINVAL;
+ goto inval_ch_mod;
+ }
+ }
non_mch_path:
inval_ch_mod:
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index af5a99e56afc..be0a8b2e3abe 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -2645,8 +2645,9 @@ exit:
}
static int q6afe_send_enc_config(u16 port_id,
- union afe_enc_config_data *cfg, u32 format,
- union afe_port_config afe_config, u16 afe_in_channels)
+ union afe_enc_config_data *cfg, u32 format,
+ union afe_port_config afe_config,
+ u16 afe_in_channels, u16 afe_in_bit_width)
{
struct afe_audioif_config_command config;
int index;
@@ -2728,8 +2729,13 @@ static int q6afe_send_enc_config(u16 port_id,
config.pdata.param_id = AFE_PARAM_ID_PORT_MEDIA_TYPE;
config.port.media_type.minor_version = AFE_API_VERSION_PORT_MEDIA_TYPE;
config.port.media_type.sample_rate = afe_config.slim_sch.sample_rate;
- config.port.media_type.bit_width = afe_config.slim_sch.bit_width;
- if (afe_in_channels != 0)
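+ /* prefer the caller-supplied input bit width; fall back to the port config */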
+ if (afe_in_bit_width)
+ config.port.media_type.bit_width = afe_in_bit_width;
+ else
+ config.port.media_type.bit_width =
+ afe_config.slim_sch.bit_width;
+
+ if (afe_in_channels)
config.port.media_type.num_channels = afe_in_channels;
else
config.port.media_type.num_channels =
@@ -2749,8 +2755,8 @@ exit:
}
static int __afe_port_start(u16 port_id, union afe_port_config *afe_config,
- u32 rate, u16 afe_in_channels,
- union afe_enc_config_data *cfg, u32 enc_format)
+ u32 rate, u16 afe_in_channels, u16 afe_in_bit_width,
+ union afe_enc_config_data *cfg, u32 enc_format)
{
struct afe_audioif_config_command config;
int ret = 0;
@@ -2989,7 +2995,8 @@ static int __afe_port_start(u16 port_id, union afe_port_config *afe_config,
pr_debug("%s: Found AFE encoder support for SLIMBUS enc_format = %d\n",
__func__, enc_format);
ret = q6afe_send_enc_config(port_id, cfg, enc_format,
- *afe_config, afe_in_channels);
+ *afe_config, afe_in_channels,
+ afe_in_bit_width);
if (ret) {
pr_err("%s: AFE encoder config for port 0x%x failed %d\n",
__func__, port_id, ret);
@@ -3043,7 +3050,7 @@ int afe_port_start(u16 port_id, union afe_port_config *afe_config,
u32 rate)
{
return __afe_port_start(port_id, afe_config, rate,
- 0, NULL, ASM_MEDIA_FMT_NONE);
+ 0, 0, NULL, ASM_MEDIA_FMT_NONE);
}
EXPORT_SYMBOL(afe_port_start);
@@ -3061,12 +3068,12 @@ EXPORT_SYMBOL(afe_port_start);
* Returns 0 on success or error value on port start failure.
*/
int afe_port_start_v2(u16 port_id, union afe_port_config *afe_config,
- u32 rate, u16 afe_in_channels,
+ u32 rate, u16 afe_in_channels, u16 afe_in_bit_width,
struct afe_enc_config *enc_cfg)
{
return __afe_port_start(port_id, afe_config, rate,
- afe_in_channels, &enc_cfg->data,
- enc_cfg->format);
+ afe_in_channels, afe_in_bit_width,
+ &enc_cfg->data, enc_cfg->format);
}
EXPORT_SYMBOL(afe_port_start_v2);
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 206fbec249fa..88c27339b299 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -183,6 +183,25 @@ static inline void q6asm_update_token(u32 *token, u8 session_id, u8 stream_id,
*token = asm_token.token;
}
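+
+ /*
+  * q6asm_get_pcm_format_id - map a PCM media format block version to the
+  * matching ASM multi-channel PCM format ID; unknown versions fall back
+  * to the V2 format.
+  */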
+static inline uint32_t q6asm_get_pcm_format_id(uint32_t media_format_block_ver)
+{
+ uint32_t pcm_format_id;
+
+ switch (media_format_block_ver) {
+ case PCM_MEDIA_FORMAT_V4:
+ pcm_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4;
+ break;
+ case PCM_MEDIA_FORMAT_V3:
+ pcm_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
+ break;
+ case PCM_MEDIA_FORMAT_V2:
+ default:
+ pcm_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+ break;
+ }
+ return pcm_format_id;
+}
+
/*
* q6asm_get_buf_index_from_token:
* Retrieve buffer index from token.
@@ -1996,6 +2015,40 @@ void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size,
return NULL;
}
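+
+ /*
+  * q6asm_cpu_buf_release - reclaims the most recently claimed cpu buffer
+  *
+  * @dir: buffer direction, IN or OUT
+  * @ac: Client session handle
+  *
+  * Steps the port's cpu_buf index back by one, wrapping around at zero,
+  * so the buffer handed out by the last q6asm_is_cpu_buf_avail() call
+  * can be reused.
+  */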
+int q6asm_cpu_buf_release(int dir, struct audio_client *ac)
+{
+ struct audio_port_data *port;
+ int ret = 0;
+ int idx;
+
+ if (!ac || ((dir != IN) && (dir != OUT))) {
+ pr_err("%s: ac %pK dir %d\n", __func__, ac, dir);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ac->io_mode & SYNC_IO_MODE) {
+ port = &ac->port[dir];
+ mutex_lock(&port->lock);
+ idx = port->cpu_buf;
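+ /* rewind cpu_buf by one, wrapping to the last buffer at index 0 */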
+ if (port->cpu_buf == 0) {
+ port->cpu_buf = port->max_buf_cnt - 1;
+ } else if (port->cpu_buf < port->max_buf_cnt) {
+ port->cpu_buf = port->cpu_buf - 1;
+ } else {
+ pr_err("%s: buffer index(%d) out of range\n",
+ __func__, port->cpu_buf);
+ ret = -EINVAL;
+ mutex_unlock(&port->lock);
+ goto exit;
+ }
+ port->buf[port->cpu_buf].used = dir ^ 1;
+ mutex_unlock(&port->lock);
+ }
+exit:
+ return ret;
+}
+
void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
uint32_t *size, uint32_t *index)
{
@@ -2229,7 +2282,7 @@ static void q6asm_add_mmaphdr(struct audio_client *ac, struct apr_hdr *hdr,
static int __q6asm_open_read(struct audio_client *ac,
uint32_t format, uint16_t bits_per_sample,
- bool use_v3_format)
+ uint32_t pcm_format_block_ver)
{
int rc = 0x00;
struct asm_stream_cmd_open_read_v3 open;
@@ -2272,10 +2325,7 @@ static int __q6asm_open_read(struct audio_client *ac,
switch (format) {
case FORMAT_LINEAR_PCM:
open.mode_flags |= 0x00;
- if (use_v3_format)
- open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
- else
- open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
+ open.enc_cfg_id = q6asm_get_pcm_format_id(pcm_format_block_ver);
break;
case FORMAT_MPEG4_AAC:
open.mode_flags |= BUFFER_META_ENABLE;
@@ -2338,14 +2388,14 @@ int q6asm_open_read(struct audio_client *ac,
uint32_t format)
{
return __q6asm_open_read(ac, format, 16,
- false /*use_v3_format*/);
+ PCM_MEDIA_FORMAT_V2 /*media fmt block ver*/);
}
int q6asm_open_read_v2(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample)
{
return __q6asm_open_read(ac, format, bits_per_sample,
- false /*use_v3_format*/);
+ PCM_MEDIA_FORMAT_V2 /*media fmt block ver*/);
}
/*
@@ -2359,10 +2409,25 @@ int q6asm_open_read_v3(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample)
{
return __q6asm_open_read(ac, format, bits_per_sample,
- true /*use_v3_format*/);
+ PCM_MEDIA_FORMAT_V3 /*media fmt block ver*/);
}
EXPORT_SYMBOL(q6asm_open_read_v3);
+/*
+ * q6asm_open_read_v4 - Opens audio capture session
+ *
+ * @ac: Client session handle
+ * @format: encoder format
+ * @bits_per_sample: bit width of capture session
+ */
+int q6asm_open_read_v4(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ return __q6asm_open_read(ac, format, bits_per_sample,
+ PCM_MEDIA_FORMAT_V4 /*media fmt block ver*/);
+}
+EXPORT_SYMBOL(q6asm_open_read_v4);
+
int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
uint32_t passthrough_flag)
{
@@ -2454,7 +2519,8 @@ fail_cmd:
static int __q6asm_open_write(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample, uint32_t stream_id,
- bool is_gapless_mode, bool use_v3_format)
+ bool is_gapless_mode,
+ uint32_t pcm_format_block_ver)
{
int rc = 0x00;
struct asm_stream_cmd_open_write_v3 open;
@@ -2530,11 +2596,7 @@ static int __q6asm_open_write(struct audio_client *ac, uint32_t format,
}
switch (format) {
case FORMAT_LINEAR_PCM:
- if (use_v3_format)
- open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
- else
- open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
-
+ open.dec_fmt_id = q6asm_get_pcm_format_id(pcm_format_block_ver);
break;
case FORMAT_MPEG4_AAC:
open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2;
@@ -2613,7 +2675,7 @@ int q6asm_open_write(struct audio_client *ac, uint32_t format)
{
return __q6asm_open_write(ac, format, 16, ac->stream_id,
false /*gapless*/,
- false /*use_v3_format*/);
+ PCM_MEDIA_FORMAT_V2 /*pcm_format_block_ver*/);
}
int q6asm_open_write_v2(struct audio_client *ac, uint32_t format,
@@ -2621,7 +2683,7 @@ int q6asm_open_write_v2(struct audio_client *ac, uint32_t format,
{
return __q6asm_open_write(ac, format, bits_per_sample,
ac->stream_id, false /*gapless*/,
- false /*use_v3_format*/);
+ PCM_MEDIA_FORMAT_V2 /*pcm_format_block_ver*/);
}
/*
@@ -2636,17 +2698,33 @@ int q6asm_open_write_v3(struct audio_client *ac, uint32_t format,
{
return __q6asm_open_write(ac, format, bits_per_sample,
ac->stream_id, false /*gapless*/,
- true /*use_v3_format*/);
+ PCM_MEDIA_FORMAT_V3 /*pcm_format_block_ver*/);
}
EXPORT_SYMBOL(q6asm_open_write_v3);
+/*
+ * q6asm_open_write_v4 - Opens audio playback session
+ *
+ * @ac: Client session handle
+ * @format: decoder format
+ * @bits_per_sample: bit width of playback session
+ */
+int q6asm_open_write_v4(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample)
+{
+ return __q6asm_open_write(ac, format, bits_per_sample,
+ ac->stream_id, false /*gapless*/,
+ PCM_MEDIA_FORMAT_V4 /*pcm_format_block_ver*/);
+}
+EXPORT_SYMBOL(q6asm_open_write_v4);
+
int q6asm_stream_open_write_v2(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample, int32_t stream_id,
bool is_gapless_mode)
{
return __q6asm_open_write(ac, format, bits_per_sample,
stream_id, is_gapless_mode,
- false /*use_v3_format*/);
+ PCM_MEDIA_FORMAT_V2 /*pcm_format_block_ver*/);
}
/*
@@ -2664,10 +2742,29 @@ int q6asm_stream_open_write_v3(struct audio_client *ac, uint32_t format,
{
return __q6asm_open_write(ac, format, bits_per_sample,
stream_id, is_gapless_mode,
- true /*use_v3_format*/);
+ PCM_MEDIA_FORMAT_V3 /*pcm_format_block_ver*/);
}
EXPORT_SYMBOL(q6asm_stream_open_write_v3);
+/*
+ * q6asm_stream_open_write_v4 - Creates audio stream for playback
+ *
+ * @ac: Client session handle
+ * @format: asm playback format
+ * @bits_per_sample: bit width of requested stream
+ * @stream_id: stream id of stream to be associated with this session
+ * @is_gapless_mode: true if gapless mode needs to be enabled
+ */
+int q6asm_stream_open_write_v4(struct audio_client *ac, uint32_t format,
+ uint16_t bits_per_sample, int32_t stream_id,
+ bool is_gapless_mode)
+{
+ return __q6asm_open_write(ac, format, bits_per_sample,
+ stream_id, is_gapless_mode,
+ PCM_MEDIA_FORMAT_V4 /*pcm_format_block_ver*/);
+}
+EXPORT_SYMBOL(q6asm_stream_open_write_v4);
+
static int __q6asm_open_read_write(struct audio_client *ac, uint32_t rd_format,
uint32_t wr_format, bool is_meta_data_mode,
uint32_t bits_per_sample,
@@ -3491,6 +3588,108 @@ fail_cmd:
}
/*
+ * q6asm_enc_cfg_blk_pcm_v4 - sends encoder configuration parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of encoder session
+ * @use_default_chmap: true if default channel map to be used
+ * @use_back_flavor: to configure back left and right channel
+ * @channel_map: input channel map
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ * @endianness: endianness of the pcm data
+ * @mode: Mode to provide additional info about the pcm input data
+ */
+int q6asm_enc_cfg_blk_pcm_v4(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ uint16_t bits_per_sample, bool use_default_chmap,
+ bool use_back_flavor, u8 *channel_map,
+ uint16_t sample_word_size, uint16_t endianness,
+ uint16_t mode)
+{
+ struct asm_multi_channel_pcm_enc_cfg_v4 enc_cfg;
+ struct asm_enc_cfg_blk_param_v2 enc_fg_blk;
+ u8 *channel_mapping;
+ u32 frames_per_buf = 0;
+ int rc;
+
+ if (!use_default_chmap && (channel_map == NULL)) {
+ pr_err("%s: No valid chan map and can't use default\n",
+ __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
+ pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__,
+ ac->session, rate, channels,
+ bits_per_sample, sample_word_size);
+
+ memset(&enc_cfg, 0, sizeof(enc_cfg));
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+ atomic_set(&ac->cmd_state, -1);
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
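+ /*
+  * param_size covers the payload after the APR header and the encdec
+  * preamble; the cfg block size further excludes the enc cfg blk header.
+  */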
+ enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+ sizeof(enc_cfg.encdec);
+ enc_cfg.encblk.frames_per_buf = frames_per_buf;
+ enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
+ sizeof(enc_fg_blk);
+ enc_cfg.num_channels = channels;
+ enc_cfg.bits_per_sample = bits_per_sample;
+ enc_cfg.sample_rate = rate;
+ enc_cfg.is_signed = 1;
+ enc_cfg.sample_word_size = sample_word_size;
+ enc_cfg.endianness = endianness;
+ enc_cfg.mode = mode;
+ channel_mapping = enc_cfg.channel_mapping;
+
+ memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ if (use_default_chmap) {
+ pr_debug("%s: setting default channel map for %d channels",
+ __func__, channels);
+ if (q6asm_map_channels(channel_mapping, channels,
+ use_back_flavor)) {
+ pr_err("%s: map channels failed %d\n",
+ __func__, channels);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ } else {
+ pr_debug("%s: Using pre-defined channel map", __func__);
+ memcpy(channel_mapping, channel_map,
+ PCM_FORMAT_MAX_NUM_CHANNEL);
+ }
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+ pr_err("%s: Command open failed %d\n", __func__, rc);
+ goto fail_cmd;
+ }
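+ /*
+  * cmd_state is -1 while the command is in flight; the response callback
+  * moves it to 0 on success or a positive ADSP error code, checked below.
+  */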
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout opcode[0x%x]\n",
+ __func__, enc_cfg.hdr.opcode);
+ rc = -ETIMEDOUT;
+ goto fail_cmd;
+ }
+ if (atomic_read(&ac->cmd_state) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state)));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state));
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return rc;
+}
+EXPORT_SYMBOL(q6asm_enc_cfg_blk_pcm_v4);
+
+/*
* q6asm_enc_cfg_blk_pcm_v3 - sends encoder configuration parameters
*
* @ac: Client session handle
@@ -3666,6 +3865,18 @@ fail_cmd:
return rc;
}
+static int __q6asm_enc_cfg_blk_pcm_v4(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ uint16_t bits_per_sample,
+ uint16_t sample_word_size,
+ uint16_t endianness,
+ uint16_t mode)
+{
+ return q6asm_enc_cfg_blk_pcm_v4(ac, rate, channels,
+ bits_per_sample, true, false, NULL,
+ sample_word_size, endianness, mode);
+}
+
static int __q6asm_enc_cfg_blk_pcm_v3(struct audio_client *ac,
uint32_t rate, uint32_t channels,
uint16_t bits_per_sample,
@@ -3715,6 +3926,31 @@ int q6asm_enc_cfg_blk_pcm_format_support_v3(struct audio_client *ac,
}
EXPORT_SYMBOL(q6asm_enc_cfg_blk_pcm_format_support_v3);
+/*
+ * q6asm_enc_cfg_blk_pcm_format_support_v4 - sends encoder configuration
+ * parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of encoder session
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ * @endianness: endianness of the pcm data
+ * @mode: Mode to provide additional info about the pcm input data
+ */
+int q6asm_enc_cfg_blk_pcm_format_support_v4(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ uint16_t bits_per_sample,
+ uint16_t sample_word_size,
+ uint16_t endianness,
+ uint16_t mode)
+{
+ return __q6asm_enc_cfg_blk_pcm_v4(ac, rate, channels,
+ bits_per_sample, sample_word_size,
+ endianness, mode);
+}
+EXPORT_SYMBOL(q6asm_enc_cfg_blk_pcm_format_support_v4);
+
int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
uint32_t rate, uint32_t channels)
{
@@ -4347,6 +4583,91 @@ fail_cmd:
return rc;
}
+static int __q6asm_media_format_block_pcm_v4(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ uint16_t bits_per_sample,
+ int stream_id,
+ bool use_default_chmap,
+ char *channel_map,
+ uint16_t sample_word_size,
+ uint16_t endianness,
+ uint16_t mode)
+{
+ struct asm_multi_channel_pcm_fmt_blk_param_v4 fmt;
+ u8 *channel_mapping;
+ int rc;
+
+ pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__,
+ ac->session, rate, channels,
+ bits_per_sample, sample_word_size);
+
+ memset(&fmt, 0, sizeof(fmt));
+ q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
+ atomic_set(&ac->cmd_state, -1);
+ /*
+ * Update the token field with the stream/session ID for compressed
+ * playback; the platform driver must know which stream the command
+ * is associated with.
+ */
+ if (ac->io_mode & COMPRESSED_STREAM_IO)
+ fmt.hdr.token = ((ac->session << 8) & 0xFFFF00) |
+ (stream_id & 0xFF);
+
+ pr_debug("%s: token = 0x%x, stream_id %d, session 0x%x\n",
+ __func__, fmt.hdr.token, stream_id, ac->session);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+ sizeof(fmt.fmt_blk);
+ fmt.param.num_channels = channels;
+ fmt.param.bits_per_sample = bits_per_sample;
+ fmt.param.sample_rate = rate;
+ fmt.param.is_signed = 1;
+ fmt.param.sample_word_size = sample_word_size;
+ fmt.param.endianness = endianness;
+ fmt.param.mode = mode;
+ channel_mapping = fmt.param.channel_mapping;
+
+ memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ if (use_default_chmap) {
+ if (q6asm_map_channels(channel_mapping, channels, false)) {
+ pr_err("%s: map channels failed %d\n",
+ __func__, channels);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ } else {
+ memcpy(channel_mapping, channel_map,
+ PCM_FORMAT_MAX_NUM_CHANNEL);
+ }
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout. waited for format update\n", __func__);
+ rc = -ETIMEDOUT;
+ goto fail_cmd;
+ }
+ if (atomic_read(&ac->cmd_state) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state)));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state));
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return rc;
+}
+
int q6asm_media_format_block_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels)
{
@@ -4414,6 +4735,47 @@ int q6asm_media_format_block_pcm_format_support_v3(struct audio_client *ac,
}
EXPORT_SYMBOL(q6asm_media_format_block_pcm_format_support_v3);
+/*
+ * q6asm_media_format_block_pcm_format_support_v4 - sends pcm decoder
+ * configuration parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of encoder session
+ * @stream_id: stream id of stream to be associated with this session
+ * @use_default_chmap: true if default channel map to be used
+ * @channel_map: input channel map
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ * @endianness: endianness of the pcm data
+ * @mode: Mode to provide additional info about the pcm input data
+ */
+int q6asm_media_format_block_pcm_format_support_v4(struct audio_client *ac,
+ uint32_t rate,
+ uint32_t channels,
+ uint16_t bits_per_sample,
+ int stream_id,
+ bool use_default_chmap,
+ char *channel_map,
+ uint16_t sample_word_size,
+ uint16_t endianness,
+ uint16_t mode)
+{
+ if (!use_default_chmap && (channel_map == NULL)) {
+ pr_err("%s: No valid chan map and can't use default\n",
+ __func__);
+ return -EINVAL;
+ }
+ return __q6asm_media_format_block_pcm_v4(ac, rate,
+ channels, bits_per_sample, stream_id,
+ use_default_chmap, channel_map,
+ sample_word_size, endianness,
+ mode);
+}
+EXPORT_SYMBOL(q6asm_media_format_block_pcm_format_support_v4);
+
static int __q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels,
bool use_default_chmap, char *channel_map,
@@ -4547,6 +4909,78 @@ fail_cmd:
return rc;
}
+static int __q6asm_media_format_block_multi_ch_pcm_v4(struct audio_client *ac,
+ uint32_t rate,
+ uint32_t channels,
+ bool use_default_chmap,
+ char *channel_map,
+ uint16_t bits_per_sample,
+ uint16_t sample_word_size,
+ uint16_t endianness,
+ uint16_t mode)
+{
+ struct asm_multi_channel_pcm_fmt_blk_param_v4 fmt;
+ u8 *channel_mapping;
+ int rc;
+
+ pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__,
+ ac->session, rate, channels,
+ bits_per_sample, sample_word_size);
+
+ memset(&fmt, 0, sizeof(fmt));
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+ atomic_set(&ac->cmd_state, -1);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
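+ /* fmt_blk_size is the payload that follows the APR and fmt block headers */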
+ fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+ sizeof(fmt.fmt_blk);
+ fmt.param.num_channels = channels;
+ fmt.param.bits_per_sample = bits_per_sample;
+ fmt.param.sample_rate = rate;
+ fmt.param.is_signed = 1;
+ fmt.param.sample_word_size = sample_word_size;
+ fmt.param.endianness = endianness;
+ fmt.param.mode = mode;
+ channel_mapping = fmt.param.channel_mapping;
+
+ memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ if (use_default_chmap) {
+ if (q6asm_map_channels(channel_mapping, channels, false)) {
+ pr_err("%s: map channels failed %d\n",
+ __func__, channels);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ } else {
+ memcpy(channel_mapping, channel_map,
+ PCM_FORMAT_MAX_NUM_CHANNEL);
+ }
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout. waited for format update\n", __func__);
+ rc = -ETIMEDOUT;
+ goto fail_cmd;
+ }
+ if (atomic_read(&ac->cmd_state) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state)));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state));
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return rc;
+}
+
int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels,
@@ -4594,6 +5028,39 @@ int q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac,
}
EXPORT_SYMBOL(q6asm_media_format_block_multi_ch_pcm_v3);
+/*
+ * q6asm_media_format_block_multi_ch_pcm_v4 - sends pcm decoder configuration
+ * parameters
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ * @bits_per_sample: bit width of encoder session
+ * @use_default_chmap: true if default channel map to be used
+ * @channel_map: input channel map
+ * @sample_word_size: Size in bits of the word that holds a sample of a channel
+ * @endianness: endianness of the pcm data
+ * @mode: Mode to provide additional info about the pcm input data
+ */
+int q6asm_media_format_block_multi_ch_pcm_v4(struct audio_client *ac,
+ uint32_t rate, uint32_t channels,
+ bool use_default_chmap,
+ char *channel_map,
+ uint16_t bits_per_sample,
+ uint16_t sample_word_size,
+ uint16_t endianness,
+ uint16_t mode)
+{
+ return __q6asm_media_format_block_multi_ch_pcm_v4(ac, rate, channels,
+ use_default_chmap,
+ channel_map,
+ bits_per_sample,
+ sample_word_size,
+ endianness,
+ mode);
+}
+EXPORT_SYMBOL(q6asm_media_format_block_multi_ch_pcm_v4);
+
static int __q6asm_media_format_block_multi_aac(struct audio_client *ac,
struct asm_aac_cfg *cfg, int stream_id)
{
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 94806ad6437b..0700b4c00aeb 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -4034,10 +4034,8 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
{
struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
- mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
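+ /* callers of the _unlocked variant must already hold dapm_mutex */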
if (!w) {
dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin);
- mutex_unlock(&dapm->card->dapm_mutex);
return -EINVAL;
}
@@ -4053,7 +4051,6 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
}
w->force = 1;
dapm_mark_dirty(w, "force enable");
- mutex_unlock(&dapm->card->dapm_mutex);
return 0;
}