Diffstat:
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/wil6210.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/net/neutrino_avb.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt | 18
-rw-r--r--  Documentation/timers/timer_stats.txt | 73
-rw-r--r--  MAINTAINERS | 16
-rw-r--r--  Makefile | 2
-rw-r--r--  android/configs/android-base.cfg | 2
-rw-r--r--  arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts | 31
-rw-r--r--  arch/arm/boot/dts/qcom/msm-pm660.dtsi | 81
-rw-r--r--  arch/arm/boot/dts/qcom/msm-pmi8998.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-camera-sensor-adp.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-camera.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-cdp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-mdss.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-mtp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts | 60
-rw-r--r--  arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts | 19
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi | 207
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-mdss.dtsi | 11
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-common.dtsi | 68
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-mtp.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-pm.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-regulator.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-vidc.dtsi | 11
-rw-r--r--  arch/arm/kernel/Makefile | 1
-rw-r--r--  arch/arm/kernel/devtree.c | 11
-rw-r--r--  arch/arm/kernel/pj4-cp0.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap-headsmp.S | 3
-rw-r--r--  arch/arm64/configs/msmcortex-perf_defconfig | 1
-rw-r--r--  arch/arm64/configs/msmcortex_defconfig | 1
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 8
-rw-r--r--  arch/mips/kernel/mips-r2-to-r6-emul.c | 12
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r--  arch/x86/kernel/kprobes/common.h | 2
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 6
-rw-r--r--  arch/x86/kernel/kprobes/opt.c | 2
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 2
-rw-r--r--  arch/x86/kvm/cpuid.c | 6
-rw-r--r--  arch/x86/kvm/vmx.c | 17
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_wdt.c | 2
-rw-r--r--  block/blk-integrity.c | 19
-rw-r--r--  block/partition-generic.c | 1
-rw-r--r--  drivers/base/firmware_class.c | 3
-rw-r--r--  drivers/bluetooth/btfm_slim.h | 1
-rw-r--r--  drivers/bluetooth/btfm_slim_codec.c | 3
-rw-r--r--  drivers/bluetooth/btfm_slim_wcn3990.c | 18
-rw-r--r--  drivers/char/adsprpc.c | 3
-rw-r--r--  drivers/clk/Makefile | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 11
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_defs.h | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_drm.c | 8
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c | 49
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 22
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 12
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c | 6
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_intf.c | 9
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_top.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 12
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c | 2
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c | 7
-rw-r--r--  drivers/gpu/msm/kgsl.c | 3
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-remote-etm.c | 11
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 3
-rw-r--r--  drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c | 14
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 3
-rw-r--r--  drivers/leds/leds-ktd2692.c | 8
-rw-r--r--  drivers/leds/leds-qpnp-flash-v2.c | 110
-rw-r--r--  drivers/leds/leds-qpnp.c | 11
-rw-r--r--  drivers/media/Kconfig | 19
-rw-r--r--  drivers/media/Makefile | 8
-rw-r--r--  drivers/media/cec-edid.c | 171
-rw-r--r--  drivers/media/cec/Makefile | 5
-rw-r--r--  drivers/media/cec/cec-adap.c | 1880
-rw-r--r--  drivers/media/cec/cec-api.c | 588
-rw-r--r--  drivers/media/cec/cec-core.c | 413
-rw-r--r--  drivers/media/cec/cec-priv.h | 56
-rw-r--r--  drivers/media/dvb-core/dvb_demux.c | 3
-rw-r--r--  drivers/media/i2c/Makefile | 11
-rw-r--r--  drivers/media/platform/msm/Kconfig | 15
-rw-r--r--  drivers/media/platform/msm/Makefile | 1
-rw-r--r--  drivers/media/platform/msm/ais/Kconfig | 85
-rw-r--r--  drivers/media/platform/msm/ais/Makefile | 24
-rw-r--r--  drivers/media/platform/msm/ais/camera/Makefile | 3
-rw-r--r--  drivers/media/platform/msm/ais/camera/camera.c | 956
-rw-r--r--  drivers/media/platform/msm/ais/camera/camera.h | 23
-rw-r--r--  drivers/media/platform/msm/ais/common/Makefile | 2
-rw-r--r--  drivers/media/platform/msm/ais/common/cam_hw_ops.c | 338
-rw-r--r--  drivers/media/platform/msm/ais/common/cam_hw_ops.h | 42
-rw-r--r--  drivers/media/platform/msm/ais/common/cam_smmu_api.c | 1680
-rw-r--r--  drivers/media/platform/msm/ais/common/cam_smmu_api.h | 166
-rw-r--r--  drivers/media/platform/msm/ais/common/cam_soc_api.c | 1015
-rw-r--r--  drivers/media/platform/msm/ais/common/cam_soc_api.h | 425
-rw-r--r--  drivers/media/platform/msm/ais/common/msm_camera_io_util.c | 851
-rw-r--r--  drivers/media/platform/msm/ais/common/msm_camera_io_util.h | 93
-rw-r--r--  drivers/media/platform/msm/ais/fd/Makefile | 8
-rw-r--r--  drivers/media/platform/msm/ais/fd/msm_fd_dev.c | 1441
-rw-r--r--  drivers/media/platform/msm/ais/fd/msm_fd_dev.h | 258
-rw-r--r--  drivers/media/platform/msm/ais/fd/msm_fd_hw.c | 1313
-rw-r--r--  drivers/media/platform/msm/ais/fd/msm_fd_hw.h | 82
-rw-r--r--  drivers/media/platform/msm/ais/fd/msm_fd_regs.h | 169
-rw-r--r--  drivers/media/platform/msm/ais/isp/Makefile | 5
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_buf_mgr.c | 1531
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_buf_mgr.h | 230
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp.c | 658
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp.h | 813
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp47.c | 2851
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp47.h | 202
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c | 4169
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h | 131
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c | 973
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h | 31
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp_util.c | 2465
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp_util.h | 105
-rw-r--r--  drivers/media/platform/msm/ais/ispif/Makefile | 4
-rw-r--r--  drivers/media/platform/msm/ais/ispif/msm_ispif.c | 1802
-rw-r--r--  drivers/media/platform/msm/ais/ispif/msm_ispif.h | 82
-rw-r--r--  drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v1.h | 124
-rw-r--r--  drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v2.h | 104
-rw-r--r--  drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v3.h | 135
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/Makefile | 7
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_common.h | 38
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_core.c | 384
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_core.h | 40
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_dev.c | 345
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw.c | 928
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw.h | 142
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw_reg.h | 210
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_platform.c | 514
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_platform.h | 38
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_sync.c | 1584
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_sync.h | 139
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_dma/Makefile | 4
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c | 1380
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.h | 374
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_hw.c | 1776
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_hw.h | 78
-rw-r--r--  drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_regs.h | 122
-rw-r--r--  drivers/media/platform/msm/ais/msm.c | 1357
-rw-r--r--  drivers/media/platform/msm/ais/msm.h | 145
-rw-r--r--  drivers/media/platform/msm/ais/msm_buf_mgr/Makefile | 2
-rw-r--r--  drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c | 896
-rw-r--r--  drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.h | 65
-rw-r--r--  drivers/media/platform/msm/ais/msm_sd.h | 100
-rw-r--r--  drivers/media/platform/msm/ais/msm_vb2/Makefile | 3
-rw-r--r--  drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c | 438
-rw-r--r--  drivers/media/platform/msm/ais/msm_vb2/msm_vb2.h | 72
-rw-r--r--  drivers/media/platform/msm/ais/pproc/Makefile | 1
-rw-r--r--  drivers/media/platform/msm/ais/pproc/cpp/Makefile | 6
-rw-r--r--  drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c | 4497
-rw-r--r--  drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.h | 294
-rw-r--r--  drivers/media/platform/msm/ais/pproc/cpp/msm_cpp_soc.c | 251
-rw-r--r--  drivers/media/platform/msm/ais/pproc/vpe/Makefile | 3
-rw-r--r--  drivers/media/platform/msm/ais/pproc/vpe/msm_vpe.c | 1691
-rw-r--r--  drivers/media/platform/msm/ais/pproc/vpe/msm_vpe.h | 258
-rw-r--r--  drivers/media/platform/msm/ais/sensor/Makefile | 8
-rw-r--r--  drivers/media/platform/msm/ais/sensor/actuator/Makefile | 5
-rw-r--r--  drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c | 2120
-rw-r--r--  drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.h | 114
-rw-r--r--  drivers/media/platform/msm/ais/sensor/cci/Makefile | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/cci/msm_cam_cci_hwreg.h | 69
-rw-r--r--  drivers/media/platform/msm/ais/sensor/cci/msm_cci.c | 2167
-rw-r--r--  drivers/media/platform/msm/ais/sensor/cci/msm_cci.h | 231
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/Makefile | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h | 65
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h | 64
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h | 64
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h | 64
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h | 64
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h | 63
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h | 63
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h | 63
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h | 64
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h | 64
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h | 63
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/msm_csid.c | 1341
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/msm_csid.h | 124
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/Makefile | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h | 46
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h | 46
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h | 46
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h | 46
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h | 46
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h | 95
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h | 95
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h | 95
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c | 1580
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.h | 177
-rw-r--r--  drivers/media/platform/msm/ais/sensor/eeprom/Makefile | 5
-rw-r--r--  drivers/media/platform/msm/ais/sensor/eeprom/msm_eeprom.c | 1879
-rw-r--r--  drivers/media/platform/msm/ais/sensor/eeprom/msm_eeprom.h | 52
-rw-r--r--  drivers/media/platform/msm/ais/sensor/flash/Makefile | 5
-rw-r--r--  drivers/media/platform/msm/ais/sensor/flash/msm_flash.c | 1223
-rw-r--r--  drivers/media/platform/msm/ais/sensor/flash/msm_flash.h | 124
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/Makefile | 6
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c | 578
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c | 1734
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h | 68
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c.h | 211
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c_mux.c | 185
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c_mux.h | 42
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c | 608
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c | 851
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h | 120
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_tz_i2c.c | 1096
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ir_cut/Makefile | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c | 662
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.h | 72
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ir_led/Makefile | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c | 456
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.h | 76
-rw-r--r--  drivers/media/platform/msm/ais/sensor/msm_sensor.c | 1583
-rw-r--r--  drivers/media/platform/msm/ais/sensor/msm_sensor.h | 129
-rw-r--r--  drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c | 1309
-rw-r--r--  drivers/media/platform/msm/ais/sensor/msm_sensor_driver.h | 21
-rw-r--r--  drivers/media/platform/msm/ais/sensor/msm_sensor_init.c | 114
-rw-r--r--  drivers/media/platform/msm/ais/sensor/msm_sensor_init.h | 26
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ois/Makefile | 5
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ois/msm_ois.c | 1015
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ois/msm_ois.h | 74
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.c | 1
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.h | 1
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp47.c | 7
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c | 22
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_core.c | 6
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c | 4
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc.c | 2
-rw-r--r--  drivers/misc/memory_state_time.c | 78
-rw-r--r--  drivers/mmc/card/block.c | 9
-rw-r--r--  drivers/mmc/core/core.c | 74
-rw-r--r--  drivers/mmc/core/mmc.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/core.c | 23
-rw-r--r--  drivers/net/wireless/cnss/cnss_common.c | 16
-rw-r--r--  drivers/net/wireless/mwifiex/11n_aggr.c | 19
-rw-r--r--  drivers/net/wireless/mwifiex/debugfs.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 2
-rw-r--r--  drivers/pci/host/pci-msm.c | 2
-rw-r--r--  drivers/phy/Kconfig | 1
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 105
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 10
-rw-r--r--  drivers/platform/msm/mhi/mhi.h | 6
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.c | 85
-rw-r--r--  drivers/platform/msm/msm_11ad/msm_11ad.c | 18
-rw-r--r--  drivers/platform/msm/msm_ext_display.c | 13
-rw-r--r--  drivers/power/bq24190_charger.c | 182
-rw-r--r--  drivers/power/power_supply_sysfs.c | 1
-rw-r--r--  drivers/power/supply/qcom/battery.c | 101
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 2
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 8
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 182
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 5
-rw-r--r--  drivers/regulator/qpnp-labibb-regulator.c | 376
-rw-r--r--  drivers/scsi/Kconfig | 2
-rw-r--r--  drivers/soc/qcom/common_log.c | 9
-rw-r--r--  drivers/soc/qcom/glink.c | 85
-rw-r--r--  drivers/soc/qcom/glink_smem_native_xprt.c | 54
-rw-r--r--  drivers/soc/qcom/glink_xprt_if.h | 4
-rw-r--r--  drivers/soc/qcom/icnss.c | 47
-rw-r--r--  drivers/soc/qcom/msm_bus/msm_bus_dbg.c | 102
-rw-r--r--  drivers/soc/qcom/msm_performance.c | 6
-rw-r--r--  drivers/soc/qcom/peripheral-loader.c | 13
-rw-r--r--  drivers/soc/qcom/peripheral-loader.h | 1
-rw-r--r--  drivers/soc/qcom/pil-q6v5-mss.c | 1
-rw-r--r--  drivers/soc/qcom/qbt1000.c | 13
-rw-r--r--  drivers/soc/qcom/qdsp6v2/apr.c | 3
-rw-r--r--  drivers/soc/qcom/rpm_log.c | 7
-rw-r--r--  drivers/soc/qcom/rpm_stats.c | 11
-rw-r--r--  drivers/soc/qcom/smp2p_test.c | 6
-rw-r--r--  drivers/soc/qcom/spcom.c | 37
-rw-r--r--  drivers/soc/qcom/subsystem_restart.c | 14
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c | 33
-rw-r--r--  drivers/staging/android/ion/ion.c | 2
-rw-r--r--  drivers/staging/emxx_udc/emxx_udc.c | 4
-rw-r--r--  drivers/staging/wlan-ng/p80211netdev.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 8
-rw-r--r--  drivers/usb/chipidea/ci.h | 3
-rw-r--r--  drivers/usb/chipidea/core.c | 32
-rw-r--r--  drivers/usb/chipidea/otg.c | 80
-rw-r--r--  drivers/usb/dwc3/core.h | 2
-rw-r--r--  drivers/usb/dwc3/debugfs.c | 10
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c | 11
-rw-r--r--  drivers/usb/gadget/composite.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_audio_source.c | 27
-rw-r--r--  drivers/usb/gadget/function/f_qc_rndis.c | 91
-rw-r--r--  drivers/usb/host/ehci-exynos.c | 2
-rw-r--r--  drivers/usb/host/ohci-exynos.c | 2
-rw-r--r--  drivers/usb/pd/policy_engine.c | 20
-rw-r--r--  drivers/usb/pd/qpnp-pdphy.c | 4
-rw-r--r--  drivers/usb/phy/phy-msm-qusb-v2.c | 104
-rw-r--r--  drivers/usb/phy/phy-msm-qusb.c | 3
-rw-r--r--  drivers/usb/serial/ark3116.c | 25
-rw-r--r--  drivers/usb/serial/digi_acceleport.c | 38
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 7
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 48
-rw-r--r--  drivers/usb/serial/keyspan_pda.c | 19
-rw-r--r--  drivers/usb/serial/mct_u232.c | 6
-rw-r--r--  drivers/usb/serial/quatech2.c | 24
-rw-r--r--  drivers/usb/serial/ssu100.c | 31
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 12
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.c | 8
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi.c | 34
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_host.c | 20
-rw-r--r--  drivers/video/fbdev/msm/mdss_fb.c | 15
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.c | 3
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.h | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c | 38
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c | 23
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_overlay.c | 20
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pipe.c | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pp.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_panel.h | 3
-rw-r--r--  drivers/video/fbdev/msm/msm_dba/msm_dba.c | 7
-rw-r--r--  drivers/video/fbdev/msm/msm_mdss_io_8974.c | 3
-rw-r--r--  fs/block_dev.c | 1
-rw-r--r--  fs/f2fs/super.c | 7
-rw-r--r--  fs/sdcardfs/inode.c | 9
-rw-r--r--  fs/sdcardfs/super.c | 9
-rw-r--r--  include/linux/f2fs_fs.h | 6
-rw-r--r--  include/linux/genhd.h | 2
-rw-r--r--  include/linux/hrtimer.h | 11
-rw-r--r--  include/linux/mmc/core.h | 1
-rw-r--r--  include/linux/netfilter/xt_qtaguid.h | 1
-rw-r--r--  include/linux/power_supply.h | 1
-rw-r--r--  include/linux/timer.h | 45
-rw-r--r--  include/linux/usb/chipidea.h | 2
-rw-r--r--  include/media/ais/msm_ais.h | 37
-rw-r--r--  include/media/ais/msm_ais_buf_mgr.h | 49
-rw-r--r--  include/media/ais/msm_ais_isp.h | 35
-rw-r--r--  include/media/ais/msm_ais_sensor.h | 294
-rw-r--r--  include/media/cec-edid.h | 104
-rw-r--r--  include/media/cec.h | 239
-rw-r--r--  include/net/addrconf.h | 2
-rw-r--r--  include/net/cnss.h | 10
-rw-r--r--  include/net/ip6_route.h | 1
-rw-r--r--  include/soc/qcom/ais.h | 223
-rw-r--r--  include/soc/qcom/glink.h | 29
-rw-r--r--  include/soc/qcom/icnss.h | 9
-rw-r--r--  include/soc/qcom/subsystem_restart.h | 1
-rw-r--r--  include/sound/apr_audio-v2.h | 32
-rw-r--r--  include/sound/q6asm-v2.h | 9
-rw-r--r--  include/uapi/drm/msm_drm.h | 4
-rw-r--r--  include/uapi/linux/Kbuild | 2
-rw-r--r--  include/uapi/linux/cec-funcs.h | 1969
-rw-r--r--  include/uapi/linux/cec.h | 1065
-rw-r--r--  include/uapi/linux/msm_audio.h | 7
-rw-r--r--  include/uapi/media/Kbuild | 1
-rw-r--r--  include/uapi/media/ais/Kbuild | 6
-rw-r--r--  include/uapi/media/ais/msm_ais.h | 230
-rw-r--r--  include/uapi/media/ais/msm_ais_buf_mgr.h | 66
-rw-r--r--  include/uapi/media/ais/msm_ais_isp.h | 1105
-rw-r--r--  include/uapi/media/ais/msm_ais_ispif.h | 173
-rw-r--r--  include/uapi/media/ais/msm_ais_sensor.h | 614
-rw-r--r--  include/uapi/media/ais/msm_ais_sensor_sdk.h | 417
-rw-r--r--  kernel/irq/cpuhotplug.c | 2
-rw-r--r--  kernel/time/Makefile | 1
-rw-r--r--  kernel/time/hrtimer.c | 38
-rw-r--r--  kernel/time/timer.c | 48
-rw-r--r--  kernel/time/timer_list.c | 10
-rw-r--r--  kernel/time/timer_stats.c | 425
-rw-r--r--  kernel/workqueue.c | 2
-rw-r--r--  lib/Kconfig.debug | 14
-rw-r--r--  lib/test_bpf.c | 45
-rw-r--r--  net/core/rtnetlink.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 4
-rw-r--r--  net/ipv4/raw.c | 3
-rw-r--r--  net/ipv4/tcp_lp.c | 6
-rw-r--r--  net/ipv4/tcp_minisocks.c | 1
-rw-r--r--  net/ipv4/tcp_output.c | 19
-rw-r--r--  net/ipv6/addrconf.c | 3
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/route.c | 39
-rw-r--r--  net/netfilter/xt_qtaguid.c | 107
-rw-r--r--  net/netfilter/xt_qtaguid_internal.h | 2
-rw-r--r--  net/netfilter/xt_qtaguid_print.c | 8
-rw-r--r--  net/rfkill/core.c | 13
-rw-r--r--  net/rmnet_data/rmnet_data_config.c | 10
-rw-r--r--  net/rmnet_data/rmnet_data_handlers.c | 9
-rw-r--r--  net/wireless/db.txt | 8
-rw-r--r--  net/xfrm/xfrm_algo.c | 2
-rw-r--r--  sound/pci/hda/hda_intel.c | 13
-rw-r--r--  sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c | 16
-rw-r--r--  sound/soc/codecs/wcd_cpe_core.c | 4
-rw-r--r--  sound/soc/msm/apq8096-auto.c | 82
-rw-r--r--  sound/soc/msm/msm-dai-fe.c | 33
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c | 33
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c | 4
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 467
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h | 7
-rw-r--r--  sound/soc/msm/qdsp6v2/q6asm.c | 174
-rw-r--r--  sound/soc/msm/qdsp6v2/q6voice.c | 4
-rw-r--r--  sound/soc/msm/sdm660-internal.c | 40
-rw-r--r--  tools/power/cpupower/utils/helpers/cpuid.c | 1
408 files changed, 80316 insertions, 2260 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index 54bbf2535340..0c75cf64e4d2 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -32,7 +32,7 @@ Optional properties:
- clocks : List of phandle and clock specifier pairs
- clock-names : List of clock input name strings sorted in the same
order as the clocks property.
-- qcom,keep_radio_on_during_sleep: Boolean flag to indicate if to suspend to d3hot
+- qcom,keep-radio-on-during-sleep: Boolean flag to indicate if to suspend to d3hot
instead of turning off the device
Example:
@@ -58,6 +58,6 @@ Example:
clocks = <&clock_gcc clk_rf_clk3>,
<&clock_gcc clk_rf_clk3_pin>;
clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
- qcom,keep_radio_on_during_sleep;
+ qcom,keep-radio-on-during-sleep;
};
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
index da54fb11ffd4..176f9e115b42 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
@@ -169,8 +169,15 @@ Optional properties:
sleep configuration defined for each pin or pin group.
- qcom,hw-strobe-gpio : phandle to specify GPIO for hardware strobing. This is used when there is no
pinctrl support or PMIC GPIOs are used.
-- qcom,hw-strobe-sel : Boolean property to enable hardware strobe. If not defined, software strobe
- will be used.
+- qcom,strobe-sel : Property to select strobe type. If not defined,
+ software strobe will be used. Allowed options are:
+ 0 - SW strobe
+ 1 - HW strobe
+ 2 - LPG strobe
+ LPG strobe is supported only for LED3.
+ If LPG strobe is specified, then strobe control is
+ configured for active high and level triggered. Also
+ qcom,hw-strobe-option should be set to 1 or 2.
- qcom,hw-strobe-edge-trigger : Boolean property to select trigger type. If defined, hw-strobe is set to
be edge triggered. Otherwise, it is level triggered.
- qcom,hw-strobe-active-low : Boolean property to select strobe signal polarity. If defined, hw-strobe
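[Editor's note] For orientation, a minimal sketch of how the new qcom,strobe-sel property might be used to select LPG strobe on LED3. Only the two strobe properties are taken from the text above; the node name and everything else is a placeholder, not part of this patch:

    pmi_flash_led2: qcom,flash_2 {
        /* hypothetical LED3 sub-node, for illustration only */
        qcom,strobe-sel = <2>;       /* 2 = LPG strobe; supported only for LED3 */
        qcom,hw-strobe-option = <1>; /* must be 1 or 2 when LPG strobe is selected */
    };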
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
index b078cb9ab374..740c444ed21e 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
@@ -14,8 +14,8 @@ Required properties:
[Second level nodes]
Required properties:
- compatible : one of:
- - "qcom,msm-cam-smmu-cb"
- - "qcom,qsmmu-cam-cb"
+ - "qcom,msm-cam-smmu-cb" : For arm smmu iommu type.
+ - "qcom,qsmmu-cam-cb" : For qsmmu iommu type.
- iommus : Handle parsed by smmu driver. Number of entries will vary
across targets.
- label - string describing iommu domain usage.
@@ -23,6 +23,7 @@ Required properties:
Optional properties:
- qcom,scratch-buf-support : Enables iommu scratch buffer support in
that context bank.
+- qcom,secure-context : boolean type, to set the context domain type as secure.
Example:
qcom,cam_smmu@0 {
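(The example above is truncated by the diff context.) [Editor's note] As a hedged illustration of the new optional qcom,secure-context property, a context-bank sub-node might look like the sketch below; the node name, iommus specifier and label value are placeholders, not taken from this patch:

    qcom,smmu_cb1 {
        compatible = "qcom,msm-cam-smmu-cb";
        iommus = <&cam_smmu 0x800>;  /* placeholder SMMU phandle and SID */
        label = "secure_vfe";        /* placeholder domain label */
        qcom,secure-context;         /* marks this context bank's domain as secure */
    };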
diff --git a/Documentation/devicetree/bindings/net/neutrino_avb.txt b/Documentation/devicetree/bindings/net/neutrino_avb.txt
index 46c6a5208eba..471d59f2a3c0 100644
--- a/Documentation/devicetree/bindings/net/neutrino_avb.txt
+++ b/Documentation/devicetree/bindings/net/neutrino_avb.txt
@@ -2,6 +2,11 @@
This driver implements Ethernet driver for Neutrino ethernet controller
Required properties:
- compatible: Should be "qcom,ntn_avb"
+ - ntn-rst-delay-msec: delay (msec) required after PCIe reset for stabilization
+ - ntn-rc-num: PCIe root complex number on which Neutrino is connected
+
+Optional properties:
+ - ntn-bus-num: PCIe bus number on which Neutrino is connected
- ntn-rst-gpio: Neutrino reset GPIO
- vdd-ntn-hsic-supply: neutrino HSIC power supply
- vdd-ntn-pci-supply: PCIe core power supply
@@ -25,4 +30,5 @@ Example:
pinctrl-0 = <&ntn_default>;
qcom,ntn-rst-delay-msec = <100>;
qcom,ntn-rc-num = <1>;
+ qcom,ntn-bus-num = <1>;
};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index b41219d51973..468db388b0a6 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -35,11 +35,6 @@ Charger specific properties:
addition battery properties will be faked such that the device
assumes normal operation.
-- qcom,external-vconn
- Usage: optional
- Value type: <empty>
- Definition: Boolean flag which indicates VCONN is sourced externally.
-
- qcom,fcc-max-ua
Usage: optional
Value type: <u32>
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
index 0d53b9fa4378..1ac52d120daa 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
@@ -86,11 +86,11 @@ LAB subnode required properties:
40 and 50.
- interrupts: Specify the interrupts as per the interrupt
encoding.
- Currently "lab-vreg-ok" is required for
- LCD mode in pmi8998. For AMOLED mode,
- "lab-vreg-ok" is required only when SWIRE
- control is enabled and skipping 2nd SWIRE
- pulse is required in pmi8952/8996.
+ Currently "lab-vreg-ok" is required and "lab-sc_err"
+ is optional for LCD mode in pmi8998.
+ For AMOLED mode, "lab-vreg-ok" is required
+ only when SWIRE control is enabled and skipping
+ 2nd SWIRE pulse is required in pmi8952/8996.
- interrupt-names: Interrupt names to match up 1-to-1 with
the interrupts specified in 'interrupts'
property.
@@ -211,6 +211,14 @@ IBB subnode required properties:
IBB subnode optional properties:
+- interrupts: Specify the interrupts as per the interrupt
+ encoding.
+ Currently "ibb-sc-err" could be used for LCD mode
+ in pmi8998 to detect the short circuit fault.
+- interrupt-names: Interrupt names to match up 1-to-1 with
+ the interrupts specified in 'interrupts'
+ property.
+
- qcom,qpnp-ibb-discharge-resistor: The discharge resistor in Kilo Ohms which
controls the soft start time. Supported values
are 300, 64, 32 and 16.
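[Editor's note] A minimal sketch of the newly documented short-circuit interrupts, using the interrupt values that the msm-pmi8998.dtsi hunk later in this patch adds; the node names and unit addresses are illustrative:

    ibb_regulator: qcom,ibb@dc00 {
        interrupts = <0x3 0xdc 0x2 IRQ_TYPE_EDGE_RISING>;
        interrupt-names = "ibb-sc-err";
    };

    lab_regulator: qcom,lab@de00 {
        interrupts = <0x3 0xde 0x0 IRQ_TYPE_EDGE_RISING>,
                     <0x3 0xde 0x1 IRQ_TYPE_EDGE_RISING>;
        interrupt-names = "lab-vreg-ok", "lab-sc-err";
    };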
diff --git a/Documentation/timers/timer_stats.txt b/Documentation/timers/timer_stats.txt
deleted file mode 100644
index de835ee97455..000000000000
--- a/Documentation/timers/timer_stats.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-timer_stats - timer usage statistics
-------------------------------------
-
-timer_stats is a debugging facility to make the timer (ab)usage in a Linux
-system visible to kernel and userspace developers. If enabled in the config
-but not used it has almost zero runtime overhead, and a relatively small
-data structure overhead. Even if collection is enabled runtime all the
-locking is per-CPU and lookup is hashed.
-
-timer_stats should be used by kernel and userspace developers to verify that
-their code does not make unduly use of timers. This helps to avoid unnecessary
-wakeups, which should be avoided to optimize power consumption.
-
-It can be enabled by CONFIG_TIMER_STATS in the "Kernel hacking" configuration
-section.
-
-timer_stats collects information about the timer events which are fired in a
-Linux system over a sample period:
-
-- the pid of the task(process) which initialized the timer
-- the name of the process which initialized the timer
-- the function where the timer was initialized
-- the callback function which is associated to the timer
-- the number of events (callbacks)
-
-timer_stats adds an entry to /proc: /proc/timer_stats
-
-This entry is used to control the statistics functionality and to read out the
-sampled information.
-
-The timer_stats functionality is inactive on bootup.
-
-To activate a sample period issue:
-# echo 1 >/proc/timer_stats
-
-To stop a sample period issue:
-# echo 0 >/proc/timer_stats
-
-The statistics can be retrieved by:
-# cat /proc/timer_stats
-
-While sampling is enabled, each readout from /proc/timer_stats will see
-newly updated statistics. Once sampling is disabled, the sampled information
-is kept until a new sample period is started. This allows multiple readouts.
-
-Sample output of /proc/timer_stats:
-
-Timerstats sample period: 3.888770 s
- 12, 0 swapper hrtimer_stop_sched_tick (hrtimer_sched_tick)
- 15, 1 swapper hcd_submit_urb (rh_timer_func)
- 4, 959 kedac schedule_timeout (process_timeout)
- 1, 0 swapper page_writeback_init (wb_timer_fn)
- 28, 0 swapper hrtimer_stop_sched_tick (hrtimer_sched_tick)
- 22, 2948 IRQ 4 tty_flip_buffer_push (delayed_work_timer_fn)
- 3, 3100 bash schedule_timeout (process_timeout)
- 1, 1 swapper queue_delayed_work_on (delayed_work_timer_fn)
- 1, 1 swapper queue_delayed_work_on (delayed_work_timer_fn)
- 1, 1 swapper neigh_table_init_no_netlink (neigh_periodic_timer)
- 1, 2292 ip __netdev_watchdog_up (dev_watchdog)
- 1, 23 events/1 do_cache_clean (delayed_work_timer_fn)
-90 total events, 30.0 events/sec
-
-The first column is the number of events, the second column the pid, the third
-column is the name of the process. The forth column shows the function which
-initialized the timer and in parenthesis the callback function which was
-executed on expiry.
-
- Thomas, Ingo
-
-Added flag to indicate 'deferrable timer' in /proc/timer_stats. A deferrable
-timer will appear as follows
- 10D, 1 swapper queue_delayed_work_on (delayed_work_timer_fn)
-
diff --git a/MAINTAINERS b/MAINTAINERS
index 7875f7b71546..167a1a751339 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2657,6 +2657,22 @@ F: drivers/net/ieee802154/cc2520.c
F: include/linux/spi/cc2520.h
F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
+CEC DRIVER
+M: Hans Verkuil <hans.verkuil@cisco.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Supported
+F: Documentation/cec.txt
+F: Documentation/DocBook/media/v4l/cec*
+F: drivers/staging/media/cec/
+F: drivers/media/cec-edid.c
+F: drivers/media/rc/keymaps/rc-cec.c
+F: include/media/cec.h
+F: include/media/cec-edid.h
+F: include/linux/cec.h
+F: include/linux/cec-funcs.h
+
CELL BROADBAND ENGINE ARCHITECTURE
M: Arnd Bergmann <arnd@arndb.de>
L: linuxppc-dev@lists.ozlabs.org
diff --git a/Makefile b/Makefile
index 13486839df5b..a3ac228e0b3a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 67
+SUBLEVEL = 68
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
index 73429a4b25b1..b0ef9fcbaac6 100644
--- a/android/configs/android-base.cfg
+++ b/android/configs/android-base.cfg
@@ -3,7 +3,6 @@
# CONFIG_DEVMEM is not set
# CONFIG_FHANDLE is not set
# CONFIG_INET_LRO is not set
-# CONFIG_MODULES is not set
# CONFIG_OABI_COMPAT is not set
# CONFIG_SYSVIPC is not set
# CONFIG_USELIB is not set
@@ -86,7 +85,6 @@ CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
diff --git a/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts b/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts
index 822757ae1ec4..022841b5e769 100644
--- a/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts
+++ b/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts
@@ -87,3 +87,34 @@
&tspp {
qcom,lpass-timer-tts = <1>;
};
+
+&snd_9335 {
+ qcom,msm-mi2s-master = <1>, <1>, <1>, <0>;
+};
+
+&wcd_usbc_analog_en1_gpio {
+ status = "disabled";
+};
+
+&wcd_usbc_analog_en2n_gpio {
+ status = "disabled";
+};
+
+&soc {
+ qcom,msm-dai-mi2s {
+ dai_mi2s3: qcom,msm-dai-q6-mi2s-quat {
+ /* SD0 (1 << 0) | SD1 (1 << 1) | SD2 (1 << 2) */
+ qcom,msm-mi2s-rx-lines = <0>;
+ qcom,msm-mi2s-tx-lines = <15>; /* SD3 (1 << 3) */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active
+ &quat_mi2s_sd1_active
+ &quat_mi2s_sd2_active
+ &quat_mi2s_sd3_active>;
+ pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_sd0_sleep
+ &quat_mi2s_sd1_sleep
+ &quat_mi2s_sd2_sleep
+ &quat_mi2s_sd3_sleep>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm-pm660.dtsi b/arch/arm/boot/dts/qcom/msm-pm660.dtsi
index 7fde74f3d570..05225f7178e9 100644
--- a/arch/arm/boot/dts/qcom/msm-pm660.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm660.dtsi
@@ -338,11 +338,12 @@
qcom,chgr@1000 {
reg = <0x1000 0x100>;
- interrupts = <0x0 0x10 0x0 IRQ_TYPE_NONE>,
- <0x0 0x10 0x1 IRQ_TYPE_NONE>,
- <0x0 0x10 0x2 IRQ_TYPE_NONE>,
- <0x0 0x10 0x3 IRQ_TYPE_NONE>,
- <0x0 0x10 0x4 IRQ_TYPE_NONE>;
+ interrupts =
+ <0x0 0x10 0x0 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x10 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x10 0x2 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x10 0x3 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x10 0x4 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "chg-error",
"chg-state-change",
@@ -353,10 +354,10 @@
qcom,otg@1100 {
reg = <0x1100 0x100>;
- interrupts = <0x0 0x11 0x0 IRQ_TYPE_NONE>,
- <0x0 0x11 0x1 IRQ_TYPE_NONE>,
- <0x0 0x11 0x2 IRQ_TYPE_NONE>,
- <0x0 0x11 0x3 IRQ_TYPE_NONE>;
+ interrupts = <0x0 0x11 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x11 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x11 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x11 0x3 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "otg-fail",
"otg-overcurrent",
@@ -366,12 +367,13 @@
qcom,bat-if@1200 {
reg = <0x1200 0x100>;
- interrupts = <0x0 0x12 0x0 IRQ_TYPE_NONE>,
- <0x0 0x12 0x1 IRQ_TYPE_NONE>,
- <0x0 0x12 0x2 IRQ_TYPE_NONE>,
- <0x0 0x12 0x3 IRQ_TYPE_NONE>,
- <0x0 0x12 0x4 IRQ_TYPE_NONE>,
- <0x0 0x12 0x5 IRQ_TYPE_NONE>;
+ interrupts =
+ <0x0 0x12 0x0 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x12 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x12 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x12 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x12 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x12 0x5 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "bat-temp",
"bat-ocp",
@@ -383,14 +385,15 @@
qcom,usb-chgpth@1300 {
reg = <0x1300 0x100>;
- interrupts = <0x0 0x13 0x0 IRQ_TYPE_NONE>,
- <0x0 0x13 0x1 IRQ_TYPE_NONE>,
- <0x0 0x13 0x2 IRQ_TYPE_NONE>,
- <0x0 0x13 0x3 IRQ_TYPE_NONE>,
- <0x0 0x13 0x4 IRQ_TYPE_NONE>,
- <0x0 0x13 0x5 IRQ_TYPE_NONE>,
- <0x0 0x13 0x6 IRQ_TYPE_NONE>,
- <0x0 0x13 0x7 IRQ_TYPE_NONE>;
+ interrupts =
+ <0x0 0x13 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x13 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x13 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x13 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x13 0x5 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x13 0x6 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x13 0x7 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "usbin-collapse",
"usbin-lt-3p6v",
@@ -404,13 +407,14 @@
qcom,dc-chgpth@1400 {
reg = <0x1400 0x100>;
- interrupts = <0x0 0x14 0x0 IRQ_TYPE_NONE>,
- <0x0 0x14 0x1 IRQ_TYPE_NONE>,
- <0x0 0x14 0x2 IRQ_TYPE_NONE>,
- <0x0 0x14 0x3 IRQ_TYPE_NONE>,
- <0x0 0x14 0x4 IRQ_TYPE_NONE>,
- <0x0 0x14 0x5 IRQ_TYPE_NONE>,
- <0x0 0x14 0x6 IRQ_TYPE_NONE>;
+ interrupts =
+ <0x0 0x14 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x14 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x14 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x14 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x14 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x14 0x5 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x14 0x6 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "dcin-collapse",
"dcin-lt-3p6v",
@@ -423,14 +427,15 @@
qcom,chgr-misc@1600 {
reg = <0x1600 0x100>;
- interrupts = <0x0 0x16 0x0 IRQ_TYPE_NONE>,
- <0x0 0x16 0x1 IRQ_TYPE_NONE>,
- <0x0 0x16 0x2 IRQ_TYPE_NONE>,
- <0x0 0x16 0x3 IRQ_TYPE_NONE>,
- <0x0 0x16 0x4 IRQ_TYPE_NONE>,
- <0x0 0x16 0x5 IRQ_TYPE_NONE>,
- <0x0 0x16 0x6 IRQ_TYPE_NONE>,
- <0x0 0x16 0x7 IRQ_TYPE_NONE>;
+ interrupts =
+ <0x0 0x16 0x0 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x16 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x16 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x16 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x16 0x5 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x16 0x6 IRQ_TYPE_EDGE_FALLING>,
+ <0x0 0x16 0x7 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "wdog-snarl",
"wdog-bark",
diff --git a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
index ad32ab01c5fb..2b0fcb77eaf2 100644
--- a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
@@ -540,6 +540,10 @@
regulator-min-microvolt = <4600000>;
regulator-max-microvolt = <6000000>;
+ interrupts = <0x3 0xdc 0x2
+ IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ibb-sc-err";
+
qcom,qpnp-ibb-min-voltage = <1400000>;
qcom,qpnp-ibb-step-size = <100000>;
qcom,qpnp-ibb-slew-rate = <2000000>;
@@ -573,8 +577,11 @@
regulator-max-microvolt = <6000000>;
interrupts = <0x3 0xde 0x0
+ IRQ_TYPE_EDGE_RISING>,
+ <0x3 0xde 0x1
IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "lab-vreg-ok";
+ interrupt-names = "lab-vreg-ok", "lab-sc-err";
+
qcom,qpnp-lab-min-voltage = <4600000>;
qcom,qpnp-lab-step-size = <100000>;
qcom,qpnp-lab-slew-rate = <5000>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index 3e28f21eaac1..a1299027ae18 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -454,7 +454,7 @@
};
&soc {
- qcom,ntn_avb {
+ ntn1: ntn_avb@1 { /* Neutrno device on RC1*/
compatible = "qcom,ntn_avb";
ntn-rst-gpio = <&pm8994_gpios 13 0>;
@@ -465,6 +465,14 @@
qcom,ntn-rst-delay-msec = <100>;
qcom,ntn-rc-num = <1>;
+ qcom,ntn-bus-num = <1>;
+ };
+
+ ntn2: ntn_avb@2 { /*Neutrino device on RC2*/
+ compatible = "qcom,ntn_avb";
+ qcom,ntn-rst-delay-msec = <100>;
+ qcom,ntn-rc-num = <2>;
+ qcom,ntn-bus-num = <1>;
};
i2c@75ba000 {
diff --git a/arch/arm/boot/dts/qcom/msm8996-camera-sensor-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-camera-sensor-adp.dtsi
index 5fdb71c4a3d3..31139c0fbb6d 100644
--- a/arch/arm/boot/dts/qcom/msm8996-camera-sensor-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-camera-sensor-adp.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -128,9 +128,9 @@
cam_vio-supply = <&pm8994_lvs1>;
cam_vana-supply = <&pm8994_l17>;
qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
- qcom,cam-vreg-min-voltage = <1300000 0 2500000>;
- qcom,cam-vreg-max-voltage = <1300000 0 2500000>;
- qcom,cam-vreg-op-mode = <105000 0 80000>;
+ qcom,cam-vreg-min-voltage = <1300000 0 1800000>;
+ qcom,cam-vreg-max-voltage = <1300000 0 1800000>;
+ qcom,cam-vreg-op-mode = <1300000 0 1800000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active &cam_sensor_rear_active>;
@@ -167,9 +167,9 @@
cam_vio-supply = <&pm8994_lvs1>;
cam_vana-supply = <&pmi8994_boostbypass>;
qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
- qcom,cam-vreg-min-voltage = <1000000 0 3150000>;
- qcom,cam-vreg-max-voltage = <1000000 0 3600000>;
- qcom,cam-vreg-op-mode = <105000 0 80000>;
+ qcom,cam-vreg-min-voltage = <800000 0 3150000>;
+ qcom,cam-vreg-max-voltage = <800000 0 3600000>;
+ qcom,cam-vreg-op-mode = <800000 0 80000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active &cam_sensor_rear2_active>;
@@ -208,9 +208,9 @@
cam_vio-supply = <&pm8994_lvs1>;
cam_vana-supply = <&pm8994_l29>;
qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
- qcom,cam-vreg-min-voltage = <1000000 0 2800000>;
- qcom,cam-vreg-max-voltage = <1000000 0 2800000>;
- qcom,cam-vreg-op-mode = <105000 0 80000>;
+ qcom,cam-vreg-min-voltage = <800000 0 2500000>;
+ qcom,cam-vreg-max-voltage = <800000 0 2500000>;
+ qcom,cam-vreg-op-mode = <800000 0 2500000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active &cam_sensor_front_active>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-camera.dtsi b/arch/arm/boot/dts/qcom/msm8996-camera.dtsi
index 3ffd74e15f32..f3838785b38c 100644
--- a/arch/arm/boot/dts/qcom/msm8996-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-camera.dtsi
@@ -280,6 +280,7 @@
0 0 0 0
0 0 0
0 0 0>;
+ qcom,clock-cntl-support;
qcom,clock-control = "NO_SET_RATE", "NO_SET_RATE",
"NO_SET_RATE", "NO_SET_RATE",
"INIT_RATE",
diff --git a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
index 9f57fa5127d7..80888b73ef12 100644
--- a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
@@ -257,6 +257,8 @@
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,panel-roi-alignment = <720 128 720 128 1440 128>;
};
&dsi_dual_nt35597_truly_video {
diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
index 64f377f1a576..7de4fbbbf9ff 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
@@ -89,6 +89,10 @@
qcom,cmd-sync-wait-broadcast;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+ qcom,mdss-dsi-min-refresh-rate = <55>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
};
&dsi_dual_nt35597_cmd {
@@ -124,6 +128,10 @@
qcom,mdss-dsi-panel-on-check-value = <0x9c>;
qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-panel-max-error-count = <3>;
+ qcom,mdss-dsi-min-refresh-rate = <55>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
};
&dsi_nt35597_dsc_cmd {
diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
index 1b746a593579..b7651aee5a67 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
@@ -212,7 +212,7 @@
qcom,mdss-pp-offsets {
qcom,mdss-sspp-mdss-igc-lut-off = <0x2000>;
- qcom,mdss-sspp-vig-pcc-off = <0x1780>;
+ qcom,mdss-sspp-vig-pcc-off = <0x1b00>;
qcom,mdss-sspp-rgb-pcc-off = <0x380>;
qcom,mdss-sspp-dma-pcc-off = <0x380>;
qcom,mdss-lm-pgc-off = <0x3c0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
index 859e77679e44..e3e9d45bc784 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
@@ -318,6 +318,8 @@
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,panel-roi-alignment = <720 128 720 128 1440 128>;
};
&dsi_dual_nt35597_truly_video {
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index c1d785e42669..0fb84e024698 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -1827,6 +1827,7 @@
vdda18-supply = <&pm8998_l12>;
vdda33-supply = <&pm8998_l24>;
qcom,vdd-voltage-level = <0 880000 880000>;
+ qcom,vdda33-voltage-level = <2400000 3088000 3088000>;
qcom,qusb-phy-init-seq =
/* <value reg_offset> */
<0x80 0x0
diff --git a/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts
index 480a69601541..227a8999a745 100644
--- a/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts
+++ b/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts
@@ -16,42 +16,15 @@
#include "sda630.dtsi"
#include "sdm630-qrd.dtsi"
#include "msm-pm660a.dtsi"
-#include "sdm660-internal-codec.dtsi"
+#include "sdm660-external-codec.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660A QRD HDK630";
+ model = "Qualcomm Technologies, Inc. SDA 630 PM660 + PM660A QRD HDK630";
compatible = "qcom,sda630-qrd", "qcom,sda630", "qcom,qrd";
- qcom,board-id = <0x0006000b 0x00>;
+ qcom,board-id = <0x0016000b 0x00>;
qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>;
};
-&int_codec {
- qcom,model = "sdm660-snd-card-skush";
- /delete-property/ qcom,us-euro-gpios;
- qcom,audio-routing =
- "RX_BIAS", "INT_MCLK0",
- "SPK_RX_BIAS", "INT_MCLK0",
- "INT_LDO_H", "INT_MCLK0",
- "MIC BIAS External2", "Headset Mic",
- "AMIC2", "MIC BIAS External2",
- "MIC BIAS External", "Digital Mic1",
- "DMIC1", "MIC BIAS External",
- "MIC BIAS External", "Digital Mic3",
- "DMIC3", "MIC BIAS External",
- "MIC BIAS External", "Digital Mic4",
- "DMIC4", "MIC BIAS External",
- "SpkrLeft IN", "SPK1 OUT",
- "PDM_IN_RX1", "PDM_OUT_RX1",
- "PDM_IN_RX2", "PDM_OUT_RX2",
- "PDM_IN_RX3", "PDM_OUT_RX3",
- "ADC1_IN", "ADC1_OUT",
- "ADC2_IN", "ADC2_OUT",
- "ADC3_IN", "ADC3_OUT";
- qcom,wsa-max-devs = <1>;
- qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_213_en>;
- qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
-};
-
&pm660a_oledb {
status = "okay";
qcom,oledb-default-voltage-mv = <6400>;
@@ -83,3 +56,30 @@
qcom,mdss-dsi-bl-max-level = <255>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
};
+
+/delete-node/ &tasha_hph_en0;
+/delete-node/ &tasha_hph_en1;
+
+&tasha_snd {
+ qcom,model = "sdm660-tasha-skus-snd-card";
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC5", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic5",
+ "SpkrLeft IN", "SPK1 OUT";
+ qcom,msm-mbhc-hphl-swh = <0>;
+ /delete-property/ qcom,us-euro-gpios;
+ /delete-property/ qcom,hph-en0-gpio;
+ /delete-property/ qcom,hph-en1-gpio;
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_213>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+};
diff --git a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
index c2bf2c4a088e..7be428693f83 100644
--- a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
+++ b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
@@ -157,7 +157,7 @@
/ {
model = "Qualcomm Technologies, Inc. SDA 660 PM660 + PM660A QRD HDK660";
compatible = "qcom,sda660-qrd", "qcom,sda660", "qcom,qrd";
- qcom,board-id = <0x0006000b 0>;
+ qcom,board-id = <0x0016000b 0>;
qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>;
};
@@ -191,3 +191,20 @@
qcom,mdss-dsi-bl-max-level = <255>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
};
+
+&tasha_snd {
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC5", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic5",
+ "SpkrLeft IN", "SPK1 OUT";
+ qcom,msm-mbhc-hphl-swh = <0>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi
index 94158834eee6..0275016c9662 100644
--- a/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi
@@ -29,6 +29,36 @@
qcom,switch-source = <&pm660l_switch1>;
status = "ok";
};
+
+ cam_avdd_gpio_regulator: cam_avdd_fixed_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "cam_avdd_gpio_regulator";
+ regulator-min-microvolt = <3600000>;
+ regulator-max-microvolt = <3600000>;
+ enable-active-high;
+ gpio = <&tlmm 51 0>;
+ vin-supply = <&pm660l_bob>;
+ };
+
+ cam_dvdd_gpio_regulator: cam_dvdd_fixed_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "cam_dvdd_gpio_regulator";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 3 0>;
+ vin-supply = <&pm660_s5>;
+ };
+
+ cam_rear_dvdd_gpio_regulator: cam_rear_dvdd_fixed_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "cam_rear_dvdd_gpio_regulator";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ vin-supply = <&pm660_s5>;
+ };
};
&cci {
@@ -89,14 +119,14 @@
reg = <0>;
compatible = "qcom,eeprom";
cam_vio-supply = <&pm660_l11>;
- cam_vana-supply = <&pm660l_bob>;
- cam_vdig-supply = <&pm660_s5>;
+ cam_vana-supply = <&cam_avdd_gpio_regulator>;
+ cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
cam_vaf-supply = <&pm660l_l8>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
"cam_vaf";
- qcom,cam-vreg-min-voltage = <1780000 3300000 1350000 2800000>;
- qcom,cam-vreg-max-voltage = <1950000 3600000 1350000 3400000>;
- qcom,cam-vreg-op-mode = <105000 80000 105000 100000>;
+ qcom,cam-vreg-min-voltage = <1780000 0 0 2800000>;
+ qcom,cam-vreg-max-voltage = <1950000 0 0 3400000>;
+ qcom,cam-vreg-op-mode = <105000 0 0 100000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
@@ -104,18 +134,12 @@
pinctrl-1 = <&cam_sensor_mclk0_suspend
&cam_sensor_rear_suspend>;
gpios = <&tlmm 32 0>,
- <&tlmm 46 0>,
- <&pm660l_gpios 4 0>,
- <&tlmm 51 0>;
+ <&tlmm 46 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vdig = <2>;
- qcom,gpio-vana = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
- "CAM_RESET0",
- "CAM_VDIG",
- "CAM_VANA";
+ "CAM_RESET0";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <0>;
@@ -131,14 +155,14 @@
reg = <0x1>;
compatible = "qcom,eeprom";
cam_vio-supply = <&pm660_l11>;
- cam_vana-supply = <&pm660l_bob>;
- cam_vdig-supply = <&pm660_s5>;
+ cam_vana-supply = <&cam_avdd_gpio_regulator>;
+ cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
cam_vaf-supply = <&pm660l_l8>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
"cam_vaf";
- qcom,cam-vreg-min-voltage = <1780000 3300000 1350000 2800000>;
- qcom,cam-vreg-max-voltage = <1950000 3600000 1350000 3400000>;
- qcom,cam-vreg-op-mode = <105000 80000 105000 100000>;
+ qcom,cam-vreg-min-voltage = <1780000 0 0 2800000>;
+ qcom,cam-vreg-max-voltage = <1950000 0 0 3400000>;
+ qcom,cam-vreg-op-mode = <105000 0 0 100000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
@@ -146,18 +170,12 @@
pinctrl-1 = <&cam_sensor_mclk2_suspend
&cam_sensor_rear2_suspend>;
gpios = <&tlmm 34 0>,
- <&tlmm 48 0>,
- <&pm660l_gpios 3 0>,
- <&tlmm 51 0>;
+ <&tlmm 48 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vdig = <2>;
- qcom,gpio-vana = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK",
- "CAM_RESET",
- "CAM_VDIG",
- "CAM_VANA";
+ "CAM_RESET";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <1>;
@@ -174,12 +192,12 @@
compatible = "qcom,eeprom";
cam_vio-supply = <&pm660_l11>;
cam_vana-supply = <&pm660l_bob>;
- cam_vdig-supply = <&pm660_s5>;
+ cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
"cam_vaf";
- qcom,cam-vreg-min-voltage = <1780000 3300000 1350000 2800000>;
- qcom,cam-vreg-max-voltage = <1950000 3600000 1350000 3400000>;
- qcom,cam-vreg-op-mode = <105000 80000 105000 100000>;
+ qcom,cam-vreg-min-voltage = <1780000 3300000 0 2800000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 0 3400000>;
+ qcom,cam-vreg-op-mode = <105000 80000 0 100000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
@@ -188,16 +206,13 @@
&cam_sensor_front_suspend>;
gpios = <&tlmm 33 0>,
<&tlmm 47 0>,
- <&pm660_gpios 3 0>,
<&tlmm 44 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vdig = <2>;
- qcom,gpio-vana = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
"CAM_RESET2",
- "CAM_VDIG",
"CAM_VANA";
qcom,sensor-position = <1>;
qcom,sensor-mode = <0>;
@@ -221,12 +236,12 @@
qcom,ois-src = <&ois0>;
qcom,eeprom-src = <&eeprom0>;
cam_vio-supply = <&pm660_l11>;
- cam_vana-supply = <&pm660l_bob>;
- cam_vdig-supply = <&pm660_s5>;
+ cam_vana-supply = <&cam_avdd_gpio_regulator>;
+ cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1780000 3300000 1350000>;
- qcom,cam-vreg-max-voltage = <1950000 3600000 1350000>;
- qcom,cam-vreg-op-mode = <105000 80000 105000>;
+ qcom,cam-vreg-min-voltage = <1780000 0 0>;
+ qcom,cam-vreg-max-voltage = <1950000 0 0>;
+ qcom,cam-vreg-op-mode = <105000 0 0>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
@@ -234,18 +249,12 @@
pinctrl-1 = <&cam_sensor_mclk0_suspend
&cam_sensor_rear_suspend>;
gpios = <&tlmm 32 0>,
- <&tlmm 46 0>,
- <&pm660l_gpios 4 0>,
- <&tlmm 51 0>;
+ <&tlmm 46 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vdig = <2>;
- qcom,gpio-vana = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
- "CAM_RESET0",
- "CAM_VDIG",
- "CAM_VANA";
+ "CAM_RESET0";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <0>;
@@ -266,12 +275,12 @@
qcom,actuator-src = <&actuator1>;
qcom,eeprom-src = <&eeprom1>;
cam_vio-supply = <&pm660_l11>;
- cam_vana-supply = <&pm660l_bob>;
- cam_vdig-supply = <&pm660_s5>;
+ cam_vana-supply = <&cam_avdd_gpio_regulator>;
+ cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1780000 3300000 1350000>;
- qcom,cam-vreg-max-voltage = <1950000 3600000 1350000>;
- qcom,cam-vreg-op-mode = <105000 80000 105000>;
+ qcom,cam-vreg-min-voltage = <1780000 0 0>;
+ qcom,cam-vreg-max-voltage = <1950000 0 0>;
+ qcom,cam-vreg-op-mode = <105000 0 0>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
@@ -279,18 +288,12 @@
pinctrl-1 = <&cam_sensor_mclk2_suspend
&cam_sensor_rear2_suspend>;
gpios = <&tlmm 34 0>,
- <&tlmm 48 0>,
- <&pm660l_gpios 3 0>,
- <&tlmm 51 0>;
+ <&tlmm 48 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vdig = <2>;
- qcom,gpio-vana = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK",
- "CAM_RESET",
- "CAM_VDIG",
- "CAM_VANA";
+ "CAM_RESET";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <1>;
@@ -311,12 +314,12 @@
qcom,actuator-src = <&actuator2>;
qcom,eeprom-src = <&eeprom2>;
cam_vio-supply = <&pm660_l11>;
- cam_vana-supply = <&pm660l_bob>;
- cam_vdig-supply = <&pm660_s5>;
+ cam_vana-supply = <&cam_avdd_gpio_regulator>;
+ cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1780000 3300000 1350000>;
- qcom,cam-vreg-max-voltage = <1950000 3600000 1350000>;
- qcom,cam-vreg-op-mode = <105000 80000 105000>;
+ qcom,cam-vreg-min-voltage = <1780000 0 0>;
+ qcom,cam-vreg-max-voltage = <1950000 0 0>;
+ qcom,cam-vreg-op-mode = <105000 0 0>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
@@ -324,18 +327,12 @@
pinctrl-1 = <&cam_sensor_mclk1_suspend
&cam_sensor_front_suspend>;
gpios = <&tlmm 33 0>,
- <&tlmm 47 0>,
- <&pm660l_gpios 3 0>,
- <&tlmm 51 0>;
+ <&tlmm 47 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vdig = <2>;
- qcom,gpio-vana = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
- "CAM_RESET2",
- "CAM_VDIG",
- "CAM_VANA";
+ "CAM_RESET2";
qcom,sensor-position = <1>;
qcom,sensor-mode = <0>;
qcom,cci-master = <1>;
@@ -345,6 +342,46 @@
clock-names = "cam_src_clk", "cam_clk";
qcom,clock-rates = <24000000 0>;
};
+
+ qcom,camera@3 {
+ cell-index = <3>;
+ compatible = "qcom,camera";
+ reg = <0x03>;
+ qcom,csiphy-sd-index = <1>;
+ qcom,csid-sd-index = <1>;
+ qcom,mount-angle = <90>;
+ qcom,led-flash-src = <&led_flash1>;
+ qcom,actuator-src = <&actuator2>;
+ qcom,eeprom-src = <&eeprom2>;
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&cam_avdd_gpio_regulator>;
+ cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <1780000 0 0>;
+ qcom,cam-vreg-max-voltage = <1950000 0 0>;
+ qcom,cam-vreg-op-mode = <105000 0 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk3_active
+ &cam_sensor_front_iris_active>;
+ pinctrl-1 = <&cam_sensor_mclk3_suspend
+ &cam_sensor_front_iris_suspend>;
+ gpios = <&tlmm 35 0>,
+ <&tlmm 52 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK3",
+ "CAM_RESET3";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK3_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK3_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
};
&pm660l_gpios {
diff --git a/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi b/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
index 49e4fd7e5ba7..d7fef426d4b6 100644
--- a/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
@@ -441,7 +441,16 @@
qcom,msm_ext_disp = <&msm_ext_disp>;
- qcom,aux-cfg-settings = [00 13 00 00 0a 28 0a 03 b7 03];
+ qcom,aux-cfg0-settings = [20 00];
+ qcom,aux-cfg1-settings = [24 13 23 1d];
+ qcom,aux-cfg2-settings = [28 00];
+ qcom,aux-cfg3-settings = [2c 00];
+ qcom,aux-cfg4-settings = [30 0a];
+ qcom,aux-cfg5-settings = [34 28];
+ qcom,aux-cfg6-settings = [38 0a];
+ qcom,aux-cfg7-settings = [3c 03];
+ qcom,aux-cfg8-settings = [40 b7];
+ qcom,aux-cfg9-settings = [44 03];
qcom,logical2physical-lane-map = [00 01 02 03];
qcom,phy-register-offset = <0x4>;
qcom,max-pclk-frequency-khz = <150000>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi
index 64ca4676ccd5..46f77e9a3253 100644
--- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi
@@ -277,6 +277,7 @@
qcom,csiphy-sd-index = <1>;
qcom,csid-sd-index = <2>;
qcom,mount-angle = <90>;
+ qcom,led-flash-src = <&led_flash0>;
qcom,actuator-src = <&actuator1>;
qcom,eeprom-src = <&eeprom1>;
cam_vio-supply = <&pm660_l11>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi
index 3d37d169a97c..94166bf8dd3e 100644
--- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi
@@ -286,6 +286,7 @@
qcom,csiphy-sd-index = <1>;
qcom,csid-sd-index = <2>;
qcom,mount-angle = <90>;
+ qcom,led-flash-src = <&led_flash0>;
qcom,actuator-src = <&actuator1>;
qcom,eeprom-src = <&eeprom1>;
cam_vio-supply = <&pm660_l11>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi
index 0425a338c51d..ec754f3cce80 100644
--- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi
@@ -328,6 +328,7 @@
qcom,csiphy-sd-index = <1>;
qcom,csid-sd-index = <1>;
qcom,mount-angle = <270>;
+ qcom,led-flash-src = <&led_flash0>;
qcom,actuator-src = <&actuator1>;
qcom,eeprom-src = <&eeprom1>;
cam_vio-supply = <&pm660_l11>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-common.dtsi b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
index f933586183ec..baced7758c9f 100644
--- a/arch/arm/boot/dts/qcom/sdm660-common.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
@@ -267,10 +267,8 @@
qusb_phy0: qusb@c012000 {
compatible = "qcom,qusb2phy";
reg = <0x0c012000 0x180>,
- <0x01fcb24c 0x4>,
<0x00188018 0x4>;
reg-names = "qusb_phy_base",
- "tcsr_clamp_dig_n_1p8",
"ref_clk_addr";
vdd-supply = <&pm660l_l1>;
vdda18-supply = <&pm660_l10>;
@@ -319,9 +317,9 @@
0x34 0x08 0x00
0x174 0x30 0x00
0x3c 0x06 0x00
- 0xbc 0x00 0x00
- 0xc0 0x08 0x00
- 0x194 0x06 0x00
+ 0xb4 0x00 0x00
+ 0xb8 0x08 0x00
+ 0x70 0x0f 0x00
0x19c 0x01 0x00
0x178 0x00 0x00
0xd0 0x82 0x00
@@ -350,7 +348,7 @@
0x24 0xde 0x00
0x28 0x07 0x00
0x48 0x0f 0x00
- 0x70 0x0f 0x00
+ 0x194 0x06 0x00
0x100 0x80 0x00
0xa8 0x01 0x00
0x430 0x0b 0x00
@@ -377,8 +375,6 @@
0x8f8 0x77 0x00
0x4fc 0x80 0x00
0x8fc 0x80 0x00
- 0x564 0x00 0x00
- 0x964 0x00 0x00
0x4c0 0x0a 0x00
0x8c0 0x0a 0x00
0x504 0x03 0x00
@@ -387,6 +383,8 @@
0x90c 0x16 0x00
0x500 0x00 0x00
0x900 0x00 0x00
+ 0x564 0x00 0x00
+ 0x964 0x00 0x00
0x260 0x10 0x00
0x660 0x10 0x00
0x2a4 0x12 0x00
@@ -456,6 +454,54 @@
qcom,reset-ep-after-lpm-resume;
};
+ usb2s: hsusb@c200000 {
+ compatible = "qcom,dwc-usb3-msm";
+ reg = <0x0c200000 0xfc000>,
+ <0x0c016000 0x400>;
+ reg-names = "core_base",
+ "ahb2phy_base";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ interrupts = <0 348 0>, <0 144 0>;
+ interrupt-names = "hs_phy_irq", "pwr_event_irq";
+
+ qcom,msm-bus,name = "usb-hs";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <87 512 0 0>,
+ <87 512 60000 800000>;
+
+ qcom,pm-qos-latency = <52>; /* CPU-CLUSTER-WFI-LVL latency +1 */
+ clocks = <&clock_gcc GCC_USB20_MASTER_CLK>,
+ <&clock_gcc GCC_CFG_NOC_USB2_AXI_CLK>,
+ <&clock_gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&clock_gcc GCC_USB20_SLEEP_CLK>,
+ <&clock_rpmcc CXO_DWC3_CLK>,
+ <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+ clock-names = "core_clk", "iface_clk", "utmi_clk", "sleep_clk",
+ "xo", "cfg_ahb_clk";
+ qcom,core-clk-rate = <60000000>;
+ resets = <&clock_gcc GCC_USB_20_BCR>;
+ reset-names = "core_reset";
+
+ status = "disabled";
+ dwc3@c200000 {
+ compatible = "snps,dwc3";
+ reg = <0x0c200000 0xc8d0>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 143 0>;
+ usb-phy = <&qusb_phy1>, <&usb_nop_phy>;
+ maximum-speed = "high-speed";
+ snps,nominal-elastic-buffer;
+ snps,is-utmi-l1-suspend;
+ snps,hird-threshold = /bits/ 8 <0x0>;
+ dr_mode = "host";
+ };
+ };
+
qusb_phy1: qusb@c014000 {
compatible = "qcom,qusb2phy";
reg = <0x0c014000 0x180>,
@@ -465,7 +511,7 @@
vdd-supply = <&pm660l_l1>;
vdda18-supply = <&pm660_l10>;
vdda33-supply = <&pm660l_l7>;
- qcom,vdd-voltage-level = <1 5 7>;
+ qcom,vdd-voltage-level = <0 925000 925000>;
qcom,qusb-phy-init-seq = <0xF8 0x80
0xB3 0x84
0x83 0x88
@@ -490,6 +536,10 @@
reset-names = "phy_reset";
};
+ usb_nop_phy: usb_nop_phy {
+ compatible = "usb-nop-xceiv";
+ };
+
sdhc_1: sdhci@c0c4000 {
compatible = "qcom,sdhci-msm-v5";
reg = <0xc0c4000 0x1000>, <0xc0c5000 0x1000>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
index cb554a639d71..45b7201fbf71 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
@@ -41,8 +41,8 @@
vccq2-supply = <&pm660_l8>;
vcc-max-microamp = <500000>;
vccq2-max-microamp = <600000>;
- vddp-ref-clk-supply = <&pm660_l1>;
- vddp-ref-clk-max-microamp = <100>;
+ qcom,vddp-ref-clk-supply = <&pm660_l1>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-pm.dtsi b/arch/arm/boot/dts/qcom/sdm660-pm.dtsi
index 1624975028c5..21fab4923331 100644
--- a/arch/arm/boot/dts/qcom/sdm660-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-pm.dtsi
@@ -341,8 +341,10 @@
qcom,gic-map =
<0x02 216>, /* tsens1_tsens_upper_lower_int */
<0x34 275>, /* qmp_usb3_lfps_rxterm_irq_cx */
- <0x4f 379>, /* qusb2phy_intr */
- <0x51 379>, /* qusb2phy_intr */
+ <0x4f 379>, /* qusb2phy_intr for Dm */
+ <0x50 380>, /* qusb2phy_intr for Dm for secondary PHY */
+ <0x51 379>, /* qusb2phy_intr for Dp */
+ <0x52 380>, /* qusb2phy_intr for Dp for secondary PHY */
<0x57 358>, /* ee0_apps_hlos_spmi_periph_irq */
<0x5b 519>, /* lpass_pmu_tmr_timeout_irq_cx */
<0xff 16>, /* APC[0-7]_qgicQTmrHypPhysIrptReq */
@@ -484,6 +486,7 @@
<0xff 208>, /* lpi_dir_conn_irq_apps[0] */
<0xff 209>, /* lpi_dir_conn_irq_apps[1] */
<0xff 210>, /* lpi_dir_conn_irq_apps[2] */
+ <0xff 212>, /* usb30s_power_event_irq */
<0xff 213>, /* secure_wdog_bark_irq */
<0xff 214>, /* tsens1_tsens_max_min_int */
<0xff 215>, /* o_bimc_intr[0] */
@@ -610,7 +613,6 @@
<0xff 364>, /* osmmu_CIrpt[3] */
<0xff 365>, /* ipa_irq[0] */
<0xff 366>, /* osmmu_PMIrpt */
- <0xff 380>, /* qusb2phy_intr */
<0xff 381>, /* osmmu_CIrpt[6] */
<0xff 382>, /* osmmu_CIrpt[7] */
<0xff 385>, /* osmmu_CIrpt[12] */
diff --git a/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi b/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi
index 8b6bbac171f4..b701ecd562cd 100644
--- a/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi
@@ -237,6 +237,9 @@
rpm-regulator-ldoa10 {
status = "okay";
pm660_l10: regulator-l10 {
+ proxy-supply = <&pm660_l10>;
+ qcom,proxy-consumer-enable;
+ qcom,proxy-consumer-current = <14000>;
regulator-min-microvolt = <1780000>;
regulator-max-microvolt = <1950000>;
status = "okay";
diff --git a/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi b/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi
index b1ca93b9f613..06b3be2d5c0a 100644
--- a/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi
@@ -62,13 +62,14 @@
/* Clocks */
clock-names = "gcc_mmss_sys_noc_axi_clk",
- "mmssnoc_axi_clk", "mmss_mnoc_ahb_clk",
- "mmss_bimc_smmu_ahb_clk", "mmss_bimc_smmu_axi_clk",
- "mmss_video_core_clk", "mmss_video_ahb_clk",
- "mmss_video_axi_clk",
+ "mmssnoc_axi_clk", "mmss_throttle_video_axi_clk",
+ "mmss_mnoc_ahb_clk", "mmss_bimc_smmu_ahb_clk",
+ "mmss_bimc_smmu_axi_clk", "mmss_video_core_clk",
+ "mmss_video_ahb_clk", "mmss_video_axi_clk",
"mmss_video_core0_clk";
clocks = <&clock_gcc GCC_MMSS_SYS_NOC_AXI_CLK>,
<&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_THROTTLE_VIDEO_AXI_CLK>,
<&clock_mmss MMSS_MNOC_AHB_CLK>,
<&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
<&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
@@ -76,7 +77,7 @@
<&clock_mmss MMSS_VIDEO_AHB_CLK>,
<&clock_mmss MMSS_VIDEO_AXI_CLK>,
<&clock_mmss MMSS_VIDEO_SUBCORE0_CLK>;
- qcom,clock-configs = <0x0 0x0 0x0 0x0 0x0
+ qcom,clock-configs = <0x0 0x0 0x0 0x0 0x0 0x0
0x3 0x0 0x2 0x3>;
/* Buses */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 80856def2465..82bdac0f2804 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -73,7 +73,6 @@ obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
perf_event_v7.o
-CFLAGS_pj4-cp0.o := -marm
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_VDSO) += vdso.o
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index da3cafbd682b..b3b950fc8ea0 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -87,9 +87,9 @@ void __init arm_dt_init_cpu_maps(void)
return;
for_each_child_of_node(cpus, cpu) {
+ const __be32 *cell;
int prop_bytes;
u32 hwid;
- const __be32 *cell;
if (of_node_cmp(cpu->type, "cpu"))
continue;
@@ -100,13 +100,14 @@ void __init arm_dt_init_cpu_maps(void)
* properties is considered invalid to build the
* cpu_logical_map.
*/
- cell = of_get_property(cpu, "reg", NULL);
- if (!cell) {
- pr_err("%s: missing reg property\n", cpu->full_name);
+ cell = of_get_property(cpu, "reg", &prop_bytes);
+ if (!cell || prop_bytes < sizeof(*cell)) {
+ pr_debug(" * %s missing reg property\n",
+ cpu->full_name);
of_node_put(cpu);
return;
}
- hwid = of_read_number(cell, of_n_addr_cells(cpu));
+
/*
* Bits n:24 must be set to 0 in the DT since the reg property
* defines the MPIDR[23:0].
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
index 8153e36b2491..7c9248b74d3f 100644
--- a/arch/arm/kernel/pj4-cp0.c
+++ b/arch/arm/kernel/pj4-cp0.c
@@ -66,9 +66,13 @@ static void __init pj4_cp_access_write(u32 value)
__asm__ __volatile__ (
"mcr p15, 0, %1, c1, c0, 2\n\t"
+#ifdef CONFIG_THUMB2_KERNEL
+ "isb\n\t"
+#else
"mrc p15, 0, %0, c1, c0, 2\n\t"
"mov %0, %0\n\t"
"sub pc, pc, #4\n\t"
+#endif
: "=r" (temp) : "r" (value));
}
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 6d1dffca6c7b..748dde9fa4a5 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
#include "omap44xx.h"
@@ -56,7 +57,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
cmp r0, r4
bne wait_2
ldr r12, =API_HYP_ENTRY
- adr r0, hyp_boot
+ badr r0, hyp_boot
smc #0
hyp_boot:
b secondary_startup
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 6ee2f4003574..6f5be663140f 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -639,6 +639,7 @@ CONFIG_CORESIGHT_QPDI=y
CONFIG_CORESIGHT_SOURCE_DUMMY=y
CONFIG_PFK=y
CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index bbe23b823b5d..f09a134a2fd5 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -715,6 +715,7 @@ CONFIG_CORESIGHT_QPDI=y
CONFIG_CORESIGHT_SOURCE_DUMMY=y
CONFIG_PFK=y
CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index b162ad70effc..6297140dd84f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -728,14 +728,14 @@ static int build_body(struct jit_ctx *ctx)
int ret;
ret = build_insn(insn, ctx);
-
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
-
if (ret > 0) {
i++;
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
continue;
}
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
if (ret)
return ret;
}
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index af27334d6809..e3384065f5e7 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -434,8 +434,8 @@ static int multu_func(struct pt_regs *regs, u32 ir)
rs = regs->regs[MIPSInst_RS(ir)];
res = (u64)rt * (u64)rs;
rt = res;
- regs->lo = (s64)rt;
- regs->hi = (s64)(res >> 32);
+ regs->lo = (s64)(s32)rt;
+ regs->hi = (s64)(s32)(res >> 32);
MIPS_R2_STATS(muls);
@@ -671,9 +671,9 @@ static int maddu_func(struct pt_regs *regs, u32 ir)
res += ((((s64)rt) << 32) | (u32)rs);
rt = res;
- regs->lo = (s64)rt;
+ regs->lo = (s64)(s32)rt;
rs = res >> 32;
- regs->hi = (s64)rs;
+ regs->hi = (s64)(s32)rs;
MIPS_R2_STATS(dsps);
@@ -729,9 +729,9 @@ static int msubu_func(struct pt_regs *regs, u32 ir)
res = ((((s64)rt) << 32) | (u32)rs) - res;
rt = res;
- regs->lo = (s64)rt;
+ regs->lo = (s64)(s32)rt;
rs = res >> 32;
- regs->hi = (s64)rs;
+ regs->hi = (s64)(s32)rs;
MIPS_R2_STATS(dsps);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e45b88a5d7e0..ae877c7b3905 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -148,7 +148,7 @@ opal_tracepoint_entry:
opal_tracepoint_return:
std r3,STK_REG(R31)(r1)
mr r4,r3
- ld r0,STK_REG(R23)(r1)
+ ld r3,STK_REG(R23)(r1)
bl __trace_opal_exit
ld r3,STK_REG(R31)(r1)
addi r1,r1,STACKFRAMESIZE
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8ca533b8c606..1e5d2f07416b 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ack_level,
.irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ir_ack_level,
.irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index c6ee63f927ab..d688826e5736 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -67,7 +67,7 @@
#endif
/* Check whether the instruction can be boosted */
-extern int can_boost(kprobe_opcode_t *instruction);
+extern int can_boost(kprobe_opcode_t *instruction, void *addr);
/* Recover instruction if given address is probed */
extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
unsigned long addr);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 023c442c33bb..99d293ea2b49 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -163,12 +163,12 @@ NOKPROBE_SYMBOL(skip_prefixes);
* Returns non-zero if opcode is boostable.
* RIP relative instructions are adjusted at copying time in 64 bits mode
*/
-int can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes, void *addr)
{
kprobe_opcode_t opcode;
kprobe_opcode_t *orig_opcodes = opcodes;
- if (search_exception_tables((unsigned long)opcodes))
+ if (search_exception_tables((unsigned long)addr))
return 0; /* Page fault may occur on this address. */
retry:
@@ -413,7 +413,7 @@ static int arch_copy_kprobe(struct kprobe *p)
* __copy_instruction can modify the displacement of the instruction,
* but it doesn't affect boostable check.
*/
- if (can_boost(p->ainsn.insn))
+ if (can_boost(p->ainsn.insn, p->addr))
p->ainsn.boostable = 0;
else
p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 7b3b9d15c47a..c9d488f3e4cd 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -177,7 +177,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
while (len < RELATIVEJUMP_SIZE) {
ret = __copy_instruction(dest + len, src + len);
- if (!ret || !can_boost(dest + len))
+ if (!ret || !can_boost(dest + len, src + len))
return -EINVAL;
len += ret;
}
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 0497f719977d..c055e9a4e547 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -296,7 +296,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
/* were we called with bad_dma_address? */
badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
- if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
+ if (unlikely(dma_addr < badend)) {
WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
"address 0x%Lx\n", dma_addr);
return;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 2e1fd586b895..642e9c93a097 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -818,12 +818,6 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
if (!best)
best = check_cpuid_limit(vcpu, function, index);
- /*
- * Perfmon not yet supported for L2 guest.
- */
- if (is_guest_mode(vcpu) && function == 0xa)
- best = NULL;
-
if (best) {
*eax = best->eax;
*ebx = best->ebx;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0a472e9865c5..50ca8f409a7c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7754,8 +7754,6 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_TASK_SWITCH:
return true;
case EXIT_REASON_CPUID:
- if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
- return false;
return true;
case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
@@ -7840,6 +7838,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
case EXIT_REASON_PCOMMIT:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_PCOMMIT);
+ case EXIT_REASON_PML_FULL:
+ /* We don't expose PML support to L1. */
+ return false;
default:
return true;
}
@@ -9759,6 +9760,18 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
}
+ if (enable_pml) {
+ /*
+ * Conceptually we want to copy the PML address and index from
+ * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+ * since we always flush the log on each vmexit, this happens
+ * to be equivalent to simply resetting the fields in vmcs02.
+ */
+ ASSERT(vmx->pml_pg);
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ }
+
if (nested_cpu_has_ept(vmcs12)) {
kvm_mmu_unload(vcpu);
nested_ept_init_mmu_context(vcpu);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
index de734134bc8d..40c616495da7 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
@@ -17,7 +17,7 @@
#include <asm/intel-mid.h>
#include <asm/io_apic.h>
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
static struct platform_device wdt_dev = {
.name = "intel_mid_wdt",
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index d69c5c79f98e..319f2e4f4a8b 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -417,7 +417,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
bi->tuple_size = template->tuple_size;
bi->tag_size = template->tag_size;
- blk_integrity_revalidate(disk);
+ disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
}
EXPORT_SYMBOL(blk_integrity_register);
@@ -430,26 +430,11 @@ EXPORT_SYMBOL(blk_integrity_register);
*/
void blk_integrity_unregister(struct gendisk *disk)
{
- blk_integrity_revalidate(disk);
+ disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
}
EXPORT_SYMBOL(blk_integrity_unregister);
-void blk_integrity_revalidate(struct gendisk *disk)
-{
- struct blk_integrity *bi = &disk->queue->integrity;
-
- if (!(disk->flags & GENHD_FL_UP))
- return;
-
- if (bi->profile)
- disk->queue->backing_dev_info.capabilities |=
- BDI_CAP_STABLE_WRITES;
- else
- disk->queue->backing_dev_info.capabilities &=
- ~BDI_CAP_STABLE_WRITES;
-}
-
void blk_integrity_add(struct gendisk *disk)
{
if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 91327dbfbb1d..19cf33b91a5a 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -446,7 +446,6 @@ rescan:
if (disk->fops->revalidate_disk)
disk->fops->revalidate_disk(disk);
- blk_integrity_revalidate(disk);
check_disk_size_change(disk, bdev);
bdev->bd_invalidated = 0;
if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index f8112c356bc5..212ca2eee257 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -272,6 +272,9 @@ static void __fw_free_buf(struct kref *ref)
(unsigned int)buf->size);
list_del(&buf->list);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ list_del(&buf->pending_list);
+#endif
spin_unlock(&fwc->lock);
#ifdef CONFIG_FW_LOADER_USER_HELPER
diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h
index e67c6964ee65..c7b2b45eb19d 100644
--- a/drivers/bluetooth/btfm_slim.h
+++ b/drivers/bluetooth/btfm_slim.h
@@ -68,6 +68,7 @@ struct btfmslim {
uint32_t num_rx_port;
uint32_t num_tx_port;
+ uint32_t sample_rate;
struct btfmslim_ch *rx_chs;
struct btfmslim_ch *tx_chs;
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index 4dd8e6833ccf..05da1fb1f975 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -134,6 +134,9 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
+ /* save sample rate */
+ btfmslim->sample_rate = dai->rate;
+
switch (dai->id) {
case BTFM_FM_SLIM_TX:
grp = true; nchan = 2;
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index a451ff33103c..77e2973e023c 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -88,12 +88,12 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
if (rxport) {
- if (enable) {
- /* For SCO Rx, A2DP Rx */
+ if (enable && btfmslim->sample_rate == 48000) {
+ /* For A2DP Rx */
reg_val = 0x1;
port_bit = port_num - 0x10;
reg = CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_0(port_bit);
- BTFMSLIM_DBG("writing reg_val (%d) to reg(%x)",
+ BTFMSLIM_DBG("writing reg_val (%d) to reg(%x) for A2DP",
reg_val, reg);
ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
if (ret) {
@@ -120,18 +120,6 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
goto error;
}
- } else if (port_num == CHRK_SB_PGD_PORT_TX_SCO) {
- /* SCO Tx */
- reg_val = 0x1 << CHRK_SB_PGD_PORT_TX_SCO;
- reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
- BTFMSLIM_DBG("writing reg_val (%d) to reg(%x)",
- reg_val, reg);
- ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
- if (ret) {
- BTFMSLIM_ERR("failed to write (%d) reg 0x%x",
- ret, reg);
- goto error;
- }
}
/* Enable Tx port hw auto recovery for underrun or overrun error */
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 1b76f58809b3..14c833691194 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -2552,7 +2552,8 @@ static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
if (err)
goto bail;
}
- *info = (fl->sctx->smmu.enabled ? 1 : 0);
+ if (fl->sctx)
+ *info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
return err;
}
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 37f67c77fe7c..0082b30a66c4 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -78,7 +78,9 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
+ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_X86) += x86/
+endif
obj-$(CONFIG_ARCH_ZX) += zte/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
obj-$(CONFIG_H8300) += h8300/
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index ac6c47bd33ae..3caa460aa5ba 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -263,8 +263,6 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
local_irq_restore(flags);
- do_div(ktime, NSEC_PER_SEC);
-
profile_buf->queue_time = ktime;
profile_buf->submit_time = ktime;
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a66c7e80d2af..d3cb497411c4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -176,7 +176,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_ringbuffer *ring = gpu->rb[submit->ring];
- unsigned i, ibs = 0;
+ unsigned i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
@@ -191,18 +191,11 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
- ibs++;
+ OUT_PKT2(ring);
break;
}
}
- /* on a320, at least, we seem to need to pad things out to an
- * even number of qwords to avoid issue w/ CP hanging on wrap-
- * around:
- */
- if (ibs % 2)
- OUT_PKT2(ring);
-
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index d9fcec60693d..91501a2efd20 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -198,12 +198,12 @@ enum dsi_video_traffic_mode {
* @h_sync_width: HSYNC width in pixels.
* @h_front_porch: Horizontal front porch in pixels.
* @h_skew:
- * @h_sync_polarity: Polarity of HSYNC (false is active low).
+ * @h_sync_polarity: Polarity of HSYNC (false is active high).
* @v_active: Active height of one frame in lines.
* @v_back_porch: Vertical back porch in lines.
* @v_sync_width: VSYNC width in lines.
* @v_front_porch: Vertical front porch in lines.
- * @v_sync_polarity: Polarity of VSYNC (false is active low).
+ * @v_sync_polarity: Polarity of VSYNC (false is active high).
* @refresh_rate: Refresh rate in Hz.
*/
struct dsi_mode_info {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 995cda97a2f0..c34713a13332 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -56,6 +56,10 @@ static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
dsi_mode->flags |= DSI_MODE_FLAG_DFPS;
if (msm_needs_vblank_pre_modeset(drm_mode))
dsi_mode->flags |= DSI_MODE_FLAG_VBLANK_PRE_MODESET;
+ dsi_mode->timing.h_sync_polarity =
+ (drm_mode->flags & DRM_MODE_FLAG_PHSYNC) ? false : true;
+ dsi_mode->timing.v_sync_polarity =
+ (drm_mode->flags & DRM_MODE_FLAG_PVSYNC) ? false : true;
}
static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
@@ -87,6 +91,10 @@ static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS;
if (dsi_mode->flags & DSI_MODE_FLAG_VBLANK_PRE_MODESET)
drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
+ drm_mode->flags |= (dsi_mode->timing.h_sync_polarity) ?
+ DRM_MODE_FLAG_NHSYNC : DRM_MODE_FLAG_PHSYNC;
+ drm_mode->flags |= (dsi_mode->timing.v_sync_polarity) ?
+ DRM_MODE_FLAG_NVSYNC : DRM_MODE_FLAG_PVSYNC;
drm_mode_set_name(drm_mode);
}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index c377f3759e67..4c70472bd338 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -23,6 +23,7 @@
#include <linux/gpio.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/irqdomain.h>
#include "sde_kms.h"
#include "sde_connector.h"
@@ -967,6 +968,18 @@ static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
}
}
+static void _sde_hdmi_cec_irq(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ u32 cec_intr = hdmi_read(hdmi, REG_HDMI_CEC_INT);
+
+ /* Routing interrupt to external CEC drivers */
+ if (cec_intr)
+ generic_handle_irq(irq_find_mapping(
+ sde_hdmi->irq_domain, 1));
+}
+
+
static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id)
{
struct sde_hdmi *sde_hdmi = dev_id;
@@ -987,7 +1000,8 @@ static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id)
if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl);
- /* TODO audio.. */
+ /* Process CEC: */
+ _sde_hdmi_cec_irq(sde_hdmi);
return IRQ_HANDLED;
}
@@ -2017,6 +2031,29 @@ static struct platform_driver sde_hdmi_driver = {
},
};
+static int sde_hdmi_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct sde_hdmi *display;
+ int rc;
+
+ if (!domain || !domain->host_data) {
+ pr_err("invalid parameters domain\n");
+ return -EINVAL;
+ }
+ display = domain->host_data;
+
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
+ rc = irq_set_chip_data(irq, display);
+
+ return rc;
+}
+
+static const struct irq_domain_ops sde_hdmi_irqdomain_ops = {
+ .map = sde_hdmi_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
{
int rc = 0;
@@ -2071,6 +2108,13 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
goto error;
}
+ display->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 8,
+ &sde_hdmi_irqdomain_ops, display);
+ if (!display->irq_domain) {
+ SDE_ERROR("failed to create IRQ domain\n");
+ goto error;
+ }
+
enc->bridge = hdmi->bridge;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
@@ -2096,6 +2140,9 @@ int sde_hdmi_drm_deinit(struct sde_hdmi *display)
return -EINVAL;
}
+ if (display->irq_domain)
+ irq_domain_remove(display->irq_domain);
+
return rc;
}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index ffa9a27e7dfe..54506da4f9b0 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -86,6 +86,7 @@ struct sde_hdmi_ctrl {
* @hpd_work: HPD work structure.
* @codec_ready: If audio codec is ready.
* @client_notify_pending: If there is client notification pending.
+ * @irq_domain: IRQ domain structure.
* @root: Debug fs root entry.
*/
struct sde_hdmi {
@@ -114,6 +115,8 @@ struct sde_hdmi {
bool codec_ready;
bool client_notify_pending;
+ struct irq_domain *irq_domain;
+
/* DEBUG FS */
struct dentry *root;
};
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index d8791155236c..fa746d71cd3b 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -429,11 +429,21 @@ static void complete_commit(struct msm_commit *commit)
commit_destroy(commit);
}
+static int msm_atomic_commit_dispatch(struct drm_device *dev,
+ struct drm_atomic_state *state, struct msm_commit *commit);
+
static void fence_cb(struct msm_fence_cb *cb)
{
struct msm_commit *commit =
container_of(cb, struct msm_commit, fence_cb);
- complete_commit(commit);
+ int ret = -EINVAL;
+
+ ret = msm_atomic_commit_dispatch(commit->dev, commit->state, commit);
+ if (ret) {
+ DRM_ERROR("%s: atomic commit failed\n", __func__);
+ drm_atomic_state_free(commit->state);
+ commit_destroy(commit);
+ }
}
static void _msm_drm_commit_work_cb(struct kthread_work *work)
@@ -624,13 +634,7 @@ int msm_atomic_commit(struct drm_device *dev,
*/
if (async) {
- ret = msm_atomic_commit_dispatch(dev, state, commit);
- if (ret) {
- DRM_ERROR("%s: atomic commit failed\n", __func__);
- drm_atomic_state_free(state);
- commit_destroy(commit);
- goto error;
- }
+ msm_queue_fence_cb(dev, &commit->fence_cb, commit->fence);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4674c5423cbd..4bee797da746 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -778,6 +778,13 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
size = PAGE_ALIGN(size);
+ /*
+ * Disallow zero sized objects as they make the underlying
+ * infrastructure grumpy
+ */
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
ret = msm_gem_new_impl(dev, size, flags, &obj);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index d5204221d902..41b4b5a4fd66 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -101,8 +101,6 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
return fence;
}
-#define MAX_CMDS 4
-
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
* make it easier to unwind when things go wrong, etc). This only
@@ -127,7 +125,7 @@ struct msm_gem_submit {
uint32_t size; /* in dwords */
uint64_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
- } cmd[MAX_CMDS];
+ } *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
struct msm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index ea7b4441fe99..12cc28acec18 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,10 +34,11 @@ static inline void __user *to_user_ptr(u64 address)
}
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gem_address_space *aspace, int nr)
+ struct msm_gem_address_space *aspace, int nr_bos, int nr_cmds)
{
struct msm_gem_submit *submit;
- int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
+ int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
+ (nr_cmds * sizeof(*submit->cmd));
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (submit) {
@@ -50,6 +51,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->profile_buf_vaddr = NULL;
submit->profile_buf_iova = 0;
+ submit->cmd = (void *)&submit->bos[nr_bos];
+
submit->secure = false;
INIT_LIST_HEAD(&submit->bo_list);
@@ -393,12 +396,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!gpu)
return -ENXIO;
- if (args->nr_cmds > MAX_CMDS)
- return -EINVAL;
-
mutex_lock(&dev->struct_mutex);
- submit = submit_create(dev, ctx->aspace, args->nr_bos);
+ submit = submit_create(dev, ctx->aspace, args->nr_bos, args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index e61ff97d2ca4..0b6ee302e231 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -73,12 +73,6 @@ static void drm_mode_to_intf_timing_params(
timing->underflow_clr = 0xff;
timing->hsync_skew = mode->hskew;
- /* DSI controller cannot handle active-low sync signals. */
- if (vid_enc->hw_intf->cap->type == INTF_DSI) {
- timing->hsync_polarity = 0;
- timing->vsync_polarity = 0;
- }
-
/*
* For edp only:
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index 3b34719e9971..042b0ee7909a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -157,13 +157,8 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
display_hctl = (hsync_end_x << 16) | hsync_start_x;
den_polarity = 0;
- if (ctx->cap->type == INTF_HDMI) {
- hsync_polarity = p->yres >= 720 ? 0 : 1;
- vsync_polarity = p->yres >= 720 ? 0 : 1;
- } else {
- hsync_polarity = 0;
- vsync_polarity = 0;
- }
+ hsync_polarity = p->hsync_polarity;
+ vsync_polarity = p->vsync_polarity;
polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
(vsync_polarity << 1) | /* VSYNC Polarity */
(hsync_polarity << 0); /* HSYNC Polarity */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index 1a5d469e6e7e..d6d2e41ff5aa 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -42,6 +42,10 @@ static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
if (!mdp || !cfg)
return;
+ /* The SPLIT registers are only for DSI interfaces */
+ if ((cfg->intf != INTF_1) && (cfg->intf != INTF_2))
+ return;
+
if (cfg->en) {
if (cfg->mode == INTF_MODE_CMD) {
lower_pipe = FLD_SPLIT_DISPLAY_CMD;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8fb7213277cc..b75391495778 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -66,8 +66,11 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_unlock;
+ ttm_bo_reference(bo);
up_read(&vma->vm_mm->mmap_sem);
(void) ttm_bo_wait(bo, false, true, false);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
goto out_unlock;
}
@@ -114,8 +117,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ ttm_bo_reference(bo);
up_read(&vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
+ ttm_bo_unref(&bo);
}
return VM_FAULT_RETRY;
@@ -160,6 +165,13 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
if (unlikely(ret != 0)) {
retval = ret;
+
+ if (retval == VM_FAULT_RETRY &&
+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* The BO has already been unreserved. */
+ return retval;
+ }
+
goto out_unlock;
}
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 0715022be6e3..dcc6651710fe 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -59,7 +59,7 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
{ adreno_is_a530, a530_vbif },
{ adreno_is_a512, a540_vbif },
{ adreno_is_a510, a530_vbif },
- { adreno_is_a508, a530_vbif },
+ { adreno_is_a508, a540_vbif },
{ adreno_is_a505, a530_vbif },
{ adreno_is_a506, a530_vbif },
};
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 54b8da5302e8..55f906c9cb90 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2090,7 +2090,12 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
/* Turn off all the timers */
del_timer_sync(&dispatcher->timer);
del_timer_sync(&dispatcher->fault_timer);
- del_timer_sync(&adreno_dev->preempt.timer);
+ /*
+	 * Deleting an uninitialized timer will block forever on a kernel
+	 * debug-disabled build, so skip deleting the timer if it is not
+	 * initialized.
+ */
+ if (adreno_is_preemption_enabled(adreno_dev))
+ del_timer_sync(&adreno_dev->preempt.timer);
mutex_lock(&device->mutex);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 50f55abd6db8..b2def8dea954 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1238,7 +1238,8 @@ kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr)
spin_lock(&private->mem_lock);
idr_for_each_entry(&private->mem_idr, entry, id) {
if (GPUADDR_IN_MEMDESC(gpuaddr, &entry->memdesc)) {
- ret = kgsl_mem_entry_get(entry);
+ if (!entry->pending_free)
+ ret = kgsl_mem_entry_get(entry);
break;
}
}
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index af9fc1c15236..57d99c451952 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1547,6 +1547,8 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
ret = PTR_ERR(mmu->defaultpagetable);
mmu->defaultpagetable = NULL;
return ret;
+ } else if (mmu->defaultpagetable == NULL) {
+ return -ENOMEM;
}
}
diff --git a/drivers/hwtracing/coresight/coresight-remote-etm.c b/drivers/hwtracing/coresight/coresight-remote-etm.c
index 9015015381d5..30b13282f6c0 100644
--- a/drivers/hwtracing/coresight/coresight-remote-etm.c
+++ b/drivers/hwtracing/coresight/coresight-remote-etm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -186,10 +186,12 @@ static void remote_etm_rcv_msg(struct work_struct *work)
struct remote_etm_drvdata *drvdata = container_of(work,
struct remote_etm_drvdata,
work_rcv_msg);
-
+ mutex_lock(&drvdata->mutex);
if (qmi_recv_msg(drvdata->handle) < 0)
dev_err(drvdata->dev, "%s: Error receiving QMI message\n",
__func__);
+
+ mutex_unlock(&drvdata->mutex);
}
static void remote_etm_notify(struct qmi_handle *handle,
@@ -227,6 +229,7 @@ static void remote_etm_svc_arrive(struct work_struct *work)
return;
}
+ mutex_lock(&drvdata->mutex);
if (qmi_connect_to_service(drvdata->handle, CORESIGHT_QMI_SVC_ID,
CORESIGHT_QMI_VERSION,
drvdata->inst_id) < 0) {
@@ -236,7 +239,6 @@ static void remote_etm_svc_arrive(struct work_struct *work)
drvdata->handle = NULL;
}
- mutex_lock(&drvdata->mutex);
if (drvdata->inst_id < sizeof(int)*BITS_PER_BYTE
&& (boot_enable & BIT(drvdata->inst_id))) {
if (!drvdata->enable)
@@ -251,9 +253,10 @@ static void remote_etm_svc_exit(struct work_struct *work)
struct remote_etm_drvdata *drvdata = container_of(work,
struct remote_etm_drvdata,
work_svc_exit);
-
+ mutex_lock(&drvdata->mutex);
qmi_handle_destroy(drvdata->handle);
drvdata->handle = NULL;
+ mutex_unlock(&drvdata->mutex);
}
static int remote_etm_svc_event_notify(struct notifier_block *this,
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index c5998bd5ce02..316d8b783d94 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1018,6 +1018,7 @@ static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
unsigned long flags;
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading)
goto out;
@@ -1054,7 +1055,7 @@ out:
}
pm_runtime_put(drvdata->dev);
-
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC disabled\n");
}
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
index 206941708141..daa35845fc0a 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
* Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -3774,6 +3774,12 @@ static int synaptics_rmi4_probe(struct platform_device *pdev)
rmi4_data->irq = gpio_to_irq(bdata->irq_gpio);
+ if (!exp_data.initialized) {
+ mutex_init(&exp_data.mutex);
+ INIT_LIST_HEAD(&exp_data.list);
+ exp_data.initialized = true;
+ }
+
retval = synaptics_rmi4_irq_enable(rmi4_data, true);
if (retval < 0) {
dev_err(&pdev->dev,
@@ -3782,12 +3788,6 @@ static int synaptics_rmi4_probe(struct platform_device *pdev)
goto err_enable_irq;
}
- if (!exp_data.initialized) {
- mutex_init(&exp_data.mutex);
- INIT_LIST_HEAD(&exp_data.list);
- exp_data.initialized = true;
- }
-
exp_data.workqueue = create_singlethread_workqueue("dsx_exp_workqueue");
INIT_DELAYED_WORK(&exp_data.work, synaptics_rmi4_exp_fn_work);
exp_data.rmi4_data = rmi4_data;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 4ff2ee2609ca..2e0f61a2dc3f 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -289,6 +289,9 @@ static int gic_irq_get_irqchip_state(struct irq_data *d,
}
static void gic_disable_irq(struct irq_data *d)
{
+ /* don't lazy-disable PPIs */
+ if (gic_irq(d) < 32)
+ gic_mask_irq(d);
if (gic_arch_extn.irq_disable)
gic_arch_extn.irq_disable(d);
}
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
index feca07be85f5..1eb9fb33db38 100644
--- a/drivers/leds/leds-ktd2692.c
+++ b/drivers/leds/leds-ktd2692.c
@@ -296,15 +296,15 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
return -ENXIO;
led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
- if (IS_ERR(led->ctrl_gpio)) {
- ret = PTR_ERR(led->ctrl_gpio);
+ ret = PTR_ERR_OR_ZERO(led->ctrl_gpio);
+ if (ret) {
dev_err(dev, "cannot get ctrl-gpios %d\n", ret);
return ret;
}
led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS);
- if (IS_ERR(led->aux_gpio)) {
- ret = PTR_ERR(led->aux_gpio);
+ ret = PTR_ERR_OR_ZERO(led->aux_gpio);
+ if (ret) {
dev_err(dev, "cannot get aux-gpios %d\n", ret);
return ret;
}
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 1a6cad946a25..a89a92d253ac 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -63,11 +63,13 @@
#define FLASH_LED_REG_MITIGATION_SEL(base) (base + 0x6E)
#define FLASH_LED_REG_MITIGATION_SW(base) (base + 0x6F)
#define FLASH_LED_REG_LMH_LEVEL(base) (base + 0x70)
+#define FLASH_LED_REG_MULTI_STROBE_CTRL(base) (base + 0x71)
+#define FLASH_LED_REG_LPG_INPUT_CTRL(base) (base + 0x72)
#define FLASH_LED_REG_CURRENT_DERATE_EN(base) (base + 0x76)
#define FLASH_LED_HDRM_VOL_MASK GENMASK(7, 4)
#define FLASH_LED_CURRENT_MASK GENMASK(6, 0)
-#define FLASH_LED_ENABLE_MASK GENMASK(2, 0)
+#define FLASH_LED_STROBE_MASK GENMASK(1, 0)
#define FLASH_HW_STROBE_MASK GENMASK(2, 0)
#define FLASH_LED_ISC_WARMUP_DELAY_MASK GENMASK(1, 0)
#define FLASH_LED_CURRENT_DERATE_EN_MASK GENMASK(2, 0)
@@ -91,6 +93,9 @@
#define THERMAL_DERATE_SLOW_SHIFT 4
#define THERMAL_DERATE_SLOW_MASK GENMASK(6, 4)
#define THERMAL_DERATE_FAST_MASK GENMASK(2, 0)
+#define LED1N2_FLASH_ONCE_ONLY_BIT BIT(0)
+#define LED3_FLASH_ONCE_ONLY_BIT BIT(1)
+#define LPG_INPUT_SEL_BIT BIT(0)
#define VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us) (val_us / 8)
#define VPH_DROOP_HYST_MV_TO_VAL(val_mv) (val_mv / 25)
@@ -174,6 +179,12 @@ enum {
LED3,
};
+enum strobe_type {
+ SW_STROBE = 0,
+ HW_STROBE,
+ LPG_STROBE,
+};
+
/*
* Configurations for each individual LED
*/
@@ -194,7 +205,8 @@ struct flash_node_data {
u8 ires;
u8 hdrm_val;
u8 current_reg_val;
- u8 trigger;
+ u8 strobe_ctrl;
+ u8 strobe_sel;
bool led_on;
};
@@ -232,6 +244,7 @@ struct flash_led_platform_data {
int thermal_thrsh1;
int thermal_thrsh2;
int thermal_thrsh3;
+ int hw_strobe_option;
u32 led1n2_iclamp_low_ma;
u32 led1n2_iclamp_mid_ma;
u32 led3_iclamp_low_ma;
@@ -246,7 +259,6 @@ struct flash_led_platform_data {
u8 chgr_mitigation_sel;
u8 lmh_level;
u8 iled_thrsh_val;
- u8 hw_strobe_option;
bool hdrm_auto_mode_en;
bool thermal_derate_en;
bool otst_ramp_bkup_en;
@@ -557,6 +569,28 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
return rc;
}
+ if (led->pdata->hw_strobe_option > 0) {
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_STROBE_CFG(led->base),
+ FLASH_LED_STROBE_MASK,
+ led->pdata->hw_strobe_option);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (led->fnode[LED3].strobe_sel == LPG_STROBE) {
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_MULTI_STROBE_CTRL(led->base),
+ LED3_FLASH_ONCE_ONLY_BIT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_LPG_INPUT_CTRL(led->base),
+ LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT);
+ if (rc < 0)
+ return rc;
+ }
return 0;
}
@@ -980,7 +1014,7 @@ static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode)
led->fnode[i].led_on = false;
- if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
+ if (led->fnode[i].strobe_sel == HW_STROBE) {
rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
led->pdata->hw_strobe_option, false);
if (rc < 0) {
@@ -1034,13 +1068,6 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
if (rc < 0)
return rc;
- rc = qpnp_flash_led_masked_write(led,
- FLASH_LED_REG_STROBE_CFG(led->base),
- FLASH_LED_ENABLE_MASK,
- led->pdata->hw_strobe_option);
- if (rc < 0)
- return rc;
-
val = 0;
for (i = 0; i < led->num_fnodes; i++) {
if (!led->fnode[i].led_on ||
@@ -1048,13 +1075,13 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
continue;
addr_offset = led->fnode[i].id;
- if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT)
- mask = FLASH_HW_STROBE_MASK;
- else
+ if (led->fnode[i].strobe_sel == SW_STROBE)
mask = FLASH_LED_HW_SW_STROBE_SEL_BIT;
+ else
+ mask = FLASH_HW_STROBE_MASK;
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_STROBE_CTRL(led->base + addr_offset),
- mask, led->fnode[i].trigger);
+ mask, led->fnode[i].strobe_ctrl);
if (rc < 0)
return rc;
@@ -1072,7 +1099,7 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
val |= FLASH_LED_ENABLE << led->fnode[i].id;
- if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
+ if (led->fnode[i].strobe_sel == HW_STROBE) {
rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
led->pdata->hw_strobe_option, true);
if (rc < 0) {
@@ -1364,7 +1391,7 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
const char *temp_string;
int rc, min_ma;
u32 val;
- bool strobe_sel = 0, edge_trigger = 0, active_high = 0;
+ bool hw_strobe = 0, edge_trigger = 0, active_high = 0;
fnode->pdev = led->pdev;
fnode->cdev.brightness_set = qpnp_flash_led_brightness_set;
@@ -1483,14 +1510,52 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
return rc;
}
- strobe_sel = of_property_read_bool(node, "qcom,hw-strobe-sel");
- if (strobe_sel) {
+ fnode->strobe_sel = SW_STROBE;
+ rc = of_property_read_u32(node, "qcom,strobe-sel", &val);
+ if (rc < 0) {
+ if (rc != -EINVAL) {
+ pr_err("Unable to read qcom,strobe-sel property\n");
+ return rc;
+ }
+ } else {
+ if (val < SW_STROBE || val > LPG_STROBE) {
+ pr_err("Incorrect strobe selection specified %d\n",
+ val);
+ return -EINVAL;
+ }
+ fnode->strobe_sel = (u8)val;
+ }
+
+ /*
+	 * LPG strobe is allowed only for LED3, and the HW strobe option must
+	 * be option 2 or 3.
+ */
+ if (fnode->strobe_sel == LPG_STROBE) {
+ if (led->pdata->hw_strobe_option ==
+ FLASH_LED_HW_STROBE_OPTION_1) {
+ pr_err("Incorrect strobe option for LPG strobe\n");
+ return -EINVAL;
+ }
+ if (fnode->id != LED3) {
+ pr_err("Incorrect LED chosen for LPG strobe\n");
+ return -EINVAL;
+ }
+ }
+
+ if (fnode->strobe_sel == HW_STROBE) {
edge_trigger = of_property_read_bool(node,
"qcom,hw-strobe-edge-trigger");
active_high = !of_property_read_bool(node,
"qcom,hw-strobe-active-low");
+ hw_strobe = 1;
+ } else if (fnode->strobe_sel == LPG_STROBE) {
+ /* LPG strobe requires level trigger and active high */
+ edge_trigger = 0;
+ active_high = 1;
+ hw_strobe = 1;
}
- fnode->trigger = (strobe_sel << 2) | (edge_trigger << 1) | active_high;
+ fnode->strobe_ctrl = (hw_strobe << 2) | (edge_trigger << 1) |
+ active_high;
rc = led_classdev_register(&led->pdev->dev, &fnode->cdev);
if (rc < 0) {
@@ -1506,7 +1571,7 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
fnode->strobe_pinctrl = NULL;
}
- if (fnode->trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
+ if (fnode->strobe_sel == HW_STROBE) {
if (of_find_property(node, "qcom,hw-strobe-gpio", NULL)) {
fnode->hw_strobe_gpio = of_get_named_gpio(node,
"qcom,hw-strobe-gpio", 0);
@@ -1886,9 +1951,10 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
led->pdata->vph_droop_hysteresis <<= FLASH_LED_VPH_DROOP_HYST_SHIFT;
+ led->pdata->hw_strobe_option = -EINVAL;
rc = of_property_read_u32(node, "qcom,hw-strobe-option", &val);
if (!rc) {
- led->pdata->hw_strobe_option = (u8)val;
+ led->pdata->hw_strobe_option = val;
} else if (rc != -EINVAL) {
pr_err("Unable to parse hw strobe option, rc=%d\n", rc);
return rc;
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index 817dfa3b2f53..ab0e4f99ebb9 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -1213,7 +1213,7 @@ regulator_turn_off:
static int qpnp_flash_set(struct qpnp_led_data *led)
{
- int rc, error;
+ int rc = 0, error;
int val = led->cdev.brightness;
if (led->flash_cfg->torch_enable)
@@ -1251,7 +1251,8 @@ static int qpnp_flash_set(struct qpnp_led_data *led)
}
}
- qpnp_led_masked_write(led, FLASH_MAX_CURR(led->base),
+ rc = qpnp_led_masked_write(led,
+ FLASH_MAX_CURR(led->base),
FLASH_CURRENT_MASK,
TORCH_MAX_LEVEL);
if (rc) {
@@ -1261,7 +1262,7 @@ static int qpnp_flash_set(struct qpnp_led_data *led)
goto error_reg_write;
}
- qpnp_led_masked_write(led,
+ rc = qpnp_led_masked_write(led,
FLASH_LED_TMR_CTRL(led->base),
FLASH_TMR_MASK,
FLASH_TMR_WATCHDOG);
@@ -1293,7 +1294,7 @@ static int qpnp_flash_set(struct qpnp_led_data *led)
goto error_reg_write;
}
- qpnp_led_masked_write(led,
+ rc = qpnp_led_masked_write(led,
FLASH_WATCHDOG_TMR(led->base),
FLASH_WATCHDOG_MASK,
led->flash_cfg->duration);
@@ -1341,7 +1342,7 @@ static int qpnp_flash_set(struct qpnp_led_data *led)
goto error_flash_set;
}
- qpnp_led_masked_write(led,
+ rc = qpnp_led_masked_write(led,
FLASH_LED_TMR_CTRL(led->base),
FLASH_TMR_MASK,
FLASH_TMR_SAFETY);
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 3ef3d6c6bbf8..0b219a81e8a2 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -80,6 +80,25 @@ config MEDIA_RC_SUPPORT
Say Y when you have a TV or an IR device.
+config MEDIA_CEC_SUPPORT
+ bool "HDMI CEC support"
+ select MEDIA_CEC_EDID
+ ---help---
+ Enable support for HDMI CEC (Consumer Electronics Control),
+ which is an optional HDMI feature.
+
+ Say Y when you have an HDMI receiver, transmitter or a USB CEC
+ adapter that supports HDMI CEC.
+
+config MEDIA_CEC_DEBUG
+ bool "HDMI CEC debugfs interface"
+ depends on MEDIA_CEC_SUPPORT && DEBUG_FS
+ ---help---
+ Turns on the DebugFS interface for CEC devices.
+
+config MEDIA_CEC_EDID
+ bool
+
#
# Media controller
# Selectable only for webcam/grabbers, as other drivers don't use it
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index e608bbce0c35..ba516dcbc6aa 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -2,6 +2,14 @@
# Makefile for the kernel multimedia device drivers.
#
+ifeq ($(CONFIG_MEDIA_CEC_EDID),y)
+ obj-$(CONFIG_MEDIA_SUPPORT) += cec-edid.o
+endif
+
+ifeq ($(CONFIG_MEDIA_CEC_SUPPORT),y)
+ obj-$(CONFIG_MEDIA_SUPPORT) += cec/
+endif
+
media-objs := media-device.o media-devnode.o media-entity.o
#
diff --git a/drivers/media/cec-edid.c b/drivers/media/cec-edid.c
new file mode 100644
index 000000000000..5719b991e340
--- /dev/null
+++ b/drivers/media/cec-edid.c
@@ -0,0 +1,171 @@
+/*
+ * cec-edid - HDMI Consumer Electronics Control EDID & CEC helper functions
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <media/cec-edid.h>
+
+/*
+ * This EDID is expected to be CEA-861 compliant, which means that there are
+ * at least two blocks and one or more of the extension blocks are CEA-861
+ * blocks.
+ *
+ * The returned location is guaranteed to be < size - 1.
+ */
+static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
+{
+ unsigned int blocks = size / 128;
+ unsigned int block;
+ u8 d;
+
+ /* Sanity check: at least 2 blocks and a multiple of the block size */
+ if (blocks < 2 || size % 128)
+ return 0;
+
+ /*
+ * If the EDID declares fewer extension blocks than 'size' would provide,
+ * then limit 'blocks' accordingly. The EDID is allowed to declare more
+ * blocks than 'size' covers, since some hardware can only read e.g. 256
+ * bytes of the EDID, even though more blocks are present. The first
+ * CEA-861 extension block should normally be in block 1 anyway.
+ */
+ if (edid[0x7e] + 1 < blocks)
+ blocks = edid[0x7e] + 1;
+
+ for (block = 1; block < blocks; block++) {
+ unsigned int offset = block * 128;
+
+ /* Skip any non-CEA-861 extension blocks */
+ if (edid[offset] != 0x02 || edid[offset + 1] != 0x03)
+ continue;
+
+ /* search Vendor Specific Data Block (tag 3) */
+ d = edid[offset + 2] & 0x7f;
+ /* Check if there are Data Blocks */
+ if (d <= 4)
+ continue;
+ if (d > 4) {
+ unsigned int i = offset + 4;
+ unsigned int end = offset + d;
+
+ /* Note: 'end' is always < 'size' */
+ do {
+ u8 tag = edid[i] >> 5;
+ u8 len = edid[i] & 0x1f;
+
+ if (tag == 3 && len >= 5 && i + len <= end &&
+ edid[i + 1] == 0x03 &&
+ edid[i + 2] == 0x0c &&
+ edid[i + 3] == 0x00)
+ return i + 4;
+ i += len + 1;
+ } while (i < end);
+ }
+ }
+ return 0;
+}
+
+u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+
+ if (offset)
+ *offset = loc;
+ if (loc == 0)
+ return CEC_PHYS_ADDR_INVALID;
+ return (edid[loc] << 8) | edid[loc + 1];
+}
+EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
+
+void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+ u8 sum = 0;
+ unsigned int i;
+
+ if (loc == 0)
+ return;
+ edid[loc] = phys_addr >> 8;
+ edid[loc + 1] = phys_addr & 0xff;
+ loc &= ~0x7f;
+
+ /* update the checksum */
+ for (i = loc; i < loc + 127; i++)
+ sum += edid[i];
+ edid[i] = 256 - sum;
+}
+EXPORT_SYMBOL_GPL(cec_set_edid_phys_addr);
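A minimal illustrative sketch (not part of this patch; it assumes the EDID copy
actually contains a CEA-861 source physical address): a driver handing out an
EDID on one of its inputs could patch the SPA and verify it with the helpers above.

static void example_patch_edid(u8 *edid, unsigned int size)
{
	/* Advertise 1.2.0.0 to the device connected to this input */
	cec_set_edid_phys_addr(edid, size, 0x1200);
	WARN_ON(cec_get_edid_phys_addr(edid, size, NULL) != 0x1200);
}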
+
+u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+ /* Check if input is sane */
+ if (WARN_ON(input == 0 || input > 0xf))
+ return CEC_PHYS_ADDR_INVALID;
+
+ if (phys_addr == 0)
+ return input << 12;
+
+ if ((phys_addr & 0x0fff) == 0)
+ return phys_addr | (input << 8);
+
+ if ((phys_addr & 0x00ff) == 0)
+ return phys_addr | (input << 4);
+
+ if ((phys_addr & 0x000f) == 0)
+ return phys_addr | input;
+
+ /*
+ * All nibbles are used so no valid physical addresses can be assigned
+ * to the input.
+ */
+ return CEC_PHYS_ADDR_INVALID;
+}
+EXPORT_SYMBOL_GPL(cec_phys_addr_for_input);
+
+int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+ int i;
+
+ if (parent)
+ *parent = phys_addr;
+ if (port)
+ *port = 0;
+ if (phys_addr == CEC_PHYS_ADDR_INVALID)
+ return 0;
+ for (i = 0; i < 16; i += 4)
+ if (phys_addr & (0xf << i))
+ break;
+ if (i == 16)
+ return 0;
+ if (parent)
+ *parent = phys_addr & (0xfff0 << i);
+ if (port)
+ *port = (phys_addr >> i) & 0xf;
+ for (i += 4; i < 16; i += 4)
+ if ((phys_addr & (0xf << i)) == 0)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cec_phys_addr_validate);
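A worked example may help here (illustrative only): a device at physical
address 1.0.0.0 that assigns an address to its second input gets 1.2.0.0
back, and validating that address recovers the parent and port again.

static void example_phys_addr(void)
{
	u16 parent, port;
	u16 pa = cec_phys_addr_for_input(0x1000, 2);	/* 0x1200 == 1.2.0.0 */

	WARN_ON(cec_phys_addr_validate(pa, &parent, &port));
	WARN_ON(parent != 0x1000 || port != 2);
}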
+
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_DESCRIPTION("CEC EDID helper functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
new file mode 100644
index 000000000000..d6686337275f
--- /dev/null
+++ b/drivers/media/cec/Makefile
@@ -0,0 +1,5 @@
+cec-objs := cec-core.o cec-adap.o cec-api.o
+
+ifeq ($(CONFIG_MEDIA_CEC_SUPPORT),y)
+ obj-$(CONFIG_MEDIA_SUPPORT) += cec.o
+endif
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
new file mode 100644
index 000000000000..ccda41c2c9e4
--- /dev/null
+++ b/drivers/media/cec/cec-adap.c
@@ -0,0 +1,1880 @@
+/*
+ * cec-adap.c - HDMI Consumer Electronics Control framework - CEC adapter
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "cec-priv.h"
+
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+ struct cec_msg *msg,
+ unsigned int la_idx);
+
+/*
+ * 400 ms is the time it takes for one 16 byte message to be
+ * transferred and 5 is the maximum number of retries. Add
+ * another 100 ms as a margin. So if the transmit doesn't
+ * finish before that time something is really wrong and we
+ * have to time out.
+ *
+ * This is a sign that something is really wrong, so a warning
+ * will be issued.
+ */
+#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100)
+
+#define call_op(adap, op, arg...) \
+ (adap->ops->op ? adap->ops->op(adap, ## arg) : 0)
+
+#define call_void_op(adap, op, arg...) \
+ do { \
+ if (adap->ops->op) \
+ adap->ops->op(adap, ## arg); \
+ } while (0)
+
+static int cec_log_addr2idx(const struct cec_adapter *adap, u8 log_addr)
+{
+ int i;
+
+ for (i = 0; i < adap->log_addrs.num_log_addrs; i++)
+ if (adap->log_addrs.log_addr[i] == log_addr)
+ return i;
+ return -1;
+}
+
+static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr)
+{
+ int i = cec_log_addr2idx(adap, log_addr);
+
+ return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
+}
+
+/*
+ * Queue a new event for this filehandle. If ts == 0, then set it
+ * to the current time.
+ *
+ * The two events that are currently defined do not need to keep track
+ * of intermediate events, so no actual queue of events is needed,
+ * instead just store the latest state and the total number of lost
+ * messages.
+ *
+ * Should new events be added in the future that require intermediate
+ * results to be queued as well, then a proper queue data structure is
+ * required. But until then, just keep it simple.
+ */
+void cec_queue_event_fh(struct cec_fh *fh,
+ const struct cec_event *new_ev, u64 ts)
+{
+ struct cec_event *ev = &fh->events[new_ev->event - 1];
+
+ if (ts == 0)
+ ts = ktime_get_ns();
+
+ mutex_lock(&fh->lock);
+ if (new_ev->event == CEC_EVENT_LOST_MSGS &&
+ fh->pending_events & (1 << new_ev->event)) {
+ /*
+ * If there is already a lost_msgs event, then just
+ * update the lost_msgs count. This effectively
+ * merges the old and new events into one.
+ */
+ ev->lost_msgs.lost_msgs += new_ev->lost_msgs.lost_msgs;
+ goto unlock;
+ }
+
+ /*
+ * Intermediate states are not interesting, so just
+ * overwrite any older event.
+ */
+ *ev = *new_ev;
+ ev->ts = ts;
+ fh->pending_events |= 1 << new_ev->event;
+
+unlock:
+ mutex_unlock(&fh->lock);
+ wake_up_interruptible(&fh->wait);
+}
+
+/* Queue a new event for all open filehandles. */
+static void cec_queue_event(struct cec_adapter *adap,
+ const struct cec_event *ev)
+{
+ u64 ts = ktime_get_ns();
+ struct cec_fh *fh;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list)
+ cec_queue_event_fh(fh, ev, ts);
+ mutex_unlock(&adap->devnode.lock);
+}
+
+/*
+ * Queue a new message for this filehandle. If there is no more room
+ * in the queue, then send the LOST_MSGS event instead.
+ */
+static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg)
+{
+ static const struct cec_event ev_lost_msg = {
+ .ts = 0,
+ .event = CEC_EVENT_LOST_MSGS,
+ .flags = 0,
+ {
+ .lost_msgs.lost_msgs = 1,
+ },
+ };
+ struct cec_msg_entry *entry;
+
+ mutex_lock(&fh->lock);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto lost_msgs;
+
+ entry->msg = *msg;
+ /* Add new msg at the end of the queue */
+ list_add_tail(&entry->list, &fh->msgs);
+
+ /*
+	 * If the queue already holds CEC_MAX_MSG_RX_QUEUE_SZ messages,
+	 * then drop this new message and send a lost message event instead.
+ */
+ if (fh->queued_msgs == CEC_MAX_MSG_RX_QUEUE_SZ) {
+ list_del(&entry->list);
+ goto lost_msgs;
+ }
+ fh->queued_msgs++;
+ mutex_unlock(&fh->lock);
+ wake_up_interruptible(&fh->wait);
+ return;
+
+lost_msgs:
+ mutex_unlock(&fh->lock);
+ cec_queue_event_fh(fh, &ev_lost_msg, 0);
+}
+
+/*
+ * Queue the message for those filehandles that are in monitor mode.
+ * If valid_la is true (this message is for us or was sent by us),
+ * then pass it on to any monitoring filehandle. If this message
+ * isn't for us or from us, then only give it to filehandles that
+ * are in MONITOR_ALL mode.
+ *
+ * This can only happen if the CEC_CAP_MONITOR_ALL capability is
+ * set and the CEC adapter was placed in 'monitor all' mode.
+ */
+static void cec_queue_msg_monitor(struct cec_adapter *adap,
+ const struct cec_msg *msg,
+ bool valid_la)
+{
+ struct cec_fh *fh;
+ u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
+ CEC_MODE_MONITOR_ALL;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list) {
+ if (fh->mode_follower >= monitor_mode)
+ cec_queue_msg_fh(fh, msg);
+ }
+ mutex_unlock(&adap->devnode.lock);
+}
+
+/*
+ * Queue the message for follower filehandles.
+ */
+static void cec_queue_msg_followers(struct cec_adapter *adap,
+ const struct cec_msg *msg)
+{
+ struct cec_fh *fh;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list) {
+ if (fh->mode_follower == CEC_MODE_FOLLOWER)
+ cec_queue_msg_fh(fh, msg);
+ }
+ mutex_unlock(&adap->devnode.lock);
+}
+
+/* Notify userspace of an adapter state change. */
+static void cec_post_state_event(struct cec_adapter *adap)
+{
+ struct cec_event ev = {
+ .event = CEC_EVENT_STATE_CHANGE,
+ };
+
+ ev.state_change.phys_addr = adap->phys_addr;
+ ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ cec_queue_event(adap, &ev);
+}
+
+/*
+ * A CEC transmit (and a possible wait for reply) completed.
+ * If this was in blocking mode, then complete it, otherwise
+ * queue the message for userspace to dequeue later.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_data_completed(struct cec_data *data)
+{
+ /*
+ * Delete this transmit from the filehandle's xfer_list since
+ * we're done with it.
+ *
+ * Note that if the filehandle is closed before this transmit
+ * finished, then the release() function will set data->fh to NULL.
+ * Without that we would be referring to a closed filehandle.
+ */
+ if (data->fh)
+ list_del(&data->xfer_list);
+
+ if (data->blocking) {
+ /*
+ * Someone is blocking so mark the message as completed
+ * and call complete.
+ */
+ data->completed = true;
+ complete(&data->c);
+ } else {
+ /*
+ * No blocking, so just queue the message if needed and
+ * free the memory.
+ */
+ if (data->fh)
+ cec_queue_msg_fh(data->fh, &data->msg);
+ kfree(data);
+ }
+}
+
+/*
+ * A pending CEC transmit needs to be cancelled, either because the CEC
+ * adapter is disabled or the transmit takes an impossibly long time to
+ * finish.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_data_cancel(struct cec_data *data)
+{
+ /*
+ * It's either the current transmit, or it is a pending
+ * transmit. Take the appropriate action to clear it.
+ */
+ if (data->adap->transmitting == data) {
+ data->adap->transmitting = NULL;
+ } else {
+ list_del_init(&data->list);
+ if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
+ data->adap->transmit_queue_sz--;
+ }
+
+ /* Mark it as an error */
+ data->msg.tx_ts = ktime_get_ns();
+ data->msg.tx_status |= CEC_TX_STATUS_ERROR |
+ CEC_TX_STATUS_MAX_RETRIES;
+ data->msg.tx_error_cnt++;
+ data->attempts = 0;
+ /* Queue transmitted message for monitoring purposes */
+ cec_queue_msg_monitor(data->adap, &data->msg, 1);
+
+ cec_data_completed(data);
+}
+
+/*
+ * Main CEC state machine
+ *
+ * Wait until the thread should be stopped, or we are not transmitting and
+ * a new transmit message is queued up, in which case we start transmitting
+ * that message. When the adapter finished transmitting the message it will
+ * call cec_transmit_done().
+ *
+ * If the adapter is disabled, then remove all queued messages instead.
+ *
+ * If the current transmit times out, then cancel that transmit.
+ */
+int cec_thread_func(void *_adap)
+{
+ struct cec_adapter *adap = _adap;
+
+ for (;;) {
+ unsigned int signal_free_time;
+ struct cec_data *data;
+ bool timeout = false;
+ u8 attempts;
+
+ if (adap->transmitting) {
+ int err;
+
+ /*
+			 * We are transmitting a message, so add a timeout
+			 * to prevent the state machine from getting stuck
+			 * waiting for this message to finish, and add a check
+			 * to see if the adapter is disabled, in which case
+			 * the transmit should be canceled.
+ */
+ err = wait_event_interruptible_timeout(adap->kthread_waitq,
+ kthread_should_stop() ||
+ (!adap->is_configured && !adap->is_configuring) ||
+ (!adap->transmitting &&
+ !list_empty(&adap->transmit_queue)),
+ msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
+ timeout = err == 0;
+ } else {
+ /* Otherwise we just wait for something to happen. */
+ wait_event_interruptible(adap->kthread_waitq,
+ kthread_should_stop() ||
+ (!adap->transmitting &&
+ !list_empty(&adap->transmit_queue)));
+ }
+
+ mutex_lock(&adap->lock);
+
+ if ((!adap->is_configured && !adap->is_configuring) ||
+ kthread_should_stop()) {
+ /*
+ * If the adapter is disabled, or we're asked to stop,
+ * then cancel any pending transmits.
+ */
+ while (!list_empty(&adap->transmit_queue)) {
+ data = list_first_entry(&adap->transmit_queue,
+ struct cec_data, list);
+ cec_data_cancel(data);
+ }
+ if (adap->transmitting)
+ cec_data_cancel(adap->transmitting);
+
+ /*
+ * Cancel the pending timeout work. We have to unlock
+ * the mutex when flushing the work since
+ * cec_wait_timeout() will take it. This is OK since
+ * no new entries can be added to wait_queue as long
+ * as adap->transmitting is NULL, which it is due to
+ * the cec_data_cancel() above.
+ */
+ while (!list_empty(&adap->wait_queue)) {
+ data = list_first_entry(&adap->wait_queue,
+ struct cec_data, list);
+
+ if (!cancel_delayed_work(&data->work)) {
+ mutex_unlock(&adap->lock);
+ flush_scheduled_work();
+ mutex_lock(&adap->lock);
+ }
+ cec_data_cancel(data);
+ }
+ goto unlock;
+ }
+
+ if (adap->transmitting && timeout) {
+ /*
+ * If we timeout, then log that. This really shouldn't
+ * happen and is an indication of a faulty CEC adapter
+ * driver, or the CEC bus is in some weird state.
+ */
+ dprintk(0, "message %*ph timed out!\n",
+ adap->transmitting->msg.len,
+ adap->transmitting->msg.msg);
+ /* Just give up on this. */
+ cec_data_cancel(adap->transmitting);
+ goto unlock;
+ }
+
+ /*
+ * If we are still transmitting, or there is nothing new to
+ * transmit, then just continue waiting.
+ */
+ if (adap->transmitting || list_empty(&adap->transmit_queue))
+ goto unlock;
+
+ /* Get a new message to transmit */
+ data = list_first_entry(&adap->transmit_queue,
+ struct cec_data, list);
+ list_del_init(&data->list);
+ adap->transmit_queue_sz--;
+ /* Make this the current transmitting message */
+ adap->transmitting = data;
+
+ /*
+ * Suggested number of attempts as per the CEC 2.0 spec:
+ * 4 attempts is the default, except for 'secondary poll
+ * messages', i.e. poll messages not sent during the adapter
+ * configuration phase when it allocates logical addresses.
+ */
+ if (data->msg.len == 1 && adap->is_configured)
+ attempts = 2;
+ else
+ attempts = 4;
+
+ /* Set the suggested signal free time */
+ if (data->attempts) {
+ /* should be >= 3 data bit periods for a retry */
+ signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY;
+ } else if (data->new_initiator) {
+ /* should be >= 5 data bit periods for new initiator */
+ signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
+ } else {
+ /*
+ * should be >= 7 data bit periods for sending another
+ * frame immediately after another.
+ */
+ signal_free_time = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ }
+ if (data->attempts == 0)
+ data->attempts = attempts;
+
+ /* Tell the adapter to transmit, cancel on error */
+ if (adap->ops->adap_transmit(adap, data->attempts,
+ signal_free_time, &data->msg))
+ cec_data_cancel(data);
+
+unlock:
+ mutex_unlock(&adap->lock);
+
+ if (kthread_should_stop())
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Called by the CEC adapter if a transmit finished.
+ */
+void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
+ u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt)
+{
+ struct cec_data *data;
+ struct cec_msg *msg;
+ u64 ts = ktime_get_ns();
+
+ dprintk(2, "cec_transmit_done %02x\n", status);
+ mutex_lock(&adap->lock);
+ data = adap->transmitting;
+ if (!data) {
+ /*
+ * This can happen if a transmit was issued and the cable is
+ * unplugged while the transmit is ongoing. Ignore this
+ * transmit in that case.
+ */
+ dprintk(1, "cec_transmit_done without an ongoing transmit!\n");
+ goto unlock;
+ }
+
+ msg = &data->msg;
+
+ /* Drivers must fill in the status! */
+ WARN_ON(status == 0);
+ msg->tx_ts = ts;
+ msg->tx_status |= status;
+ msg->tx_arb_lost_cnt += arb_lost_cnt;
+ msg->tx_nack_cnt += nack_cnt;
+ msg->tx_low_drive_cnt += low_drive_cnt;
+ msg->tx_error_cnt += error_cnt;
+
+ /* Mark that we're done with this transmit */
+ adap->transmitting = NULL;
+
+ /*
+ * If there are still retry attempts left and there was an error and
+ * the hardware didn't signal that it retried itself (by setting
+ * CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves.
+ */
+ if (data->attempts > 1 &&
+ !(status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK))) {
+ /* Retry this message */
+ data->attempts--;
+ /* Add the message in front of the transmit queue */
+ list_add(&data->list, &adap->transmit_queue);
+ adap->transmit_queue_sz++;
+ goto wake_thread;
+ }
+
+ data->attempts = 0;
+
+ /* Always set CEC_TX_STATUS_MAX_RETRIES on error */
+ if (!(status & CEC_TX_STATUS_OK))
+ msg->tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+
+ /* Queue transmitted message for monitoring purposes */
+ cec_queue_msg_monitor(adap, msg, 1);
+
+ if ((status & CEC_TX_STATUS_OK) && adap->is_configured &&
+ msg->timeout) {
+ /*
+ * Queue the message into the wait queue if we want to wait
+ * for a reply.
+ */
+ list_add_tail(&data->list, &adap->wait_queue);
+ schedule_delayed_work(&data->work,
+ msecs_to_jiffies(msg->timeout));
+ } else {
+ /* Otherwise we're done */
+ cec_data_completed(data);
+ }
+
+wake_thread:
+ /*
+ * Wake up the main thread to see if another message is ready
+ * for transmitting or to retry the current message.
+ */
+ wake_up_interruptible(&adap->kthread_waitq);
+unlock:
+ mutex_unlock(&adap->lock);
+}
+EXPORT_SYMBOL_GPL(cec_transmit_done);
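For illustration (not part of this patch; the tx_ok/tx_nacked flags stand in
for whatever status bits the hardware actually reports), a driver would
typically call this from its transmit-done interrupt handler:

static void example_tx_done_irq(struct cec_adapter *adap, bool tx_ok,
				bool tx_nacked)
{
	if (tx_ok)
		cec_transmit_done(adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
	else if (tx_nacked)
		cec_transmit_done(adap, CEC_TX_STATUS_NACK, 0, 1, 0, 0);
	else
		cec_transmit_done(adap, CEC_TX_STATUS_ERROR, 0, 0, 0, 1);
}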
+
+/*
+ * Called when waiting for a reply times out.
+ */
+static void cec_wait_timeout(struct work_struct *work)
+{
+ struct cec_data *data = container_of(work, struct cec_data, work.work);
+ struct cec_adapter *adap = data->adap;
+
+ mutex_lock(&adap->lock);
+ /*
+ * Sanity check in case the timeout and the arrival of the message
+ * happened at the same time.
+ */
+ if (list_empty(&data->list))
+ goto unlock;
+
+ /* Mark the message as timed out */
+ list_del_init(&data->list);
+ data->msg.rx_ts = ktime_get_ns();
+ data->msg.rx_status = CEC_RX_STATUS_TIMEOUT;
+ cec_data_completed(data);
+unlock:
+ mutex_unlock(&adap->lock);
+}
+
+/*
+ * Transmit a message. The fh argument may be NULL if the transmit is not
+ * associated with a specific filehandle.
+ *
+ * This function is called with adap->lock held.
+ */
+int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+ struct cec_fh *fh, bool block)
+{
+ struct cec_data *data;
+ u8 last_initiator = 0xff;
+ unsigned int timeout;
+ int res = 0;
+
+ msg->rx_ts = 0;
+ msg->tx_ts = 0;
+ msg->rx_status = 0;
+ msg->tx_status = 0;
+ msg->tx_arb_lost_cnt = 0;
+ msg->tx_nack_cnt = 0;
+ msg->tx_low_drive_cnt = 0;
+ msg->tx_error_cnt = 0;
+ msg->sequence = ++adap->sequence;
+ if (!msg->sequence)
+ msg->sequence = ++adap->sequence;
+
+ if (msg->reply && msg->timeout == 0) {
+ /* Make sure the timeout isn't 0. */
+ msg->timeout = 1000;
+ }
+ if (msg->timeout)
+ msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS;
+ else
+ msg->flags = 0;
+
+ /* Sanity checks */
+ if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
+ dprintk(1, "cec_transmit_msg: invalid length %d\n", msg->len);
+ return -EINVAL;
+ }
+ if (msg->timeout && msg->len == 1) {
+ dprintk(1, "cec_transmit_msg: can't reply for poll msg\n");
+ return -EINVAL;
+ }
+ memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
+ if (msg->len == 1) {
+ if (cec_msg_destination(msg) == 0xf) {
+ dprintk(1, "cec_transmit_msg: invalid poll message\n");
+ return -EINVAL;
+ }
+ if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
+ /*
+ * If the destination is a logical address our adapter
+ * has already claimed, then just NACK this.
+			 * It depends on the hardware what it will do with a
+			 * POLL to itself (some acknowledge it), so it is just
+			 * as easy to handle it here so that the behavior is
+			 * consistent.
+ */
+ msg->tx_ts = ktime_get_ns();
+ msg->tx_status = CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES;
+ msg->tx_nack_cnt = 1;
+ return 0;
+ }
+ }
+ if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
+ cec_has_log_addr(adap, cec_msg_destination(msg))) {
+ dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
+ return -EINVAL;
+ }
+ if (msg->len > 1 && adap->is_configured &&
+ !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
+ dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
+ cec_msg_initiator(msg));
+ return -EINVAL;
+ }
+ if (!adap->is_configured && !adap->is_configuring)
+ return -ENONET;
+
+ if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ)
+ return -EBUSY;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (msg->len > 1 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
+ msg->msg[2] = adap->phys_addr >> 8;
+ msg->msg[3] = adap->phys_addr & 0xff;
+ }
+
+ if (msg->timeout)
+ dprintk(2, "cec_transmit_msg: %*ph (wait for 0x%02x%s)\n",
+ msg->len, msg->msg, msg->reply, !block ? ", nb" : "");
+ else
+ dprintk(2, "cec_transmit_msg: %*ph%s\n",
+ msg->len, msg->msg, !block ? " (nb)" : "");
+
+ data->msg = *msg;
+ data->fh = fh;
+ data->adap = adap;
+ data->blocking = block;
+
+ /*
+	 * Determine if this message follows a message from the same
+	 * initiator. Needed to determine the signal free time later on.
+ */
+ if (msg->len > 1) {
+ if (!(list_empty(&adap->transmit_queue))) {
+ const struct cec_data *last;
+
+ last = list_last_entry(&adap->transmit_queue,
+ const struct cec_data, list);
+ last_initiator = cec_msg_initiator(&last->msg);
+ } else if (adap->transmitting) {
+ last_initiator =
+ cec_msg_initiator(&adap->transmitting->msg);
+ }
+ }
+ data->new_initiator = last_initiator != cec_msg_initiator(msg);
+ init_completion(&data->c);
+ INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
+
+ if (fh)
+ list_add_tail(&data->xfer_list, &fh->xfer_list);
+ list_add_tail(&data->list, &adap->transmit_queue);
+ adap->transmit_queue_sz++;
+ if (!adap->transmitting)
+ wake_up_interruptible(&adap->kthread_waitq);
+
+ /* All done if we don't need to block waiting for completion */
+ if (!block)
+ return 0;
+
+ /*
+ * If we don't get a completion before this time something is really
+ * wrong and we time out.
+ */
+ timeout = CEC_XFER_TIMEOUT_MS;
+ /* Add the requested timeout if we have to wait for a reply as well */
+ if (msg->timeout)
+ timeout += msg->timeout;
+
+ /*
+ * Release the lock and wait, retake the lock afterwards.
+ */
+ mutex_unlock(&adap->lock);
+ res = wait_for_completion_killable_timeout(&data->c,
+ msecs_to_jiffies(timeout));
+ mutex_lock(&adap->lock);
+
+ if (data->completed) {
+ /* The transmit completed (possibly with an error) */
+ *msg = data->msg;
+ kfree(data);
+ return 0;
+ }
+ /*
+ * The wait for completion timed out or was interrupted, so mark this
+ * as non-blocking and disconnect from the filehandle since it is
+ * still 'in flight'. When it finally completes it will just drop the
+ * result silently.
+ */
+ data->blocking = false;
+ if (data->fh)
+ list_del(&data->xfer_list);
+ data->fh = NULL;
+
+ if (res == 0) { /* timed out */
+ /* Check if the reply or the transmit failed */
+ if (msg->timeout && (msg->tx_status & CEC_TX_STATUS_OK))
+ msg->rx_status = CEC_RX_STATUS_TIMEOUT;
+ else
+ msg->tx_status = CEC_TX_STATUS_MAX_RETRIES;
+ }
+ return res > 0 ? 0 : res;
+}
+
+/* Helper function to be used by drivers and this framework. */
+int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
+ bool block)
+{
+ int ret;
+
+ mutex_lock(&adap->lock);
+ ret = cec_transmit_msg_fh(adap, msg, NULL, block);
+ mutex_unlock(&adap->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cec_transmit_msg);
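As a usage sketch (illustrative; it assumes the adapter has already claimed
the Playback 1 logical address), a caller could send a directed
<Image View On> to the TV like this:

static int example_image_view_on(struct cec_adapter *adap)
{
	struct cec_msg msg = {};

	msg.len = 2;
	/* high nibble: initiator LA, low nibble: destination LA */
	msg.msg[0] = (CEC_LOG_ADDR_PLAYBACK_1 << 4) | CEC_LOG_ADDR_TV;
	msg.msg[1] = CEC_MSG_IMAGE_VIEW_ON;
	/* block until the transmit has completed */
	return cec_transmit_msg(adap, &msg, true);
}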
+
+/*
+ * I don't like forward references but without this the low-level
+ * cec_received_msg() function would come after a bunch of high-level
+ * CEC protocol handling functions. That was very confusing.
+ */
+static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
+ bool is_reply);
+
+#define DIRECTED 0x80
+#define BCAST1_4 0x40
+#define BCAST2_0 0x20 /* broadcast only allowed for >= 2.0 */
+#define BCAST (BCAST1_4 | BCAST2_0)
+#define BOTH (BCAST | DIRECTED)
+
+/*
+ * Specify minimum length and whether the message is directed, broadcast
+ * or both. Messages that do not match the criteria are ignored as per
+ * the CEC specification.
+ */
+static const u8 cec_msg_size[256] = {
+ [CEC_MSG_ACTIVE_SOURCE] = 4 | BCAST,
+ [CEC_MSG_IMAGE_VIEW_ON] = 2 | DIRECTED,
+ [CEC_MSG_TEXT_VIEW_ON] = 2 | DIRECTED,
+ [CEC_MSG_INACTIVE_SOURCE] = 4 | DIRECTED,
+ [CEC_MSG_REQUEST_ACTIVE_SOURCE] = 2 | BCAST,
+ [CEC_MSG_ROUTING_CHANGE] = 6 | BCAST,
+ [CEC_MSG_ROUTING_INFORMATION] = 4 | BCAST,
+ [CEC_MSG_SET_STREAM_PATH] = 4 | BCAST,
+ [CEC_MSG_STANDBY] = 2 | BOTH,
+ [CEC_MSG_RECORD_OFF] = 2 | DIRECTED,
+ [CEC_MSG_RECORD_ON] = 3 | DIRECTED,
+ [CEC_MSG_RECORD_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_RECORD_TV_SCREEN] = 2 | DIRECTED,
+ [CEC_MSG_CLEAR_ANALOGUE_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_CLEAR_DIGITAL_TIMER] = 16 | DIRECTED,
+ [CEC_MSG_CLEAR_EXT_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_SET_ANALOGUE_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_SET_DIGITAL_TIMER] = 16 | DIRECTED,
+ [CEC_MSG_SET_EXT_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_SET_TIMER_PROGRAM_TITLE] = 2 | DIRECTED,
+ [CEC_MSG_TIMER_CLEARED_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_TIMER_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_CEC_VERSION] = 3 | DIRECTED,
+ [CEC_MSG_GET_CEC_VERSION] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_PHYSICAL_ADDR] = 2 | DIRECTED,
+ [CEC_MSG_GET_MENU_LANGUAGE] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_PHYSICAL_ADDR] = 5 | BCAST,
+ [CEC_MSG_SET_MENU_LANGUAGE] = 5 | BCAST,
+ [CEC_MSG_REPORT_FEATURES] = 6 | BCAST,
+ [CEC_MSG_GIVE_FEATURES] = 2 | DIRECTED,
+ [CEC_MSG_DECK_CONTROL] = 3 | DIRECTED,
+ [CEC_MSG_DECK_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_GIVE_DECK_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_PLAY] = 3 | DIRECTED,
+ [CEC_MSG_GIVE_TUNER_DEVICE_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_SELECT_ANALOGUE_SERVICE] = 6 | DIRECTED,
+ [CEC_MSG_SELECT_DIGITAL_SERVICE] = 9 | DIRECTED,
+ [CEC_MSG_TUNER_DEVICE_STATUS] = 7 | DIRECTED,
+ [CEC_MSG_TUNER_STEP_DECREMENT] = 2 | DIRECTED,
+ [CEC_MSG_TUNER_STEP_INCREMENT] = 2 | DIRECTED,
+ [CEC_MSG_DEVICE_VENDOR_ID] = 5 | BCAST,
+ [CEC_MSG_GIVE_DEVICE_VENDOR_ID] = 2 | DIRECTED,
+ [CEC_MSG_VENDOR_COMMAND] = 2 | DIRECTED,
+ [CEC_MSG_VENDOR_COMMAND_WITH_ID] = 5 | BOTH,
+ [CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN] = 2 | BOTH,
+ [CEC_MSG_VENDOR_REMOTE_BUTTON_UP] = 2 | BOTH,
+ [CEC_MSG_SET_OSD_STRING] = 3 | DIRECTED,
+ [CEC_MSG_GIVE_OSD_NAME] = 2 | DIRECTED,
+ [CEC_MSG_SET_OSD_NAME] = 2 | DIRECTED,
+ [CEC_MSG_MENU_REQUEST] = 3 | DIRECTED,
+ [CEC_MSG_MENU_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_USER_CONTROL_PRESSED] = 3 | DIRECTED,
+ [CEC_MSG_USER_CONTROL_RELEASED] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_DEVICE_POWER_STATUS] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_POWER_STATUS] = 3 | DIRECTED | BCAST2_0,
+ [CEC_MSG_FEATURE_ABORT] = 4 | DIRECTED,
+ [CEC_MSG_ABORT] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_AUDIO_STATUS] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_AUDIO_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
+ [CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH,
+ [CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED,
+ [CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED,
+ [CEC_MSG_INITIATE_ARC] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_ARC_INITIATED] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_ARC_TERMINATED] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_ARC_INITIATION] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
+ [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
+ [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
+ [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
+};
+
+/* Called by the CEC adapter if a message is received */
+void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ struct cec_data *data;
+ u8 msg_init = cec_msg_initiator(msg);
+ u8 msg_dest = cec_msg_destination(msg);
+ u8 cmd = msg->msg[1];
+ bool is_reply = false;
+ bool valid_la = true;
+ u8 min_len = 0;
+
+ if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
+ return;
+
+ /*
+ * Some CEC adapters will receive the messages that they transmitted.
+ * This test filters out those messages by checking if we are the
+ * initiator, and just returning in that case.
+ *
+ * Note that this won't work if this is an Unregistered device.
+ *
+	 * It is bad practice for hardware to receive the messages it has
+	 * transmitted itself; luckily most CEC adapters behave correctly in
+	 * this respect.
+ */
+ if (msg_init != CEC_LOG_ADDR_UNREGISTERED &&
+ cec_has_log_addr(adap, msg_init))
+ return;
+
+ msg->rx_ts = ktime_get_ns();
+ msg->rx_status = CEC_RX_STATUS_OK;
+ msg->sequence = msg->reply = msg->timeout = 0;
+ msg->tx_status = 0;
+ msg->tx_ts = 0;
+ msg->tx_arb_lost_cnt = 0;
+ msg->tx_nack_cnt = 0;
+ msg->tx_low_drive_cnt = 0;
+ msg->tx_error_cnt = 0;
+ msg->flags = 0;
+ memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
+
+ mutex_lock(&adap->lock);
+ dprintk(2, "cec_received_msg: %*ph\n", msg->len, msg->msg);
+
+ /* Check if this message was for us (directed or broadcast). */
+ if (!cec_msg_is_broadcast(msg))
+ valid_la = cec_has_log_addr(adap, msg_dest);
+
+ /*
+	 * Ignore the message if it is too short, or if it is a broadcast
+	 * message where a directed message was expected or vice versa
+	 * (according to CEC sections 7.3 and 12.2).
+ */
+ if (valid_la && msg->len > 1 && cec_msg_size[cmd]) {
+ u8 dir_fl = cec_msg_size[cmd] & BOTH;
+
+ min_len = cec_msg_size[cmd] & 0x1f;
+ if (msg->len < min_len)
+ valid_la = false;
+ else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
+ valid_la = false;
+ else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
+ valid_la = false;
+ else if (cec_msg_is_broadcast(msg) &&
+ adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
+ !(dir_fl & BCAST2_0))
+ valid_la = false;
+ }
+ if (valid_la && min_len) {
+ /* These messages have special length requirements */
+ switch (cmd) {
+ case CEC_MSG_TIMER_STATUS:
+ if (msg->msg[2] & 0x10) {
+ switch (msg->msg[2] & 0xf) {
+ case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
+ case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
+ if (msg->len < 5)
+ valid_la = false;
+ break;
+ }
+ } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
+ if (msg->len < 5)
+ valid_la = false;
+ }
+ break;
+ case CEC_MSG_RECORD_ON:
+ switch (msg->msg[2]) {
+ case CEC_OP_RECORD_SRC_OWN:
+ break;
+ case CEC_OP_RECORD_SRC_DIGITAL:
+ if (msg->len < 10)
+ valid_la = false;
+ break;
+ case CEC_OP_RECORD_SRC_ANALOG:
+ if (msg->len < 7)
+ valid_la = false;
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PLUG:
+ if (msg->len < 4)
+ valid_la = false;
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+ if (msg->len < 5)
+ valid_la = false;
+ break;
+ }
+ break;
+ }
+ }
+
+ /* It's a valid message and not a poll or CDC message */
+ if (valid_la && msg->len > 1 && cmd != CEC_MSG_CDC_MESSAGE) {
+ bool abort = cmd == CEC_MSG_FEATURE_ABORT;
+
+ /* The aborted command is in msg[2] */
+ if (abort)
+ cmd = msg->msg[2];
+
+ /*
+ * Walk over all transmitted messages that are waiting for a
+ * reply.
+ */
+ list_for_each_entry(data, &adap->wait_queue, list) {
+ struct cec_msg *dst = &data->msg;
+
+ /*
+ * The *only* CEC message that has two possible replies
+ * is CEC_MSG_INITIATE_ARC.
+ * In this case allow either of the two replies.
+ */
+ if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC &&
+ (cmd == CEC_MSG_REPORT_ARC_INITIATED ||
+ cmd == CEC_MSG_REPORT_ARC_TERMINATED) &&
+ (dst->reply == CEC_MSG_REPORT_ARC_INITIATED ||
+ dst->reply == CEC_MSG_REPORT_ARC_TERMINATED))
+ dst->reply = cmd;
+
+ /* Does the command match? */
+ if ((abort && cmd != dst->msg[1]) ||
+ (!abort && cmd != dst->reply))
+ continue;
+
+ /* Does the addressing match? */
+ if (msg_init != cec_msg_destination(dst) &&
+ !cec_msg_is_broadcast(dst))
+ continue;
+
+ /* We got a reply */
+ memcpy(dst->msg, msg->msg, msg->len);
+ dst->len = msg->len;
+ dst->rx_ts = msg->rx_ts;
+ dst->rx_status = msg->rx_status;
+ if (abort)
+ dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
+ msg->flags = dst->flags;
+ /* Remove it from the wait_queue */
+ list_del_init(&data->list);
+
+ /* Cancel the pending timeout work */
+ if (!cancel_delayed_work(&data->work)) {
+ mutex_unlock(&adap->lock);
+ flush_scheduled_work();
+ mutex_lock(&adap->lock);
+ }
+ /*
+ * Mark this as a reply, provided someone is still
+ * waiting for the answer.
+ */
+ if (data->fh)
+ is_reply = true;
+ cec_data_completed(data);
+ break;
+ }
+ }
+ mutex_unlock(&adap->lock);
+
+ /* Pass the message on to any monitoring filehandles */
+ cec_queue_msg_monitor(adap, msg, valid_la);
+
+ /* We're done if it is not for us or a poll message */
+ if (!valid_la || msg->len <= 1)
+ return;
+
+ if (adap->log_addrs.log_addr_mask == 0)
+ return;
+
+ /*
+ * Process the message on the protocol level. If is_reply is true,
+ * then cec_receive_notify() won't pass on the reply to the listener(s)
+ * since that was already done by cec_data_completed() above.
+ */
+ cec_receive_notify(adap, msg, is_reply);
+}
+EXPORT_SYMBOL_GPL(cec_received_msg);
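A matching receive-path sketch (illustrative; buf and len stand in for
whatever the hardware FIFO provides):

static void example_rx_irq(struct cec_adapter *adap, const u8 *buf, u8 len)
{
	struct cec_msg msg = {};

	if (len == 0 || len > CEC_MAX_MSG_SIZE)
		return;
	msg.len = len;
	memcpy(msg.msg, buf, len);
	cec_received_msg(adap, &msg);
}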
+
+/* Logical Address Handling */
+
+/*
+ * Attempt to claim a specific logical address.
+ *
+ * This function is called with adap->lock held.
+ */
+static int cec_config_log_addr(struct cec_adapter *adap,
+ unsigned int idx,
+ unsigned int log_addr)
+{
+ struct cec_log_addrs *las = &adap->log_addrs;
+ struct cec_msg msg = { };
+ int err;
+
+ if (cec_has_log_addr(adap, log_addr))
+ return 0;
+
+ /* Send poll message */
+ msg.len = 1;
+ msg.msg[0] = (log_addr << 4) | log_addr;
+ err = cec_transmit_msg_fh(adap, &msg, NULL, true);
+
+ /*
+	 * While trying to poll, the physical address was reset
+	 * and the adapter was unconfigured, so bail out.
+ */
+ if (!adap->is_configuring)
+ return -EINTR;
+
+ if (err)
+ return err;
+
+ if (msg.tx_status & CEC_TX_STATUS_OK)
+ return 0;
+
+ /*
+ * Message not acknowledged, so this logical
+ * address is free to use.
+ */
+ err = adap->ops->adap_log_addr(adap, log_addr);
+ if (err)
+ return err;
+
+ las->log_addr[idx] = log_addr;
+ las->log_addr_mask |= 1 << log_addr;
+ adap->phys_addrs[log_addr] = adap->phys_addr;
+
+ dprintk(2, "claimed addr %d (%d)\n", log_addr,
+ las->primary_device_type[idx]);
+ return 1;
+}
+
+/*
+ * Unconfigure the adapter: clear all logical addresses and send
+ * the state changed event.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_adap_unconfigure(struct cec_adapter *adap)
+{
+ WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
+ adap->log_addrs.log_addr_mask = 0;
+ adap->is_configuring = false;
+ adap->is_configured = false;
+ memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
+ wake_up_interruptible(&adap->kthread_waitq);
+ cec_post_state_event(adap);
+}
+
+/*
+ * Attempt to claim the required logical addresses.
+ */
+static int cec_config_thread_func(void *arg)
+{
+ /* The various LAs for each type of device */
+ static const u8 tv_log_addrs[] = {
+ CEC_LOG_ADDR_TV, CEC_LOG_ADDR_SPECIFIC,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 record_log_addrs[] = {
+ CEC_LOG_ADDR_RECORD_1, CEC_LOG_ADDR_RECORD_2,
+ CEC_LOG_ADDR_RECORD_3,
+ CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 tuner_log_addrs[] = {
+ CEC_LOG_ADDR_TUNER_1, CEC_LOG_ADDR_TUNER_2,
+ CEC_LOG_ADDR_TUNER_3, CEC_LOG_ADDR_TUNER_4,
+ CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 playback_log_addrs[] = {
+ CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_PLAYBACK_2,
+ CEC_LOG_ADDR_PLAYBACK_3,
+ CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 audiosystem_log_addrs[] = {
+ CEC_LOG_ADDR_AUDIOSYSTEM,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 specific_use_log_addrs[] = {
+ CEC_LOG_ADDR_SPECIFIC,
+ CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 *type2addrs[6] = {
+ [CEC_LOG_ADDR_TYPE_TV] = tv_log_addrs,
+ [CEC_LOG_ADDR_TYPE_RECORD] = record_log_addrs,
+ [CEC_LOG_ADDR_TYPE_TUNER] = tuner_log_addrs,
+ [CEC_LOG_ADDR_TYPE_PLAYBACK] = playback_log_addrs,
+ [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = audiosystem_log_addrs,
+ [CEC_LOG_ADDR_TYPE_SPECIFIC] = specific_use_log_addrs,
+ };
+ static const u16 type2mask[] = {
+ [CEC_LOG_ADDR_TYPE_TV] = CEC_LOG_ADDR_MASK_TV,
+ [CEC_LOG_ADDR_TYPE_RECORD] = CEC_LOG_ADDR_MASK_RECORD,
+ [CEC_LOG_ADDR_TYPE_TUNER] = CEC_LOG_ADDR_MASK_TUNER,
+ [CEC_LOG_ADDR_TYPE_PLAYBACK] = CEC_LOG_ADDR_MASK_PLAYBACK,
+ [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = CEC_LOG_ADDR_MASK_AUDIOSYSTEM,
+ [CEC_LOG_ADDR_TYPE_SPECIFIC] = CEC_LOG_ADDR_MASK_SPECIFIC,
+ };
+ struct cec_adapter *adap = arg;
+ struct cec_log_addrs *las = &adap->log_addrs;
+ int err;
+ int i, j;
+
+ mutex_lock(&adap->lock);
+ dprintk(1, "physical address: %x.%x.%x.%x, claim %d logical addresses\n",
+ cec_phys_addr_exp(adap->phys_addr), las->num_log_addrs);
+ las->log_addr_mask = 0;
+
+ if (las->log_addr_type[0] == CEC_LOG_ADDR_TYPE_UNREGISTERED)
+ goto configured;
+
+ for (i = 0; i < las->num_log_addrs; i++) {
+ unsigned int type = las->log_addr_type[i];
+ const u8 *la_list;
+ u8 last_la;
+
+ /*
+ * The TV functionality can only map to physical address 0.
+ * For any other address, try the Specific functionality
+ * instead as per the spec.
+ */
+ if (adap->phys_addr && type == CEC_LOG_ADDR_TYPE_TV)
+ type = CEC_LOG_ADDR_TYPE_SPECIFIC;
+
+ la_list = type2addrs[type];
+ last_la = las->log_addr[i];
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ if (last_la == CEC_LOG_ADDR_INVALID ||
+ last_la == CEC_LOG_ADDR_UNREGISTERED ||
+ !((1 << last_la) & type2mask[type]))
+ last_la = la_list[0];
+
+ err = cec_config_log_addr(adap, i, last_la);
+ if (err > 0) /* Reused last LA */
+ continue;
+
+ if (err < 0)
+ goto unconfigure;
+
+ for (j = 0; la_list[j] != CEC_LOG_ADDR_INVALID; j++) {
+ /* Tried this one already, skip it */
+ if (la_list[j] == last_la)
+ continue;
+ /* The backup addresses are CEC 2.0 specific */
+ if ((la_list[j] == CEC_LOG_ADDR_BACKUP_1 ||
+ la_list[j] == CEC_LOG_ADDR_BACKUP_2) &&
+ las->cec_version < CEC_OP_CEC_VERSION_2_0)
+ continue;
+
+ err = cec_config_log_addr(adap, i, la_list[j]);
+ if (err == 0) /* LA is in use */
+ continue;
+ if (err < 0)
+ goto unconfigure;
+ /* Done, claimed an LA */
+ break;
+ }
+
+ if (la_list[j] == CEC_LOG_ADDR_INVALID)
+ dprintk(1, "could not claim LA %d\n", i);
+ }
+
+ if (adap->log_addrs.log_addr_mask == 0 &&
+ !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
+ goto unconfigure;
+
+configured:
+ if (adap->log_addrs.log_addr_mask == 0) {
+ /* Fall back to unregistered */
+ las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
+ las->log_addr_mask = 1 << las->log_addr[0];
+ for (i = 1; i < las->num_log_addrs; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ }
+ for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ adap->is_configured = true;
+ adap->is_configuring = false;
+ cec_post_state_event(adap);
+
+ /*
+ * Now post the Report Features and Report Physical Address broadcast
+ * messages. Note that these are non-blocking transmits, meaning that
+ * they are just queued up and once adap->lock is unlocked the main
+ * thread will kick in and start transmitting these.
+ *
+ * If after this function is done (but before one or more of these
+ * messages are actually transmitted) the CEC adapter is unconfigured,
+ * then any remaining messages will be dropped by the main thread.
+ */
+ for (i = 0; i < las->num_log_addrs; i++) {
+ struct cec_msg msg = {};
+
+ if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
+ (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
+ continue;
+
+ msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
+
+ /* Report Features must come first according to CEC 2.0 */
+ if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
+ adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
+ cec_fill_msg_report_features(adap, &msg, i);
+ cec_transmit_msg_fh(adap, &msg, NULL, false);
+ }
+
+ /* Report Physical Address */
+ cec_msg_report_physical_addr(&msg, adap->phys_addr,
+ las->primary_device_type[i]);
+ dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+ las->log_addr[i],
+ cec_phys_addr_exp(adap->phys_addr));
+ cec_transmit_msg_fh(adap, &msg, NULL, false);
+ }
+ adap->kthread_config = NULL;
+ complete(&adap->config_completion);
+ mutex_unlock(&adap->lock);
+ return 0;
+
+unconfigure:
+ for (i = 0; i < las->num_log_addrs; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ cec_adap_unconfigure(adap);
+ adap->kthread_config = NULL;
+ mutex_unlock(&adap->lock);
+ complete(&adap->config_completion);
+ return 0;
+}
+
+/*
+ * Called from either __cec_s_phys_addr or __cec_s_log_addrs to claim the
+ * logical addresses.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
+{
+ if (WARN_ON(adap->is_configuring || adap->is_configured))
+ return;
+
+ init_completion(&adap->config_completion);
+
+ /* Ready to kick off the thread */
+ adap->is_configuring = true;
+ adap->kthread_config = kthread_run(cec_config_thread_func, adap,
+ "ceccfg-%s", adap->name);
+ if (IS_ERR(adap->kthread_config)) {
+ adap->kthread_config = NULL;
+ } else if (block) {
+ mutex_unlock(&adap->lock);
+ wait_for_completion(&adap->config_completion);
+ mutex_lock(&adap->lock);
+ }
+}
+
+/* Set a new physical address and send an event notifying userspace of this.
+ *
+ * This function is called with adap->lock held.
+ */
+void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
+{
+ if (phys_addr == adap->phys_addr || adap->devnode.unregistered)
+ return;
+
+ if (phys_addr == CEC_PHYS_ADDR_INVALID ||
+ adap->phys_addr != CEC_PHYS_ADDR_INVALID) {
+ adap->phys_addr = CEC_PHYS_ADDR_INVALID;
+ cec_post_state_event(adap);
+ cec_adap_unconfigure(adap);
+ /* Disabling monitor all mode should always succeed */
+ if (adap->monitor_all_cnt)
+ WARN_ON(call_op(adap, adap_monitor_all_enable, false));
+ WARN_ON(adap->ops->adap_enable(adap, false));
+ if (phys_addr == CEC_PHYS_ADDR_INVALID)
+ return;
+ }
+
+ if (adap->ops->adap_enable(adap, true))
+ return;
+
+ if (adap->monitor_all_cnt &&
+ call_op(adap, adap_monitor_all_enable, true)) {
+ WARN_ON(adap->ops->adap_enable(adap, false));
+ return;
+ }
+ adap->phys_addr = phys_addr;
+ cec_post_state_event(adap);
+ if (adap->log_addrs.num_log_addrs)
+ cec_claim_log_addrs(adap, block);
+}
+
+void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
+{
+ if (IS_ERR_OR_NULL(adap))
+ return;
+
+ mutex_lock(&adap->lock);
+ __cec_s_phys_addr(adap, phys_addr, block);
+ mutex_unlock(&adap->lock);
+}
+EXPORT_SYMBOL_GPL(cec_s_phys_addr);
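An HDMI transmitter driver would typically tie this to its hotplug/EDID
handling; a hedged sketch (edid, size and connected are assumptions about the
driver's own state):

static void example_hotplug(struct cec_adapter *adap, const u8 *edid,
			    unsigned int size, bool connected)
{
	if (connected)
		cec_s_phys_addr(adap, cec_get_edid_phys_addr(edid, size, NULL),
				false);
	else
		cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
}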
+
+/*
+ * Called from either the ioctl or a driver to set the logical addresses.
+ *
+ * This function is called with adap->lock held.
+ */
+int __cec_s_log_addrs(struct cec_adapter *adap,
+ struct cec_log_addrs *log_addrs, bool block)
+{
+ u16 type_mask = 0;
+ int i;
+
+ if (adap->devnode.unregistered)
+ return -ENODEV;
+
+ if (!log_addrs || log_addrs->num_log_addrs == 0) {
+ adap->log_addrs.num_log_addrs = 0;
+ cec_adap_unconfigure(adap);
+ return 0;
+ }
+
+ if (log_addrs->flags & CEC_LOG_ADDRS_FL_CDC_ONLY) {
+ /*
+ * Sanitize log_addrs fields if a CDC-Only device is
+ * requested.
+ */
+ log_addrs->num_log_addrs = 1;
+ log_addrs->osd_name[0] = '\0';
+ log_addrs->vendor_id = CEC_VENDOR_ID_NONE;
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+ /*
+ * This is just an internal convention since a CDC-Only device
+ * doesn't have to be a switch. But switches already use
+ * unregistered, so it makes some kind of sense to pick this
+ * as the primary device. Since a CDC-Only device never sends
+ * any 'normal' CEC messages this primary device type is never
+ * sent over the CEC bus.
+ */
+ log_addrs->primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_SWITCH;
+ log_addrs->all_device_types[0] = 0;
+ log_addrs->features[0][0] = 0;
+ log_addrs->features[0][1] = 0;
+ }
+
+ /* Ensure the osd name is 0-terminated */
+ log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0';
+
+ /* Sanity checks */
+ if (log_addrs->num_log_addrs > adap->available_log_addrs) {
+ dprintk(1, "num_log_addrs > %d\n", adap->available_log_addrs);
+ return -EINVAL;
+ }
+
+ /*
+ * Vendor ID is a 24 bit number, so check if the value is
+ * within the correct range.
+ */
+ if (log_addrs->vendor_id != CEC_VENDOR_ID_NONE &&
+ (log_addrs->vendor_id & 0xff000000) != 0)
+ return -EINVAL;
+
+ if (log_addrs->cec_version != CEC_OP_CEC_VERSION_1_4 &&
+ log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0)
+ return -EINVAL;
+
+ if (log_addrs->num_log_addrs > 1)
+ for (i = 0; i < log_addrs->num_log_addrs; i++)
+ if (log_addrs->log_addr_type[i] ==
+ CEC_LOG_ADDR_TYPE_UNREGISTERED) {
+ dprintk(1, "num_log_addrs > 1 can't be combined with unregistered LA\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < log_addrs->num_log_addrs; i++) {
+ const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
+ u8 *features = log_addrs->features[i];
+ bool op_is_dev_features = false;
+ unsigned j;
+
+ log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ if (type_mask & (1 << log_addrs->log_addr_type[i])) {
+ dprintk(1, "duplicate logical address type\n");
+ return -EINVAL;
+ }
+ type_mask |= 1 << log_addrs->log_addr_type[i];
+ if ((type_mask & (1 << CEC_LOG_ADDR_TYPE_RECORD)) &&
+ (type_mask & (1 << CEC_LOG_ADDR_TYPE_PLAYBACK))) {
+ /* Record already contains the playback functionality */
+ dprintk(1, "invalid record + playback combination\n");
+ return -EINVAL;
+ }
+ if (log_addrs->primary_device_type[i] >
+ CEC_OP_PRIM_DEVTYPE_PROCESSOR) {
+ dprintk(1, "unknown primary device type\n");
+ return -EINVAL;
+ }
+ if (log_addrs->primary_device_type[i] == 2) {
+ dprintk(1, "invalid primary device type\n");
+ return -EINVAL;
+ }
+ if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
+ dprintk(1, "unknown logical address type\n");
+ return -EINVAL;
+ }
+ for (j = 0; j < feature_sz; j++) {
+ if ((features[j] & 0x80) == 0) {
+ if (op_is_dev_features)
+ break;
+ op_is_dev_features = true;
+ }
+ }
+ if (!op_is_dev_features || j == feature_sz) {
+ dprintk(1, "malformed features\n");
+ return -EINVAL;
+ }
+ /* Zero unused part of the feature array */
+ memset(features + j + 1, 0, feature_sz - j - 1);
+ }
+
+ if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
+ if (log_addrs->num_log_addrs > 2) {
+ dprintk(1, "CEC 2.0 allows no more than 2 logical addresses\n");
+ return -EINVAL;
+ }
+ if (log_addrs->num_log_addrs == 2) {
+ if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_AUDIOSYSTEM) |
+ (1 << CEC_LOG_ADDR_TYPE_TV)))) {
+ dprintk(1, "Two LAs is only allowed for audiosystem and TV\n");
+ return -EINVAL;
+ }
+ if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_PLAYBACK) |
+ (1 << CEC_LOG_ADDR_TYPE_RECORD)))) {
+ dprintk(1, "An audiosystem/TV can only be combined with record or playback\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Zero unused LAs */
+ for (i = log_addrs->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) {
+ log_addrs->primary_device_type[i] = 0;
+ log_addrs->log_addr_type[i] = 0;
+ log_addrs->all_device_types[i] = 0;
+ memset(log_addrs->features[i], 0,
+ sizeof(log_addrs->features[i]));
+ }
+
+ log_addrs->log_addr_mask = adap->log_addrs.log_addr_mask;
+ adap->log_addrs = *log_addrs;
+ if (adap->phys_addr != CEC_PHYS_ADDR_INVALID)
+ cec_claim_log_addrs(adap, block);
+ return 0;
+}
+
+int cec_s_log_addrs(struct cec_adapter *adap,
+ struct cec_log_addrs *log_addrs, bool block)
+{
+ int err;
+
+ mutex_lock(&adap->lock);
+ err = __cec_s_log_addrs(adap, log_addrs, block);
+ mutex_unlock(&adap->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cec_s_log_addrs);
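A hedged configuration sketch for a single playback device (the values are
one plausible choice rather than a requirement, and CEC_OP_PRIM_DEVTYPE_PLAYBACK
is assumed to be defined in the uapi header):

static int example_claim_playback_la(struct cec_adapter *adap)
{
	struct cec_log_addrs log_addrs = {};

	log_addrs.num_log_addrs = 1;
	log_addrs.cec_version = CEC_OP_CEC_VERSION_1_4;
	log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
	log_addrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
	log_addrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
	strlcpy(log_addrs.osd_name, "Playback", sizeof(log_addrs.osd_name));
	/* block until the logical address has been claimed (or given up) */
	return cec_s_log_addrs(adap, &log_addrs, true);
}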
+
+/* High-level core CEC message handling */
+
+/* Fill in the Report Features message */
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+ struct cec_msg *msg,
+ unsigned int la_idx)
+{
+ const struct cec_log_addrs *las = &adap->log_addrs;
+ const u8 *features = las->features[la_idx];
+ bool op_is_dev_features = false;
+ unsigned int idx;
+
+ /* Report Features */
+ msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+ msg->len = 4;
+ msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+ msg->msg[2] = adap->log_addrs.cec_version;
+ msg->msg[3] = las->all_device_types[la_idx];
+
+ /* Write RC Profiles first, then Device Features */
+ for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
+ msg->msg[msg->len++] = features[idx];
+ if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
+ if (op_is_dev_features)
+ break;
+ op_is_dev_features = true;
+ }
+ }
+}
+
+/* Transmit the Feature Abort message */
+static int cec_feature_abort_reason(struct cec_adapter *adap,
+ struct cec_msg *msg, u8 reason)
+{
+ struct cec_msg tx_msg = { };
+
+ /*
+ * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT
+ * message!
+ */
+ if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
+ return 0;
+ cec_msg_set_reply_to(&tx_msg, msg);
+ cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
+ return cec_transmit_msg(adap, &tx_msg, false);
+}
+
+static int cec_feature_abort(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ return cec_feature_abort_reason(adap, msg,
+ CEC_OP_ABORT_UNRECOGNIZED_OP);
+}
+
+static int cec_feature_refused(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ return cec_feature_abort_reason(adap, msg,
+ CEC_OP_ABORT_REFUSED);
+}
+
+/*
+ * Called when a CEC message is received. This function will do any
+ * necessary core processing. The is_reply bool is true if this message
+ * is a reply to an earlier transmit.
+ *
+ * The message is either a broadcast message or a valid directed message.
+ */
+static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
+ bool is_reply)
+{
+ bool is_broadcast = cec_msg_is_broadcast(msg);
+ u8 dest_laddr = cec_msg_destination(msg);
+ u8 init_laddr = cec_msg_initiator(msg);
+ u8 devtype = cec_log_addr2dev(adap, dest_laddr);
+ int la_idx = cec_log_addr2idx(adap, dest_laddr);
+ bool from_unregistered = init_laddr == 0xf;
+ struct cec_msg tx_cec_msg = { };
+
+ dprintk(1, "cec_receive_notify: %*ph\n", msg->len, msg->msg);
+
+ /* If this is a CDC-Only device, then ignore any non-CDC messages */
+ if (cec_is_cdc_only(&adap->log_addrs) &&
+ msg->msg[1] != CEC_MSG_CDC_MESSAGE)
+ return 0;
+
+ if (adap->ops->received) {
+ /* Allow drivers to process the message first */
+ if (adap->ops->received(adap, msg) != -ENOMSG)
+ return 0;
+ }
+
+ /*
+ * REPORT_PHYSICAL_ADDR, CEC_MSG_USER_CONTROL_PRESSED and
+ * CEC_MSG_USER_CONTROL_RELEASED messages always have to be
+ * handled by the CEC core, even if the passthrough mode is on.
+ * The others are just ignored if passthrough mode is on.
+ */
+ switch (msg->msg[1]) {
+ case CEC_MSG_GET_CEC_VERSION:
+ case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
+ case CEC_MSG_ABORT:
+ case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
+ case CEC_MSG_GIVE_PHYSICAL_ADDR:
+ case CEC_MSG_GIVE_OSD_NAME:
+ case CEC_MSG_GIVE_FEATURES:
+ /*
+ * Skip processing these messages if the passthrough mode
+ * is on.
+ */
+ if (adap->passthrough)
+ goto skip_processing;
+ /* Ignore if addressing is wrong */
+ if (is_broadcast || from_unregistered)
+ return 0;
+ break;
+
+ case CEC_MSG_USER_CONTROL_PRESSED:
+ case CEC_MSG_USER_CONTROL_RELEASED:
+ /* Wrong addressing mode: don't process */
+ if (is_broadcast || from_unregistered)
+ goto skip_processing;
+ break;
+
+ case CEC_MSG_REPORT_PHYSICAL_ADDR:
+ /*
+ * This message is always processed, regardless of the
+ * passthrough setting.
+ *
+ * Exception: don't process if wrong addressing mode.
+ */
+ if (!is_broadcast)
+ goto skip_processing;
+ break;
+
+ default:
+ break;
+ }
+
+ cec_msg_set_reply_to(&tx_cec_msg, msg);
+
+ switch (msg->msg[1]) {
+ /* The following messages are processed but still passed through */
+ case CEC_MSG_REPORT_PHYSICAL_ADDR: {
+ u16 pa = (msg->msg[2] << 8) | msg->msg[3];
+
+ if (!from_unregistered)
+ adap->phys_addrs[init_laddr] = pa;
+ dprintk(1, "Reported physical address %x.%x.%x.%x for logical address %d\n",
+ cec_phys_addr_exp(pa), init_laddr);
+ break;
+ }
+
+ case CEC_MSG_USER_CONTROL_PRESSED:
+ if (!(adap->capabilities & CEC_CAP_RC) ||
+ !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
+ break;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ switch (msg->msg[2]) {
+ /*
+ * Play function, this message can have variable length
+ * depending on the specific play function that is used.
+ */
+ case 0x60:
+ if (msg->len == 2)
+ rc_keydown(adap->rc, RC_TYPE_CEC,
+ msg->msg[2], 0);
+ else
+ rc_keydown(adap->rc, RC_TYPE_CEC,
+ msg->msg[2] << 8 | msg->msg[3], 0);
+ break;
+ /*
+ * Other function messages that are not handled here.
+ * Currently the RC framework does not allow supplying an
+ * additional parameter with a keypress. These "keys" contain
+ * other information such as a channel number, an input number,
+ * etc.
+ * For the time being these messages are not processed by the
+ * framework and are simply forwarded to user space.
+ */
+ case 0x56: case 0x57:
+ case 0x67: case 0x68: case 0x69: case 0x6a:
+ break;
+ default:
+ rc_keydown(adap->rc, RC_TYPE_CEC, msg->msg[2], 0);
+ break;
+ }
+#endif
+ break;
+
+ case CEC_MSG_USER_CONTROL_RELEASED:
+ if (!(adap->capabilities & CEC_CAP_RC) ||
+ !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
+ break;
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ rc_keyup(adap->rc);
+#endif
+ break;
+
+ /*
+ * The remaining messages are only processed if the passthrough mode
+ * is off.
+ */
+ case CEC_MSG_GET_CEC_VERSION:
+ cec_msg_cec_version(&tx_cec_msg, adap->log_addrs.cec_version);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
+
+ case CEC_MSG_GIVE_PHYSICAL_ADDR:
+ /* Do nothing for CEC switches using addr 15 */
+ if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH && dest_laddr == 15)
+ return 0;
+ cec_msg_report_physical_addr(&tx_cec_msg, adap->phys_addr, devtype);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
+
+ case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
+ if (adap->log_addrs.vendor_id == CEC_VENDOR_ID_NONE)
+ return cec_feature_abort(adap, msg);
+ cec_msg_device_vendor_id(&tx_cec_msg, adap->log_addrs.vendor_id);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
+
+ case CEC_MSG_ABORT:
+ /* Do nothing for CEC switches */
+ if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH)
+ return 0;
+ return cec_feature_refused(adap, msg);
+
+ case CEC_MSG_GIVE_OSD_NAME: {
+ if (adap->log_addrs.osd_name[0] == 0)
+ return cec_feature_abort(adap, msg);
+ cec_msg_set_osd_name(&tx_cec_msg, adap->log_addrs.osd_name);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
+ }
+
+ case CEC_MSG_GIVE_FEATURES:
+ if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+ return cec_feature_abort(adap, msg);
+ cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
+
+ default:
+ /*
+ * Unprocessed messages are aborted if userspace isn't doing
+ * any processing either.
+ */
+ if (!is_broadcast && !is_reply && !adap->follower_cnt &&
+ !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
+ return cec_feature_abort(adap, msg);
+ break;
+ }
+
+skip_processing:
+ /* If this was a reply, then we're done, unless otherwise specified */
+ if (is_reply && !(msg->flags & CEC_MSG_FL_REPLY_TO_FOLLOWERS))
+ return 0;
+
+ /*
+ * Send to the exclusive follower if there is one, otherwise send
+ * to all followers.
+ */
+ if (adap->cec_follower)
+ cec_queue_msg_fh(adap->cec_follower, msg);
+ else
+ cec_queue_msg_followers(adap, msg);
+ return 0;
+}
+
+/*
+ * Helper functions to keep track of the 'monitor all' use count.
+ *
+ * These functions are called with adap->lock held.
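+ *
+ * cec_s_mode() calls these when a file handle enters or leaves
+ * CEC_MODE_MONITOR_ALL: the first user enables the adap_monitor_all_enable
+ * driver op and the last user disables it again.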
+ */
+int cec_monitor_all_cnt_inc(struct cec_adapter *adap)
+{
+ int ret = 0;
+
+ if (adap->monitor_all_cnt == 0)
+ ret = call_op(adap, adap_monitor_all_enable, 1);
+ if (ret == 0)
+ adap->monitor_all_cnt++;
+ return ret;
+}
+
+void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
+{
+ adap->monitor_all_cnt--;
+ if (adap->monitor_all_cnt == 0)
+ WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
+}
+
+#ifdef CONFIG_MEDIA_CEC_DEBUG
+/*
+ * Log the current state of the CEC adapter.
+ * Very useful for debugging.
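+ * The output is exposed through debugfs (typically under
+ * /sys/kernel/debug/cec/cecN/status, see cec_register_adapter()).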
+ */
+int cec_adap_status(struct seq_file *file, void *priv)
+{
+ struct cec_adapter *adap = dev_get_drvdata(file->private);
+ struct cec_data *data;
+
+ mutex_lock(&adap->lock);
+ seq_printf(file, "configured: %d\n", adap->is_configured);
+ seq_printf(file, "configuring: %d\n", adap->is_configuring);
+ seq_printf(file, "phys_addr: %x.%x.%x.%x\n",
+ cec_phys_addr_exp(adap->phys_addr));
+ seq_printf(file, "number of LAs: %d\n", adap->log_addrs.num_log_addrs);
+ seq_printf(file, "LA mask: 0x%04x\n", adap->log_addrs.log_addr_mask);
+ if (adap->cec_follower)
+ seq_printf(file, "has CEC follower%s\n",
+ adap->passthrough ? " (in passthrough mode)" : "");
+ if (adap->cec_initiator)
+ seq_puts(file, "has CEC initiator\n");
+ if (adap->monitor_all_cnt)
+ seq_printf(file, "file handles in Monitor All mode: %u\n",
+ adap->monitor_all_cnt);
+ data = adap->transmitting;
+ if (data)
+ seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
+ data->msg.len, data->msg.msg, data->msg.reply,
+ data->msg.timeout);
+ seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz);
+ list_for_each_entry(data, &adap->transmit_queue, list) {
+ seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n",
+ data->msg.len, data->msg.msg, data->msg.reply,
+ data->msg.timeout);
+ }
+ list_for_each_entry(data, &adap->wait_queue, list) {
+ seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n",
+ data->msg.len, data->msg.msg, data->msg.reply,
+ data->msg.timeout);
+ }
+
+ call_void_op(adap, adap_status, file);
+ mutex_unlock(&adap->lock);
+ return 0;
+}
+#endif
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
new file mode 100644
index 000000000000..8950b6c9d6a9
--- /dev/null
+++ b/drivers/media/cec/cec-api.c
@@ -0,0 +1,588 @@
+/*
+ * cec-api.c - HDMI Consumer Electronics Control framework - API
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+
+#include "cec-priv.h"
+
+static inline struct cec_devnode *cec_devnode_data(struct file *filp)
+{
+ struct cec_fh *fh = filp->private_data;
+
+ return &fh->adap->devnode;
+}
+
+/* CEC file operations */
+
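+/*
+ * poll() semantics: POLLOUT/POLLWRNORM is set when the adapter is configured
+ * and the transmit queue still has room, POLLIN/POLLRDNORM when received
+ * messages are queued for this filehandle (CEC_RECEIVE) and POLLPRI when
+ * events are pending (CEC_DQEVENT).
+ */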
+static unsigned int cec_poll(struct file *filp,
+ struct poll_table_struct *poll)
+{
+ struct cec_devnode *devnode = cec_devnode_data(filp);
+ struct cec_fh *fh = filp->private_data;
+ struct cec_adapter *adap = fh->adap;
+ unsigned int res = 0;
+
+ if (!devnode->registered)
+ return POLLERR | POLLHUP;
+ mutex_lock(&adap->lock);
+ if (adap->is_configured &&
+ adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
+ res |= POLLOUT | POLLWRNORM;
+ if (fh->queued_msgs)
+ res |= POLLIN | POLLRDNORM;
+ if (fh->pending_events)
+ res |= POLLPRI;
+ poll_wait(filp, &fh->wait, poll);
+ mutex_unlock(&adap->lock);
+ return res;
+}
+
+static bool cec_is_busy(const struct cec_adapter *adap,
+ const struct cec_fh *fh)
+{
+ bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
+ bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
+
+ /*
+ * Exclusive initiators and followers can always access the CEC adapter
+ */
+ if (valid_initiator || valid_follower)
+ return false;
+ /*
+ * All others can only access the CEC adapter if there is no
+ * exclusive initiator and they are in INITIATOR mode.
+ */
+ return adap->cec_initiator ||
+ fh->mode_initiator == CEC_MODE_NO_INITIATOR;
+}
+
+static long cec_adap_g_caps(struct cec_adapter *adap,
+ struct cec_caps __user *parg)
+{
+ struct cec_caps caps = {};
+
+ strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
+ sizeof(caps.driver));
+ strlcpy(caps.name, adap->name, sizeof(caps.name));
+ caps.available_log_addrs = adap->available_log_addrs;
+ caps.capabilities = adap->capabilities;
+ caps.version = LINUX_VERSION_CODE;
+ if (copy_to_user(parg, &caps, sizeof(caps)))
+ return -EFAULT;
+ return 0;
+}
+
+static long cec_adap_g_phys_addr(struct cec_adapter *adap,
+ __u16 __user *parg)
+{
+ u16 phys_addr;
+
+ mutex_lock(&adap->lock);
+ phys_addr = adap->phys_addr;
+ mutex_unlock(&adap->lock);
+ if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
+ return -EFAULT;
+ return 0;
+}
+
+static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
+ bool block, __u16 __user *parg)
+{
+ u16 phys_addr;
+ long err;
+
+ if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
+ return -ENOTTY;
+ if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
+ return -EFAULT;
+
+ err = cec_phys_addr_validate(phys_addr, NULL, NULL);
+ if (err)
+ return err;
+ mutex_lock(&adap->lock);
+ if (cec_is_busy(adap, fh))
+ err = -EBUSY;
+ else
+ __cec_s_phys_addr(adap, phys_addr, block);
+ mutex_unlock(&adap->lock);
+ return err;
+}
+
+static long cec_adap_g_log_addrs(struct cec_adapter *adap,
+ struct cec_log_addrs __user *parg)
+{
+ struct cec_log_addrs log_addrs;
+
+ mutex_lock(&adap->lock);
+ log_addrs = adap->log_addrs;
+ if (!adap->is_configured)
+ memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
+ sizeof(log_addrs.log_addr));
+ mutex_unlock(&adap->lock);
+
+ if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
+ return -EFAULT;
+ return 0;
+}
+
+static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
+ bool block, struct cec_log_addrs __user *parg)
+{
+ struct cec_log_addrs log_addrs;
+ long err = -EBUSY;
+
+ if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
+ return -ENOTTY;
+ if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
+ return -EFAULT;
+ log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
+ CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
+ CEC_LOG_ADDRS_FL_CDC_ONLY;
+ mutex_lock(&adap->lock);
+ if (!adap->is_configuring &&
+ (!log_addrs.num_log_addrs || !adap->is_configured) &&
+ !cec_is_busy(adap, fh)) {
+ err = __cec_s_log_addrs(adap, &log_addrs, block);
+ if (!err)
+ log_addrs = adap->log_addrs;
+ }
+ mutex_unlock(&adap->lock);
+ if (err)
+ return err;
+ if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
+ return -EFAULT;
+ return 0;
+}
+
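+/*
+ * Handler for the CEC_TRANSMIT ioctl: check the adapter capabilities and the
+ * CDC-only restriction, then hand the message to cec_transmit_msg_fh().
+ * On success the updated message is copied back to user space.
+ */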
+static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
+ bool block, struct cec_msg __user *parg)
+{
+ struct cec_msg msg = {};
+ long err = 0;
+
+ if (!(adap->capabilities & CEC_CAP_TRANSMIT))
+ return -ENOTTY;
+ if (copy_from_user(&msg, parg, sizeof(msg)))
+ return -EFAULT;
+
+ /* A CDC-Only device can only send CDC messages */
+ if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
+ (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
+ return -EINVAL;
+
+ mutex_lock(&adap->lock);
+ if (!adap->is_configured)
+ err = -ENONET;
+ else if (cec_is_busy(adap, fh))
+ err = -EBUSY;
+ else
+ err = cec_transmit_msg_fh(adap, &msg, fh, block);
+ mutex_unlock(&adap->lock);
+ if (err)
+ return err;
+ if (copy_to_user(parg, &msg, sizeof(msg)))
+ return -EFAULT;
+ return 0;
+}
+
+/* Called by CEC_RECEIVE: wait for a message to arrive */
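+/*
+ * A non-zero msg->timeout bounds the wait (-ETIMEDOUT on expiry), a zero
+ * timeout waits indefinitely, and non-blocking filehandles get -EAGAIN
+ * immediately when nothing is queued.
+ */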
+static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
+{
+ u32 timeout = msg->timeout;
+ int res;
+
+ do {
+ mutex_lock(&fh->lock);
+ /* Are there received messages queued up? */
+ if (fh->queued_msgs) {
+ /* Yes, return the first one */
+ struct cec_msg_entry *entry =
+ list_first_entry(&fh->msgs,
+ struct cec_msg_entry, list);
+
+ list_del(&entry->list);
+ *msg = entry->msg;
+ kfree(entry);
+ fh->queued_msgs--;
+ mutex_unlock(&fh->lock);
+ /* restore original timeout value */
+ msg->timeout = timeout;
+ return 0;
+ }
+
+ /* No, return EAGAIN in non-blocking mode or wait */
+ mutex_unlock(&fh->lock);
+
+ /* Return when in non-blocking mode */
+ if (!block)
+ return -EAGAIN;
+
+ if (msg->timeout) {
+ /* The user specified a timeout */
+ res = wait_event_interruptible_timeout(fh->wait,
+ fh->queued_msgs,
+ msecs_to_jiffies(msg->timeout));
+ if (res == 0)
+ res = -ETIMEDOUT;
+ else if (res > 0)
+ res = 0;
+ } else {
+ /* Wait indefinitely */
+ res = wait_event_interruptible(fh->wait,
+ fh->queued_msgs);
+ }
+ /* Exit on error, otherwise loop to get the new message */
+ } while (!res);
+ return res;
+}
+
+static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
+ bool block, struct cec_msg __user *parg)
+{
+ struct cec_msg msg = {};
+ long err = 0;
+
+ if (copy_from_user(&msg, parg, sizeof(msg)))
+ return -EFAULT;
+ mutex_lock(&adap->lock);
+ if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
+ err = -ENONET;
+ mutex_unlock(&adap->lock);
+ if (err)
+ return err;
+
+ err = cec_receive_msg(fh, &msg, block);
+ if (err)
+ return err;
+ msg.flags = 0;
+ if (copy_to_user(parg, &msg, sizeof(msg)))
+ return -EFAULT;
+ return 0;
+}
+
+static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
+ bool block, struct cec_event __user *parg)
+{
+ struct cec_event *ev = NULL;
+ u64 ts = ~0ULL;
+ unsigned int i;
+ long err = 0;
+
+ mutex_lock(&fh->lock);
+ while (!fh->pending_events && block) {
+ mutex_unlock(&fh->lock);
+ err = wait_event_interruptible(fh->wait, fh->pending_events);
+ if (err)
+ return err;
+ mutex_lock(&fh->lock);
+ }
+
+ /* Find the oldest event */
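+ /*
+ * Event numbers start at 1 (CEC_EVENT_STATE_CHANGE), so the pending
+ * bit for fh->events[i] is 1 << (i + 1).
+ */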
+ for (i = 0; i < CEC_NUM_EVENTS; i++) {
+ if (fh->pending_events & (1 << (i + 1)) &&
+ fh->events[i].ts <= ts) {
+ ev = &fh->events[i];
+ ts = ev->ts;
+ }
+ }
+ if (!ev) {
+ err = -EAGAIN;
+ goto unlock;
+ }
+
+ if (copy_to_user(parg, ev, sizeof(*ev))) {
+ err = -EFAULT;
+ goto unlock;
+ }
+
+ fh->pending_events &= ~(1 << ev->event);
+
+unlock:
+ mutex_unlock(&fh->lock);
+ return err;
+}
+
+static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
+ u32 __user *parg)
+{
+ u32 mode = fh->mode_initiator | fh->mode_follower;
+
+ if (copy_to_user(parg, &mode, sizeof(mode)))
+ return -EFAULT;
+ return 0;
+}
+
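+/*
+ * Handler for CEC_S_MODE: the mode word combines an initiator mode
+ * (CEC_MODE_INITIATOR_MSK) with a follower mode (CEC_MODE_FOLLOWER_MSK),
+ * e.g. CEC_MODE_INITIATOR | CEC_MODE_FOLLOWER for a regular userspace
+ * follower.
+ */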
+static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
+ u32 __user *parg)
+{
+ u32 mode;
+ u8 mode_initiator;
+ u8 mode_follower;
+ long err = 0;
+
+ if (copy_from_user(&mode, parg, sizeof(mode)))
+ return -EFAULT;
+ if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK))
+ return -EINVAL;
+
+ mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
+ mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
+
+ if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
+ mode_follower > CEC_MODE_MONITOR_ALL)
+ return -EINVAL;
+
+ if (mode_follower == CEC_MODE_MONITOR_ALL &&
+ !(adap->capabilities & CEC_CAP_MONITOR_ALL))
+ return -EINVAL;
+
+ /* Follower modes should always be able to send CEC messages */
+ if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
+ !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
+ mode_follower >= CEC_MODE_FOLLOWER &&
+ mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU)
+ return -EINVAL;
+
+ /* Monitor modes require CEC_MODE_NO_INITIATOR */
+ if (mode_initiator && mode_follower >= CEC_MODE_MONITOR)
+ return -EINVAL;
+
+ /* Monitor modes require CAP_NET_ADMIN */
+ if (mode_follower >= CEC_MODE_MONITOR && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ mutex_lock(&adap->lock);
+ /*
+ * You can't become exclusive follower if someone else already
+ * has that job.
+ */
+ if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
+ mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
+ adap->cec_follower && adap->cec_follower != fh)
+ err = -EBUSY;
+ /*
+ * You can't become exclusive initiator if someone else already
+ * has that job.
+ */
+ if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
+ adap->cec_initiator && adap->cec_initiator != fh)
+ err = -EBUSY;
+
+ if (!err) {
+ bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
+ bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;
+
+ if (old_mon_all != new_mon_all) {
+ if (new_mon_all)
+ err = cec_monitor_all_cnt_inc(adap);
+ else
+ cec_monitor_all_cnt_dec(adap);
+ }
+ }
+
+ if (err) {
+ mutex_unlock(&adap->lock);
+ return err;
+ }
+
+ if (fh->mode_follower == CEC_MODE_FOLLOWER)
+ adap->follower_cnt--;
+ if (mode_follower == CEC_MODE_FOLLOWER)
+ adap->follower_cnt++;
+ if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
+ mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
+ adap->passthrough =
+ mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
+ adap->cec_follower = fh;
+ } else if (adap->cec_follower == fh) {
+ adap->passthrough = false;
+ adap->cec_follower = NULL;
+ }
+ if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
+ adap->cec_initiator = fh;
+ else if (adap->cec_initiator == fh)
+ adap->cec_initiator = NULL;
+ fh->mode_initiator = mode_initiator;
+ fh->mode_follower = mode_follower;
+ mutex_unlock(&adap->lock);
+ return 0;
+}
+
+static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct cec_devnode *devnode = cec_devnode_data(filp);
+ struct cec_fh *fh = filp->private_data;
+ struct cec_adapter *adap = fh->adap;
+ bool block = !(filp->f_flags & O_NONBLOCK);
+ void __user *parg = (void __user *)arg;
+
+ if (!devnode->registered)
+ return -ENODEV;
+
+ switch (cmd) {
+ case CEC_ADAP_G_CAPS:
+ return cec_adap_g_caps(adap, parg);
+
+ case CEC_ADAP_G_PHYS_ADDR:
+ return cec_adap_g_phys_addr(adap, parg);
+
+ case CEC_ADAP_S_PHYS_ADDR:
+ return cec_adap_s_phys_addr(adap, fh, block, parg);
+
+ case CEC_ADAP_G_LOG_ADDRS:
+ return cec_adap_g_log_addrs(adap, parg);
+
+ case CEC_ADAP_S_LOG_ADDRS:
+ return cec_adap_s_log_addrs(adap, fh, block, parg);
+
+ case CEC_TRANSMIT:
+ return cec_transmit(adap, fh, block, parg);
+
+ case CEC_RECEIVE:
+ return cec_receive(adap, fh, block, parg);
+
+ case CEC_DQEVENT:
+ return cec_dqevent(adap, fh, block, parg);
+
+ case CEC_G_MODE:
+ return cec_g_mode(adap, fh, parg);
+
+ case CEC_S_MODE:
+ return cec_s_mode(adap, fh, parg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int cec_open(struct inode *inode, struct file *filp)
+{
+ struct cec_devnode *devnode =
+ container_of(inode->i_cdev, struct cec_devnode, cdev);
+ struct cec_adapter *adap = to_cec_adapter(devnode);
+ struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ /*
+ * Initial events that are automatically sent when the cec device is
+ * opened.
+ */
+ struct cec_event ev_state = {
+ .event = CEC_EVENT_STATE_CHANGE,
+ .flags = CEC_EVENT_FL_INITIAL_STATE,
+ };
+ int err;
+
+ if (!fh)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fh->msgs);
+ INIT_LIST_HEAD(&fh->xfer_list);
+ mutex_init(&fh->lock);
+ init_waitqueue_head(&fh->wait);
+
+ fh->mode_initiator = CEC_MODE_INITIATOR;
+ fh->adap = adap;
+
+ err = cec_get_device(devnode);
+ if (err) {
+ kfree(fh);
+ return err;
+ }
+
+ filp->private_data = fh;
+
+ mutex_lock(&devnode->lock);
+ /* Queue up initial state events */
+ ev_state.state_change.phys_addr = adap->phys_addr;
+ ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ cec_queue_event_fh(fh, &ev_state, 0);
+
+ list_add(&fh->list, &devnode->fhs);
+ mutex_unlock(&devnode->lock);
+
+ return 0;
+}
+
+/* Override for the release function */
+static int cec_release(struct inode *inode, struct file *filp)
+{
+ struct cec_devnode *devnode = cec_devnode_data(filp);
+ struct cec_adapter *adap = to_cec_adapter(devnode);
+ struct cec_fh *fh = filp->private_data;
+
+ mutex_lock(&adap->lock);
+ if (adap->cec_initiator == fh)
+ adap->cec_initiator = NULL;
+ if (adap->cec_follower == fh) {
+ adap->cec_follower = NULL;
+ adap->passthrough = false;
+ }
+ if (fh->mode_follower == CEC_MODE_FOLLOWER)
+ adap->follower_cnt--;
+ if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
+ cec_monitor_all_cnt_dec(adap);
+ mutex_unlock(&adap->lock);
+
+ mutex_lock(&devnode->lock);
+ list_del(&fh->list);
+ mutex_unlock(&devnode->lock);
+
+ /* Unhook pending transmits from this filehandle. */
+ mutex_lock(&adap->lock);
+ while (!list_empty(&fh->xfer_list)) {
+ struct cec_data *data =
+ list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);
+
+ data->blocking = false;
+ data->fh = NULL;
+ list_del(&data->xfer_list);
+ }
+ mutex_unlock(&adap->lock);
+ while (!list_empty(&fh->msgs)) {
+ struct cec_msg_entry *entry =
+ list_first_entry(&fh->msgs, struct cec_msg_entry, list);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ kfree(fh);
+
+ cec_put_device(devnode);
+ filp->private_data = NULL;
+ return 0;
+}
+
+const struct file_operations cec_devnode_fops = {
+ .owner = THIS_MODULE,
+ .open = cec_open,
+ .unlocked_ioctl = cec_ioctl,
+ .release = cec_release,
+ .poll = cec_poll,
+ .llseek = no_llseek,
+};
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
new file mode 100644
index 000000000000..aca3ab83a8a1
--- /dev/null
+++ b/drivers/media/cec/cec-core.c
@@ -0,0 +1,413 @@
+/*
+ * cec-core.c - HDMI Consumer Electronics Control framework - Core
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "cec-priv.h"
+
+#define CEC_NUM_DEVICES 256
+#define CEC_NAME "cec"
+
+int cec_debug;
+module_param_named(debug, cec_debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+static dev_t cec_dev_t;
+
+/* Active devices */
+static DEFINE_MUTEX(cec_devnode_lock);
+static DECLARE_BITMAP(cec_devnode_nums, CEC_NUM_DEVICES);
+
+static struct dentry *top_cec_dir;
+
+/* dev to cec_devnode */
+#define to_cec_devnode(cd) container_of(cd, struct cec_devnode, dev)
+
+int cec_get_device(struct cec_devnode *devnode)
+{
+ /*
+ * Check if the cec device is available. This needs to be done with
+ * the devnode->lock held to prevent an open/unregister race:
+ * without the lock, the device could be unregistered and freed between
+ * the devnode->registered check and get_device() calls, leading to
+ * a crash.
+ */
+ mutex_lock(&devnode->lock);
+ /*
+ * return ENXIO if the cec device has been removed
+ * already or if it is not registered anymore.
+ */
+ if (!devnode->registered) {
+ mutex_unlock(&devnode->lock);
+ return -ENXIO;
+ }
+ /* and increase the device refcount */
+ get_device(&devnode->dev);
+ mutex_unlock(&devnode->lock);
+ return 0;
+}
+
+void cec_put_device(struct cec_devnode *devnode)
+{
+ put_device(&devnode->dev);
+}
+
+/* Called when the last user of the cec device exits. */
+static void cec_devnode_release(struct device *cd)
+{
+ struct cec_devnode *devnode = to_cec_devnode(cd);
+
+ mutex_lock(&cec_devnode_lock);
+ /* Mark device node number as free */
+ clear_bit(devnode->minor, cec_devnode_nums);
+ mutex_unlock(&cec_devnode_lock);
+
+ cec_delete_adapter(to_cec_adapter(devnode));
+}
+
+static struct bus_type cec_bus_type = {
+ .name = CEC_NAME,
+};
+
+/*
+ * Register a cec device node
+ *
+ * The registration code assigns minor numbers and registers the new device node
+ * with the kernel. An error is returned if no free minor number can be found,
+ * or if the registration of the device node fails.
+ *
+ * Zero is returned on success.
+ *
+ * Note that if the cec_devnode_register call fails, the release() callback of
+ * the cec_devnode structure is *not* called, so the caller is responsible for
+ * freeing any data.
+ */
+static int __must_check cec_devnode_register(struct cec_devnode *devnode,
+ struct module *owner)
+{
+ int minor;
+ int ret;
+
+ /* Initialization */
+ INIT_LIST_HEAD(&devnode->fhs);
+ mutex_init(&devnode->lock);
+
+ /* Part 1: Find a free minor number */
+ mutex_lock(&cec_devnode_lock);
+ minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
+ if (minor == CEC_NUM_DEVICES) {
+ mutex_unlock(&cec_devnode_lock);
+ pr_err("could not get a free minor\n");
+ return -ENFILE;
+ }
+
+ set_bit(minor, cec_devnode_nums);
+ mutex_unlock(&cec_devnode_lock);
+
+ devnode->minor = minor;
+ devnode->dev.bus = &cec_bus_type;
+ devnode->dev.devt = MKDEV(MAJOR(cec_dev_t), minor);
+ devnode->dev.release = cec_devnode_release;
+ dev_set_name(&devnode->dev, "cec%d", devnode->minor);
+ device_initialize(&devnode->dev);
+
+ /* Part 2: Initialize and register the character device */
+ cdev_init(&devnode->cdev, &cec_devnode_fops);
+ devnode->cdev.kobj.parent = &devnode->dev.kobj;
+ devnode->cdev.owner = owner;
+
+ ret = cdev_add(&devnode->cdev, devnode->dev.devt, 1);
+ if (ret < 0) {
+ pr_err("%s: cdev_add failed\n", __func__);
+ goto clr_bit;
+ }
+
+ ret = device_add(&devnode->dev);
+ if (ret)
+ goto cdev_del;
+
+ devnode->registered = true;
+ return 0;
+
+cdev_del:
+ cdev_del(&devnode->cdev);
+clr_bit:
+ mutex_lock(&cec_devnode_lock);
+ clear_bit(devnode->minor, cec_devnode_nums);
+ mutex_unlock(&cec_devnode_lock);
+ return ret;
+}
+
+/*
+ * Unregister a cec device node
+ *
+ * This unregisters the passed device. Future open calls will be met with
+ * errors.
+ *
+ * This function can safely be called if the device node has never been
+ * registered or has already been unregistered.
+ */
+static void cec_devnode_unregister(struct cec_devnode *devnode)
+{
+ struct cec_fh *fh;
+
+ mutex_lock(&devnode->lock);
+
+ /* Check if devnode was never registered or already unregistered */
+ if (!devnode->registered || devnode->unregistered) {
+ mutex_unlock(&devnode->lock);
+ return;
+ }
+
+ list_for_each_entry(fh, &devnode->fhs, list)
+ wake_up_interruptible(&fh->wait);
+
+ devnode->registered = false;
+ devnode->unregistered = true;
+ mutex_unlock(&devnode->lock);
+
+ device_del(&devnode->dev);
+ cdev_del(&devnode->cdev);
+ put_device(&devnode->dev);
+}
+
+struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
+ void *priv, const char *name, u32 caps,
+ u8 available_las)
+{
+ struct cec_adapter *adap;
+ int res;
+
+ if (WARN_ON(!caps))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(!ops))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(!available_las || available_las > CEC_MAX_LOG_ADDRS))
+ return ERR_PTR(-EINVAL);
+ adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+ if (!adap)
+ return ERR_PTR(-ENOMEM);
+ strlcpy(adap->name, name, sizeof(adap->name));
+ adap->phys_addr = CEC_PHYS_ADDR_INVALID;
+ adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
+ adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
+ adap->capabilities = caps;
+ adap->available_log_addrs = available_las;
+ adap->sequence = 0;
+ adap->ops = ops;
+ adap->priv = priv;
+ memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
+ mutex_init(&adap->lock);
+ INIT_LIST_HEAD(&adap->transmit_queue);
+ INIT_LIST_HEAD(&adap->wait_queue);
+ init_waitqueue_head(&adap->kthread_waitq);
+
+ adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name);
+ if (IS_ERR(adap->kthread)) {
+ pr_err("cec-%s: kernel_thread() failed\n", name);
+ res = PTR_ERR(adap->kthread);
+ kfree(adap);
+ return ERR_PTR(res);
+ }
+
+ if (!(caps & CEC_CAP_RC))
+ return adap;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ /* Prepare the RC input device */
+ adap->rc = rc_allocate_device();
+ if (!adap->rc) {
+ pr_err("cec-%s: failed to allocate memory for rc_dev\n",
+ name);
+ kthread_stop(adap->kthread);
+ kfree(adap);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ snprintf(adap->input_name, sizeof(adap->input_name),
+ "RC for %s", name);
+ snprintf(adap->input_phys, sizeof(adap->input_phys),
+ "%s/input0", name);
+
+ adap->rc->input_name = adap->input_name;
+ adap->rc->input_phys = adap->input_phys;
+ adap->rc->input_id.bustype = BUS_CEC;
+ adap->rc->input_id.vendor = 0;
+ adap->rc->input_id.product = 0;
+ adap->rc->input_id.version = 1;
+ adap->rc->driver_type = RC_DRIVER_SCANCODE;
+ adap->rc->driver_name = CEC_NAME;
+ adap->rc->allowed_protocols = RC_BIT_CEC;
+ adap->rc->priv = adap;
+ adap->rc->map_name = RC_MAP_CEC;
+ adap->rc->timeout = MS_TO_NS(100);
+#else
+ adap->capabilities &= ~CEC_CAP_RC;
+#endif
+ return adap;
+}
+EXPORT_SYMBOL_GPL(cec_allocate_adapter);
+
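+/*
+ * Minimal sketch of the expected driver flow (the ops table, name and
+ * capability set below are illustrative only):
+ *
+ *	adap = cec_allocate_adapter(&my_cec_adap_ops, priv, "my-cec",
+ *				    CEC_CAP_PHYS_ADDR | CEC_CAP_LOG_ADDRS |
+ *				    CEC_CAP_TRANSMIT, 1);
+ *	if (IS_ERR(adap))
+ *		return PTR_ERR(adap);
+ *	err = cec_register_adapter(adap, &pdev->dev);
+ *	if (err)
+ *		cec_delete_adapter(adap);
+ */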
+int cec_register_adapter(struct cec_adapter *adap,
+ struct device *parent)
+{
+ int res;
+
+ if (IS_ERR_OR_NULL(adap))
+ return 0;
+
+ if (WARN_ON(!parent))
+ return -EINVAL;
+
+ adap->owner = parent->driver->owner;
+ adap->devnode.dev.parent = parent;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ if (adap->capabilities & CEC_CAP_RC) {
+ adap->rc->dev.parent = parent;
+ res = rc_register_device(adap->rc);
+
+ if (res) {
+ pr_err("cec-%s: failed to prepare input device\n",
+ adap->name);
+ rc_free_device(adap->rc);
+ adap->rc = NULL;
+ return res;
+ }
+ }
+#endif
+
+ res = cec_devnode_register(&adap->devnode, adap->owner);
+ if (res) {
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ /* Note: rc_unregister also calls rc_free */
+ rc_unregister_device(adap->rc);
+ adap->rc = NULL;
+#endif
+ return res;
+ }
+
+ dev_set_drvdata(&adap->devnode.dev, adap);
+#ifdef CONFIG_MEDIA_CEC_DEBUG
+ if (!top_cec_dir)
+ return 0;
+
+ adap->cec_dir = debugfs_create_dir(dev_name(&adap->devnode.dev), top_cec_dir);
+ if (IS_ERR_OR_NULL(adap->cec_dir)) {
+ pr_warn("cec-%s: Failed to create debugfs dir\n", adap->name);
+ return 0;
+ }
+ adap->status_file = debugfs_create_devm_seqfile(&adap->devnode.dev,
+ "status", adap->cec_dir, cec_adap_status);
+ if (IS_ERR_OR_NULL(adap->status_file)) {
+ pr_warn("cec-%s: Failed to create status file\n", adap->name);
+ debugfs_remove_recursive(adap->cec_dir);
+ adap->cec_dir = NULL;
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cec_register_adapter);
+
+void cec_unregister_adapter(struct cec_adapter *adap)
+{
+ if (IS_ERR_OR_NULL(adap))
+ return;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ /* Note: rc_unregister also calls rc_free */
+ rc_unregister_device(adap->rc);
+ adap->rc = NULL;
+#endif
+ debugfs_remove_recursive(adap->cec_dir);
+ cec_devnode_unregister(&adap->devnode);
+}
+EXPORT_SYMBOL_GPL(cec_unregister_adapter);
+
+void cec_delete_adapter(struct cec_adapter *adap)
+{
+ if (IS_ERR_OR_NULL(adap))
+ return;
+ mutex_lock(&adap->lock);
+ __cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
+ mutex_unlock(&adap->lock);
+ kthread_stop(adap->kthread);
+ if (adap->kthread_config)
+ kthread_stop(adap->kthread_config);
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ rc_free_device(adap->rc);
+#endif
+ kfree(adap);
+}
+EXPORT_SYMBOL_GPL(cec_delete_adapter);
+
+/*
+ * Initialise cec for linux
+ */
+static int __init cec_devnode_init(void)
+{
+ int ret;
+
+ pr_info("Linux cec interface: v0.10\n");
+ ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES,
+ CEC_NAME);
+ if (ret < 0) {
+ pr_warn("cec: unable to allocate major\n");
+ return ret;
+ }
+
+#ifdef CONFIG_MEDIA_CEC_DEBUG
+ top_cec_dir = debugfs_create_dir("cec", NULL);
+ if (IS_ERR_OR_NULL(top_cec_dir)) {
+ pr_warn("cec: Failed to create debugfs cec dir\n");
+ top_cec_dir = NULL;
+ }
+#endif
+
+ ret = bus_register(&cec_bus_type);
+ if (ret < 0) {
+ unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
+ pr_warn("cec: bus_register failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void __exit cec_devnode_exit(void)
+{
+ debugfs_remove_recursive(top_cec_dir);
+ bus_unregister(&cec_bus_type);
+ unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
+}
+
+subsys_initcall(cec_devnode_init);
+module_exit(cec_devnode_exit);
+
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_DESCRIPTION("Device node registration for cec drivers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/cec/cec-priv.h b/drivers/media/cec/cec-priv.h
new file mode 100644
index 000000000000..70767a7900f2
--- /dev/null
+++ b/drivers/media/cec/cec-priv.h
@@ -0,0 +1,56 @@
+/*
+ * cec-priv.h - HDMI Consumer Electronics Control internal header
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CEC_PRIV_H
+#define _CEC_PRIV_H
+
+#include <linux/cec-funcs.h>
+#include <media/cec.h>
+
+#define dprintk(lvl, fmt, arg...) \
+ do { \
+ if (lvl <= cec_debug) \
+ pr_info("cec-%s: " fmt, adap->name, ## arg); \
+ } while (0)
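+/*
+ * A dprintk(1, ...) message is emitted only when the cec-core 'debug' module
+ * parameter is >= 1 (the parameter is documented as 0-2); note that the
+ * macro relies on a local 'adap' pointer being in scope.
+ */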
+
+/* devnode to cec_adapter */
+#define to_cec_adapter(node) container_of(node, struct cec_adapter, devnode)
+
+/* cec-core.c */
+extern int cec_debug;
+int cec_get_device(struct cec_devnode *devnode);
+void cec_put_device(struct cec_devnode *devnode);
+
+/* cec-adap.c */
+int cec_monitor_all_cnt_inc(struct cec_adapter *adap);
+void cec_monitor_all_cnt_dec(struct cec_adapter *adap);
+int cec_adap_status(struct seq_file *file, void *priv);
+int cec_thread_func(void *_adap);
+void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block);
+int __cec_s_log_addrs(struct cec_adapter *adap,
+ struct cec_log_addrs *log_addrs, bool block);
+int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+ struct cec_fh *fh, bool block);
+void cec_queue_event_fh(struct cec_fh *fh,
+ const struct cec_event *new_ev, u64 ts);
+
+/* cec-api.c */
+extern const struct file_operations cec_devnode_fops;
+
+#endif
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 65eaf4066149..487009857a6f 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -352,6 +352,9 @@ int dvb_dmx_video_pattern_search(
buf_size)
break;
+ if (current_size >= DVB_DMX_MAX_PATTERN_LEN)
+ break;
+
if (dvb_dmx_patterns_match(
(patterns[j]->pattern + current_size),
buf, (patterns[j]->mask + current_size),
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 7fa84a019809..ade6ecaad80d 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -27,6 +27,17 @@ obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o
obj-$(CONFIG_VIDEO_ADV7393) += adv7393.o
obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
+ifeq ($(CONFIG_MSM_AIS),y)
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+else
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+endif
obj-$(CONFIG_VIDEO_ADV7481) += adv7481.o
obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
index e2523e06ab76..3fbce2948fa8 100644
--- a/drivers/media/platform/msm/Kconfig
+++ b/drivers/media/platform/msm/Kconfig
@@ -2,35 +2,35 @@
# MSM camera configuration
#
-comment "Qualcomm MSM Camera And Video"
+comment "QTI MSM Camera And Video & AIS"
menuconfig MSM_CAMERA
- bool "Qualcomm MSM camera and video capture support"
+ bool "QTI MSM camera and video capture support"
depends on ARCH_QCOM && VIDEO_V4L2 && I2C
---help---
Say Y here to enable selecting the video adapters for
- Qualcomm msm camera and video capture drivers. enabling this
+ QTI msm camera and video capture drivers. enabling this
adds support for the camera driver stack including sensor, isp
and postprocessing drivers for legacy chipsets.
config MSM_CAMERA_DEBUG
- bool "Qualcomm MSM camera debugging with printk"
+ bool "QTI MSM camera debugging with printk"
depends on MSM_CAMERA
default n
---help---
Enable printk() debug for msm camera
menuconfig MSMB_CAMERA
- bool "Qualcomm MSM camera and video capture 2.0 support"
+ bool "QTI MSM camera and video capture 2.0 support"
depends on ARCH_QCOM && VIDEO_V4L2 && I2C
---help---
Say Y here to enable selecting the video adapters for
- Qualcomm msm camera and video capture 2.0, enabling this
+ QTI msm camera and video capture 2.0, enabling this
adds support for the camera driver stack including sensor, isp
and postprocessing drivers.
config MSMB_CAMERA_DEBUG
- bool "Qualcomm MSM camera 2.0 debugging with printk"
+ bool "QTI MSM camera 2.0 debugging with printk"
depends on MSMB_CAMERA
---help---
Enable printk() debug for msm camera 2.0
@@ -41,5 +41,6 @@ endif # MSMB_CAMERA
source "drivers/media/platform/msm/vidc/Kconfig"
source "drivers/media/platform/msm/sde/Kconfig"
+source "drivers/media/platform/msm/ais/Kconfig"
source "drivers/media/platform/msm/dvb/Kconfig"
source "drivers/media/platform/msm/broadcast/Kconfig"
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
index a3f802d3ce59..3457fb2afaed 100644
--- a/drivers/media/platform/msm/Makefile
+++ b/drivers/media/platform/msm/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/
obj-y += sde/
obj-y += broadcast/
obj-$(CONFIG_DVB_MPQ) += dvb/
+obj-$(CONFIG_MSM_AIS) += ais/
diff --git a/drivers/media/platform/msm/ais/Kconfig b/drivers/media/platform/msm/ais/Kconfig
new file mode 100644
index 000000000000..c9b4252213a0
--- /dev/null
+++ b/drivers/media/platform/msm/ais/Kconfig
@@ -0,0 +1,85 @@
+menuconfig MSM_AIS
+ bool "QTI MSM Automotive Imaging Subsystem"
+ depends on ARCH_QCOM && VIDEO_V4L2 && I2C
+ ---help---
+ Say Y here to enable msm AIS
+
+config MSM_AIS_DEBUG
+ bool "QTI MSM AIS debugging with printk"
+ depends on MSM_AIS
+ default n
+ ---help---
+ Enable printk() debug for msm AIS.
+ Enabling ais debug will affect performance.
+ This feature is only applicable to
+ Automotive platforms.
+
+config MSM_AIS_CAMERA_SENSOR
+ bool "QTI MSM camera sensor support"
+ depends on MSM_AIS
+ select NEW_LEDS
+ select LEDS_CLASS
+ ---help---
+ This flag enables support for Camera Sensor.
+ The sensor driver is capable of providing real time
+ data for camera support. The driver support V4L2
+ subdev APIs.
+
+config MSM_AIS_CPP
+ bool "QTI MSM Camera Post Processing Engine support"
+ depends on MSM_AIS
+ ---help---
+ Enable support for Camera Post-processing Engine
+ The Post processing engine is capable of scaling
+ and cropping image. The driver support V4L2 subdev
+ APIs.
+
+config MSM_AIS_EEPROM
+ bool "QTI MSM Camera ROM Interface for Calibration support"
+ depends on MSM_AIS
+ ---help---
+ Enable support for ROM Interface for Calibration
+ Provides interface for reading the Calibration data
+ and also provides support for writing data in case of FLASH ROM.
+ Currently supports I2C, CCI and SPI protocol
+
+config MSM_AIS_JPEG
+ bool "QTI MSM Jpeg Encoder Engine support"
+ depends on MSM_AIS
+ ---help---
+ Enable support for Jpeg Encoder/Decoder
+ Engine for 8974.
+ This module serves as the common driver
+ for the JPEG 1.0 encoder and decoder.
+
+config MSM_AIS_FD
+ bool "QTI MSM FD face detection engine support"
+ depends on MSM_AIS
+ ---help---
+ Enables support for the MSM FD face detection engine.
+ MSM Face Detection library
+ enables the Face detection
+ hardware block.
+
+config MSM_AIS_JPEGDMA
+ bool "QTI MSM Jpeg dma"
+ depends on MSM_AIS
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ Enable support for Jpeg dma engine.
+ The jpeg DMA engine provides hardware-enabled
+ jpeg decode.
+ This feature is currently not supported on
+ Automotive platforms.
+
+config MSM_AIS_SEC_CCI_TA_NAME
+ string "Name of TA to handle Secure CCI transactions"
+ depends on MSM_AIS_CCI
+ default "seccamdemo64"
+
+config MSM_AIS_SEC_CCI_DEBUG
+ bool "QTI MSM Secure CCI Relay Debug"
+ depends on MSM_AIS_CCI
+ ---help---
+ Enables simulation of secure camera for Secure CCI Relay
+ debugging.
diff --git a/drivers/media/platform/msm/ais/Makefile b/drivers/media/platform/msm/ais/Makefile
new file mode 100644
index 000000000000..b09636a72413
--- /dev/null
+++ b/drivers/media/platform/msm/ais/Makefile
@@ -0,0 +1,24 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor
+ccflags-y += -Idrivers/media/platform/msm/ais/codecs
+ccflags-y += -Idrivers/media/platform/msm/ais/isps
+ccflags-y += -Idrivers/media/platform/msm/ais/pproc
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_vb2
+ccflags-y += -Idrivers/media/platform/msm/ais/camera
+ccflags-y += -Idrivers/media/platform/msm/ais/jpeg_10
+ccflags-y += -Idrivers/media/platform/msm/ais/jpeg_dma
+ccflags-y += -Idrivers/media/platform/msm/ais/fd
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+
+obj-$(CONFIG_MSM_AIS) += common/
+obj-$(CONFIG_MSM_AIS) += msm.o
+obj-$(CONFIG_MSM_AIS) += camera/
+obj-$(CONFIG_MSM_AIS) += msm_vb2/
+obj-$(CONFIG_MSM_AIS) += sensor/
+obj-$(CONFIG_MSM_AIS) += pproc/
+obj-$(CONFIG_MSM_AIS) += isp/
+obj-$(CONFIG_MSM_AIS) += ispif/
+obj-$(CONFIG_MSM_AIS_JPEG) += jpeg_10/
+obj-$(CONFIG_MSM_AIS_JPEGDMA) += jpeg_dma/
+obj-$(CONFIG_MSM_AIS) += msm_buf_mgr/
+obj-$(CONFIG_MSM_AIS_FD) += fd/
diff --git a/drivers/media/platform/msm/ais/camera/Makefile b/drivers/media/platform/msm/ais/camera/Makefile
new file mode 100644
index 000000000000..e465a034b9e5
--- /dev/null
+++ b/drivers/media/platform/msm/ais/camera/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_vb2
+obj-$(CONFIG_MSM_AIS) += camera.o
diff --git a/drivers/media/platform/msm/ais/camera/camera.c b/drivers/media/platform/msm/ais/camera/camera.c
new file mode 100644
index 000000000000..158b83c12d00
--- /dev/null
+++ b/drivers/media/platform/msm/ais/camera/camera.c
@@ -0,0 +1,956 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/videodev2.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "camera.h"
+#include "msm.h"
+#include "msm_vb2.h"
+
+#define fh_to_private(__fh) \
+ container_of(__fh, struct camera_v4l2_private, fh)
+
+struct camera_v4l2_private {
+ struct v4l2_fh fh;
+ unsigned int stream_id;
+ unsigned int is_vb2_valid; /* 0 if no vb2 buffers on stream, else 1 */
+ struct vb2_queue vb2_q;
+ bool stream_created;
+ struct mutex lock;
+};
+
+static void camera_pack_event(struct file *filep, int evt_id,
+ int command, int value, struct v4l2_event *event)
+{
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event->u.data[0];
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+
+ /* always MSM_CAMERA_V4L2_EVENT_TYPE */
+ event->type = MSM_CAMERA_V4L2_EVENT_TYPE;
+ event->id = evt_id;
+ event_data->command = command;
+ event_data->session_id = pvdev->vdev->num;
+ event_data->stream_id = sp->stream_id;
+ event_data->arg_value = value;
+}
+
+static int camera_check_event_status(struct v4l2_event *event)
+{
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event->u.data[0];
+
+ if (event_data->status > MSM_CAMERA_ERR_EVT_BASE) {
+ pr_err("%s : event_data status out of bounds\n",
+ __func__);
+ pr_err("%s : Line %d event_data->status 0X%x\n",
+ __func__, __LINE__, event_data->status);
+
+ switch (event_data->status) {
+ case MSM_CAMERA_ERR_CMD_FAIL:
+ case MSM_CAMERA_ERR_MAPPING:
+ return -EFAULT;
+ case MSM_CAMERA_ERR_DEVICE_BUSY:
+ return -EBUSY;
+ default:
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int camera_v4l2_querycap(struct file *filep, void *fh,
+ struct v4l2_capability *cap)
+{
+ int rc;
+ struct v4l2_event event;
+
+ if (msm_is_daemon_present() == false)
+ return 0;
+
+ /* can use cap->driver to make differentiation */
+ camera_pack_event(filep, MSM_CAMERA_GET_PARM,
+ MSM_CAMERA_PRIV_QUERY_CAP, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+
+ return rc;
+}
+
+static int camera_v4l2_s_crop(struct file *filep, void *fh,
+ const struct v4l2_crop *crop)
+{
+ int rc = 0;
+ struct v4l2_event event;
+
+ if (msm_is_daemon_present() == false)
+ return 0;
+
+ if (crop->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_S_CROP, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_g_crop(struct file *filep, void *fh,
+ struct v4l2_crop *crop)
+{
+ int rc = 0;
+ struct v4l2_event event;
+
+ if (msm_is_daemon_present() == false)
+ return 0;
+
+ if (crop->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ camera_pack_event(filep, MSM_CAMERA_GET_PARM,
+ MSM_CAMERA_PRIV_G_CROP, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_queryctrl(struct file *filep, void *fh,
+ struct v4l2_queryctrl *ctrl)
+{
+ int rc = 0;
+ struct v4l2_event event;
+
+ if (msm_is_daemon_present() == false)
+ return 0;
+
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
+
+ camera_pack_event(filep, MSM_CAMERA_GET_PARM,
+ ctrl->id, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_g_ctrl(struct file *filep, void *fh,
+ struct v4l2_control *ctrl)
+{
+ int rc = 0;
+ struct v4l2_event event;
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+
+ if (ctrl->id >= V4L2_CID_PRIVATE_BASE) {
+ if (ctrl->id == MSM_CAMERA_PRIV_G_SESSION_ID) {
+ ctrl->value = session_id;
+ } else {
+ camera_pack_event(filep, MSM_CAMERA_GET_PARM,
+ ctrl->id, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ }
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_s_ctrl(struct file *filep, void *fh,
+ struct v4l2_control *ctrl)
+{
+ int rc = 0;
+ struct v4l2_event event;
+ struct msm_v4l2_event_data *event_data;
+
+ if (ctrl->id >= V4L2_CID_PRIVATE_BASE) {
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM, ctrl->id,
+ ctrl->value, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+ event_data = (struct msm_v4l2_event_data *)event.u.data;
+ ctrl->value = event_data->ret_value;
+ rc = camera_check_event_status(&event);
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_reqbufs(struct file *filep, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ int ret;
+ struct msm_session *session;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&sp->lock);
+ ret = vb2_reqbufs(&sp->vb2_q, req);
+ mutex_unlock(&sp->lock);
+ return ret;
+}
+
+static int camera_v4l2_querybuf(struct file *filep, void *fh,
+ struct v4l2_buffer *pb)
+{
+ return 0;
+}
+
+static int camera_v4l2_qbuf(struct file *filep, void *fh,
+ struct v4l2_buffer *pb)
+{
+ int ret;
+ struct msm_session *session;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&sp->lock);
+ ret = vb2_qbuf(&sp->vb2_q, pb);
+ mutex_unlock(&sp->lock);
+ return ret;
+}
+
+static int camera_v4l2_dqbuf(struct file *filep, void *fh,
+ struct v4l2_buffer *pb)
+{
+ int ret;
+ struct msm_session *session;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&sp->lock);
+ ret = vb2_dqbuf(&sp->vb2_q, pb, filep->f_flags & O_NONBLOCK);
+ mutex_unlock(&sp->lock);
+ return ret;
+}
+
+static int camera_v4l2_streamon(struct file *filep, void *fh,
+ enum v4l2_buf_type buf_type)
+{
+ struct v4l2_event event;
+ int rc;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+
+ mutex_lock(&sp->lock);
+ rc = vb2_streamon(&sp->vb2_q, buf_type);
+ mutex_unlock(&sp->lock);
+
+ if (msm_is_daemon_present() == false)
+ return 0;
+
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_STREAM_ON, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ return rc;
+}
+
+static int camera_v4l2_streamoff(struct file *filep, void *fh,
+ enum v4l2_buf_type buf_type)
+{
+ struct v4l2_event event;
+ int rc = 0;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+
+ if (msm_is_daemon_present() != false) {
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_STREAM_OFF, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+ rc = camera_check_event_status(&event);
+ }
+ mutex_lock(&sp->lock);
+ vb2_streamoff(&sp->vb2_q, buf_type);
+ mutex_unlock(&sp->lock);
+ return rc;
+}
+
+static int camera_v4l2_g_fmt_vid_cap_mplane(struct file *filep, void *fh,
+ struct v4l2_format *pfmt)
+{
+ int rc = -EINVAL;
+
+ if (msm_is_daemon_present() == false)
+ return 0;
+
+ if (pfmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ struct v4l2_event event;
+
+ camera_pack_event(filep, MSM_CAMERA_GET_PARM,
+ MSM_CAMERA_PRIV_G_FMT, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_s_fmt_vid_cap_mplane(struct file *filep, void *fh,
+ struct v4l2_format *pfmt)
+{
+ int rc = 0;
+ int i = 0;
+ struct v4l2_event event;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+ struct msm_v4l2_format_data *user_fmt;
+
+ if (pfmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+
+ if (WARN_ON(!sp->vb2_q.drv_priv))
+ return -ENOMEM;
+
+ memcpy(sp->vb2_q.drv_priv, pfmt->fmt.raw_data,
+ sizeof(struct msm_v4l2_format_data));
+ user_fmt = (struct msm_v4l2_format_data *)sp->vb2_q.drv_priv;
+
+ pr_debug("%s: num planes :%d\n", __func__,
+ user_fmt->num_planes);
+ /* num_planes must be bounds checked, otherwise the loop
+ * below could read past plane_sizes[]
+ */
+ if (WARN_ON(user_fmt->num_planes > VIDEO_MAX_PLANES))
+ return -EINVAL;
+ for (i = 0; i < user_fmt->num_planes; i++)
+ pr_debug("%s: plane size[%d]\n", __func__,
+ user_fmt->plane_sizes[i]);
+
+ if (msm_is_daemon_present() != false) {
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_S_FMT, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ if (rc < 0)
+ return rc;
+ }
+ sp->is_vb2_valid = 1;
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_try_fmt_vid_cap_mplane(struct file *filep, void *fh,
+ struct v4l2_format *pfmt)
+{
+ return 0;
+}
+
+
+static int camera_v4l2_g_parm(struct file *filep, void *fh,
+ struct v4l2_streamparm *a)
+{
+ /* TODO */
+ return 0;
+}
+
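+/*
+ * VIDIOC_S_PARM is used to create a new stream for this file handle: the
+ * stream's vb2 queue is registered with the session and the stream id is
+ * returned to user space in parm->parm.capture.extendedmode.
+ */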
+static int camera_v4l2_s_parm(struct file *filep, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ int rc = 0;
+ struct v4l2_event event;
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event.u.data[0];
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_NEW_STREAM, -1, &event);
+
+ rc = msm_create_stream(event_data->session_id,
+ event_data->stream_id, &sp->vb2_q);
+ if (rc < 0)
+ return rc;
+
+ if (msm_is_daemon_present() != false) {
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ goto error;
+
+ rc = camera_check_event_status(&event);
+ if (rc < 0)
+ goto error;
+ }
+ /* use stream_id as stream index */
+ parm->parm.capture.extendedmode = sp->stream_id;
+ sp->stream_created = true;
+
+ return rc;
+
+error:
+ msm_delete_stream(event_data->session_id,
+ event_data->stream_id);
+ return rc;
+}
+
+static int camera_v4l2_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ int rc = 0;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+
+ rc = v4l2_event_subscribe(&sp->fh, sub, 5, NULL);
+
+ return rc;
+}
+
+static int camera_v4l2_unsubscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ int rc = 0;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+
+ rc = v4l2_event_unsubscribe(&sp->fh, sub);
+
+ return rc;
+}
+
+static long camera_v4l2_vidioc_private_ioctl(struct file *filep, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ struct msm_camera_private_ioctl_arg *k_ioctl = arg;
+ long rc = -EINVAL;
+
+ if (WARN_ON(!k_ioctl || !pvdev))
+ return -EIO;
+
+ switch (k_ioctl->id) {
+ case MSM_CAMERA_PRIV_IOCTL_ID_RETURN_BUF: {
+ struct msm_camera_return_buf ptr, *tmp = NULL;
+
+ MSM_CAM_GET_IOCTL_ARG_PTR(&tmp, &k_ioctl->ioctl_ptr,
+ sizeof(tmp));
+ if (copy_from_user(&ptr, tmp,
+ sizeof(struct msm_camera_return_buf))) {
+ return -EFAULT;
+ }
+ rc = msm_vb2_return_buf_by_idx(pvdev->vdev->num, sp->stream_id,
+ ptr.index);
+ }
+ break;
+ default:
+ pr_debug("unimplemented id %d", k_ioctl->id);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static const struct v4l2_ioctl_ops camera_v4l2_ioctl_ops = {
+ .vidioc_querycap = camera_v4l2_querycap,
+ .vidioc_s_crop = camera_v4l2_s_crop,
+ .vidioc_g_crop = camera_v4l2_g_crop,
+ .vidioc_queryctrl = camera_v4l2_queryctrl,
+ .vidioc_g_ctrl = camera_v4l2_g_ctrl,
+ .vidioc_s_ctrl = camera_v4l2_s_ctrl,
+ .vidioc_reqbufs = camera_v4l2_reqbufs,
+ .vidioc_querybuf = camera_v4l2_querybuf,
+ .vidioc_qbuf = camera_v4l2_qbuf,
+ .vidioc_dqbuf = camera_v4l2_dqbuf,
+ .vidioc_streamon = camera_v4l2_streamon,
+ .vidioc_streamoff = camera_v4l2_streamoff,
+ .vidioc_g_fmt_vid_cap_mplane = camera_v4l2_g_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = camera_v4l2_s_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = camera_v4l2_try_fmt_vid_cap_mplane,
+
+ /* Stream type-dependent parameter ioctls */
+ .vidioc_g_parm = camera_v4l2_g_parm,
+ .vidioc_s_parm = camera_v4l2_s_parm,
+
+ /* event subscribe/unsubscribe */
+ .vidioc_subscribe_event = camera_v4l2_subscribe_event,
+ .vidioc_unsubscribe_event = camera_v4l2_unsubscribe_event,
+ .vidioc_default = camera_v4l2_vidioc_private_ioctl,
+};
+
+static int camera_v4l2_fh_open(struct file *filep)
+{
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ struct camera_v4l2_private *sp;
+ unsigned int stream_id;
+
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return -ENOMEM;
+
+ filep->private_data = &sp->fh;
+
+ /* stream_id = open id */
+ stream_id = atomic_read(&pvdev->opened);
+ sp->stream_id = find_first_zero_bit(
+ (const unsigned long *)&stream_id, MSM_CAMERA_STREAM_CNT_BITS);
+ pr_debug("%s: Found stream_id=%d\n", __func__, sp->stream_id);
+
+ mutex_init(&sp->lock);
+
+ v4l2_fh_init(&sp->fh, pvdev->vdev);
+ v4l2_fh_add(&sp->fh);
+
+ return 0;
+}
+
+static int camera_v4l2_fh_release(struct file *filep)
+{
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+
+ if (sp) {
+ v4l2_fh_del(&sp->fh);
+ v4l2_fh_exit(&sp->fh);
+ mutex_destroy(&sp->lock);
+ kzfree(sp);
+ }
+
+ return 0;
+}
+
+static int camera_v4l2_vb2_q_init(struct file *filep)
+{
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+ struct vb2_queue *q = &sp->vb2_q;
+
+ memset(q, 0, sizeof(struct vb2_queue));
+
+ /* free up this buffer when stream is done */
+ q->drv_priv =
+ kzalloc(sizeof(struct msm_v4l2_format_data), GFP_KERNEL);
+ if (!q->drv_priv) {
+ pr_err("%s : memory not available\n", __func__);
+ return -ENOMEM;
+ }
+
+ q->mem_ops = msm_vb2_get_q_mem_ops();
+ q->ops = msm_vb2_get_q_ops();
+
+ /* default queue type */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_USERPTR;
+ q->buf_struct_size = sizeof(struct msm_vb2_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ return vb2_queue_init(q);
+}
+
+static void camera_v4l2_vb2_q_release(struct file *filep)
+{
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+
+ kzfree(sp->vb2_q.drv_priv);
+ mutex_lock(&sp->lock);
+ vb2_queue_release(&sp->vb2_q);
+ mutex_unlock(&sp->lock);
+}
+
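+/*
+ * open() flow: the first opener takes a wakeup reference, temporarily
+ * disables power-collapse latency, creates the per-video-node session
+ * and command-ack queue and, when the daemon is present, posts a
+ * NEW_SESSION event. Later openers only add a command-ack queue for
+ * their slot. The slot is finally marked in the pvdev->opened bitmask.
+ */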
+static int camera_v4l2_open(struct file *filep)
+{
+ int rc = 0;
+ struct v4l2_event event;
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int opn_idx, idx;
+
+ if (WARN_ON(!pvdev))
+ return -EIO;
+
+ rc = camera_v4l2_fh_open(filep);
+ if (rc < 0) {
+ pr_err("%s : camera_v4l2_fh_open failed Line %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto fh_open_fail;
+ }
+
+ opn_idx = atomic_read(&pvdev->opened);
+ idx = opn_idx;
+ /* every stream has a vb2 queue */
+ rc = camera_v4l2_vb2_q_init(filep);
+ if (rc < 0) {
+ pr_err("%s : vb2 queue init fails Line %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto vb2_q_fail;
+ }
+
+ if (!atomic_read(&pvdev->opened)) {
+ pm_stay_awake(&pvdev->vdev->dev);
+
+ /* Disable power collapse latency */
+ msm_pm_qos_update_request(CAMERA_DISABLE_PC_LATENCY);
+
+ /* create a new session when first opened */
+ rc = msm_create_session(pvdev->vdev->num, pvdev->vdev);
+ if (rc < 0) {
+ pr_err("%s : session creation failed Line %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto session_fail;
+ }
+
+ rc = msm_create_command_ack_q(pvdev->vdev->num,
+ find_first_zero_bit((const unsigned long *)&opn_idx,
+ MSM_CAMERA_STREAM_CNT_BITS));
+ if (rc < 0) {
+ pr_err("%s : creation of command_ack queue failed\n",
+ __func__);
+ pr_err("%s : Line %d rc %d\n", __func__, __LINE__, rc);
+ goto command_ack_q_fail;
+ }
+
+ if (msm_is_daemon_present() != false) {
+ camera_pack_event(filep, MSM_CAMERA_NEW_SESSION,
+ 0, -1, &event);
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0) {
+ pr_err("%s : NEW_SESSION event failed,rc %d\n",
+ __func__, rc);
+ goto post_fail;
+ }
+
+ rc = camera_check_event_status(&event);
+ if (rc < 0)
+ goto post_fail;
+ }
+ /* Enable power collapse latency */
+ msm_pm_qos_update_request(CAMERA_ENABLE_PC_LATENCY);
+ } else {
+ rc = msm_create_command_ack_q(pvdev->vdev->num,
+ find_first_zero_bit((const unsigned long *)&opn_idx,
+ MSM_CAMERA_STREAM_CNT_BITS));
+ if (rc < 0) {
+ pr_err("%s : creation of command_ack queue failed Line %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto stream_fail;
+ }
+ }
+ idx |= (1 << find_first_zero_bit((const unsigned long *)&opn_idx,
+ MSM_CAMERA_STREAM_CNT_BITS));
+ atomic_cmpxchg(&pvdev->opened, opn_idx, idx);
+
+ return rc;
+
+post_fail:
+ msm_delete_command_ack_q(pvdev->vdev->num, 0);
+command_ack_q_fail:
+ msm_destroy_session(pvdev->vdev->num);
+session_fail:
+ pm_relax(&pvdev->vdev->dev);
+stream_fail:
+ camera_v4l2_vb2_q_release(filep);
+vb2_q_fail:
+ camera_v4l2_fh_release(filep);
+fh_open_fail:
+ return rc;
+}
+
+static unsigned int camera_v4l2_poll(struct file *filep,
+ struct poll_table_struct *wait)
+{
+ int rc = 0;
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+
+ if (sp->is_vb2_valid == 1)
+ rc = vb2_poll(&sp->vb2_q, filep, wait);
+
+ poll_wait(filep, &sp->fh.wait, wait);
+ if (v4l2_event_pending(&sp->fh))
+ rc |= POLLPRI;
+
+ return rc;
+}
+
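+/*
+ * close() drops this handle's bit from the pvdev->opened bitmask. The
+ * last close tears down the daemon session, command-ack queue, vb2
+ * queue and session state; otherwise only this handle's per-stream
+ * resources are released.
+ */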
+static int camera_v4l2_close(struct file *filep)
+{
+ struct v4l2_event event;
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+ unsigned int opn_idx, mask;
+ struct msm_session *session;
+
+ if (WARN_ON(!pvdev))
+ return -EIO;
+
+ session = msm_session_find(pvdev->vdev->num);
+ if (WARN_ON(!session))
+ return -EIO;
+
+ mutex_lock(&session->close_lock);
+ opn_idx = atomic_read(&pvdev->opened);
+ mask = (1 << sp->stream_id);
+ opn_idx &= ~mask;
+ atomic_set(&pvdev->opened, opn_idx);
+
+ if (msm_is_daemon_present() != false && sp->stream_created == true) {
+ pr_debug("%s: close stream_id=%d\n", __func__, sp->stream_id);
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);
+ msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ }
+
+ if (sp->stream_created == true)
+ sp->stream_created = false;
+
+ if (atomic_read(&pvdev->opened) == 0) {
+ if (msm_is_daemon_present() != false) {
+ camera_pack_event(filep, MSM_CAMERA_DEL_SESSION,
+ 0, -1, &event);
+ msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ }
+ msm_delete_command_ack_q(pvdev->vdev->num, 0);
+ msm_delete_stream(pvdev->vdev->num, sp->stream_id);
+ mutex_unlock(&session->close_lock);
+ /* This should take care of both normal close
+ * and application crashes
+ */
+ camera_v4l2_vb2_q_release(filep);
+ msm_destroy_session(pvdev->vdev->num);
+
+ pm_relax(&pvdev->vdev->dev);
+ } else {
+ msm_delete_command_ack_q(pvdev->vdev->num,
+ sp->stream_id);
+
+ camera_v4l2_vb2_q_release(filep);
+ msm_delete_stream(pvdev->vdev->num, sp->stream_id);
+ mutex_unlock(&session->close_lock);
+ }
+
+ camera_v4l2_fh_release(filep);
+
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long camera_handle_internal_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long rc = 0;
+ struct msm_camera_private_ioctl_arg k_ioctl;
+ void __user *tmp_compat_ioctl_ptr = NULL;
+
+ rc = msm_copy_camera_private_ioctl_args(arg,
+ &k_ioctl, &tmp_compat_ioctl_ptr);
+ if (rc < 0) {
+ pr_err("Subdev cmd %d failed\n", cmd);
+ return rc;
+ }
+ switch (k_ioctl.id) {
+ case MSM_CAMERA_PRIV_IOCTL_ID_RETURN_BUF: {
+ if (k_ioctl.size != sizeof(struct msm_camera_return_buf)) {
+ pr_debug("Invalid size for id %d with size %d",
+ k_ioctl.id, k_ioctl.size);
+ return -EINVAL;
+ }
+ k_ioctl.ioctl_ptr = (__u64)tmp_compat_ioctl_ptr;
+ if (!k_ioctl.ioctl_ptr) {
+ pr_debug("Invalid ptr for id %d", k_ioctl.id);
+ return -EINVAL;
+ }
+ rc = camera_v4l2_vidioc_private_ioctl(file, file->private_data,
+ 0, cmd, (void *)&k_ioctl);
+ }
+ break;
+ default:
+ pr_debug("unimplemented id %d", k_ioctl.id);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+long camera_v4l2_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+
+ switch (cmd) {
+ case VIDIOC_MSM_CAMERA_PRIVATE_IOCTL_CMD: {
+ ret = camera_handle_internal_compat_ioctl(file, cmd, arg);
+ if (ret < 0) {
+ pr_debug("Subdev cmd %d fail\n", cmd);
+ return ret;
+ }
+ }
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+
+ }
+ return ret;
+}
+#endif
+static struct v4l2_file_operations camera_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .open = camera_v4l2_open,
+ .poll = camera_v4l2_poll,
+ .release = camera_v4l2_close,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = camera_v4l2_compat_ioctl,
+#endif
+};
+
+int camera_init_v4l2(struct device *dev, unsigned int *session)
+{
+ struct msm_video_device *pvdev;
+ struct v4l2_device *v4l2_dev;
+ int rc = 0;
+
+ pvdev = kzalloc(sizeof(struct msm_video_device),
+ GFP_KERNEL);
+ if (!pvdev) {
+ rc = -ENOMEM;
+ goto init_end;
+ }
+
+ pvdev->vdev = video_device_alloc();
+ if (!pvdev->vdev) {
+ rc = -ENOMEM;
+ goto video_fail;
+ }
+
+ v4l2_dev = kzalloc(sizeof(struct v4l2_device), GFP_KERNEL);
+ if (!v4l2_dev) {
+ rc = -ENOMEM;
+ goto v4l2_fail;
+ }
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ v4l2_dev->mdev = kzalloc(sizeof(struct media_device),
+ GFP_KERNEL);
+ if (!v4l2_dev->mdev) {
+ rc = -ENOMEM;
+ goto mdev_fail;
+ }
+ strlcpy(v4l2_dev->mdev->model, MSM_CAMERA_NAME,
+ sizeof(v4l2_dev->mdev->model));
+
+ v4l2_dev->mdev->dev = dev;
+
+ rc = media_device_register(v4l2_dev->mdev);
+ if (WARN_ON(rc < 0))
+ goto media_fail;
+
+ rc = media_entity_init(&pvdev->vdev->entity, 0, NULL, 0);
+ if (WARN_ON(rc < 0))
+ goto entity_fail;
+ pvdev->vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
+ pvdev->vdev->entity.group_id = QCAMERA_VNODE_GROUP_ID;
+#endif
+
+ v4l2_dev->notify = NULL;
+ pvdev->vdev->v4l2_dev = v4l2_dev;
+
+ rc = v4l2_device_register(dev, pvdev->vdev->v4l2_dev);
+ if (WARN_ON(rc < 0))
+ goto register_fail;
+
+ strlcpy(pvdev->vdev->name, "msm-sensor", sizeof(pvdev->vdev->name));
+ pvdev->vdev->release = video_device_release;
+ pvdev->vdev->fops = &camera_v4l2_fops;
+ pvdev->vdev->ioctl_ops = &camera_v4l2_ioctl_ops;
+ pvdev->vdev->minor = -1;
+ pvdev->vdev->vfl_type = VFL_TYPE_GRABBER;
+ rc = video_register_device(pvdev->vdev,
+ VFL_TYPE_GRABBER, -1);
+ if (WARN_ON(rc < 0))
+ goto video_register_fail;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ /* FIXME: How to get rid of this mess? */
+ pvdev->vdev->entity.name = video_device_node_name(pvdev->vdev);
+#endif
+
+ *session = pvdev->vdev->num;
+ atomic_set(&pvdev->opened, 0);
+ video_set_drvdata(pvdev->vdev, pvdev);
+ device_init_wakeup(&pvdev->vdev->dev, 1);
+ goto init_end;
+
+video_register_fail:
+ v4l2_device_unregister(pvdev->vdev->v4l2_dev);
+register_fail:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&pvdev->vdev->entity);
+entity_fail:
+ media_device_unregister(v4l2_dev->mdev);
+media_fail:
+ kzfree(v4l2_dev->mdev);
+mdev_fail:
+#endif
+ kzfree(v4l2_dev);
+v4l2_fail:
+ video_device_release(pvdev->vdev);
+video_fail:
+ kzfree(pvdev);
+init_end:
+ return rc;
+}
diff --git a/drivers/media/platform/msm/ais/camera/camera.h b/drivers/media/platform/msm/ais/camera/camera.h
new file mode 100644
index 000000000000..197991decb16
--- /dev/null
+++ b/drivers/media/platform/msm/ais/camera/camera.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAMERA_H
+#define _CAMERA_H
+
+enum stream_state {
+ START_STREAM = 0,
+ STOP_STREAM,
+};
+
+int camera_init_v4l2(struct device *dev, unsigned int *session);
+
+#endif /* _CAMERA_H */
diff --git a/drivers/media/platform/msm/ais/common/Makefile b/drivers/media/platform/msm/ais/common/Makefile
new file mode 100644
index 000000000000..e1fa3f2ea848
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/Makefile
@@ -0,0 +1,2 @@
+ccflags-y += -Idrivers/media/platform/msm/ais/
+obj-$(CONFIG_MSM_AIS) += msm_camera_io_util.o cam_smmu_api.o cam_hw_ops.o cam_soc_api.o
diff --git a/drivers/media/platform/msm/ais/common/cam_hw_ops.c b/drivers/media/platform/msm/ais/common/cam_hw_ops.c
new file mode 100644
index 000000000000..073778c9edcc
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/cam_hw_ops.c
@@ -0,0 +1,338 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-AHB %s:%d " fmt, __func__, __LINE__
+#define TRUE 1
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include "cam_hw_ops.h"
+
+#ifdef CONFIG_CAM_AHB_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+struct cam_ahb_client {
+ enum cam_ahb_clk_vote vote;
+};
+
+struct cam_bus_vector {
+ const char *name;
+};
+
+struct cam_ahb_client_data {
+ struct msm_bus_scale_pdata *pbus_data;
+ u32 ahb_client;
+ u32 ahb_clk_state;
+ struct msm_bus_vectors *paths;
+ struct msm_bus_paths *usecases;
+ struct cam_bus_vector *vectors;
+ u32 *votes;
+ u32 cnt;
+ u32 probe_done;
+ struct cam_ahb_client clients[CAM_AHB_CLIENT_MAX];
+ struct mutex lock;
+};
+
+static struct cam_ahb_client_data data;
+
+int get_vector_index(char *name)
+{
+ int i = 0, rc = -1;
+
+ for (i = 0; i < data.cnt; i++) {
+ if (strcmp(name, data.vectors[i].name) == 0)
+ return i;
+ }
+
+ return rc;
+}
+
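+/*
+ * Parse the "bus-vectors" strings and the matching "qcom,bus-votes"
+ * values from DT, build a msm_bus_scale_pdata from them, register an
+ * AHB bus client and start out voting for the "suspend" level.
+ */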
+int cam_ahb_clk_init(struct platform_device *pdev)
+{
+ int i = 0, cnt = 0, rc = 0, index = 0;
+ struct device_node *of_node;
+
+ if (!pdev) {
+ pr_err("invalid pdev argument\n");
+ return -EINVAL;
+ }
+
+ of_node = pdev->dev.of_node;
+ data.cnt = of_property_count_strings(of_node, "bus-vectors");
+ if (data.cnt == 0) {
+ pr_err("no vectors strings found in device tree, count=%d",
+ data.cnt);
+ return 0;
+ }
+
+ cnt = of_property_count_u32_elems(of_node, "qcom,bus-votes");
+ if (cnt == 0) {
+ pr_err("no vector values found in device tree, count=%d", cnt);
+ return 0;
+ }
+
+ if (data.cnt != cnt) {
+ pr_err("vector mismatch num of strings=%u, num of values %d\n",
+ data.cnt, cnt);
+ return -EINVAL;
+ }
+
+ CDBG("number of bus vectors: %d\n", data.cnt);
+
+ data.vectors = devm_kzalloc(&pdev->dev,
+ sizeof(struct cam_bus_vector) * cnt,
+ GFP_KERNEL);
+ if (!data.vectors)
+ return -ENOMEM;
+
+ for (i = 0; i < data.cnt; i++) {
+ rc = of_property_read_string_index(of_node, "bus-vectors",
+ i, &(data.vectors[i].name));
+ CDBG("dbg: names[%d] = %s\n", i, data.vectors[i].name);
+ if (rc < 0) {
+ pr_err("failed\n");
+ rc = -EINVAL;
+ goto err1;
+ }
+ }
+
+ data.paths = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_bus_vectors) * cnt,
+ GFP_KERNEL);
+ if (!data.paths) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ data.usecases = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_bus_paths) * cnt,
+ GFP_KERNEL);
+ if (!data.usecases) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ data.pbus_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_bus_scale_pdata),
+ GFP_KERNEL);
+ if (!data.pbus_data) {
+ rc = -ENOMEM;
+ goto err3;
+ }
+
+ data.votes = devm_kzalloc(&pdev->dev, sizeof(u32) * cnt,
+ GFP_KERNEL);
+ if (!data.votes) {
+ rc = -ENOMEM;
+ goto err4;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,bus-votes",
+ data.votes, cnt);
+
+ for (i = 0; i < data.cnt; i++) {
+ data.paths[i] = (struct msm_bus_vectors) {
+ MSM_BUS_MASTER_AMPSS_M0,
+ MSM_BUS_SLAVE_CAMERA_CFG,
+ 0,
+ data.votes[i]
+ };
+ data.usecases[i] = (struct msm_bus_paths) {
+ .num_paths = 1,
+ .vectors = &data.paths[i],
+ };
+ CDBG("dbg: votes[%d] = %u\n", i, data.votes[i]);
+ }
+
+ *data.pbus_data = (struct msm_bus_scale_pdata) {
+ .name = "msm_camera_ahb",
+ .num_usecases = data.cnt,
+ .usecase = data.usecases,
+ };
+
+ data.ahb_client =
+ msm_bus_scale_register_client(data.pbus_data);
+ if (!data.ahb_client) {
+ pr_err("ahb vote registering failed\n");
+ rc = -EINVAL;
+ goto err5;
+ }
+
+ index = get_vector_index("suspend");
+ if (index < 0) {
+ pr_err("svs vector not supported\n");
+ rc = -EINVAL;
+ goto err6;
+ }
+
+ /* vote for the suspend level at init */
+ msm_bus_scale_client_update_request(data.ahb_client,
+ index);
+ data.ahb_clk_state = CAM_AHB_SUSPEND_VOTE;
+ data.probe_done = TRUE;
+ mutex_init(&data.lock);
+
+ CDBG("dbg, done registering ahb votes\n");
+ CDBG("dbg, clk state :%u, probe :%d\n",
+ data.ahb_clk_state, data.probe_done);
+ return rc;
+
+err6:
+ msm_bus_scale_unregister_client(data.ahb_client);
+err5:
+ devm_kfree(&pdev->dev, data.votes);
+ data.votes = NULL;
+err4:
+ devm_kfree(&pdev->dev, data.pbus_data);
+ data.pbus_data = NULL;
+err3:
+ devm_kfree(&pdev->dev, data.usecases);
+ data.usecases = NULL;
+err2:
+ devm_kfree(&pdev->dev, data.paths);
+ data.paths = NULL;
+err1:
+ devm_kfree(&pdev->dev, data.vectors);
+ data.vectors = NULL;
+ return rc;
+}
+EXPORT_SYMBOL(cam_ahb_clk_init);
+
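+/*
+ * Record the caller's vote and apply the highest vote across all
+ * clients, so the AHB clock never drops below what any active client
+ * has asked for.
+ */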
+int cam_consolidate_ahb_vote(enum cam_ahb_clk_client id,
+ enum cam_ahb_clk_vote vote)
+{
+ int i = 0;
+ u32 max = 0;
+
+ CDBG("dbg: id :%u, vote : 0x%x\n", id, vote);
+ mutex_lock(&data.lock);
+ data.clients[id].vote = vote;
+
+ if (vote == data.ahb_clk_state) {
+ CDBG("dbg: already at desired vote\n");
+ mutex_unlock(&data.lock);
+ return 0;
+ }
+
+ for (i = 0; i < CAM_AHB_CLIENT_MAX; i++) {
+ if (data.clients[i].vote > max)
+ max = data.clients[i].vote;
+ }
+
+ CDBG("dbg: max vote : %u\n", max);
+ if (max >= 0) {
+ if (max != data.ahb_clk_state) {
+ msm_bus_scale_client_update_request(data.ahb_client,
+ max);
+ data.ahb_clk_state = max;
+ CDBG("dbg: state : %u, vector : %d\n",
+ data.ahb_clk_state, max);
+ }
+ } else {
+ pr_err("err: no bus vector found\n");
+ mutex_unlock(&data.lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&data.lock);
+ return 0;
+}
+
+static int cam_ahb_get_voltage_level(unsigned int corner)
+{
+ switch (corner) {
+ case RPM_REGULATOR_CORNER_NONE:
+ return CAM_AHB_SUSPEND_VOTE;
+
+ case RPM_REGULATOR_CORNER_SVS_KRAIT:
+ case RPM_REGULATOR_CORNER_SVS_SOC:
+ return CAM_AHB_SVS_VOTE;
+
+ case RPM_REGULATOR_CORNER_NORMAL:
+ return CAM_AHB_NOMINAL_VOTE;
+
+ case RPM_REGULATOR_CORNER_SUPER_TURBO:
+ return CAM_AHB_TURBO_VOTE;
+
+ case RPM_REGULATOR_CORNER_TURBO:
+ case RPM_REGULATOR_CORNER_RETENTION:
+ default:
+ return -EINVAL;
+ }
+}
+
+int cam_config_ahb_clk(struct device *dev, unsigned long freq,
+ enum cam_ahb_clk_client id, enum cam_ahb_clk_vote vote)
+{
+ struct dev_pm_opp *opp;
+ unsigned int corner;
+ enum cam_ahb_clk_vote dyn_vote = vote;
+ int rc = -EINVAL;
+
+ if (id >= CAM_AHB_CLIENT_MAX) {
+ pr_err("err: invalid argument\n");
+ return -EINVAL;
+ }
+
+ if (data.probe_done != TRUE) {
+ pr_err("ahb init is not done yet\n");
+ return -EINVAL;
+ }
+
+ CDBG("dbg: id :%u, vote : 0x%x\n", id, vote);
+ switch (dyn_vote) {
+ case CAM_AHB_SUSPEND_VOTE:
+ case CAM_AHB_SVS_VOTE:
+ case CAM_AHB_NOMINAL_VOTE:
+ case CAM_AHB_TURBO_VOTE:
+ break;
+ case CAM_AHB_DYNAMIC_VOTE:
+ if (!dev) {
+ pr_err("device is NULL\n");
+ return -EINVAL;
+ }
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ if (IS_ERR(opp)) {
+ pr_err("Error on OPP freq :%ld\n", freq);
+ return -EINVAL;
+ }
+ corner = dev_pm_opp_get_voltage(opp);
+ if (corner == 0) {
+ pr_err("Bad voltage corner for OPP freq :%ld\n", freq);
+ return -EINVAL;
+ }
+ dyn_vote = cam_ahb_get_voltage_level(corner);
+ if (dyn_vote < 0) {
+ pr_err("Bad vote requested\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("err: invalid vote argument\n");
+ return -EINVAL;
+ }
+
+ rc = cam_consolidate_ahb_vote(id, dyn_vote);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto end;
+ }
+
+end:
+ return rc;
+}
+EXPORT_SYMBOL(cam_config_ahb_clk);
diff --git a/drivers/media/platform/msm/ais/common/cam_hw_ops.h b/drivers/media/platform/msm/ais/common/cam_hw_ops.h
new file mode 100644
index 000000000000..32f93f7b6e0e
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/cam_hw_ops.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_HW_OPS_H_
+#define _CAM_HW_OPS_H_
+
+enum cam_ahb_clk_vote {
+ /* need to update the voting requests
+ * according to dtsi entries.
+ */
+ CAM_AHB_SUSPEND_VOTE = 0x0,
+ CAM_AHB_SVS_VOTE = 0x01,
+ CAM_AHB_NOMINAL_VOTE = 0x02,
+ CAM_AHB_TURBO_VOTE = 0x03,
+ CAM_AHB_DYNAMIC_VOTE = 0xFF,
+};
+
+enum cam_ahb_clk_client {
+ CAM_AHB_CLIENT_CSIPHY,
+ CAM_AHB_CLIENT_CSID,
+ CAM_AHB_CLIENT_CCI,
+ CAM_AHB_CLIENT_ISPIF,
+ CAM_AHB_CLIENT_VFE0,
+ CAM_AHB_CLIENT_VFE1,
+ CAM_AHB_CLIENT_CPP,
+ CAM_AHB_CLIENT_FD,
+ CAM_AHB_CLIENT_JPEG,
+ CAM_AHB_CLIENT_MAX
+};
+
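+/*
+ * Illustrative usage (not taken from a call site in this patch): a
+ * client such as CSID votes for SVS while active and drops back to the
+ * suspend level when idle. dev/freq are only used with
+ * CAM_AHB_DYNAMIC_VOTE, so they may be NULL/0 here:
+ *
+ *	cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSID, CAM_AHB_SVS_VOTE);
+ *	...
+ *	cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSID, CAM_AHB_SUSPEND_VOTE);
+ */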
+int cam_config_ahb_clk(struct device *dev, unsigned long freq,
+ enum cam_ahb_clk_client id, enum cam_ahb_clk_vote vote);
+int cam_ahb_clk_init(struct platform_device *pdev);
+#endif
diff --git a/drivers/media/platform/msm/ais/common/cam_smmu_api.c b/drivers/media/platform/msm/ais/common/cam_smmu_api.c
new file mode 100644
index 000000000000..d3b239e9f304
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/cam_smmu_api.c
@@ -0,0 +1,1680 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-SMMU %s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-attrs.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/qcom_iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/workqueue.h>
+#include "cam_smmu_api.h"
+
+#define SCRATCH_ALLOC_START SZ_128K
+#define SCRATCH_ALLOC_END SZ_256M
+#define VA_SPACE_END SZ_2G
+#define IOMMU_INVALID_DIR -1
+#define BYTE_SIZE 8
+#define COOKIE_NUM_BYTE 2
+#define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE)
+#define COOKIE_MASK ((1<<COOKIE_SIZE)-1)
+#define HANDLE_INIT (-1)
+#define CAM_SMMU_CB_MAX 2
+
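+/*
+ * An SMMU handle packs the context bank table index in its upper bits
+ * and a random 16-bit cookie in its lower bits.
+ */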
+#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
+#define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
+
+#ifdef CONFIG_CAM_SMMU_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+struct cam_smmu_work_payload {
+ int idx;
+ struct iommu_domain *domain;
+ struct device *dev;
+ unsigned long iova;
+ int flags;
+ void *token;
+ struct list_head list;
+};
+
+enum cam_protection_type {
+ CAM_PROT_INVALID,
+ CAM_NON_SECURE,
+ CAM_SECURE,
+ CAM_PROT_MAX,
+};
+
+enum cam_iommu_type {
+ CAM_SMMU_INVALID,
+ CAM_QSMMU,
+ CAM_ARM_SMMU,
+ CAM_SMMU_MAX,
+};
+
+enum cam_smmu_buf_state {
+ CAM_SMMU_BUFF_EXIST,
+ CAM_SMMU_BUFF_NOT_EXIST
+};
+
+enum cam_smmu_init_dir {
+ CAM_SMMU_TABLE_INIT,
+ CAM_SMMU_TABLE_DEINIT,
+};
+
+struct scratch_mapping {
+ void *bitmap;
+ size_t bits;
+ unsigned int order;
+ dma_addr_t base;
+};
+
+struct cam_context_bank_info {
+ struct device *dev;
+ struct dma_iommu_mapping *mapping;
+ dma_addr_t va_start;
+ size_t va_len;
+ const char *name;
+ bool is_secure;
+ uint8_t scratch_buf_support;
+ struct scratch_mapping scratch_map;
+ struct list_head smmu_buf_list;
+ struct mutex lock;
+ int handle;
+ enum cam_smmu_ops_param state;
+
+ void (*handler[CAM_SMMU_CB_MAX])(struct iommu_domain *,
+ struct device *, unsigned long,
+ int, void*);
+ void *token[CAM_SMMU_CB_MAX];
+ int cb_count;
+};
+
+struct cam_iommu_cb_set {
+ struct cam_context_bank_info *cb_info;
+ u32 cb_num;
+ u32 cb_init_count;
+ struct work_struct smmu_work;
+ struct mutex payload_list_lock;
+ struct list_head payload_list;
+};
+
+static const struct of_device_id msm_cam_smmu_dt_match[] = {
+ { .compatible = "qcom,msm-cam-smmu", },
+ { .compatible = "qcom,msm-cam-smmu-cb", },
+ { .compatible = "qcom,qsmmu-cam-cb", },
+ {}
+};
+
+struct cam_dma_buff_info {
+ struct dma_buf *buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ enum dma_data_direction dir;
+ int iommu_dir;
+ int ref_count;
+ dma_addr_t paddr;
+ struct list_head list;
+ int ion_fd;
+ size_t len;
+ size_t phys_len;
+};
+
+static struct cam_iommu_cb_set iommu_cb_set;
+
+static enum dma_data_direction cam_smmu_translate_dir(
+ enum cam_smmu_map_dir dir);
+
+static int cam_smmu_check_handle_unique(int hdl);
+
+static int cam_smmu_create_iommu_handle(int idx);
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+ int *hdl);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+ int ion_fd);
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+ dma_addr_t base, size_t size,
+ int order);
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+ size_t size,
+ dma_addr_t *iova);
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+ dma_addr_t addr, size_t size);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+ dma_addr_t virt_addr);
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+ enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+ size_t virt_len,
+ size_t phys_len,
+ unsigned int iommu_dir,
+ dma_addr_t *virt_addr);
+static int cam_smmu_unmap_buf_and_remove_from_list(
+ struct cam_dma_buff_info *mapping_info, int idx);
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+ struct cam_dma_buff_info *mapping_info,
+ int idx);
+
+static void cam_smmu_clean_buffer_list(int idx);
+
+static void cam_smmu_print_list(int idx);
+
+static void cam_smmu_print_table(void);
+
+static int cam_smmu_probe(struct platform_device *pdev);
+
+static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr);
+
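+/*
+ * Page faults are handled in two halves: the IOMMU fault handler only
+ * queues a payload and schedules this worker, which then dispatches the
+ * fault to every client handler registered on the faulting context
+ * bank.
+ */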
+static void cam_smmu_page_fault_work(struct work_struct *work)
+{
+ int j;
+ int idx;
+ struct cam_smmu_work_payload *payload;
+
+ mutex_lock(&iommu_cb_set.payload_list_lock);
+ payload = list_first_entry(&iommu_cb_set.payload_list,
+ struct cam_smmu_work_payload,
+ list);
+ list_del(&payload->list);
+ mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+ /* Dereference the payload to call the handler */
+ idx = payload->idx;
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ cam_smmu_check_vaddr_in_range(idx, (void *)payload->iova);
+ for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+ if ((iommu_cb_set.cb_info[idx].handler[j])) {
+ iommu_cb_set.cb_info[idx].handler[j](
+ payload->domain,
+ payload->dev,
+ payload->iova,
+ payload->flags,
+ iommu_cb_set.cb_info[idx].token[j]);
+ }
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ kfree(payload);
+}
+
+static void cam_smmu_print_list(int idx)
+{
+ struct cam_dma_buff_info *mapping;
+
+ pr_err("index = %d ", idx);
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ pr_err("ion_fd = %d, paddr= 0x%pK, len = %u\n",
+ mapping->ion_fd, (void *)mapping->paddr,
+ (unsigned int)mapping->len);
+ }
+}
+
+static void cam_smmu_print_table(void)
+{
+ int i;
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ pr_err("i= %d, handle= %d, name_addr=%pK\n", i,
+ (int)iommu_cb_set.cb_info[i].handle,
+ (void *)iommu_cb_set.cb_info[i].name);
+ pr_err("dev = %pK ", iommu_cb_set.cb_info[i].dev);
+ }
+}
+
+
+int cam_smmu_query_vaddr_in_range(int handle,
+ unsigned long fault_addr, unsigned long *start_addr,
+ unsigned long *end_addr, int *fd)
+{
+ int idx, rc = -EINVAL;
+ struct cam_dma_buff_info *mapping;
+ unsigned long sa, ea;
+
+ if (!start_addr || !end_addr || !fd) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ sa = (unsigned long)mapping->paddr;
+ ea = (unsigned long)mapping->paddr + mapping->len;
+
+ if (sa <= fault_addr && fault_addr < ea) {
+ *start_addr = sa;
+ *end_addr = ea;
+ *fd = mapping->ion_fd;
+ rc = 0;
+ break;
+ }
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_query_vaddr_in_range);
+
+static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr)
+{
+ struct cam_dma_buff_info *mapping;
+ unsigned long start_addr, end_addr, current_addr;
+
+ current_addr = (unsigned long)vaddr;
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ start_addr = (unsigned long)mapping->paddr;
+ end_addr = (unsigned long)mapping->paddr + mapping->len;
+
+ if (start_addr <= current_addr && current_addr < end_addr) {
+ pr_err("Error: va %pK is valid: range:%pK-%pK, fd = %d cb: %s\n",
+ vaddr, (void *)start_addr, (void *)end_addr,
+ mapping->ion_fd,
+ iommu_cb_set.cb_info[idx].name);
+ return;
+ }
+ CDBG("va %pK is not in this range: %pK-%pK, fd = %d\n",
+ vaddr, (void *)start_addr, (void *)end_addr,
+ mapping->ion_fd);
+ }
+ pr_err("Cannot find vaddr:%pK in SMMU. %s uses invalid virtual address\n",
+ vaddr, iommu_cb_set.cb_info[idx].name);
+}
+
+void cam_smmu_reg_client_page_fault_handler(int handle,
+ void (*client_page_fault_handler)(struct iommu_domain *,
+ struct device *, unsigned long,
+ int, void*), void *token)
+{
+ int idx, i = 0;
+
+ if (!token) {
+ pr_err("Error: token is NULL\n");
+ return;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+ }
+
+ if (client_page_fault_handler) {
+ if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
+ pr_err("%s Should not regiester more handlers\n",
+ iommu_cb_set.cb_info[idx].name);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+ }
+ iommu_cb_set.cb_info[idx].cb_count++;
+ for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) {
+ if (iommu_cb_set.cb_info[idx].token[i] == NULL) {
+ iommu_cb_set.cb_info[idx].token[i] = token;
+ iommu_cb_set.cb_info[idx].handler[i] =
+ client_page_fault_handler;
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+ if (iommu_cb_set.cb_info[idx].token[i] == token) {
+ iommu_cb_set.cb_info[idx].token[i] = NULL;
+ iommu_cb_set.cb_info[idx].handler[i] =
+ NULL;
+ iommu_cb_set.cb_info[idx].cb_count--;
+ break;
+ }
+ }
+ if (i == CAM_SMMU_CB_MAX)
+ pr_err("Error: hdl %x no matching tokens: %s\n",
+ handle, iommu_cb_set.cb_info[idx].name);
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+}
+
+static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova,
+ int flags, void *token)
+{
+ char *cb_name;
+ int idx;
+ struct cam_smmu_work_payload *payload;
+
+ if (!token) {
+ pr_err("Error: token is NULL\n");
+ pr_err("Error: domain = %pK, device = %pK\n", domain, dev);
+ pr_err("iova = %lX, flags = %d\n", iova, flags);
+ return 0;
+ }
+
+ cb_name = (char *)token;
+ /* check whether it is in the table */
+ for (idx = 0; idx < iommu_cb_set.cb_num; idx++) {
+ if (!strcmp(iommu_cb_set.cb_info[idx].name, cb_name))
+ break;
+ }
+
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: index is not valid, index = %d, token = %s\n",
+ idx, cb_name);
+ return 0;
+ }
+
+ payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
+ if (!payload)
+ return 0;
+
+ payload->domain = domain;
+ payload->dev = dev;
+ payload->iova = iova;
+ payload->flags = flags;
+ payload->token = token;
+ payload->idx = idx;
+
+ mutex_lock(&iommu_cb_set.payload_list_lock);
+ list_add_tail(&payload->list, &iommu_cb_set.payload_list);
+ mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+ schedule_work(&iommu_cb_set.smmu_work);
+
+ return 0;
+}
+
+static int cam_smmu_translate_dir_to_iommu_dir(
+ enum cam_smmu_map_dir dir)
+{
+ switch (dir) {
+ case CAM_SMMU_MAP_READ:
+ return IOMMU_READ;
+ case CAM_SMMU_MAP_WRITE:
+ return IOMMU_WRITE;
+ case CAM_SMMU_MAP_RW:
+ return IOMMU_READ|IOMMU_WRITE;
+ case CAM_SMMU_MAP_INVALID:
+ default:
+ pr_err("Error: Direction is invalid. dir = %d\n", dir);
+ break;
+ }
+ return IOMMU_INVALID_DIR;
+}
+
+static enum dma_data_direction cam_smmu_translate_dir(
+ enum cam_smmu_map_dir dir)
+{
+ switch (dir) {
+ case CAM_SMMU_MAP_READ:
+ return DMA_FROM_DEVICE;
+ case CAM_SMMU_MAP_WRITE:
+ return DMA_TO_DEVICE;
+ case CAM_SMMU_MAP_RW:
+ return DMA_BIDIRECTIONAL;
+ case CAM_SMMU_MAP_INVALID:
+ default:
+ pr_err("Error: Direction is invalid. dir = %d\n", (int)dir);
+ break;
+ }
+ return DMA_NONE;
+}
+
+void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
+{
+ unsigned int i;
+ int j = 0;
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ iommu_cb_set.cb_info[i].handle = HANDLE_INIT;
+ INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list);
+ iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
+ iommu_cb_set.cb_info[i].dev = NULL;
+ iommu_cb_set.cb_info[i].cb_count = 0;
+ for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+ iommu_cb_set.cb_info[i].token[j] = NULL;
+ iommu_cb_set.cb_info[i].handler[j] = NULL;
+ }
+ if (ops == CAM_SMMU_TABLE_INIT)
+ mutex_init(&iommu_cb_set.cb_info[i].lock);
+ else
+ mutex_destroy(&iommu_cb_set.cb_info[i].lock);
+ }
+}
+
+static int cam_smmu_check_handle_unique(int hdl)
+{
+ int i;
+
+ if (hdl == HANDLE_INIT) {
+ CDBG("iommu handle is init number. Need to try again\n");
+ return 1;
+ }
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT)
+ continue;
+
+ if (iommu_cb_set.cb_info[i].handle == hdl) {
+ CDBG("iommu handle %d conflicts\n", (int)hdl);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * use low 2 bytes for handle cookie
+ */
+static int cam_smmu_create_iommu_handle(int idx)
+{
+ int rand, hdl = 0;
+
+ get_random_bytes(&rand, COOKIE_NUM_BYTE);
+ hdl = GET_SMMU_HDL(idx, rand);
+ CDBG("create handle value = %x\n", (int)hdl);
+ return hdl;
+}
+
+static int cam_smmu_attach_device(int idx)
+{
+ int rc;
+ struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+ /* attach the mapping to device */
+ rc = arm_iommu_attach_device(cb->dev, cb->mapping);
+ if (rc < 0) {
+ pr_err("Error: ARM IOMMU attach failed. ret = %d\n", rc);
+ return -ENODEV;
+ }
+ return rc;
+}
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+ int *hdl)
+{
+ int i;
+ int handle;
+
+ /* create handle and add in the iommu hardware table */
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
+ mutex_lock(&iommu_cb_set.cb_info[i].lock);
+ if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
+ pr_err("Error: %s already got handle 0x%x\n",
+ name,
+ iommu_cb_set.cb_info[i].handle);
+ mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+ return -EINVAL;
+ }
+
+ /* make sure handle is unique */
+ do {
+ handle = cam_smmu_create_iommu_handle(i);
+ } while (cam_smmu_check_handle_unique(handle));
+
+ /* put handle in the table */
+ iommu_cb_set.cb_info[i].handle = handle;
+ iommu_cb_set.cb_info[i].cb_count = 0;
+ *hdl = handle;
+ CDBG("%s creates handle 0x%x\n", name, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+ return 0;
+ }
+ }
+
+ /* if i == iommu_cb_set.cb_num */
+ pr_err("Error: Cannot find name %s or all handle exist!\n",
+ name);
+ cam_smmu_print_table();
+ return -EINVAL;
+}
+
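+/*
+ * The scratch allocator is a simple bitmap over the scratch IOVA
+ * window, with each bit covering (1 << order) pages; allocations also
+ * reserve one extra guard page.
+ */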
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+ dma_addr_t base, size_t size,
+ int order)
+{
+ unsigned int count = size >> (PAGE_SHIFT + order);
+ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ int err = 0;
+
+ if (!count) {
+ err = -EINVAL;
+ pr_err("Error: wrong size passed, page count can't be zero");
+ goto bail;
+ }
+
+ scratch_map->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!scratch_map->bitmap) {
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ scratch_map->base = base;
+ scratch_map->bits = BITS_PER_BYTE * bitmap_size;
+ scratch_map->order = order;
+
+bail:
+ return err;
+}
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+ size_t size,
+ dma_addr_t *iova)
+{
+ int rc = 0;
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ /* Transparently, add a guard page to the total count of pages
+ * to be allocated
+ */
+ count++;
+
+ if (order > mapping->order)
+ align = (1 << (order - mapping->order)) - 1;
+
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+
+ if (start > mapping->bits)
+ return -ENOMEM;
+
+ bitmap_set(mapping->bitmap, start, count);
+
+ *iova = mapping->base + (start << (mapping->order + PAGE_SHIFT));
+ return rc;
+}
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >>
+ (mapping->order + PAGE_SHIFT);
+ unsigned int count = ((size >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ if (!addr) {
+ pr_err("Error: Invalid address\n");
+ return -EINVAL;
+ }
+
+ if (start + count > mapping->bits) {
+ pr_err("Error: Invalid page bits in scratch map\n");
+ return -EINVAL;
+ }
+
+ /* Transparently, add a guard page to the total count of pages
+ * to be freed
+ */
+ count++;
+
+ bitmap_clear(mapping->bitmap, start, count);
+
+ return 0;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+ dma_addr_t virt_addr)
+{
+ struct cam_dma_buff_info *mapping;
+
+ list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->paddr == virt_addr) {
+ CDBG("Found virtual address %lx\n",
+ (unsigned long)virt_addr);
+ return mapping;
+ }
+ }
+
+ pr_err("Error: Cannot find virtual address %lx by index %d\n",
+ (unsigned long)virt_addr, idx);
+ return NULL;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+ int ion_fd)
+{
+ struct cam_dma_buff_info *mapping;
+
+ list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->ion_fd == ion_fd) {
+ CDBG(" find ion_fd %d\n", ion_fd);
+ return mapping;
+ }
+ }
+
+ pr_err("Error: Cannot find fd %d by index %d\n",
+ ion_fd, idx);
+ return NULL;
+}
+
+static void cam_smmu_clean_buffer_list(int idx)
+{
+ int ret;
+ struct cam_dma_buff_info *mapping_info, *temp;
+
+ list_for_each_entry_safe(mapping_info, temp,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ CDBG("Free mapping address %pK, i = %d, fd = %d\n",
+ (void *)mapping_info->paddr, idx,
+ mapping_info->ion_fd);
+
+ if (mapping_info->ion_fd == 0xDEADBEEF)
+ /* Clean up scratch buffers */
+ ret = cam_smmu_free_scratch_buffer_remove_from_list(
+ mapping_info, idx);
+ else
+ /* Clean up regular mapped buffers */
+ ret = cam_smmu_unmap_buf_and_remove_from_list(
+ mapping_info,
+ idx);
+
+ if (ret < 0) {
+ pr_err("Buffer delete failed: idx = %d\n", idx);
+ pr_err("Buffer delete failed: addr = %lx, fd = %d\n",
+ (unsigned long)mapping_info->paddr,
+ mapping_info->ion_fd);
+ /*
+ * Ignore this error and continue to delete other
+ * buffers in the list
+ */
+ continue;
+ }
+ }
+}
+
+static int cam_smmu_attach(int idx)
+{
+ int ret;
+
+ if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+ ret = 0;
+ } else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+ ret = cam_smmu_attach_device(idx);
+ if (ret < 0) {
+ pr_err("Error: ATTACH fail\n");
+ return -ENODEV;
+ }
+ iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
+ ret = 0;
+ } else {
+ pr_err("Error: Not detach/attach\n");
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
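+/*
+ * Map an ion fd through the dma-buf API (get/attach/map_attachment plus
+ * msm_dma_map_sg_lazy) and record the resulting IOVA, length and
+ * refcount in the context bank's buffer list.
+ */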
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+ enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr)
+{
+ int rc = -1;
+ struct cam_dma_buff_info *mapping_info;
+ struct dma_buf *buf = NULL;
+ struct dma_buf_attachment *attach = NULL;
+ struct sg_table *table = NULL;
+
+ if (!paddr_ptr) {
+ pr_err("Error: Input pointer invalid\n");
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+ if (!mapping_info) {
+ rc = -ENOSPC;
+ goto err_out;
+ }
+
+ /* allocate memory for each buffer information */
+ buf = dma_buf_get(ion_fd);
+ if (IS_ERR_OR_NULL(buf)) {
+ rc = PTR_ERR(buf);
+ pr_err("Error: dma get buf failed. fd = %d\n", ion_fd);
+ goto err_alloc;
+ }
+
+ attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
+ if (IS_ERR_OR_NULL(attach)) {
+ rc = PTR_ERR(attach);
+ pr_err("Error: dma buf attach failed\n");
+ goto err_put;
+ }
+
+ table = dma_buf_map_attachment(attach, dma_dir);
+ if (IS_ERR_OR_NULL(table)) {
+ rc = PTR_ERR(table);
+ pr_err("Error: dma buf map attachment failed\n");
+ goto err_detach;
+ }
+
+ rc = msm_dma_map_sg_lazy(iommu_cb_set.cb_info[idx].dev, table->sgl,
+ table->nents, dma_dir, buf);
+ if (rc != table->nents) {
+ pr_err("Error: msm_dma_map_sg_lazy failed\n");
+ rc = -ENOMEM;
+ goto err_unmap_sg;
+ }
+
+ if (table->sgl) {
+ CDBG("DMA buf: %pK, device: %pK, attach: %pK, table: %pK\n",
+ (void *)buf,
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)attach, (void *)table);
+ CDBG("table sgl: %pK, rc: %d, dma_address: 0x%x\n",
+ (void *)table->sgl, rc,
+ (unsigned int)table->sgl->dma_address);
+ } else {
+ rc = -EINVAL;
+ pr_err("Error: table sgl is null\n");
+ goto err_unmap_sg;
+ }
+
+ /* fill up mapping_info */
+ mapping_info->ion_fd = ion_fd;
+ mapping_info->buf = buf;
+ mapping_info->attach = attach;
+ mapping_info->table = table;
+ mapping_info->paddr = sg_dma_address(table->sgl);
+ mapping_info->len = (size_t)sg_dma_len(table->sgl);
+ mapping_info->dir = dma_dir;
+ mapping_info->ref_count = 1;
+
+ /* return paddr and len to client */
+ *paddr_ptr = sg_dma_address(table->sgl);
+ *len_ptr = (size_t)sg_dma_len(table->sgl);
+
+ if (!*paddr_ptr || !*len_ptr) {
+ pr_err("Error: Space Allocation failed!\n");
+ rc = -ENOSPC;
+ goto err_unmap_sg;
+ }
+ CDBG("ion_fd = %d, dev = %pK, paddr= %pK, len = %u\n", ion_fd,
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+ /* add to the list */
+ list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+ return 0;
+
+err_unmap_sg:
+ dma_buf_unmap_attachment(attach, table, dma_dir);
+err_detach:
+ dma_buf_detach(buf, attach);
+err_put:
+ dma_buf_put(buf);
+err_alloc:
+ kfree(mapping_info);
+err_out:
+ return rc;
+}
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+ struct cam_dma_buff_info *mapping_info,
+ int idx)
+{
+ if ((!mapping_info->buf) || (!mapping_info->table) ||
+ (!mapping_info->attach)) {
+ pr_err("Error: Invalid params dev = %pK, table = %pK",
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)mapping_info->table);
+ pr_err("Error:dma_buf = %pK, attach = %pK\n",
+ (void *)mapping_info->buf,
+ (void *)mapping_info->attach);
+ return -EINVAL;
+ }
+
+ /* iommu buffer clean up */
+ msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
+ mapping_info->table->sgl, mapping_info->table->nents,
+ mapping_info->dir, mapping_info->buf);
+ dma_buf_unmap_attachment(mapping_info->attach,
+ mapping_info->table, mapping_info->dir);
+ dma_buf_detach(mapping_info->buf, mapping_info->attach);
+ dma_buf_put(mapping_info->buf);
+ mapping_info->buf = NULL;
+
+ list_del_init(&mapping_info->list);
+
+ /* free one buffer */
+ kfree(mapping_info);
+ return 0;
+}
+
+static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
+ int ion_fd, dma_addr_t *paddr_ptr,
+ size_t *len_ptr)
+{
+ struct cam_dma_buff_info *mapping;
+
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->ion_fd == ion_fd) {
+ mapping->ref_count++;
+ *paddr_ptr = mapping->paddr;
+ *len_ptr = mapping->len;
+ return CAM_SMMU_BUFF_EXIST;
+ }
+ }
+ return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+int cam_smmu_get_handle(char *identifier, int *handle_ptr)
+{
+ int ret = 0;
+
+ if (!identifier) {
+ pr_err("Error: iommu hardware name is NULL\n");
+ return -EFAULT;
+ }
+
+ if (!handle_ptr) {
+ pr_err("Error: handle pointer is NULL\n");
+ return -EFAULT;
+ }
+
+ /* create and put handle in the table */
+ ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr);
+ if (ret < 0) {
+ pr_err("Error: %s get handle fail\n", identifier);
+ return ret;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(cam_smmu_get_handle);
+
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param ops)
+{
+ int ret = 0, idx;
+
+ CDBG("E: ops = %d\n", ops);
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -EINVAL;
+ }
+
+ switch (ops) {
+ case CAM_SMMU_ATTACH: {
+ ret = cam_smmu_attach(idx);
+ break;
+ }
+ case CAM_SMMU_DETACH: {
+ ret = 0;
+ break;
+ }
+ case CAM_SMMU_VOTE:
+ case CAM_SMMU_DEVOTE:
+ default:
+ pr_err("Error: idx = %d, ops = %d\n", idx, ops);
+ ret = -EINVAL;
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return ret;
+}
+EXPORT_SYMBOL(cam_smmu_ops);
+
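+/*
+ * Scratch buffers back virt_len bytes of IOVA space with a single
+ * physically contiguous chunk of phys_len bytes repeated nents times in
+ * the sg list; the entry is tracked with the sentinel fd 0xDEADBEEF.
+ */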
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+ size_t virt_len,
+ size_t phys_len,
+ unsigned int iommu_dir,
+ dma_addr_t *virt_addr)
+{
+ unsigned long nents = virt_len / phys_len;
+ struct cam_dma_buff_info *mapping_info = NULL;
+ size_t unmapped;
+ dma_addr_t iova = 0;
+ struct scatterlist *sg;
+ int i = 0;
+ int rc;
+ struct iommu_domain *domain = NULL;
+ struct page *page;
+ struct sg_table *table = NULL;
+
+ CDBG("%s: nents = %lu, idx = %d, virt_len = %zx\n",
+ __func__, nents, idx, virt_len);
+ CDBG("%s: phys_len = %zx, iommu_dir = %d, virt_addr = %pK\n",
+ __func__, phys_len, iommu_dir, virt_addr);
+
+ /* This table will go inside the 'mapping' structure
+ * where it will be held until put_scratch_buffer is called
+ */
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table) {
+ rc = -ENOMEM;
+ goto err_table_alloc;
+ }
+
+ rc = sg_alloc_table(table, nents, GFP_KERNEL);
+ if (rc < 0) {
+ rc = -EINVAL;
+ goto err_sg_alloc;
+ }
+
+ page = alloc_pages(GFP_KERNEL, get_order(phys_len));
+ if (!page) {
+ rc = -ENOMEM;
+ goto err_page_alloc;
+ }
+
+ /* Now we create the sg list */
+ for_each_sg(table->sgl, sg, table->nents, i)
+ sg_set_page(sg, page, phys_len, 0);
+
+
+ /* Get the domain from within our cb_set struct and map it*/
+ domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+ rc = cam_smmu_alloc_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map,
+ virt_len, &iova);
+
+ if (rc < 0) {
+ pr_err("Could not find valid iova for scratch buffer");
+ goto err_iommu_map;
+ }
+
+ if (iommu_map_sg(domain,
+ iova,
+ table->sgl,
+ table->nents,
+ iommu_dir) != virt_len) {
+ pr_err("iommu_map_sg() failed");
+ goto err_iommu_map;
+ }
+
+ /* Now update our mapping information within the cb_set struct */
+ mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+ if (!mapping_info) {
+ rc = -ENOMEM;
+ goto err_mapping_info;
+ }
+
+ mapping_info->ion_fd = 0xDEADBEEF;
+ mapping_info->buf = NULL;
+ mapping_info->attach = NULL;
+ mapping_info->table = table;
+ mapping_info->paddr = iova;
+ mapping_info->len = virt_len;
+ mapping_info->iommu_dir = iommu_dir;
+ mapping_info->ref_count = 1;
+ mapping_info->phys_len = phys_len;
+
+ CDBG("%s: paddr = %pK, len = %zx, phys_len = %zx",
+ __func__, (void *)mapping_info->paddr,
+ mapping_info->len, mapping_info->phys_len);
+
+ list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+ *virt_addr = (dma_addr_t)iova;
+
+ CDBG("%s: mapped virtual address = %lx\n", __func__,
+ (unsigned long)*virt_addr);
+ return 0;
+
+err_mapping_info:
+ unmapped = iommu_unmap(domain, iova, virt_len);
+ if (unmapped != virt_len)
+ pr_err("Unmapped only %zx instead of %zx", unmapped, virt_len);
+err_iommu_map:
+ __free_pages(sg_page(table->sgl), get_order(phys_len));
+err_page_alloc:
+ sg_free_table(table);
+err_sg_alloc:
+ kfree(table);
+err_table_alloc:
+ return rc;
+}
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+ struct cam_dma_buff_info *mapping_info,
+ int idx)
+{
+ int rc = 0;
+ size_t unmapped;
+ struct iommu_domain *domain =
+ iommu_cb_set.cb_info[idx].mapping->domain;
+ struct scratch_mapping *scratch_map =
+ &iommu_cb_set.cb_info[idx].scratch_map;
+
+ if (!mapping_info->table) {
+ pr_err("Error: Invalid params: dev = %pK, table = %pK, ",
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)mapping_info->table);
+ return -EINVAL;
+ }
+
+ /* Clean up the mapping_info struct from the list */
+ unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len);
+ if (unmapped != mapping_info->len)
+ pr_err("Unmapped only %zx instead of %zx",
+ unmapped, mapping_info->len);
+
+ rc = cam_smmu_free_scratch_va(scratch_map,
+ mapping_info->paddr,
+ mapping_info->len);
+ if (rc < 0) {
+ pr_err("Error: Invalid iova while freeing scratch buffer\n");
+ rc = -EINVAL;
+ }
+
+ __free_pages(sg_page(mapping_info->table->sgl),
+ get_order(mapping_info->phys_len));
+ sg_free_table(mapping_info->table);
+ kfree(mapping_info->table);
+ list_del_init(&mapping_info->list);
+
+ kfree(mapping_info);
+ mapping_info = NULL;
+
+ return rc;
+}
+
+int cam_smmu_get_phy_addr_scratch(int handle,
+ enum cam_smmu_map_dir dir,
+ dma_addr_t *paddr_ptr,
+ size_t virt_len,
+ size_t phys_len)
+{
+ int idx, rc;
+ unsigned int iommu_dir;
+
+ if (!paddr_ptr || !virt_len || !phys_len) {
+ pr_err("Error: Input pointer or lengths invalid\n");
+ return -EINVAL;
+ }
+
+ if (virt_len < phys_len) {
+ pr_err("Error: virt_len > phys_len");
+ return -EINVAL;
+ }
+
+ iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir);
+ if (iommu_dir == IOMMU_INVALID_DIR) {
+ pr_err("Error: translate direction failed. dir = %d\n", dir);
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+ pr_err("Error: Context bank does not support scratch bufs\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ CDBG("%s: smmu handle = %x, idx = %d, dir = %d\n",
+ __func__, handle, idx, dir);
+ CDBG("%s: virt_len = %zx, phys_len = %zx\n",
+ __func__, virt_len, phys_len);
+
+ if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+ pr_err("Error: Device %s should call SMMU attach before map buffer\n",
+ iommu_cb_set.cb_info[idx].name);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (!IS_ALIGNED(virt_len, PAGE_SIZE)) {
+ pr_err("Requested scratch buffer length not page aligned");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (!IS_ALIGNED(virt_len, phys_len)) {
+ pr_err("Requested virtual length not aligned with physical length");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ rc = cam_smmu_alloc_scratch_buffer_add_to_list(idx,
+ virt_len,
+ phys_len,
+ iommu_dir,
+ paddr_ptr);
+ if (rc < 0) {
+ pr_err("Error: mapping or add list fail\n");
+ goto error;
+ }
+
+error:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+
+int cam_smmu_put_phy_addr_scratch(int handle,
+ dma_addr_t paddr)
+{
+ int idx;
+ int rc = -1;
+ struct cam_dma_buff_info *mapping_info;
+
+ /* find index in the iommu_cb_set.cb_info */
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto handle_err;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+ pr_err("Error: Context bank does not support scratch buffers");
+ rc = -EINVAL;
+ goto handle_err;
+ }
+
+ /* Based on virtual address and index, we can find mapping info
+ * of the scratch buffer
+ */
+ mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr);
+ if (!mapping_info) {
+ pr_err("Error: Invalid params\n");
+ rc = -EINVAL;
+ goto handle_err;
+ }
+
+ /* unmapping one buffer from device */
+ rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx);
+ if (rc < 0) {
+ pr_err("Error: unmap or remove list fail\n");
+ goto handle_err;
+ }
+
+handle_err:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+
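+/*
+ * Illustrative client sequence (assumed, not a call site in this
+ * patch): a "vfe" client would typically do
+ *
+ *	cam_smmu_get_handle("vfe", &handle);
+ *	cam_smmu_ops(handle, CAM_SMMU_ATTACH);
+ *	cam_smmu_get_phy_addr(handle, fd, CAM_SMMU_MAP_RW, &iova, &len);
+ *	...
+ *	cam_smmu_put_phy_addr(handle, fd);
+ *	cam_smmu_destroy_handle(handle);
+ */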
+int cam_smmu_get_phy_addr(int handle, int ion_fd,
+ enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr)
+{
+ int idx, rc;
+ enum dma_data_direction dma_dir;
+ enum cam_smmu_buf_state buf_state;
+
+ if (!paddr_ptr || !len_ptr) {
+ pr_err("Error: Input pointers are invalid\n");
+ return -EINVAL;
+ }
+ /* clean the content from clients */
+ *paddr_ptr = (dma_addr_t)NULL;
+ *len_ptr = (size_t)0;
+
+ dma_dir = cam_smmu_translate_dir(dir);
+ if (dma_dir == DMA_NONE) {
+ pr_err("Error: translate direction failed. dir = %d\n", dir);
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+ pr_err("Error: Device %s should call SMMU attach before map buffer\n",
+ iommu_cb_set.cb_info[idx].name);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
+ if (buf_state == CAM_SMMU_BUFF_EXIST) {
+ CDBG("ion_fd:%d already in the list, give same addr back",
+ ion_fd);
+ rc = 0;
+ goto get_addr_end;
+ }
+ rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+ paddr_ptr, len_ptr);
+ if (rc < 0) {
+ pr_err("Error: mapping or add list fail\n");
+ goto get_addr_end;
+ }
+
+get_addr_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_get_phy_addr);
+
+int cam_smmu_put_phy_addr(int handle, int ion_fd)
+{
+ int idx, rc;
+ struct cam_dma_buff_info *mapping_info;
+
+ /* find index in the iommu_cb_set.cb_info */
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto put_addr_end;
+ }
+
+ /* based on ion fd and index, we can find mapping info of buffer */
+ mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+ if (!mapping_info) {
+ pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+ idx, ion_fd);
+ rc = -EINVAL;
+ goto put_addr_end;
+ }
+
+ mapping_info->ref_count--;
+ if (mapping_info->ref_count > 0) {
+ CDBG("There are still %u buffer(s) with same fd %d",
+ mapping_info->ref_count, mapping_info->ion_fd);
+ rc = 0;
+ goto put_addr_end;
+ }
+
+ /* unmapping one buffer from device */
+ rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+ if (rc < 0) {
+ pr_err("Error: unmap or remove list fail\n");
+ goto put_addr_end;
+ }
+
+put_addr_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_put_phy_addr);
+
+int cam_smmu_destroy_handle(int handle)
+{
+ int idx;
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -EINVAL;
+ }
+
+ if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
+ pr_err("Client %s buffer list is not clean!\n",
+ iommu_cb_set.cb_info[idx].name);
+ cam_smmu_print_list(idx);
+ cam_smmu_clean_buffer_list(idx);
+ }
+
+ iommu_cb_set.cb_info[idx].cb_count = 0;
+ iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return 0;
+}
+EXPORT_SYMBOL(cam_smmu_destroy_handle);
+
+/* This function can only be called after SMMU driver probe */
+int cam_smmu_get_num_of_clients(void)
+{
+ return iommu_cb_set.cb_num;
+}
+
+static void cam_smmu_release_cb(struct platform_device *pdev)
+{
+ int i = 0;
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ arm_iommu_detach_device(iommu_cb_set.cb_info[i].dev);
+ arm_iommu_release_mapping(iommu_cb_set.cb_info[i].mapping);
+ }
+
+ devm_kfree(&pdev->dev, iommu_cb_set.cb_info);
+ iommu_cb_set.cb_num = 0;
+}
+
+static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
+ struct device *dev)
+{
+ int rc = 0;
+
+ if (!cb || !dev) {
+ pr_err("Error: invalid input params\n");
+ return -EINVAL;
+ }
+
+ cb->dev = dev;
+ /* Reserve 256M if scratch buffer support is desired
+ * and initialize the scratch mapping structure
+ */
+ if (cb->scratch_buf_support) {
+ cb->va_start = SCRATCH_ALLOC_END;
+ cb->va_len = VA_SPACE_END - SCRATCH_ALLOC_END;
+
+ rc = cam_smmu_init_scratch_map(&cb->scratch_map,
+ SCRATCH_ALLOC_START,
+ SCRATCH_ALLOC_END - SCRATCH_ALLOC_START,
+ 0);
+ if (rc < 0) {
+ pr_err("Error: failed to create scratch map\n");
+ rc = -ENODEV;
+ goto end;
+ }
+ } else {
+ cb->va_start = SZ_128K;
+ cb->va_len = VA_SPACE_END - SZ_128K;
+ }
+
+ /* create a virtual mapping */
+ cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+ cb->va_start, cb->va_len);
+ if (IS_ERR(cb->mapping)) {
+ pr_err("Error: create mapping Failed\n");
+ rc = -ENODEV;
+ goto end;
+ }
+
+ return 0;
+
+end:
+ return rc;
+}
+
+static int cam_alloc_smmu_context_banks(struct device *dev)
+{
+ struct device_node *domains_child_node = NULL;
+
+ if (!dev) {
+ pr_err("Error: Invalid device\n");
+ return -ENODEV;
+ }
+
+ iommu_cb_set.cb_num = 0;
+
+ /* traverse thru all the child nodes and increment the cb count */
+ for_each_child_of_node(dev->of_node, domains_child_node) {
+ if (of_device_is_compatible(domains_child_node,
+ "qcom,msm-cam-smmu-cb"))
+ iommu_cb_set.cb_num++;
+
+ if (of_device_is_compatible(domains_child_node,
+ "qcom,qsmmu-cam-cb"))
+ iommu_cb_set.cb_num++;
+ }
+
+ if (iommu_cb_set.cb_num == 0) {
+ pr_err("Error: no context banks present\n");
+ return -ENOENT;
+ }
+
+ /* allocate memory for the context banks */
+ iommu_cb_set.cb_info = devm_kzalloc(dev,
+ iommu_cb_set.cb_num * sizeof(struct cam_context_bank_info),
+ GFP_KERNEL);
+
+ if (!iommu_cb_set.cb_info) {
+ pr_err("Error: cannot allocate context banks\n");
+ return -ENOMEM;
+ }
+
+ cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT);
+ iommu_cb_set.cb_init_count = 0;
+
+ CDBG("no of context banks :%d\n", iommu_cb_set.cb_num);
+ return 0;
+}
+
+static int cam_populate_smmu_context_banks(struct device *dev,
+ enum cam_iommu_type type)
+{
+ int rc = 0;
+ struct cam_context_bank_info *cb;
+ struct device *ctx;
+
+ if (!dev) {
+ pr_err("Error: Invalid device\n");
+ return -ENODEV;
+ }
+
+ /* check the bounds */
+ if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) {
+ pr_err("Error: populate more than allocated cb\n");
+ rc = -EBADHANDLE;
+ goto cb_init_fail;
+ }
+
+ /* read the context bank from cb set */
+ cb = &iommu_cb_set.cb_info[iommu_cb_set.cb_init_count];
+
+ /* set the name of the context bank */
+ rc = of_property_read_string(dev->of_node, "label", &cb->name);
+ if (rc) {
+ pr_err("Error: failed to read label from sub device\n");
+ goto cb_init_fail;
+ }
+
+ /* Check if context bank supports scratch buffers */
+ if (of_property_read_bool(dev->of_node, "qcom,scratch-buf-support"))
+ cb->scratch_buf_support = 1;
+ else
+ cb->scratch_buf_support = 0;
+
+ /* set the secure/non secure domain type */
+ if (of_property_read_bool(dev->of_node, "qcom,secure-context"))
+ cb->is_secure = true;
+ else
+ cb->is_secure = false;
+
+ CDBG("cb->name :%s, cb->is_secure :%d, cb->scratch_support :%d\n",
+ cb->name, cb->is_secure, cb->scratch_buf_support);
+
+ /* set up the iommu mapping for the context bank */
+ if (type == CAM_QSMMU) {
+ ctx = msm_iommu_get_ctx(cb->name);
+ if (IS_ERR_OR_NULL(ctx)) {
+ rc = PTR_ERR(ctx);
+ pr_err("Invalid pointer of ctx : %s rc = %d\n",
+ cb->name, rc);
+ return -EINVAL;
+ }
+ CDBG("getting QSMMU ctx : %s\n", cb->name);
+ } else {
+ ctx = dev;
+ CDBG("getting Arm SMMU ctx : %s\n", cb->name);
+ }
+ rc = cam_smmu_setup_cb(cb, ctx);
+ if (rc < 0)
+ pr_err("Error: failed to setup cb : %s\n", cb->name);
+
+ iommu_set_fault_handler(cb->mapping->domain,
+ cam_smmu_iommu_fault_handler,
+ (void *)cb->name);
+
+ /* increment count to next bank */
+ iommu_cb_set.cb_init_count++;
+
+ CDBG("X: cb init count :%d\n", iommu_cb_set.cb_init_count);
+ return rc;
+
+cb_init_fail:
+ iommu_cb_set.cb_info = NULL;
+ return rc;
+}
+
+static int cam_smmu_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct device *dev = &pdev->dev;
+
+ if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
+ rc = cam_alloc_smmu_context_banks(dev);
+ if (rc < 0) {
+ pr_err("Error: allocating context banks\n");
+ return -ENOMEM;
+ }
+ }
+ if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) {
+ rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
+ if (rc < 0) {
+ pr_err("Error: populating context banks\n");
+ return -ENOMEM;
+ }
+ return rc;
+ }
+ if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) {
+ rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU);
+ if (rc < 0) {
+ pr_err("Error: populating context banks\n");
+ return -ENOMEM;
+ }
+ return rc;
+ }
+
+ /* probe thru all the subdevices */
+ rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match,
+ NULL, &pdev->dev);
+ if (rc < 0)
+ pr_err("Error: populating devices\n");
+
+ INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
+ mutex_init(&iommu_cb_set.payload_list_lock);
+ INIT_LIST_HEAD(&iommu_cb_set.payload_list);
+
+ return rc;
+}
+
+static int cam_smmu_remove(struct platform_device *pdev)
+{
+ /* release all the context banks and memory allocated */
+ cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_DEINIT);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cam-smmu"))
+ cam_smmu_release_cb(pdev);
+ return 0;
+}
+
+static struct platform_driver cam_smmu_driver = {
+ .probe = cam_smmu_probe,
+ .remove = cam_smmu_remove,
+ .driver = {
+ .name = "msm_cam_smmu",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cam_smmu_dt_match,
+ },
+};
+
+static int __init cam_smmu_init_module(void)
+{
+ return platform_driver_register(&cam_smmu_driver);
+}
+
+static void __exit cam_smmu_exit_module(void)
+{
+ platform_driver_unregister(&cam_smmu_driver);
+}
+
+module_init(cam_smmu_init_module);
+module_exit(cam_smmu_exit_module);
+MODULE_DESCRIPTION("MSM Camera SMMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/common/cam_smmu_api.h b/drivers/media/platform/msm/ais/common/cam_smmu_api.h
new file mode 100644
index 000000000000..4a13598dc719
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/cam_smmu_api.h
@@ -0,0 +1,166 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SMMU_API_H_
+#define _CAM_SMMU_API_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-attrs.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+
+/*
+ * Enum for possible CAM SMMU operations
+ */
+enum cam_smmu_ops_param {
+ CAM_SMMU_ATTACH,
+ CAM_SMMU_DETACH,
+ CAM_SMMU_VOTE,
+ CAM_SMMU_DEVOTE,
+ CAM_SMMU_OPS_INVALID
+};
+
+enum cam_smmu_map_dir {
+ CAM_SMMU_MAP_READ,
+ CAM_SMMU_MAP_WRITE,
+ CAM_SMMU_MAP_RW,
+ CAM_SMMU_MAP_INVALID
+};
+
+/**
+ * @param identifier: Unique identifier to be used by clients which they
+ * should get from device tree. CAM SMMU driver will
+ * not enforce how this string is obtained and will
+ * only validate this against the list of permitted
+ * identifiers
+ * @param handle_ptr: Based on the identifier, the CAM SMMU driver will
+ * fill in the handle pointed to by handle_ptr
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_handle(char *identifier, int *handle_ptr);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param op : Operation to be performed. Can be either CAM_SMMU_ATTACH
+ * or CAM_SMMU_DETACH
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ * @phys_addr : Pointer to physical address where the mapped address will be
+ * returned.
+ * @dir : Mapping direction, which will translate to DMA_BIDIRECTIONAL,
+ * DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @len : Length of the mapped buffer, returned by the CAM SMMU driver.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_phy_addr(int handle,
+ int ion_fd, enum cam_smmu_map_dir dir,
+ dma_addr_t *dma_addr, size_t *len_ptr);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_put_phy_addr(int handle, int ion_fd);
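+
+/*
+ * Illustrative usage sketch (hypothetical, for documentation only): a client
+ * that identifies itself as "vfe" maps and later unmaps one ION buffer.
+ * The identifier string, ion_fd and error handling are placeholders.
+ *
+ *   int hdl, rc;
+ *   dma_addr_t iova;
+ *   size_t len;
+ *
+ *   rc = cam_smmu_get_handle("vfe", &hdl);
+ *   if (!rc)
+ *           rc = cam_smmu_ops(hdl, CAM_SMMU_ATTACH);
+ *   if (!rc)
+ *           rc = cam_smmu_get_phy_addr(hdl, ion_fd, CAM_SMMU_MAP_RW,
+ *                   &iova, &len);
+ *   // ... program iova/len into the hardware and run the use case ...
+ *   cam_smmu_put_phy_addr(hdl, ion_fd);
+ *   cam_smmu_ops(hdl, CAM_SMMU_DETACH);
+ *   cam_smmu_destroy_handle(hdl);
+ */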
+
+/**
+ * @brief : Allocates a scratch buffer
+ *
+ * This function allocates a scratch virtual buffer of length virt_len in the
+ * device virtual address space mapped to phys_len physically contiguous bytes
+ * in that device's SMMU.
+ *
+ * virt_len and phys_len are expected to be aligned to PAGE_SIZE and to each
+ * other; otherwise -EINVAL is returned.
+ *
+ * -EINVAL is also returned if virt_len is less than phys_len.
+ *
+ * Passing too large a phys_len may also cause failure if that much
+ * physically contiguous memory is not available for allocation.
+ *
+ * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param dir : Direction of mapping, which will translate to IOMMU_READ,
+ * IOMMU_WRITE or a bit mask of both.
+ * @param paddr_ptr: Device virtual address that the client device will be
+ * able to read from/write to
+ * @param virt_len : Virtual length of the scratch buffer
+ * @param phys_len : Physical length of the scratch buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_get_phy_addr_scratch(int handle,
+ enum cam_smmu_map_dir dir,
+ dma_addr_t *paddr_ptr,
+ size_t virt_len,
+ size_t phys_len);
+
+/**
+ * @brief : Frees a scratch buffer
+ *
+ * This function frees a scratch buffer and releases the corresponding SMMU
+ * mappings.
+ *
+ * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param paddr_ptr: Device virtual address of client's scratch buffer that
+ * will be freed.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_put_phy_addr_scratch(int handle,
+ dma_addr_t paddr);
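+
+/*
+ * Illustrative sketch of the scratch buffer API (hypothetical values): the
+ * lengths follow the rules documented above, i.e. PAGE_SIZE aligned and
+ * virt_len >= phys_len. hdl is a handle obtained via cam_smmu_get_handle().
+ *
+ *   dma_addr_t scratch_iova;
+ *   int rc;
+ *
+ *   rc = cam_smmu_get_phy_addr_scratch(hdl, CAM_SMMU_MAP_RW,
+ *           &scratch_iova, SZ_2M, SZ_1M);
+ *   if (!rc) {
+ *           // ... hand scratch_iova to the hardware ...
+ *           cam_smmu_put_phy_addr_scratch(hdl, scratch_iova);
+ *   }
+ */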
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_destroy_handle(int handle);
+
+/**
+ * @return Number of clients. Zero in case of error.
+ */
+int cam_smmu_get_num_of_clients(void);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @return Index of SMMU client. Negative in case of error.
+ */
+int cam_smmu_find_index_by_handle(int hdl);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param client_page_fault_handler: Handler invoked when an IOMMU page
+ * fault occurs
+ * @param token: Opaque token passed back to the handler on a page fault
+ */
+void cam_smmu_reg_client_page_fault_handler(int handle,
+ void (*client_page_fault_handler)(struct iommu_domain *,
+ struct device *, unsigned long,
+ int, void*), void *token);
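+
+/*
+ * Illustrative sketch (hypothetical handler and token names): registering a
+ * client page fault handler for a previously created handle.
+ *
+ *   cam_smmu_reg_client_page_fault_handler(hdl, my_fault_handler, my_token);
+ */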
+
+#endif /* _CAM_SMMU_API_H_ */
diff --git a/drivers/media/platform/msm/ais/common/cam_soc_api.c b/drivers/media/platform/msm/ais/common/cam_soc_api.c
new file mode 100644
index 000000000000..09d470ed6eb0
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/cam_soc_api.c
@@ -0,0 +1,1015 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-SOC %s:%d " fmt, __func__, __LINE__
+#define NO_SET_RATE -1
+#define INIT_RATE -2
+
+#ifdef CONFIG_CAM_SOC_API_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/msm-bus.h>
+#include "cam_soc_api.h"
+
+struct msm_cam_bus_pscale_data {
+ struct msm_bus_scale_pdata *pdata;
+ uint32_t bus_client;
+ uint32_t num_usecases;
+ uint32_t num_paths;
+ unsigned int vector_index;
+ bool dyn_vote;
+ struct mutex lock;
+};
+
+struct msm_cam_bus_pscale_data g_cv[CAM_BUS_CLIENT_MAX];
+
+/* Get all clocks from DT */
+static int msm_camera_get_clk_info_internal(struct device *dev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr,
+ size_t *num_clk)
+{
+ int rc = 0;
+ size_t cnt, tmp;
+ uint32_t *rates, i = 0;
+ const char *clk_ctl = NULL;
+ bool clock_cntl_support = false;
+ struct device_node *of_node;
+
+ of_node = dev->of_node;
+
+ cnt = of_property_count_strings(of_node, "clock-names");
+ if (cnt <= 0) {
+ pr_err("err: No clocks found in DT=%zu\n", cnt);
+ return -EINVAL;
+ }
+
+ tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
+ if (tmp <= 0) {
+ pr_err("err: No clk rates device tree, count=%zu", tmp);
+ return -EINVAL;
+ }
+
+ if (cnt != tmp) {
+ pr_err("err: clk name/rates mismatch, strings=%zu, rates=%zu\n",
+ cnt, tmp);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(of_node, "qcom,clock-cntl-support")) {
+ tmp = of_property_count_strings(of_node,
+ "qcom,clock-control");
+ if (tmp <= 0) {
+ pr_err("err: control strings not found in DT count=%zu",
+ tmp);
+ return -EINVAL;
+ }
+ if (cnt != tmp) {
+ pr_err("err: controls mismatch, strings=%zu, ctl=%zu\n",
+ cnt, tmp);
+ return -EINVAL;
+ }
+ clock_cntl_support = true;
+ }
+
+ *num_clk = cnt;
+
+ *clk_info = devm_kcalloc(dev, cnt,
+ sizeof(struct msm_cam_clk_info), GFP_KERNEL);
+ if (!*clk_info)
+ return -ENOMEM;
+
+ *clk_ptr = devm_kcalloc(dev, cnt, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!*clk_ptr) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ rates = devm_kcalloc(dev, cnt, sizeof(long), GFP_KERNEL);
+ if (!rates) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+ rates, cnt);
+ if (rc < 0) {
+ pr_err("err: failed reading clock rates\n");
+ rc = -EINVAL;
+ goto err3;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ rc = of_property_read_string_index(of_node, "clock-names",
+ i, &((*clk_info)[i].clk_name));
+ if (rc < 0) {
+ pr_err("%s reading clock-name failed index %d\n",
+ __func__, i);
+ rc = -EINVAL;
+ goto err3;
+ }
+
+ CDBG("dbg: clk-name[%d] = %s\n", i, (*clk_info)[i].clk_name);
+ if (clock_cntl_support) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,clock-control", i, &clk_ctl);
+ if (rc < 0) {
+ pr_err("%s reading clock-control failed index %d\n",
+ __func__, i);
+ rc = -EINVAL;
+ goto err3;
+ }
+
+ if (!strcmp(clk_ctl, "NO_SET_RATE"))
+ (*clk_info)[i].clk_rate = NO_SET_RATE;
+ else if (!strcmp(clk_ctl, "INIT_RATE"))
+ (*clk_info)[i].clk_rate = INIT_RATE;
+ else if (!strcmp(clk_ctl, "SET_RATE"))
+ (*clk_info)[i].clk_rate = rates[i];
+ else {
+ pr_err("%s: error: clock control has invalid value\n",
+ __func__);
+ rc = -EBUSY;
+ goto err3;
+ }
+ } else
+ (*clk_info)[i].clk_rate =
+ (rates[i] == 0) ? (long)-1 : rates[i];
+
+ CDBG("dbg: clk-rate[%d] = rate: %ld\n",
+ i, (*clk_info)[i].clk_rate);
+
+ (*clk_ptr)[i] =
+ devm_clk_get(dev, (*clk_info)[i].clk_name);
+ if (IS_ERR((*clk_ptr)[i])) {
+ rc = PTR_ERR((*clk_ptr)[i]);
+ goto err4;
+ }
+ CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
+ }
+
+ devm_kfree(dev, rates);
+ return rc;
+
+err4:
+ for (--i; i >= 0; i--)
+ devm_clk_put(dev, (*clk_ptr)[i]);
+err3:
+ devm_kfree(dev, rates);
+err2:
+ devm_kfree(dev, *clk_ptr);
+err1:
+ devm_kfree(dev, *clk_info);
+ return rc;
+}
+
+/* Get all clocks from DT for I2C devices */
+int msm_camera_i2c_dev_get_clk_info(struct device *dev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr,
+ size_t *num_clk)
+{
+ int rc = 0;
+
+ if (!dev || !clk_info || !clk_ptr || !num_clk)
+ return -EINVAL;
+
+ rc = msm_camera_get_clk_info_internal(dev, clk_info, clk_ptr, num_clk);
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_i2c_dev_get_clk_info);
+
+/* Get all clocks from DT for platform devices */
+int msm_camera_get_clk_info(struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr,
+ size_t *num_clk)
+{
+ int rc = 0;
+
+ if (!pdev || !&pdev->dev || !clk_info || !clk_ptr || !num_clk)
+ return -EINVAL;
+
+ rc = msm_camera_get_clk_info_internal(&pdev->dev,
+ clk_info, clk_ptr, num_clk);
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_get_clk_info);
+
+/* Get all clocks and multiple rates from DT */
+int msm_camera_get_clk_info_and_rates(
+ struct platform_device *pdev,
+ struct msm_cam_clk_info **pclk_info,
+ struct clk ***pclks,
+ uint32_t ***pclk_rates,
+ size_t *num_set,
+ size_t *num_clk)
+{
+ int rc = 0, tmp_var, cnt, tmp;
+ uint32_t i = 0, j = 0;
+ struct device_node *of_node;
+ uint32_t **rates;
+ struct clk **clks;
+ struct msm_cam_clk_info *clk_info;
+
+ if (!pdev || !pclk_info || !num_clk
+ || !pclk_rates || !pclks || !num_set)
+ return -EINVAL;
+
+ of_node = pdev->dev.of_node;
+
+ cnt = of_property_count_strings(of_node, "clock-names");
+ if (cnt <= 0) {
+ pr_err("err: No clocks found in DT=%d\n", cnt);
+ return -EINVAL;
+ }
+
+ tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
+ if (tmp <= 0) {
+ pr_err("err: No clk rates device tree, count=%d\n", tmp);
+ return -EINVAL;
+ }
+
+ if ((tmp % cnt) != 0) {
+ pr_err("err: clk name/rates mismatch, strings=%d, rates=%d\n",
+ cnt, tmp);
+ return -EINVAL;
+ }
+
+ *num_clk = cnt;
+ *num_set = (tmp / cnt);
+
+ clk_info = devm_kcalloc(&pdev->dev, cnt,
+ sizeof(struct msm_cam_clk_info), GFP_KERNEL);
+ if (!clk_info)
+ return -ENOMEM;
+
+ clks = devm_kcalloc(&pdev->dev, cnt, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!clks) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ rates = devm_kcalloc(&pdev->dev, *num_set,
+ sizeof(uint32_t *), GFP_KERNEL);
+ if (!rates) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ for (i = 0; i < *num_set; i++) {
+ rates[i] = devm_kcalloc(&pdev->dev, *num_clk,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!rates[i]) {
+ rc = -ENOMEM;
+ for (--i; i >= 0; i--)
+ devm_kfree(&pdev->dev, rates[i]);
+ goto err3;
+ }
+ }
+
+ tmp_var = 0;
+ for (i = 0; i < *num_set; i++) {
+ for (j = 0; j < *num_clk; j++) {
+ rc = of_property_read_u32_index(of_node,
+ "qcom,clock-rates", tmp_var++, &rates[i][j]);
+ if (rc < 0) {
+ pr_err("err: failed reading clock rates\n");
+ rc = -EINVAL;
+ goto err4;
+ }
+ CDBG("Clock rate idx %d idx %d value %d\n",
+ i, j, rates[i][j]);
+ }
+ }
+ for (i = 0; i < *num_clk; i++) {
+ rc = of_property_read_string_index(of_node, "clock-names",
+ i, &clk_info[i].clk_name);
+ if (rc < 0) {
+ pr_err("%s reading clock-name failed index %d\n",
+ __func__, i);
+ rc = -EINVAL;
+ goto err4;
+ }
+
+ CDBG("dbg: clk-name[%d] = %s\n", i, clk_info[i].clk_name);
+
+ clks[i] =
+ devm_clk_get(&pdev->dev, clk_info[i].clk_name);
+ if (IS_ERR(clks[i])) {
+ rc = PTR_ERR(clks[i]);
+ goto err5;
+ }
+ CDBG("clk ptr[%d] :%pK\n", i, clks[i]);
+ }
+ *pclk_info = clk_info;
+ *pclks = clks;
+ *pclk_rates = rates;
+
+ return rc;
+
+err5:
+ for (--i; i >= 0; i--)
+ devm_clk_put(&pdev->dev, clks[i]);
+err4:
+ for (i = 0; i < *num_set; i++)
+ devm_kfree(&pdev->dev, rates[i]);
+err3:
+ devm_kfree(&pdev->dev, rates);
+err2:
+ devm_kfree(&pdev->dev, clks);
+err1:
+ devm_kfree(&pdev->dev, clk_info);
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_get_clk_info_and_rates);
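+
+/*
+ * Illustrative usage sketch (hypothetical, simplified error handling):
+ * fetching the clocks together with the table of per-set rates, where
+ * "qcom,clock-rates" carries num_set * num_clk entries, then applying the
+ * first rate of the first set.
+ *
+ *   struct msm_cam_clk_info *clk_info;
+ *   struct clk **clks;
+ *   uint32_t **rates;
+ *   size_t num_set, num_clk;
+ *   int rc;
+ *
+ *   rc = msm_camera_get_clk_info_and_rates(pdev, &clk_info, &clks,
+ *           &rates, &num_set, &num_clk);
+ *   if (!rc)
+ *           msm_camera_clk_set_rate(&pdev->dev, clks[0], rates[0][0]);
+ */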
+
+/* Enable/Disable all clocks */
+int msm_camera_clk_enable(struct device *dev,
+ struct msm_cam_clk_info *clk_info,
+ struct clk **clk_ptr, int num_clk, int enable)
+{
+ int i;
+ int rc = 0;
+ long clk_rate;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ CDBG("enable %s\n", clk_info[i].clk_name);
+ if (clk_info[i].clk_rate > 0) {
+ clk_rate = clk_round_rate(clk_ptr[i],
+ clk_info[i].clk_rate);
+ if (clk_rate < 0) {
+ pr_err("%s round failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+ rc = clk_set_rate(clk_ptr[i],
+ clk_rate);
+ if (rc < 0) {
+ pr_err("%s set failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+
+ } else if (clk_info[i].clk_rate == INIT_RATE) {
+ clk_rate = clk_get_rate(clk_ptr[i]);
+ if (clk_rate == 0) {
+ clk_rate =
+ clk_round_rate(clk_ptr[i], 0);
+ if (clk_rate < 0) {
+ pr_err("%s round rate failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+ rc = clk_set_rate(clk_ptr[i],
+ clk_rate);
+ if (rc < 0) {
+ pr_err("%s set rate failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+ }
+ }
+ rc = clk_prepare_enable(clk_ptr[i]);
+ if (rc < 0) {
+ pr_err("%s enable failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_enable_err;
+ }
+ if (clk_info[i].delay > 20) {
+ msleep(clk_info[i].delay);
+ } else if (clk_info[i].delay) {
+ usleep_range(clk_info[i].delay * 1000,
+ (clk_info[i].delay * 1000) + 1000);
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_ptr[i] != NULL) {
+ CDBG("%s disable %s\n", __func__,
+ clk_info[i].clk_name);
+ clk_disable_unprepare(clk_ptr[i]);
+ }
+ }
+ }
+ return rc;
+
+cam_clk_enable_err:
+cam_clk_set_err:
+ for (i--; i >= 0; i--) {
+ if (clk_ptr[i] != NULL)
+ clk_disable_unprepare(clk_ptr[i]);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_clk_enable);
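+
+/*
+ * Illustrative usage sketch (hypothetical, simplified error handling): the
+ * typical get/enable/disable/put sequence from a platform driver.
+ *
+ *   struct msm_cam_clk_info *clk_info;
+ *   struct clk **clks;
+ *   size_t num_clk;
+ *   int rc;
+ *
+ *   rc = msm_camera_get_clk_info(pdev, &clk_info, &clks, &num_clk);
+ *   if (!rc)
+ *           rc = msm_camera_clk_enable(&pdev->dev, clk_info, clks,
+ *                   num_clk, 1);
+ *   // ... use the hardware ...
+ *   msm_camera_clk_enable(&pdev->dev, clk_info, clks, num_clk, 0);
+ *   msm_camera_put_clk_info(pdev, &clk_info, &clks, num_clk);
+ */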
+
+/* Set rate on a specific clock */
+long msm_camera_clk_set_rate(struct device *dev,
+ struct clk *clk,
+ long clk_rate)
+{
+ int rc = 0;
+ long rate = 0;
+
+ if (!dev || !clk || (clk_rate < 0))
+ return -EINVAL;
+
+ CDBG("clk : %pK, enable : %ld\n", clk, clk_rate);
+
+ if (clk_rate > 0) {
+ rate = clk_round_rate(clk, clk_rate);
+ if (rate < 0) {
+ pr_err("round rate failed\n");
+ return -EINVAL;
+ }
+
+ rc = clk_set_rate(clk, rate);
+ if (rc < 0) {
+ pr_err("set rate failed\n");
+ return -EINVAL;
+ }
+ }
+
+ return rate;
+}
+EXPORT_SYMBOL(msm_camera_clk_set_rate);
+
+/* release memory allocated for clocks */
+static int msm_camera_put_clk_info_internal(struct device *dev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr, int cnt)
+{
+ int i;
+
+ for (i = cnt - 1; i >= 0; i--) {
+ if ((*clk_ptr)[i] != NULL)
+ devm_clk_put(dev, (*clk_ptr)[i]);
+
+ CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
+ }
+ devm_kfree(dev, *clk_info);
+ devm_kfree(dev, *clk_ptr);
+ *clk_info = NULL;
+ *clk_ptr = NULL;
+ return 0;
+}
+
+/* release memory allocated for clocks for i2c devices */
+int msm_camera_i2c_dev_put_clk_info(struct device *dev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr, int cnt)
+{
+ int rc = 0;
+
+ if (!dev || !clk_info || !clk_ptr)
+ return -EINVAL;
+
+ rc = msm_camera_put_clk_info_internal(dev, clk_info, clk_ptr, cnt);
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_i2c_dev_put_clk_info);
+
+/* release memory allocated for clocks for platform devices */
+int msm_camera_put_clk_info(struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr, int cnt)
+{
+ int rc = 0;
+
+ if (!pdev || !&pdev->dev || !clk_info || !clk_ptr)
+ return -EINVAL;
+
+ rc = msm_camera_put_clk_info_internal(&pdev->dev,
+ clk_info, clk_ptr, cnt);
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_put_clk_info);
+
+int msm_camera_put_clk_info_and_rates(struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr, uint32_t ***clk_rates,
+ size_t set, size_t cnt)
+{
+ int i;
+
+ for (i = set - 1; i >= 0; i--)
+ devm_kfree(&pdev->dev, (*clk_rates)[i]);
+
+ devm_kfree(&pdev->dev, *clk_rates);
+ for (i = cnt - 1; i >= 0; i--) {
+ if ((*clk_ptr)[i] != NULL)
+ devm_clk_put(&pdev->dev, (*clk_ptr)[i]);
+ CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
+ }
+ devm_kfree(&pdev->dev, *clk_info);
+ devm_kfree(&pdev->dev, *clk_ptr);
+ *clk_info = NULL;
+ *clk_ptr = NULL;
+ *clk_rates = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_put_clk_info_and_rates);
+
+/* Get regulators from DT */
+int msm_camera_get_regulator_info(struct platform_device *pdev,
+ struct msm_cam_regulator **vdd_info,
+ int *num_reg)
+{
+ uint32_t cnt;
+ int i, rc;
+ struct device_node *of_node;
+ char prop_name[32];
+ struct msm_cam_regulator *tmp_reg;
+
+ if (!pdev || !vdd_info || !num_reg)
+ return -EINVAL;
+
+ of_node = pdev->dev.of_node;
+
+ if (!of_get_property(of_node, "qcom,vdd-names", NULL)) {
+ pr_err("err: Regulators property not found\n");
+ return -EINVAL;
+ }
+
+ cnt = of_property_count_strings(of_node, "qcom,vdd-names");
+ if (cnt <= 0) {
+ pr_err("err: no regulators found in device tree, count=%d",
+ cnt);
+ return -EINVAL;
+ }
+
+ tmp_reg = devm_kcalloc(&pdev->dev, cnt,
+ sizeof(struct msm_cam_regulator), GFP_KERNEL);
+ if (!tmp_reg)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,vdd-names", i, &tmp_reg[i].name);
+ if (rc < 0) {
+ pr_err("Fail to fetch regulators: %d\n", i);
+ rc = -EINVAL;
+ goto err1;
+ }
+
+ CDBG("regulator-names[%d] = %s\n", i, tmp_reg[i].name);
+
+ snprintf(prop_name, 32, "%s-supply", tmp_reg[i].name);
+
+ if (of_get_property(of_node, prop_name, NULL)) {
+ tmp_reg[i].vdd =
+ devm_regulator_get(&pdev->dev, tmp_reg[i].name);
+ if (IS_ERR(tmp_reg[i].vdd)) {
+ rc = -EINVAL;
+ pr_err("Fail to get regulator :%d\n", i);
+ goto err1;
+ }
+ } else {
+ pr_err("Regulator phandle not found :%s\n",
+ tmp_reg[i].name);
+ rc = -EINVAL;
+ goto err1;
+ }
+ CDBG("vdd ptr[%d] :%pK\n", i, tmp_reg[i].vdd);
+ }
+
+ *num_reg = cnt;
+ *vdd_info = tmp_reg;
+
+ return 0;
+
+err1:
+ for (--i; i >= 0; i--)
+ devm_regulator_put(tmp_reg[i].vdd);
+ devm_kfree(&pdev->dev, tmp_reg);
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_get_regulator_info);
+
+/* Enable/Disable regulators */
+int msm_camera_regulator_enable(struct msm_cam_regulator *vdd_info,
+ int cnt, int enable)
+{
+ int i;
+ int rc;
+ struct msm_cam_regulator *tmp = vdd_info;
+
+ if (!tmp) {
+ pr_err("Invalid params");
+ return -EINVAL;
+ }
+ CDBG("cnt : %d\n", cnt);
+
+ for (i = 0; i < cnt; i++) {
+ if (tmp && !IS_ERR_OR_NULL(tmp->vdd)) {
+ CDBG("name : %s, enable : %d\n", tmp->name, enable);
+ if (enable) {
+ rc = regulator_enable(tmp->vdd);
+ if (rc < 0) {
+ pr_err("regulator enable failed %d\n",
+ i);
+ goto error;
+ }
+ } else {
+ rc = regulator_disable(tmp->vdd);
+ if (rc < 0)
+ pr_err("regulator disable failed %d\n",
+ i);
+ }
+ }
+ tmp++;
+ }
+
+ return 0;
+error:
+ for (--i; i >= 0; i--) {
+ --tmp;
+ if (!IS_ERR_OR_NULL(tmp->vdd))
+ regulator_disable(tmp->vdd);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_regulator_enable);
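+
+/*
+ * Illustrative sketch (hypothetical, simplified error handling): fetching,
+ * enabling and releasing the regulators named in "qcom,vdd-names".
+ *
+ *   struct msm_cam_regulator *vdd_info;
+ *   int num_reg, rc;
+ *
+ *   rc = msm_camera_get_regulator_info(pdev, &vdd_info, &num_reg);
+ *   if (!rc)
+ *           rc = msm_camera_regulator_enable(vdd_info, num_reg, 1);
+ *   // ... use the hardware ...
+ *   msm_camera_regulator_enable(vdd_info, num_reg, 0);
+ *   msm_camera_put_regulators(pdev, &vdd_info, num_reg);
+ */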
+
+/* Put regulators */
+void msm_camera_put_regulators(struct platform_device *pdev,
+ struct msm_cam_regulator **vdd_info, int cnt)
+{
+ int i;
+
+ if (!vdd_info || !*vdd_info) {
+ pr_err("Invalid params\n");
+ return;
+ }
+
+ for (i = cnt - 1; i >= 0; i--) {
+ if (vdd_info[i] && !IS_ERR_OR_NULL(vdd_info[i]->vdd))
+ devm_regulator_put(vdd_info[i]->vdd);
+ CDBG("vdd ptr[%d] :%pK\n", i, vdd_info[i]->vdd);
+ }
+
+ devm_kfree(&pdev->dev, *vdd_info);
+ *vdd_info = NULL;
+}
+EXPORT_SYMBOL(msm_camera_put_regulators);
+
+struct resource *msm_camera_get_irq(struct platform_device *pdev,
+ char *irq_name)
+{
+ if (!pdev || !irq_name) {
+ pr_err("Invalid params\n");
+ return NULL;
+ }
+
+ CDBG("Get irq for %s\n", irq_name);
+ return platform_get_resource_byname(pdev, IORESOURCE_IRQ, irq_name);
+}
+EXPORT_SYMBOL(msm_camera_get_irq);
+
+int msm_camera_register_irq(struct platform_device *pdev,
+ struct resource *irq, irq_handler_t handler,
+ unsigned long irqflags, char *irq_name, void *dev_id)
+{
+ int rc = 0;
+
+ if (!pdev || !irq || !handler || !irq_name || !dev_id) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ rc = devm_request_irq(&pdev->dev, irq->start, handler,
+ irqflags, irq_name, dev_id);
+ if (rc < 0) {
+ pr_err("irq request fail\n");
+ rc = -EINVAL;
+ }
+
+ CDBG("Registered irq for %s[resource - %pK]\n", irq_name, irq);
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_register_irq);
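+
+/*
+ * Illustrative sketch (hypothetical resource name, handler and token):
+ * looking up an IRQ resource named "vfe" and registering a handler for it.
+ *
+ *   struct resource *irq;
+ *   int rc;
+ *
+ *   irq = msm_camera_get_irq(pdev, "vfe");
+ *   if (irq)
+ *           rc = msm_camera_register_irq(pdev, irq, my_irq_handler,
+ *                   IRQF_TRIGGER_RISING, "vfe", my_dev);
+ */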
+
+int msm_camera_register_threaded_irq(struct platform_device *pdev,
+ struct resource *irq, irq_handler_t handler_fn,
+ irq_handler_t thread_fn, unsigned long irqflags,
+ const char *irq_name, void *dev_id)
+{
+ int rc = 0;
+
+ if (!pdev || !irq || !irq_name || !dev_id) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ rc = devm_request_threaded_irq(&pdev->dev, irq->start, handler_fn,
+ thread_fn, irqflags, irq_name, dev_id);
+ if (rc < 0) {
+ pr_err("irq request fail\n");
+ rc = -EINVAL;
+ }
+
+ CDBG("Registered irq for %s[resource - %pK]\n", irq_name, irq);
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_register_threaded_irq);
+
+int msm_camera_enable_irq(struct resource *irq, int enable)
+{
+ if (!irq) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ CDBG("irq Enable %d\n", enable);
+ if (enable)
+ enable_irq(irq->start);
+ else
+ disable_irq(irq->start);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_enable_irq);
+
+int msm_camera_unregister_irq(struct platform_device *pdev,
+ struct resource *irq, void *dev_id)
+{
+
+ if (!pdev || !irq || !dev_id) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ CDBG("Un Registering irq for [resource - %pK]\n", irq);
+ devm_free_irq(&pdev->dev, irq->start, dev_id);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_unregister_irq);
+
+void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
+ char *device_name, int reserve_mem)
+{
+ struct resource *mem;
+ void *base;
+
+ if (!pdev || !device_name) {
+ pr_err("Invalid params\n");
+ return NULL;
+ }
+
+ CDBG("device name :%s\n", device_name);
+ mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, device_name);
+ if (!mem) {
+ pr_err("err: mem resource %s not found\n", device_name);
+ return NULL;
+ }
+
+ if (reserve_mem) {
+ CDBG("device:%pK, mem : %pK, size : %d\n",
+ &pdev->dev, mem, (int)resource_size(mem));
+ if (!devm_request_mem_region(&pdev->dev, mem->start,
+ resource_size(mem),
+ device_name)) {
+ pr_err("err: no valid mem region for device:%s\n",
+ device_name);
+ return NULL;
+ }
+ }
+
+ base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!base) {
+ devm_release_mem_region(&pdev->dev, mem->start,
+ resource_size(mem));
+ pr_err("err: ioremap failed: %s\n", device_name);
+ return NULL;
+ }
+
+ CDBG("base : %pK\n", base);
+ return base;
+}
+EXPORT_SYMBOL(msm_camera_get_reg_base);
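+
+/*
+ * Illustrative sketch (hypothetical resource name and register offset):
+ * mapping a register block, doing one read-modify-write with the io helpers
+ * from this series, and unmapping it again.
+ *
+ *   void __iomem *base;
+ *   u32 val;
+ *
+ *   base = msm_camera_get_reg_base(pdev, "vfe", true);
+ *   if (base) {
+ *           val = msm_camera_io_r(base + 0x10);
+ *           msm_camera_io_w(val | BIT(0), base + 0x10);
+ *           msm_camera_put_reg_base(pdev, base, "vfe", true);
+ *   }
+ */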
+
+uint32_t msm_camera_get_res_size(struct platform_device *pdev,
+ char *device_name)
+{
+ struct resource *mem;
+
+ if (!pdev || !device_name) {
+ pr_err("Invalid params\n");
+ return 0;
+ }
+
+ CDBG("device name :%s\n", device_name);
+ mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, device_name);
+ if (!mem) {
+ pr_err("err: mem resource %s not found\n", device_name);
+ return 0;
+ }
+ return resource_size(mem);
+}
+EXPORT_SYMBOL(msm_camera_get_res_size);
+
+
+int msm_camera_put_reg_base(struct platform_device *pdev,
+ void __iomem *base, char *device_name, int reserve_mem)
+{
+ struct resource *mem;
+
+ if (!pdev || !base || !device_name) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ CDBG("device name :%s\n", device_name);
+ mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, device_name);
+ if (!mem) {
+ pr_err("err: mem resource %s not found\n", device_name);
+ return -EINVAL;
+ }
+ CDBG("mem : %pK, size : %d\n", mem, (int)resource_size(mem));
+
+ devm_iounmap(&pdev->dev, base);
+ if (reserve_mem)
+ devm_release_mem_region(&pdev->dev,
+ mem->start, resource_size(mem));
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_put_reg_base);
+
+/* Register the bus client */
+uint32_t msm_camera_register_bus_client(struct platform_device *pdev,
+ enum cam_bus_client id)
+{
+ int rc = 0;
+ uint32_t bus_client, num_usecases, num_paths;
+ struct msm_bus_scale_pdata *pdata;
+ struct device_node *of_node;
+
+ CDBG("Register client ID: %d\n", id);
+
+ if (id >= CAM_BUS_CLIENT_MAX || !pdev) {
+ pr_err("Invalid params");
+ return -EINVAL;
+ }
+
+ of_node = pdev->dev.of_node;
+
+ if (!g_cv[id].pdata) {
+ rc = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
+ &num_usecases);
+ if (rc) {
+ pr_err("num-usecases not found\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
+ &num_paths);
+ if (rc) {
+ pr_err("num-usecases not found\n");
+ return -EINVAL;
+ }
+
+ if (num_paths != 1) {
+ pr_err("Exceeds number of paths\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(of_node,
+ "qcom,msm-bus-vector-dyn-vote")) {
+ if (num_usecases != 2) {
+ pr_err("Excess or less vectors\n");
+ return -EINVAL;
+ }
+ g_cv[id].dyn_vote = true;
+ }
+
+ pdata = msm_bus_cl_get_pdata(pdev);
+ if (!pdata) {
+ pr_err("failed get_pdata client_id :%d\n", id);
+ return -EINVAL;
+ }
+ bus_client = msm_bus_scale_register_client(pdata);
+ if (!bus_client) {
+ pr_err("Unable to register bus client :%d\n", id);
+ return -EINVAL;
+ }
+ } else {
+ pr_err("vector already setup client_id : %d\n", id);
+ return -EINVAL;
+ }
+
+ g_cv[id].pdata = pdata;
+ g_cv[id].bus_client = bus_client;
+ g_cv[id].vector_index = 0;
+ g_cv[id].num_usecases = num_usecases;
+ g_cv[id].num_paths = num_paths;
+ mutex_init(&g_cv[id].lock);
+ CDBG("Exit Client ID: %d\n", id);
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_register_bus_client);
+
+/* Update the bus bandwidth */
+uint32_t msm_camera_update_bus_bw(int id, uint64_t ab, uint64_t ib)
+{
+ struct msm_bus_paths *path;
+ struct msm_bus_scale_pdata *pdata;
+ int idx = 0;
+
+ if (id >= CAM_BUS_CLIENT_MAX) {
+ pr_err("Invalid params");
+ return -EINVAL;
+ }
+ if (g_cv[id].num_usecases != 2 ||
+ g_cv[id].num_paths != 1 ||
+ g_cv[id].dyn_vote != true) {
+ pr_err("dynamic update not allowed\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&g_cv[id].lock);
+ idx = g_cv[id].vector_index;
+ idx = 1 - idx;
+ g_cv[id].vector_index = idx;
+ mutex_unlock(&g_cv[id].lock);
+
+ pdata = g_cv[id].pdata;
+ path = &(pdata->usecase[idx]);
+ path->vectors[0].ab = ab;
+ path->vectors[0].ib = ib;
+
+ CDBG("Register client ID : %d [ab : %llx, ib : %llx], update :%d\n",
+ id, ab, ib, idx);
+ msm_bus_scale_client_update_request(g_cv[id].bus_client, idx);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_update_bus_bw);
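+
+/*
+ * Illustrative sketch (hypothetical bandwidth values): dynamic voting for a
+ * client whose device tree uses the two-vector
+ * "qcom,msm-bus-vector-dyn-vote" scheme.
+ *
+ *   if (!msm_camera_register_bus_client(pdev, CAM_BUS_CLIENT_VFE))
+ *           msm_camera_update_bus_bw(CAM_BUS_CLIENT_VFE,
+ *                   100000000ULL, 200000000ULL);
+ *   // ... when the use case ends ...
+ *   msm_camera_unregister_bus_client(CAM_BUS_CLIENT_VFE);
+ */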
+
+/* Update the bus vector */
+uint32_t msm_camera_update_bus_vector(enum cam_bus_client id,
+ int vector_index)
+{
+ if (id >= CAM_BUS_CLIENT_MAX || g_cv[id].dyn_vote == true) {
+ pr_err("Invalid params");
+ return -EINVAL;
+ }
+
+ if (vector_index < 0 || vector_index > g_cv[id].num_usecases) {
+ pr_err("Invalid params");
+ return -EINVAL;
+ }
+
+ CDBG("Register client ID : %d vector idx: %d,\n", id, vector_index);
+ msm_bus_scale_client_update_request(g_cv[id].bus_client,
+ vector_index);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_update_bus_vector);
+
+/* Unregister the bus client */
+uint32_t msm_camera_unregister_bus_client(enum cam_bus_client id)
+{
+ if (id >= CAM_BUS_CLIENT_MAX) {
+ pr_err("Invalid params");
+ return -EINVAL;
+ }
+
+ CDBG("UnRegister client ID: %d\n", id);
+
+ mutex_destroy(&g_cv[id].lock);
+ msm_bus_scale_unregister_client(g_cv[id].bus_client);
+ g_cv[id].bus_client = 0;
+ g_cv[id].num_usecases = 0;
+ g_cv[id].num_paths = 0;
+ g_cv[id].vector_index = 0;
+ g_cv[id].dyn_vote = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_unregister_bus_client);
diff --git a/drivers/media/platform/msm/ais/common/cam_soc_api.h b/drivers/media/platform/msm/ais/common/cam_soc_api.h
new file mode 100644
index 000000000000..b9089e874acf
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/cam_soc_api.h
@@ -0,0 +1,425 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_SOC_API_H_
+#define _CAM_SOC_API_H_
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <soc/qcom/ais.h>
+
+enum cam_bus_client {
+ CAM_BUS_CLIENT_VFE,
+ CAM_BUS_CLIENT_CPP,
+ CAM_BUS_CLIENT_FD,
+ CAM_BUS_CLIENT_JPEG_ENC0,
+ CAM_BUS_CLIENT_JPEG_ENC1,
+ CAM_BUS_CLIENT_JPEG_DEC,
+ CAM_BUS_CLIENT_JPEG_DMA,
+ CAM_BUS_CLIENT_MAX
+};
+
+struct msm_cam_regulator {
+ const char *name;
+ struct regulator *vdd;
+};
+
+/**
+ * @brief : Gets clock information from dtsi
+ *
+ * This function extracts the clocks information for a specific
+ * platform device
+ *
+ * @param pdev : Platform device to get clocks information
+ * @param clk_info : Pointer to populate clock information array
+ * @param clk_ptr : Pointer to populate clock resource pointers
+ * @param num_clk: Pointer to populate the number of clocks
+ * extracted from dtsi
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_get_clk_info(struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr,
+ size_t *num_clk);
+
+/**
+ * @brief : Gets clock information from dtsi
+ *
+ * This function extracts the clocks information for a specific
+ * i2c device
+ *
+ * @param dev : i2c device to get clocks information
+ * @param clk_info : Pointer to populate clock information array
+ * @param clk_ptr : Pointer to populate clock resource pointers
+ * @param num_clk: Pointer to populate the number of clocks
+ * extracted from dtsi
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_i2c_dev_get_clk_info(struct device *dev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr,
+ size_t *num_clk);
+
+/**
+ * @brief : Gets clock information and rates from dtsi
+ *
+ * This function extracts the clocks information for a specific
+ * platform device
+ *
+ * @param pdev : Platform device to get clocks information
+ * @param clk_info : Pointer to populate clock information array
+ * @param clk_ptr : Pointer to populate clock resource pointers
+ * @param clk_rates : Pointer to populate clock rates
+ * @param num_set: Pointer to populate the number of sets of rates
+ * @param num_clk: Pointer to populate the number of clocks
+ * extracted from dtsi
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_get_clk_info_and_rates(
+ struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr,
+ uint32_t ***clk_rates,
+ size_t *num_set,
+ size_t *num_clk);
+
+/**
+ * @brief : Puts clock information
+ *
+ * This function releases the memory allocated for the clocks
+ *
+ * @param pdev : Pointer to platform device
+ * @param clk_info : Pointer to release the allocated memory
+ * @param clk_ptr : Pointer to release the clock resources
+ * @param cnt : Number of clk resources
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_put_clk_info(struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr, int cnt);
+
+/**
+ * @brief : Puts clock information
+ *
+ * This function releases the memory allocated for the clocks
+ *
+ * @param dev : Pointer to i2c device
+ * @param clk_info : Pointer to release the allocated memory
+ * @param clk_ptr : Pointer to release the clock resources
+ * @param cnt : Number of clk resources
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_i2c_dev_put_clk_info(struct device *dev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr, int cnt);
+
+/**
+ * @brief : Puts clock information
+ *
+ * This function releases the memory allocated for the clocks
+ *
+ * @param pdev : Pointer to platform device
+ * @param clk_info : Pointer to release the allocated memory
+ * @param clk_ptr : Pointer to release the clock resources
+ * @param clk_rates : Pointer to release the clock rates
+ * @param set : Number of sets of clock rates
+ * @param cnt : Number of clk resources
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_put_clk_info_and_rates(struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr, uint32_t ***clk_rates,
+ size_t set, size_t cnt);
+/**
+ * @brief : Enable clocks
+ *
+ * This function enables the clocks for a specified device
+ *
+ * @param dev : Device to get clocks information
+ * @param clk_info : Clock information array
+ * @param clk_ptr : Array of clock pointers to enable/disable
+ * @param num_clk: Number of clocks in the arrays
+ * @param enable : Flag to specify enable/disable
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_clk_enable(struct device *dev,
+ struct msm_cam_clk_info *clk_info,
+ struct clk **clk_ptr,
+ int num_clk,
+ int enable);
+/**
+ * @brief : Set clock rate
+ *
+ * This function sets the rate for a specified clock and
+ * returns the rounded value
+ *
+ * @param dev : Device to get clocks information
+ * @param clk : Pointer to clock to set rate
+ * @param clk_rate : Rate to be set
+ *
+ * @return Status of operation. Negative in case of error. Clock rate otherwise.
+ */
+
+long msm_camera_clk_set_rate(struct device *dev,
+ struct clk *clk,
+ long clk_rate);
+/**
+ * @brief : Gets regulator info
+ *
+ * This function extracts the regulator information for a specific
+ * platform device
+ *
+ * @param pdev : platform device to get regulator information
+ * @param vdd_info: Pointer to populate the regulator names
+ * @param num_reg: Pointer to populate the number of regulators
+ * extracted from dtsi
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_get_regulator_info(struct platform_device *pdev,
+ struct msm_cam_regulator **vdd_info, int *num_reg);
+/**
+ * @brief : Enable/Disable the regulators
+ *
+ * This function enables/disables the regulators for a specific
+ * platform device
+ *
+ * @param vdd_info: Pointer to list of regulators
+ * @param cnt: Number of regulators to enable/disable
+ * @param enable: Flags specifies either enable/disable
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_regulator_enable(struct msm_cam_regulator *vdd_info,
+ int cnt, int enable);
+
+/**
+ * @brief : Release the regulators
+ *
+ * This function releases the regulator resources.
+ *
+ * @param pdev: Pointer to platform device
+ * @param vdd_info: Pointer to list of regulators
+ * @param cnt: Number of regulators to release
+ */
+
+void msm_camera_put_regulators(struct platform_device *pdev,
+ struct msm_cam_regulator **vdd_info, int cnt);
+/**
+ * @brief : Get the IRQ resource
+ *
+ * This function gets the irq resource from dtsi for a specific
+ * platform device
+ *
+ * @param pdev : Platform device to get IRQ
+ * @param irq_name: Name of the IRQ resource to get from DTSI
+ *
+ * @return Pointer to resource if success else null
+ */
+
+struct resource *msm_camera_get_irq(struct platform_device *pdev,
+ char *irq_name);
+/**
+ * @brief : Register the IRQ
+ *
+ * This function registers the irq resource for specified hardware
+ *
+ * @param pdev : Platform device to register IRQ resource
+ * @param irq : IRQ resource
+ * @param handler : IRQ handler
+ * @param irqflags : IRQ flags
+ * @param irq_name: Name of the IRQ
+ * @param dev : Token of the device
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_register_irq(struct platform_device *pdev,
+ struct resource *irq,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ char *irq_name,
+ void *dev);
+
+/**
+ * @brief : Register the threaded IRQ
+ *
+ * This function registers the irq resource for specified hardware
+ *
+ * @param pdev : Platform device to register IRQ resource
+ * @param irq : IRQ resource
+ * @param handler_fn : IRQ handler function
+ * @param thread_fn : thread handler function
+ * @param irqflags : IRQ flags
+ * @param irq_name: Name of the IRQ
+ * @param dev : Token of the device
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_register_threaded_irq(struct platform_device *pdev,
+ struct resource *irq,
+ irq_handler_t handler_fn,
+ irq_handler_t thread_fn,
+ unsigned long irqflags,
+ const char *irq_name,
+ void *dev);
+
+/**
+ * @brief : Enable/Disable the IRQ
+ *
+ * This function enables or disables a specific IRQ
+ *
+ * @param irq : IRQ resource
+ * @param flag : flag to enable/disable
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_enable_irq(struct resource *irq, int flag);
+
+/**
+ * @brief : UnRegister the IRQ
+ *
+ * This function Unregisters/Frees the irq resource
+ *
+ * @param pdev : Pointer to platform device
+ * @param irq : IRQ resource
+ * @param dev : Token of the device
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_unregister_irq(struct platform_device *pdev,
+ struct resource *irq, void *dev_id);
+
+/**
+ * @brief : Gets device register base
+ *
+ * This function extracts the device's register base from the dtsi
+ * for the specified platform device
+ *
+ * @param pdev : Platform device to get the register base from
+ * @param device_name : Name of the device to fetch the register base
+ * @param reserve_mem : Flag to decide whether to reserve memory
+ * region or not.
+ *
+ * @return Mapped register base if success else null
+ */
+
+void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
+ char *device_name, int reserve_mem);
+
+/**
+ * @brief : Puts device register base
+ *
+ * This function releases the memory region for the specified
+ * resource
+ *
+ * @param pdev : Pointer to platform device
+ * @param base : Pointer to base to unmap
+ * @param device_name : Device name
+ * @param reserve_mem : Flag to decide whether to release memory
+ * region or not.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_put_reg_base(struct platform_device *pdev, void __iomem *base,
+ char *device_name, int reserve_mem);
+
+/**
+ * @brief : Register the bus client
+ *
+ * This function registers the bus client
+ *
+ * @param pdev : Pointer to platform device
+ * @param id : client identifier
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+uint32_t msm_camera_register_bus_client(struct platform_device *pdev,
+ enum cam_bus_client id);
+
+/**
+ * @brief : Update bus vector
+ *
+ * This function votes for the specified vector to the bus
+ *
+ * @param id : client identifier
+ * @param vector_index : vector index to register
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+uint32_t msm_camera_update_bus_vector(enum cam_bus_client id,
+ int vector_index);
+
+/**
+ * @brief : Update the bus bandwidth
+ *
+ * This function updates the bandwidth for the specific client
+ *
+ * @param id : client identifier
+ * @param ab : Absolute bandwidth
+ * @param ib : Instantaneous bandwidth
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+uint32_t msm_camera_update_bus_bw(int id, uint64_t ab, uint64_t ib);
+
+/**
+ * @brief : UnRegister the bus client
+ *
+ * This function unregisters the bus client
+ *
+ * @param id : client identifier
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+uint32_t msm_camera_unregister_bus_client(enum cam_bus_client id);
+
+/**
+ * @brief : Gets resource size
+ *
+ * This function returns the size of the resource for the
+ * specified platform device
+ *
+ * @param pdev : Platform device to get the resource size from
+ * @param device_name : Name of the device whose resource size is fetched
+ *
+ * @return size of the resource
+ */
+
+uint32_t msm_camera_get_res_size(struct platform_device *pdev,
+ char *device_name);
+
+#endif
diff --git a/drivers/media/platform/msm/ais/common/msm_camera_io_util.c b/drivers/media/platform/msm/ais/common/msm_camera_io_util.c
new file mode 100644
index 000000000000..8370f556a40d
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/msm_camera_io_util.c
@@ -0,0 +1,851 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <soc/qcom/ais.h>
+#include <linux/msm-bus.h>
+#include "msm_camera_io_util.h"
+
+#define BUFF_SIZE_128 128
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+void msm_camera_io_w(u32 data, void __iomem *addr)
+{
+ CDBG("%s: 0x%pK %08x\n", __func__, (addr), (data));
+ writel_relaxed((data), (addr));
+}
+
+/* This API is to write a block of data
+ * to the same address
+ */
+int32_t msm_camera_io_w_block(const u32 *addr, void __iomem *base,
+ u32 len)
+{
+ int i;
+
+ if (!addr || !len || !base)
+ return -EINVAL;
+
+ for (i = 0; i < len; i++) {
+ CDBG("%s: len =%d val=%x base =%pK\n", __func__,
+ len, addr[i], base);
+ writel_relaxed(addr[i], base);
+ }
+ return 0;
+}
+
+/* This API is to write a block of registers,
+ * laid out as a two-dimensional table of
+ * register offset and data pairs
+ */
+int32_t msm_camera_io_w_reg_block(const u32 *addr, void __iomem *base,
+ u32 len)
+{
+ int i;
+
+ if (!addr || !len || !base)
+ return -EINVAL;
+
+ for (i = 0; i < len; i = i + 2) {
+ CDBG("%s: len =%d val=%x base =%pK reg=%x\n", __func__,
+ len, addr[i + 1], base, addr[i]);
+ writel_relaxed(addr[i + 1], base + addr[i]);
+ }
+ return 0;
+}
+
+void msm_camera_io_w_mb(u32 data, void __iomem *addr)
+{
+ CDBG("%s: 0x%pK %08x\n", __func__, (addr), (data));
+ /* ensure write is done */
+ wmb();
+ writel_relaxed((data), (addr));
+ /* ensure write is done */
+ wmb();
+}
+
+int32_t msm_camera_io_w_mb_block(const u32 *addr, void __iomem *base, u32 len)
+{
+ int i;
+
+ if (!addr || !len || !base)
+ return -EINVAL;
+
+ for (i = 0; i < len; i++) {
+ /* ensure write is done */
+ wmb();
+ CDBG("%s: len =%d val=%x base =%pK\n", __func__,
+ len, addr[i], base);
+ writel_relaxed(addr[i], base);
+ }
+ /* ensure last write is done */
+ wmb();
+ return 0;
+}
+
+u32 msm_camera_io_r(void __iomem *addr)
+{
+ uint32_t data = readl_relaxed(addr);
+
+ CDBG("%s: 0x%pK %08x\n", __func__, (addr), (data));
+ return data;
+}
+
+u32 msm_camera_io_r_mb(void __iomem *addr)
+{
+ uint32_t data;
+ /* ensure read is done */
+ rmb();
+ data = readl_relaxed(addr);
+ /* ensure read is done */
+ rmb();
+ CDBG("%s: 0x%pK %08x\n", __func__, (addr), (data));
+ return data;
+}
+
+void msm_camera_io_memcpy_toio(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len)
+{
+ int i;
+ u32 *d = (u32 *) dest_addr;
+ u32 *s = (u32 *) src_addr;
+
+ for (i = 0; i < len; i++)
+ writel_relaxed(*s++, d++);
+}
+
+int32_t msm_camera_io_poll_value(void __iomem *addr, u32 wait_data, u32 retry,
+ unsigned long min_usecs, unsigned long max_usecs)
+{
+ uint32_t tmp, cnt = 0;
+ int32_t rc = 0;
+
+ if (!addr)
+ return -EINVAL;
+
+ tmp = msm_camera_io_r(addr);
+ while ((tmp != wait_data) && (cnt++ < retry)) {
+ if (min_usecs > 0 && max_usecs > 0)
+ usleep_range(min_usecs, max_usecs);
+ tmp = msm_camera_io_r(addr);
+ }
+ if (cnt > retry) {
+ pr_debug("Poll failed by value\n");
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+int32_t msm_camera_io_poll_value_wmask(void __iomem *addr, u32 wait_data,
+ u32 bmask, u32 retry, unsigned long min_usecs, unsigned long max_usecs)
+{
+ uint32_t tmp, cnt = 0;
+ int32_t rc = 0;
+
+ if (!addr)
+ return -EINVAL;
+
+ tmp = msm_camera_io_r(addr);
+ while (((tmp & bmask) != wait_data) && (cnt++ < retry)) {
+ if (min_usecs > 0 && max_usecs > 0)
+ usleep_range(min_usecs, max_usecs);
+ tmp = msm_camera_io_r(addr);
+ }
+ if (cnt > retry) {
+ pr_debug("Poll failed with mask\n");
+ rc = -EINVAL;
+ }
+ return rc;
+}
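+
+/*
+ * Illustrative sketch (hypothetical register offset and bit): waiting for a
+ * busy bit at offset 0x4 to clear, polling up to 10 times with 1-2 ms
+ * between reads.
+ *
+ *   rc = msm_camera_io_poll_value_wmask(base + 0x4, 0x0, BIT(0),
+ *           10, 1000, 2000);
+ *   if (rc < 0)
+ *           pr_err("busy bit did not clear\n");
+ */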
+
+void msm_camera_io_dump(void __iomem *addr, int size, int enable)
+{
+ char line_str[128], *p_str;
+ int i;
+ u32 *p = (u32 *) addr;
+ u32 data;
+
+ CDBG("%s: addr=%pK size=%d\n", __func__, addr, size);
+
+ if (!p || (size <= 0) || !enable)
+ return;
+
+ line_str[0] = '\0';
+ p_str = line_str;
+ for (i = 0; i < size/4; i++) {
+ if (i % 4 == 0) {
+#ifdef CONFIG_COMPAT
+ snprintf(p_str, 20, "%016lx: ", (unsigned long) p);
+ p_str += 18;
+#else
+ snprintf(p_str, 12, "%08lx: ", (unsigned long) p);
+ p_str += 10;
+#endif
+ }
+ data = readl_relaxed(p++);
+ snprintf(p_str, 12, "%08x ", data);
+ p_str += 9;
+ if ((i + 1) % 4 == 0) {
+ pr_err("%s\n", line_str);
+ line_str[0] = '\0';
+ p_str = line_str;
+ }
+ }
+ if (line_str[0] != '\0')
+ pr_err("%s\n", line_str);
+}
+
+void msm_camera_io_dump_wstring_base(void __iomem *addr,
+ struct msm_cam_dump_string_info *dump_data,
+ int size)
+{
+ int i, u = sizeof(struct msm_cam_dump_string_info);
+
+ pr_debug("%s: addr=%pK data=%pK size=%d u=%d, cnt=%d\n", __func__,
+ addr, dump_data, size, u,
+ (size/u));
+
+ if (!addr || (size <= 0) || !dump_data) {
+ pr_err("%s: addr=%pK data=%pK size=%d\n", __func__,
+ addr, dump_data, size);
+ return;
+ }
+ for (i = 0; i < (size / u); i++)
+ pr_debug("%s 0x%x\n", (dump_data + i)->print,
+ readl_relaxed((dump_data + i)->offset + addr));
+}
+
+void msm_camera_io_memcpy(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len)
+{
+ CDBG("%s: %pK %pK %d\n", __func__, dest_addr, src_addr, len);
+ msm_camera_io_memcpy_toio(dest_addr, src_addr, len / 4);
+}
+
+void msm_camera_io_memcpy_mb(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len)
+{
+ int i;
+ u32 *d = (u32 *) dest_addr;
+ u32 *s = (u32 *) src_addr;
+ /* Generic helper for callers that need register writes with a
+ * memory barrier
+ */
+ wmb();
+ for (i = 0; i < (len / 4); i++) {
+ msm_camera_io_w(*s++, d++);
+ /* ensure write is done after every iteration */
+ wmb();
+ }
+}
+
+int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
+ struct msm_cam_clk_info *clk_src_info, int num_clk)
+{
+ int i;
+ int rc = 0;
+ struct clk *mux_clk = NULL;
+ struct clk *src_clk = NULL;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_src_info[i].clk_name) {
+ mux_clk = clk_get(dev, clk_info[i].clk_name);
+ if (IS_ERR(mux_clk)) {
+ pr_err("%s get failed\n",
+ clk_info[i].clk_name);
+ continue;
+ }
+ src_clk = clk_get(dev, clk_src_info[i].clk_name);
+ if (IS_ERR(src_clk)) {
+ pr_err("%s get failed\n",
+ clk_src_info[i].clk_name);
+ continue;
+ }
+ clk_set_parent(mux_clk, src_clk);
+ }
+ }
+ return rc;
+}
+
+int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
+ struct clk **clk_ptr, int num_clk, int enable)
+{
+ int i;
+ int rc = 0;
+ long clk_rate;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ CDBG("%s enable %s\n", __func__, clk_info[i].clk_name);
+ clk_ptr[i] = clk_get(dev, clk_info[i].clk_name);
+ if (IS_ERR(clk_ptr[i])) {
+ pr_err("%s get failed\n", clk_info[i].clk_name);
+ rc = PTR_ERR(clk_ptr[i]);
+ goto cam_clk_get_err;
+ }
+ if (clk_info[i].clk_rate > 0) {
+ clk_rate = clk_round_rate(clk_ptr[i],
+ clk_info[i].clk_rate);
+ if (clk_rate < 0) {
+ pr_err("%s round failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+ rc = clk_set_rate(clk_ptr[i],
+ clk_rate);
+ if (rc < 0) {
+ pr_err("%s set failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+
+ } else if (clk_info[i].clk_rate == INIT_RATE) {
+ clk_rate = clk_get_rate(clk_ptr[i]);
+ if (clk_rate == 0) {
+ clk_rate =
+ clk_round_rate(clk_ptr[i], 0);
+ if (clk_rate < 0) {
+ pr_err("%s round rate failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+ rc = clk_set_rate(clk_ptr[i],
+ clk_rate);
+ if (rc < 0) {
+ pr_err("%s set rate failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+ }
+ }
+ rc = clk_prepare(clk_ptr[i]);
+ if (rc < 0) {
+ pr_err("%s prepare failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_prepare_err;
+ }
+
+ rc = clk_enable(clk_ptr[i]);
+ if (rc < 0) {
+ pr_err("%s enable failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_enable_err;
+ }
+ if (clk_info[i].delay > 20) {
+ msleep(clk_info[i].delay);
+ } else if (clk_info[i].delay) {
+ usleep_range(clk_info[i].delay * 1000,
+ (clk_info[i].delay * 1000) + 1000);
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_ptr[i] != NULL) {
+ CDBG("%s disable %s\n", __func__,
+ clk_info[i].clk_name);
+ clk_disable(clk_ptr[i]);
+ clk_unprepare(clk_ptr[i]);
+ clk_put(clk_ptr[i]);
+ }
+ }
+ }
+ return rc;
+
+
+cam_clk_enable_err:
+ clk_unprepare(clk_ptr[i]);
+cam_clk_prepare_err:
+cam_clk_set_err:
+ clk_put(clk_ptr[i]);
+cam_clk_get_err:
+ for (i--; i >= 0; i--) {
+ if (clk_ptr[i] != NULL) {
+ clk_disable(clk_ptr[i]);
+ clk_unprepare(clk_ptr[i]);
+ clk_put(clk_ptr[i]);
+ }
+ }
+ return rc;
+}
+
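+/*
+ * Illustrative usage sketch only: the table below is hypothetical, and the
+ * msm_cam_clk_info field order (clk_name, clk_rate, delay) is assumed from
+ * how msm_cam_clk_enable() indexes it above.
+ *
+ *   static struct msm_cam_clk_info example_clk_info[] = {
+ *       {"example_ahb_clk", NO_SET_RATE, 0},
+ *       {"example_core_clk", 200000000, 1},
+ *   };
+ *   static struct clk *example_clk[ARRAY_SIZE(example_clk_info)];
+ *
+ *   rc = msm_cam_clk_enable(dev, example_clk_info, example_clk,
+ *           ARRAY_SIZE(example_clk_info), 1);
+ *   ...
+ *   msm_cam_clk_enable(dev, example_clk_info, example_clk,
+ *           ARRAY_SIZE(example_clk_info), 0);
+ */
+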
+int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
+ int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
+ int num_vreg_seq, struct regulator **reg_ptr, int config)
+{
+ int i = 0, j = 0;
+ int rc = 0;
+ struct camera_vreg_t *curr_vreg;
+
+ if (num_vreg_seq > num_vreg) {
+ pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (!num_vreg_seq)
+ num_vreg_seq = num_vreg;
+
+ if ((cam_vreg == NULL) && num_vreg_seq) {
+ pr_err("%s:%d cam_vreg NULL\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (config) {
+ for (i = 0; i < num_vreg_seq; i++) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ curr_vreg = &cam_vreg[j];
+ reg_ptr[j] = regulator_get(dev,
+ curr_vreg->reg_name);
+ if (IS_ERR(reg_ptr[j])) {
+ pr_err("%s: %s get failed\n",
+ __func__,
+ curr_vreg->reg_name);
+ reg_ptr[j] = NULL;
+ goto vreg_get_fail;
+ }
+ if (regulator_count_voltages(reg_ptr[j]) > 0) {
+ rc = regulator_set_voltage(
+ reg_ptr[j],
+ curr_vreg->min_voltage,
+ curr_vreg->max_voltage);
+ if (rc < 0) {
+ pr_err("%s: %s set voltage failed\n",
+ __func__,
+ curr_vreg->reg_name);
+ goto vreg_set_voltage_fail;
+ }
+ if (curr_vreg->op_mode >= 0) {
+ rc = regulator_set_load(
+ reg_ptr[j],
+ curr_vreg->op_mode);
+ if (rc < 0) {
+ pr_err(
+ "%s:%s set optimum mode fail\n",
+ __func__,
+ curr_vreg->reg_name);
+ goto vreg_set_opt_mode_fail;
+ }
+ }
+ }
+ }
+ } else {
+ for (i = num_vreg_seq-1; i >= 0; i--) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ curr_vreg = &cam_vreg[j];
+ if (reg_ptr[j]) {
+ if (regulator_count_voltages(reg_ptr[j]) > 0) {
+ if (curr_vreg->op_mode >= 0) {
+ regulator_set_load(
+ reg_ptr[j], 0);
+ }
+ regulator_set_voltage(
+ reg_ptr[j], 0, curr_vreg->
+ max_voltage);
+ }
+ regulator_put(reg_ptr[j]);
+ reg_ptr[j] = NULL;
+ }
+ }
+ }
+ return 0;
+
+vreg_unconfig:
+ if (regulator_count_voltages(reg_ptr[j]) > 0)
+ regulator_set_load(reg_ptr[j], 0);
+
+vreg_set_opt_mode_fail:
+ if (regulator_count_voltages(reg_ptr[j]) > 0)
+ regulator_set_voltage(reg_ptr[j], 0,
+ curr_vreg->max_voltage);
+
+vreg_set_voltage_fail:
+ regulator_put(reg_ptr[j]);
+ reg_ptr[j] = NULL;
+
+vreg_get_fail:
+ for (i--; i >= 0; i--) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ curr_vreg = &cam_vreg[j];
+ goto vreg_unconfig;
+ }
+ return -ENODEV;
+}
+
+int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
+ int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
+ int num_vreg_seq, struct regulator **reg_ptr, int enable)
+{
+ int i = 0, j = 0, rc = 0;
+
+ if (num_vreg_seq > num_vreg) {
+ pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!num_vreg_seq)
+ num_vreg_seq = num_vreg;
+
+ if (enable) {
+ for (i = 0; i < num_vreg_seq; i++) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ if (IS_ERR(reg_ptr[j])) {
+ pr_err("%s: %s null regulator\n",
+ __func__, cam_vreg[j].reg_name);
+ goto disable_vreg;
+ }
+ rc = regulator_enable(reg_ptr[j]);
+ if (rc < 0) {
+ pr_err("%s: %s enable failed\n",
+ __func__, cam_vreg[j].reg_name);
+ goto disable_vreg;
+ }
+ if (cam_vreg[j].delay > 20)
+ msleep(cam_vreg[j].delay);
+ else if (cam_vreg[j].delay)
+ usleep_range(cam_vreg[j].delay * 1000,
+ (cam_vreg[j].delay * 1000) + 1000);
+ }
+ } else {
+ for (i = num_vreg_seq-1; i >= 0; i--) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ regulator_disable(reg_ptr[j]);
+ if (cam_vreg[j].delay > 20)
+ msleep(cam_vreg[j].delay);
+ else if (cam_vreg[j].delay)
+ usleep_range(cam_vreg[j].delay * 1000,
+ (cam_vreg[j].delay * 1000) + 1000);
+ }
+ }
+ return rc;
+disable_vreg:
+ for (i--; i >= 0; i--) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ regulator_disable(reg_ptr[j]);
+ if (cam_vreg[j].delay > 20)
+ msleep(cam_vreg[j].delay);
+ else if (cam_vreg[j].delay)
+ usleep_range(cam_vreg[j].delay * 1000,
+ (cam_vreg[j].delay * 1000) + 1000);
+ }
+ return rc;
+}
+
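+/*
+ * Note (sketch of the intended pairing, not a new API): callers generally
+ * run msm_camera_config_vreg(..., config = 1) to get and configure the
+ * regulators, then msm_camera_enable_vreg(..., enable = 1) to switch them
+ * on; teardown repeats the same two calls with 0, in the reverse order.
+ */
+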
+void msm_camera_bus_scale_cfg(uint32_t bus_perf_client,
+ enum msm_bus_perf_setting perf_setting)
+{
+ int rc = 0;
+
+ if (!bus_perf_client) {
+ pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
+ return;
+ }
+
+ switch (perf_setting) {
+ case S_EXIT:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 1);
+ msm_bus_scale_unregister_client(bus_perf_client);
+ break;
+ case S_PREVIEW:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 1);
+ break;
+ case S_VIDEO:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 2);
+ break;
+ case S_CAPTURE:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 3);
+ break;
+ case S_ZSL:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 4);
+ break;
+ case S_LIVESHOT:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 5);
+ break;
+ case S_DEFAULT:
+ break;
+ default:
+ pr_err("%s: INVALID CASE\n", __func__);
+ }
+}
+
+int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
+ uint8_t gpio_tbl_size, int gpio_en)
+{
+ int rc = 0, i;
+
+ if (gpio_en) {
+ for (i = 0; i < gpio_tbl_size; i++) {
+ gpio_set_value_cansleep(gpio_tbl[i].gpio,
+ gpio_tbl[i].flags);
+ usleep_range(gpio_tbl[i].delay,
+ gpio_tbl[i].delay + 1000);
+ }
+ } else {
+ for (i = gpio_tbl_size - 1; i >= 0; i--) {
+ if (gpio_tbl[i].flags)
+ gpio_set_value_cansleep(gpio_tbl[i].gpio,
+ GPIOF_OUT_INIT_LOW);
+ }
+ }
+ return rc;
+}
+
+int msm_camera_config_single_vreg(struct device *dev,
+ struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config)
+{
+ int rc = 0;
+ const char *vreg_name = NULL;
+
+ if (!dev || !cam_vreg || !reg_ptr) {
+ pr_err("%s: get failed NULL parameter\n", __func__);
+ goto vreg_get_fail;
+ }
+ if (cam_vreg->type == VREG_TYPE_CUSTOM) {
+ if (cam_vreg->custom_vreg_name == NULL) {
+ pr_err("%s : can't find sub reg name\n",
+ __func__);
+ goto vreg_get_fail;
+ }
+ vreg_name = cam_vreg->custom_vreg_name;
+ } else {
+ if (cam_vreg->reg_name == NULL) {
+ pr_err("%s : can't find reg name\n", __func__);
+ goto vreg_get_fail;
+ }
+ vreg_name = cam_vreg->reg_name;
+ }
+
+ if (config) {
+ CDBG("%s enable %s\n", __func__, vreg_name);
+ *reg_ptr = regulator_get(dev, vreg_name);
+ if (IS_ERR(*reg_ptr)) {
+ pr_err("%s: %s get failed\n", __func__, vreg_name);
+ *reg_ptr = NULL;
+ goto vreg_get_fail;
+ }
+ if (regulator_count_voltages(*reg_ptr) > 0) {
+ CDBG("%s: voltage min=%d, max=%d\n",
+ __func__, cam_vreg->min_voltage,
+ cam_vreg->max_voltage);
+ rc = regulator_set_voltage(
+ *reg_ptr, cam_vreg->min_voltage,
+ cam_vreg->max_voltage);
+ if (rc < 0) {
+ pr_err("%s: %s set voltage failed\n",
+ __func__, vreg_name);
+ goto vreg_set_voltage_fail;
+ }
+ if (cam_vreg->op_mode >= 0) {
+ rc = regulator_set_load(*reg_ptr,
+ cam_vreg->op_mode);
+ if (rc < 0) {
+ pr_err(
+ "%s: %s set optimum mode failed\n",
+ __func__, vreg_name);
+ goto vreg_set_opt_mode_fail;
+ }
+ }
+ }
+ rc = regulator_enable(*reg_ptr);
+ if (rc < 0) {
+ pr_err("%s: %s regulator_enable failed\n", __func__,
+ vreg_name);
+ goto vreg_unconfig;
+ }
+ } else {
+ CDBG("%s disable %s\n", __func__, vreg_name);
+ if (*reg_ptr) {
+ CDBG("%s disable %s\n", __func__, vreg_name);
+ regulator_disable(*reg_ptr);
+ if (regulator_count_voltages(*reg_ptr) > 0) {
+ if (cam_vreg->op_mode >= 0)
+ regulator_set_load(*reg_ptr, 0);
+ regulator_set_voltage(
+ *reg_ptr, 0, cam_vreg->max_voltage);
+ }
+ regulator_put(*reg_ptr);
+ *reg_ptr = NULL;
+ } else {
+ pr_err("%s can't disable %s\n", __func__, vreg_name);
+ }
+ }
+ return 0;
+
+vreg_unconfig:
+ if (regulator_count_voltages(*reg_ptr) > 0)
+ regulator_set_load(*reg_ptr, 0);
+
+vreg_set_opt_mode_fail:
+ if (regulator_count_voltages(*reg_ptr) > 0)
+ regulator_set_voltage(*reg_ptr, 0, cam_vreg->max_voltage);
+
+vreg_set_voltage_fail:
+ regulator_put(*reg_ptr);
+ *reg_ptr = NULL;
+
+vreg_get_fail:
+ return -ENODEV;
+}
+
+int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
+ int gpio_en)
+{
+ int rc = 0, i = 0, err = 0;
+
+ if (!gpio_tbl || !size) {
+ pr_err("%s:%d invalid gpio_tbl %pK / size %d\n", __func__,
+ __LINE__, gpio_tbl, size);
+ return -EINVAL;
+ }
+ for (i = 0; i < size; i++) {
+ CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
+ gpio_tbl[i].gpio, gpio_tbl[i].flags);
+ }
+ if (gpio_en) {
+ for (i = 0; i < size; i++) {
+ err = gpio_request_one(gpio_tbl[i].gpio,
+ gpio_tbl[i].flags, gpio_tbl[i].label);
+ if (err) {
+ /*
+ * After a GPIO request fails, continue to
+ * apply the remaining gpios; output an error
+ * message for driver bring-up debug
+ */
+ pr_err("%s:%d gpio %d:%s request fails\n",
+ __func__, __LINE__,
+ gpio_tbl[i].gpio, gpio_tbl[i].label);
+ }
+ }
+ } else {
+ gpio_free_array(gpio_tbl, size);
+ }
+ return rc;
+}
+
+/*
+ * msm_camera_get_dt_reg_settings - Get dt reg settings from device-tree.
+ * @of_node: Pointer to device of_node from dev.
+ * @dt_prop_name: String of the property to search in of_node from dev.
+ * @reg_s: Double pointer; an array is allocated and filled by this function.
+ * @size: Pointer filled with the number of entries read.
+ */
+int msm_camera_get_dt_reg_settings(struct device_node *of_node,
+ const char *dt_prop_name, uint32_t **reg_s,
+ unsigned int *size)
+{
+ int ret;
+ unsigned int cnt;
+
+ if (!of_node || !dt_prop_name || !size || !reg_s) {
+ pr_err("%s: Error invalid args %pK:%pK:%pK:%pK\n",
+ __func__, size, reg_s, of_node, dt_prop_name);
+ return -EINVAL;
+ }
+ if (!of_get_property(of_node, dt_prop_name, &cnt)) {
+ pr_debug("Missing dt reg settings for %s\n", dt_prop_name);
+ return -ENOENT;
+ }
+
+ if (!cnt || (cnt % 8)) {
+ pr_err("%s: Error invalid number of entries cnt=%d\n",
+ __func__, cnt);
+ return -EINVAL;
+ }
+ cnt /= 4;
+ if (cnt != 0) {
+ *reg_s = kcalloc(cnt, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!*reg_s)
+ return -ENOMEM;
+ ret = of_property_read_u32_array(of_node,
+ dt_prop_name,
+ *reg_s,
+ cnt);
+ if (ret < 0) {
+ pr_err("%s: No dt reg info read for %s ret=%d\n",
+ __func__, dt_prop_name, ret);
+ kfree(*reg_s);
+ return -ENOENT;
+ }
+ *size = cnt;
+ } else {
+ pr_err("%s: Error invalid entries\n", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
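+/*
+ * Illustrative (hypothetical) usage, assuming a mapped register base and a
+ * DT property that lists <offset value> cell pairs; the property name below
+ * is an example only, the helper just requires an even number of u32 cells:
+ *
+ *   qcom,example-reg-settings = <0x20 0x10 0x24 0x30>;
+ *
+ *   uint32_t *settings;
+ *   unsigned int num;
+ *
+ *   if (!msm_camera_get_dt_reg_settings(of_node,
+ *           "qcom,example-reg-settings", &settings, &num)) {
+ *       msm_camera_hw_write_dt_reg_settings(base, settings, num);
+ *       msm_camera_put_dt_reg_settings(&settings, &num);
+ *   }
+ */
+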
+/*
+ * msm_camera_put_dt_reg_settings - Free dt reg settings memory.
+ * @reg_s: Double pointer to the settings allocated by
+ * msm_camera_get_dt_reg_settings; freed and set to NULL here.
+ * @size: Pointer to the entry count; reset to 0.
+ */
+void msm_camera_put_dt_reg_settings(uint32_t **reg_s,
+ unsigned int *size)
+{
+ kfree(*reg_s);
+ *reg_s = NULL;
+ *size = 0;
+}
+
+int msm_camera_hw_write_dt_reg_settings(void __iomem *base,
+ uint32_t *reg_s,
+ unsigned int size)
+{
+ int32_t rc = 0;
+
+ if (!reg_s || !base || !size) {
+ pr_err("%s: Error invalid args\n", __func__);
+ return -EINVAL;
+ }
+ rc = msm_camera_io_w_reg_block((const u32 *) reg_s,
+ base, size);
+ if (rc < 0)
+ pr_err("%s: Failed dt reg setting write\n", __func__);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/ais/common/msm_camera_io_util.h b/drivers/media/platform/msm/ais/common/msm_camera_io_util.h
new file mode 100644
index 000000000000..338e24d45500
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/msm_camera_io_util.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CAMERA_IO_UTIL_H
+#define __MSM_CAMERA_IO_UTIL_H
+
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <soc/qcom/ais.h>
+#include <media/ais/msm_ais_sensor.h>
+#include <media/v4l2-ioctl.h>
+
+#define NO_SET_RATE -1
+#define INIT_RATE -2
+
+struct msm_gpio_set_tbl {
+ unsigned gpio;
+ unsigned long flags;
+ uint32_t delay;
+};
+
+struct msm_cam_dump_string_info {
+ const char *print;
+ uint32_t offset;
+};
+
+void msm_camera_io_w(u32 data, void __iomem *addr);
+void msm_camera_io_w_mb(u32 data, void __iomem *addr);
+u32 msm_camera_io_r(void __iomem *addr);
+u32 msm_camera_io_r_mb(void __iomem *addr);
+void msm_camera_io_dump(void __iomem *addr, int size, int enable);
+void msm_camera_io_memcpy(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len);
+void msm_camera_io_memcpy_mb(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len);
+int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
+ struct msm_cam_clk_info *clk_src_info, int num_clk);
+int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
+ struct clk **clk_ptr, int num_clk, int enable);
+
+int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
+ int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
+ int num_vreg_seq, struct regulator **reg_ptr, int config);
+int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
+ int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
+ int num_vreg_seq, struct regulator **reg_ptr, int enable);
+
+void msm_camera_bus_scale_cfg(uint32_t bus_perf_client,
+ enum msm_bus_perf_setting perf_setting);
+
+int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
+ uint8_t gpio_tbl_size, int gpio_en);
+
+void msm_camera_config_single_gpio(uint16_t gpio, unsigned long flags,
+ int gpio_en);
+
+int msm_camera_config_single_vreg(struct device *dev,
+ struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config);
+
+int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
+ int gpio_en);
+void msm_camera_io_dump_wstring_base(void __iomem *addr,
+ struct msm_cam_dump_string_info *dump_data,
+ int size);
+int32_t msm_camera_io_poll_value_wmask(void __iomem *addr, u32 wait_data,
+ u32 bmask, u32 retry, unsigned long min_usecs,
+ unsigned long max_usecs);
+int32_t msm_camera_io_poll_value(void __iomem *addr, u32 wait_data, u32 retry,
+ unsigned long min_usecs, unsigned long max_usecs);
+int32_t msm_camera_io_w_block(const u32 *addr, void __iomem *base, u32 len);
+int32_t msm_camera_io_w_reg_block(const u32 *addr, void __iomem *base, u32 len);
+int32_t msm_camera_io_w_mb_block(const u32 *addr, void __iomem *base, u32 len);
+int msm_camera_get_dt_reg_settings(struct device_node *of_node,
+ const char *dt_prop_name, uint32_t **reg_s,
+ unsigned int *size);
+void msm_camera_put_dt_reg_settings(uint32_t **reg_s,
+ unsigned int *size);
+int msm_camera_hw_write_dt_reg_settings(void __iomem *base,
+ uint32_t *reg_s,
+ unsigned int size);
+#endif
diff --git a/drivers/media/platform/msm/ais/fd/Makefile b/drivers/media/platform/msm/ais/fd/Makefile
new file mode 100644
index 000000000000..6a5e9edc3736
--- /dev/null
+++ b/drivers/media/platform/msm/ais/fd/Makefile
@@ -0,0 +1,8 @@
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/pproc/cpp
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_buf_mgr/
+
+obj-$(CONFIG_MSM_AIS_FD) += msm_fd_dev.o msm_fd_hw.o
diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
new file mode 100644
index 000000000000..7a4acf6ec815
--- /dev/null
+++ b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
@@ -0,0 +1,1441 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-core.h>
+
+#include "msm_fd_dev.h"
+#include "msm_fd_hw.h"
+#include "msm_fd_regs.h"
+
+#define MSM_FD_DRV_NAME "msm_fd"
+
+#define MSM_FD_WORD_SIZE_BYTES 4
+
+/* Face detection thresholds definitions */
+#define MSM_FD_DEF_THRESHOLD 5
+#define MSM_FD_MAX_THRESHOLD_VALUE 9
+
+/* Face angle lookup table */
+#define MSM_FD_DEF_ANGLE_IDX 2
+static int msm_fd_angle[] = {45, 135, 359};
+
+/* Face direction lookup table */
+#define MSM_FD_DEF_DIR_IDX 0
+static int msm_fd_dir[] = {0, 90, 270, 180};
+
+/* Minimum face size lookup table */
+#define MSM_FD_DEF_MIN_SIZE_IDX 0
+static int msm_fd_min_size[] = {20, 25, 32, 40};
+
+/* Face detection size lookup table */
+static struct msm_fd_size fd_size[] = {
+ {
+ .width = 320,
+ .height = 240,
+ .reg_val = MSM_FD_IMAGE_SIZE_QVGA,
+ .work_size = (13120 * MSM_FD_WORD_SIZE_BYTES),
+ },
+ {
+ .width = 427,
+ .height = 240,
+ .reg_val = MSM_FD_IMAGE_SIZE_WQVGA,
+ .work_size = (17744 * MSM_FD_WORD_SIZE_BYTES),
+ },
+ {
+ .width = 640,
+ .height = 480,
+ .reg_val = MSM_FD_IMAGE_SIZE_VGA,
+ .work_size = (52624 * MSM_FD_WORD_SIZE_BYTES),
+ },
+ {
+ .width = 854,
+ .height = 480,
+ .reg_val = MSM_FD_IMAGE_SIZE_WVGA,
+ .work_size = (70560 * MSM_FD_WORD_SIZE_BYTES),
+ },
+};
+
+/*
+ * msm_fd_ctx_from_fh - Get fd context from v4l2 fh.
+ * @fh: Pointer to v4l2 fh.
+ */
+static inline struct fd_ctx *msm_fd_ctx_from_fh(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct fd_ctx, fh);
+}
+
+/*
+ * msm_fd_get_format_index - Get format index from v4l2 format.
+ * @f: Pointer to v4l2 format struct.
+ */
+static int msm_fd_get_format_index(struct v4l2_format *f)
+{
+ int index;
+
+ for (index = 0; index < ARRAY_SIZE(fd_size); index++) {
+ if (f->fmt.pix.width <= fd_size[index].width &&
+ f->fmt.pix.height <= fd_size[index].height)
+ return index;
+ }
+ return index - 1;
+}
+
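+/*
+ * Example of the selection above: a 400x300 request maps to the VGA entry
+ * (640x480), the smallest table entry that fits both dimensions; a request
+ * larger than every entry falls back to the last one, WVGA (854x480).
+ */
+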
+/*
+ * msm_fd_get_idx_from_value - Get array index from value.
+ * @value: Value for which index is needed.
+ * @array: Array in which index is searched for.
+ * @array_size: Array size.
+ */
+static int msm_fd_get_idx_from_value(int value, int *array, int array_size)
+{
+ int index;
+ int i;
+
+ index = 0;
+ for (i = 1; i < array_size; i++) {
+ if (value == array[i]) {
+ index = i;
+ break;
+ }
+ if (abs(value - array[i]) < abs(value - array[index]))
+ index = i;
+ }
+ return index;
+}
+
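+/*
+ * Example: msm_fd_get_idx_from_value(100, msm_fd_angle,
+ * ARRAY_SIZE(msm_fd_angle)) returns index 1, since 135 is the closest
+ * supported angle to 100.
+ */
+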
+/*
+ * msm_fd_fill_format_from_index - Fill v4l2 format struct from size index.
+ * @f: Pointer of v4l2 struct which will be filled.
+ * @index: Size index (Format will be filled based on this index).
+ */
+static int msm_fd_fill_format_from_index(struct v4l2_format *f, int index)
+{
+ f->fmt.pix.width = fd_size[index].width;
+ f->fmt.pix.height = fd_size[index].height;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_GREY;
+ if (f->fmt.pix.bytesperline < f->fmt.pix.width)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline, 16);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
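+/*
+ * Worked example of the arithmetic above: for the WQVGA entry (427x240)
+ * with no caller-supplied stride, bytesperline becomes ALIGN(427, 16) = 432
+ * and sizeimage becomes 432 * 240 = 103680 bytes.
+ */
+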
+/*
+ * msm_fd_fill_format_from_ctx - Fill v4l2 format struct from fd context.
+ * @f: Pointer of v4l2 struct which will be filled.
+ * @c: Pointer to fd context.
+ */
+static int msm_fd_fill_format_from_ctx(struct v4l2_format *f, struct fd_ctx *c)
+{
+ if (c->format.size == NULL)
+ return -EINVAL;
+
+ f->fmt.pix.width = c->format.size->width;
+ f->fmt.pix.height = c->format.size->height;
+ f->fmt.pix.pixelformat = c->format.pixelformat;
+ f->fmt.pix.bytesperline = c->format.bytesperline;
+ f->fmt.pix.sizeimage = c->format.sizeimage;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/*
+ * msm_fd_queue_setup - vb2_ops queue_setup callback.
+ * @q: Pointer to vb2 queue struct.
+ * @fmt: Pointer to v4l2 format struct (NULL is valid argument).
+ * @num_buffers: Pointer of number of buffers requested.
+ * @num_planes: Pointer to number of planes requested.
+ * @sizes: Array containing sizes of planes.
+ * @alloc_ctxs: Array of allocated contexts for each plane.
+ */
+static int msm_fd_queue_setup(struct vb2_queue *q,
+ const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct fd_ctx *ctx = vb2_get_drv_priv(q);
+
+ *num_planes = 1;
+
+ if (fmt == NULL)
+ sizes[0] = ctx->format.sizeimage;
+ else
+ sizes[0] = fmt->fmt.pix.sizeimage;
+
+ alloc_ctxs[0] = &ctx->mem_pool;
+
+ return 0;
+}
+
+/*
+ * msm_fd_buf_init - vb2_ops buf_init callback.
+ * @vb: Pointer to vb2 buffer struct.
+ */
+int msm_fd_buf_init(struct vb2_buffer *vb)
+{
+ struct msm_fd_buffer *fd_buffer =
+ (struct msm_fd_buffer *)vb;
+
+ INIT_LIST_HEAD(&fd_buffer->list);
+ atomic_set(&fd_buffer->active, 0);
+
+ return 0;
+}
+
+/*
+ * msm_fd_buf_queue - vb2_ops buf_queue callback.
+ * @vb: Pointer to vb2 buffer struct.
+ */
+static void msm_fd_buf_queue(struct vb2_buffer *vb)
+{
+ struct fd_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct msm_fd_buffer *fd_buffer =
+ (struct msm_fd_buffer *)vb;
+
+ fd_buffer->format = ctx->format;
+ fd_buffer->settings = ctx->settings;
+ fd_buffer->work_addr = ctx->work_buf.addr;
+ msm_fd_hw_add_buffer(ctx->fd_device, fd_buffer);
+
+ if (vb->vb2_queue->streaming)
+ msm_fd_hw_schedule_and_start(ctx->fd_device);
+}
+
+/*
+ * msm_fd_start_streaming - vb2_ops start_streaming callback.
+ * @q: Pointer to vb2 queue struct.
+ * @count: Number of buffer queued before stream on call.
+ */
+static int msm_fd_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fd_ctx *ctx = vb2_get_drv_priv(q);
+ int ret;
+
+ if (ctx->work_buf.fd == -1) {
+ dev_err(ctx->fd_device->dev, "Missing working buffer\n");
+ return -EINVAL;
+ }
+
+ ret = msm_fd_hw_get(ctx->fd_device, ctx->settings.speed);
+ if (ret < 0) {
+ dev_err(ctx->fd_device->dev, "Can not acquire fd hw\n");
+ goto out;
+ }
+
+ ret = msm_fd_hw_schedule_and_start(ctx->fd_device);
+ if (ret < 0)
+ dev_err(ctx->fd_device->dev, "Can not start fd hw\n");
+
+out:
+ return ret;
+}
+
+/*
+ * msm_fd_stop_streaming - vb2_ops stop_streaming callback.
+ * @q: Pointer to vb2 queue struct.
+ */
+static void msm_fd_stop_streaming(struct vb2_queue *q)
+{
+ struct fd_ctx *ctx = vb2_get_drv_priv(q);
+
+ mutex_lock(&ctx->fd_device->recovery_lock);
+ msm_fd_hw_remove_buffers_from_queue(ctx->fd_device, q);
+ msm_fd_hw_put(ctx->fd_device);
+ mutex_unlock(&ctx->fd_device->recovery_lock);
+}
+
+/* Videobuf2 queue callbacks. */
+static struct vb2_ops msm_fd_vb2_q_ops = {
+ .queue_setup = msm_fd_queue_setup,
+ .buf_init = msm_fd_buf_init,
+ .buf_queue = msm_fd_buf_queue,
+ .start_streaming = msm_fd_start_streaming,
+ .stop_streaming = msm_fd_stop_streaming,
+};
+
+/*
+ * msm_fd_get_userptr - Map and get buffer handler for user pointer buffer.
+ * @alloc_ctx: Contexts allocated in buf_setup.
+ * @vaddr: Virtual addr passed from userspace (in our case an ion fd)
+ * @size: Size of the buffer
+ * @write: True if buffer will be used for writing the data.
+ */
+static void *msm_fd_get_userptr(void *alloc_ctx,
+ unsigned long vaddr, unsigned long size, int write)
+{
+ struct msm_fd_mem_pool *pool = alloc_ctx;
+ struct msm_fd_buf_handle *buf;
+ int ret;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = msm_fd_hw_map_buffer(pool, vaddr, buf);
+ if (ret < 0 || buf->size < size)
+ goto error;
+
+ return buf;
+error:
+ kzfree(buf);
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * msm_fd_put_userptr - Unmap and free buffer handler.
+ * @buf_priv: Buffer handler allocated get_userptr callback.
+ */
+static void msm_fd_put_userptr(void *buf_priv)
+{
+ if (IS_ERR_OR_NULL(buf_priv))
+ return;
+
+ msm_fd_hw_unmap_buffer(buf_priv);
+
+ kzfree(buf_priv);
+}
+
+/* Videobuf2 memory callbacks. */
+static struct vb2_mem_ops msm_fd_vb2_mem_ops = {
+ .get_userptr = msm_fd_get_userptr,
+ .put_userptr = msm_fd_put_userptr,
+};
+
+/*
+ * msm_fd_vbif_error_handler - FD VBIF Error handler
+ * @handle: FD Device handle
+ * @error: CPP-VBIF Error code
+ */
+static int msm_fd_vbif_error_handler(void *handle, uint32_t error)
+{
+ struct fd_ctx *ctx;
+ struct msm_fd_device *fd;
+ struct msm_fd_buffer *active_buf;
+ int ret;
+
+ if (handle == NULL) {
+ pr_err("FD Ctx is null, Cannot recover\n");
+ return 0;
+ }
+ ctx = (struct fd_ctx *)handle;
+ fd = (struct msm_fd_device *)ctx->fd_device;
+
+ if (error == CPP_VBIF_ERROR_HANG) {
+ mutex_lock(&fd->recovery_lock);
+ dev_err(fd->dev, "Handling FD VBIF Hang\n");
+ if (fd->state != MSM_FD_DEVICE_RUNNING) {
+ dev_err(fd->dev, "FD is not FD_DEVICE_RUNNING, %d\n",
+ fd->state);
+ mutex_unlock(&fd->recovery_lock);
+ return 0;
+ }
+ fd->recovery_mode = 1;
+
+ /* Halt and reset */
+ msm_fd_hw_put(fd);
+ msm_fd_hw_get(fd, ctx->settings.speed);
+
+ /* Get active buffer */
+ active_buf = msm_fd_hw_get_active_buffer(fd);
+
+ if (active_buf == NULL) {
+ dev_dbg(fd->dev, "no active buffer, return\n");
+ fd->recovery_mode = 0;
+ mutex_unlock(&fd->recovery_lock);
+ return 0;
+ }
+
+ dev_dbg(fd->dev, "Active buffer present, start re-schedule\n");
+
+ /* Queue the buffer again */
+ msm_fd_hw_add_buffer(fd, active_buf);
+
+ /* Schedule and restart */
+ ret = msm_fd_hw_schedule_next_buffer(fd);
+ if (ret) {
+ dev_err(fd->dev, "Cannot reschedule buffer, recovery failed\n");
+ fd->recovery_mode = 0;
+ mutex_unlock(&fd->recovery_lock);
+ return ret;
+ }
+ dev_dbg(fd->dev, "Restarted FD after VBIF Hang\n");
+ mutex_unlock(&fd->recovery_lock);
+ }
+ return 0;
+}
+
+/*
+ * msm_fd_open - Fd device open method.
+ * @file: Pointer to file struct.
+ */
+static int msm_fd_open(struct file *file)
+{
+ struct msm_fd_device *device = video_drvdata(file);
+ struct video_device *video = video_devdata(file);
+ struct fd_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->fd_device = device;
+
+ /* Initialize work buffer handler */
+ ctx->work_buf.pool = NULL;
+ ctx->work_buf.fd = -1;
+
+ /* Set ctx defaults */
+ ctx->settings.speed = ctx->fd_device->clk_rates_num - 1;
+ ctx->settings.angle_index = MSM_FD_DEF_ANGLE_IDX;
+ ctx->settings.direction_index = MSM_FD_DEF_DIR_IDX;
+ ctx->settings.min_size_index = MSM_FD_DEF_MIN_SIZE_IDX;
+ ctx->settings.threshold = MSM_FD_DEF_THRESHOLD;
+
+ atomic_set(&ctx->subscribed_for_event, 0);
+
+ v4l2_fh_init(&ctx->fh, video);
+
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->vb2_q.drv_priv = ctx;
+ ctx->vb2_q.mem_ops = &msm_fd_vb2_mem_ops;
+ ctx->vb2_q.ops = &msm_fd_vb2_q_ops;
+ ctx->vb2_q.buf_struct_size = sizeof(struct msm_fd_buffer);
+ ctx->vb2_q.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ctx->vb2_q.io_modes = VB2_USERPTR;
+ ctx->vb2_q.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ ret = vb2_queue_init(&ctx->vb2_q);
+ if (ret < 0) {
+ dev_err(device->dev, "Error queue init\n");
+ goto error_vb2_queue_init;
+ }
+
+ ctx->mem_pool.fd_device = ctx->fd_device;
+ ctx->stats = vmalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS);
+ if (!ctx->stats) {
+ dev_err(device->dev, "No memory for face statistics\n");
+ ret = -ENOMEM;
+ goto error_stats_vmalloc;
+ }
+
+ ret = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_FD,
+ CAM_AHB_SVS_VOTE);
+ if (ret < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto error_ahb_config;
+ }
+
+ /* Register with CPP VBIF error handler */
+ msm_cpp_vbif_register_error_handler((void *)ctx,
+ VBIF_CLIENT_FD, msm_fd_vbif_error_handler);
+
+ return 0;
+
+error_ahb_config:
+ vfree(ctx->stats);
+error_stats_vmalloc:
+ vb2_queue_release(&ctx->vb2_q);
+error_vb2_queue_init:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+/*
+ * msm_fd_release - Fd device release method.
+ * @file: Pointer to file struct.
+ */
+static int msm_fd_release(struct file *file)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data);
+
+ /* Un-register with CPP VBIF error handler */
+ msm_cpp_vbif_register_error_handler((void *)ctx,
+ VBIF_CLIENT_FD, NULL);
+
+ vb2_queue_release(&ctx->vb2_q);
+
+ vfree(ctx->stats);
+
+ if (ctx->work_buf.fd != -1)
+ msm_fd_hw_unmap_buffer(&ctx->work_buf);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ kfree(ctx);
+
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_FD,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+
+ return 0;
+}
+
+/*
+ * msm_fd_poll - Fd device poll method.
+ * @file: Pointer to file struct.
+ * @wait: Pointer to poll table struct.
+ */
+static unsigned int msm_fd_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data);
+ unsigned int ret;
+
+ ret = vb2_poll(&ctx->vb2_q, file, wait);
+
+ if (atomic_read(&ctx->subscribed_for_event)) {
+ poll_wait(file, &ctx->fh.wait, wait);
+ if (v4l2_event_pending(&ctx->fh))
+ ret |= POLLPRI;
+ }
+
+ return ret;
+}
+
+/*
+ * msm_fd_private_ioctl - V4l2 private ioctl handler.
+ * @file: Pointer to file struct.
+ * @fd: V4l2 device file handle.
+ * @valid_prio: Priority ioctl valid flag.
+ * @cmd: Ioctl command.
+ * @arg: Ioctl argument.
+ */
+static long msm_fd_private_ioctl(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ struct msm_fd_result *req_result = arg;
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ struct msm_fd_stats *stats;
+ int stats_idx;
+ int ret = 0;
+ int i;
+
+ switch (cmd) {
+ case VIDIOC_MSM_FD_GET_RESULT:
+ if (req_result->frame_id == 0) {
+ dev_err(ctx->fd_device->dev, "Invalid frame id\n");
+ return -EINVAL;
+ }
+
+ stats_idx = req_result->frame_id % MSM_FD_MAX_RESULT_BUFS;
+ stats = &ctx->stats[stats_idx];
+ if (req_result->frame_id != atomic_read(&stats->frame_id)) {
+ dev_err(ctx->fd_device->dev, "Stats not available\n");
+ return -EINVAL;
+ }
+
+ if (req_result->face_cnt > stats->face_cnt)
+ req_result->face_cnt = stats->face_cnt;
+
+ for (i = 0; i < req_result->face_cnt; i++) {
+ ret = copy_to_user((void __user *)
+ &req_result->face_data[i],
+ &stats->face_data[i],
+ sizeof(struct msm_fd_face_data));
+ if (ret) {
+ dev_err(ctx->fd_device->dev, "Copy to user\n");
+ return -EFAULT;
+ }
+ }
+
+ if (req_result->frame_id != atomic_read(&stats->frame_id)) {
+ dev_err(ctx->fd_device->dev, "Erroneous buffer\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(ctx->fd_device->dev, "Wrong ioctl type %x\n", cmd);
+ ret = -ENOTTY;
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * msm_fd_compat_ioctl32 - Compat ioctl handler function.
+ * @file: Pointer to file struct.
+ * @cmd: Ioctl command.
+ * @arg: Ioctl argument.
+ */
+static long msm_fd_compat_ioctl32(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case VIDIOC_MSM_FD_GET_RESULT32:
+ {
+ struct msm_fd_result32 result32;
+ struct msm_fd_result result;
+
+ if (copy_from_user(&result32, (void __user *)arg,
+ sizeof(result32)))
+ return -EFAULT;
+
+ result.frame_id = result32.frame_id;
+ result.face_cnt = result32.face_cnt;
+ result.face_data = compat_ptr(result32.face_data);
+
+ ret = msm_fd_private_ioctl(file, file->private_data,
+ 0, VIDIOC_MSM_FD_GET_RESULT, (void *)&result);
+
+ result32.frame_id = result.frame_id;
+ result32.face_cnt = result.face_cnt;
+
+ if (copy_to_user((void __user *)arg, &result32,
+ sizeof(result32)))
+ return -EFAULT;
+
+ break;
+ }
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+
+ }
+
+ return ret;
+}
+#endif
+
+/* Fd device file operations callbacks */
+static const struct v4l2_file_operations fd_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_fd_open,
+ .release = msm_fd_release,
+ .poll = msm_fd_poll,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = msm_fd_compat_ioctl32,
+#endif
+};
+
+/*
+ * msm_fd_querycap - V4l2 ioctl query capability handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @cap: Pointer to v4l2_capability struct need to be filled.
+ */
+static int msm_fd_querycap(struct file *file,
+ void *fh, struct v4l2_capability *cap)
+{
+ cap->bus_info[0] = 0;
+ strlcpy(cap->driver, MSM_FD_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, MSM_FD_DRV_NAME, sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+
+ return 0;
+}
+
+/*
+ * msm_fd_enum_fmt_vid_out - V4l2 ioctl enumerate format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_fmtdesc struct need to be filled.
+ */
+static int msm_fd_enum_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_fmtdesc *f)
+{
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_PIX_FMT_GREY;
+ strlcpy(f->description, "8 Greyscale",
+ sizeof(f->description));
+
+ return 0;
+}
+
+/*
+ * msm_fd_g_fmt - V4l2 ioctl get format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct need to be filled.
+ */
+static int msm_fd_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ return msm_fd_fill_format_from_ctx(f, ctx);
+}
+
+/*
+ * msm_fd_try_fmt_vid_out - V4l2 ioctl try format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_fd_try_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ int index;
+
+ index = msm_fd_get_format_index(f);
+
+ return msm_fd_fill_format_from_index(f, index);
+}
+
+/*
+ * msm_fd_s_fmt_vid_out - V4l2 ioctl set format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_fd_s_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int index;
+
+ index = msm_fd_get_format_index(f);
+
+ msm_fd_fill_format_from_index(f, index);
+
+ ctx->format.size = &fd_size[index];
+ ctx->format.pixelformat = f->fmt.pix.pixelformat;
+ ctx->format.bytesperline = f->fmt.pix.bytesperline;
+ ctx->format.sizeimage = f->fmt.pix.sizeimage;
+
+ /* Initialize crop */
+ ctx->format.crop.top = 0;
+ ctx->format.crop.left = 0;
+ ctx->format.crop.width = fd_size[index].width;
+ ctx->format.crop.height = fd_size[index].height;
+
+ return 0;
+}
+
+/*
+ * msm_fd_reqbufs - V4l2 ioctl request buffers handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @req: Pointer to v4l2_requestbuffer struct.
+ */
+static int msm_fd_reqbufs(struct file *file,
+ void *fh, struct v4l2_requestbuffers *req)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ return vb2_reqbufs(&ctx->vb2_q, req);
+}
+
+/*
+ * msm_fd_qbuf - V4l2 ioctl queue buffer handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @pb: Pointer to v4l2_buffer struct.
+ */
+static int msm_fd_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *pb)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ return vb2_qbuf(&ctx->vb2_q, pb);
+}
+
+/*
+ * msm_fd_dqbuf - V4l2 ioctl dequeue buffer handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @pb: Pointer to v4l2_buffer struct.
+ */
+static int msm_fd_dqbuf(struct file *file,
+ void *fh, struct v4l2_buffer *pb)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ return vb2_dqbuf(&ctx->vb2_q, pb, file->f_flags & O_NONBLOCK);
+}
+
+/*
+ * msm_fd_streamon - V4l2 ioctl stream on handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf_type: V4l2 buffer type.
+ */
+static int msm_fd_streamon(struct file *file,
+ void *fh, enum v4l2_buf_type buf_type)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int ret;
+
+ ret = vb2_streamon(&ctx->vb2_q, buf_type);
+ if (ret < 0)
+ dev_err(ctx->fd_device->dev, "Stream on fails\n");
+
+ return ret;
+}
+
+/*
+ * msm_fd_streamoff - V4l2 ioctl stream off handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf_type: V4l2 buffer type.
+ */
+static int msm_fd_streamoff(struct file *file,
+ void *fh, enum v4l2_buf_type buf_type)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int ret;
+
+ ret = vb2_streamoff(&ctx->vb2_q, buf_type);
+ if (ret < 0)
+ dev_err(ctx->fd_device->dev, "Stream off fails\n");
+
+ return ret;
+}
+
+/*
+ * msm_fd_subscribe_event - V4l2 ioctl subscribe for event handler.
+ * @fh: V4l2 File handle.
+ * @sub: Pointer to v4l2_event_subscription containing event information.
+ */
+static int msm_fd_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int ret;
+
+ if (sub->type != MSM_EVENT_FD)
+ return -EINVAL;
+
+ ret = v4l2_event_subscribe(fh, sub, MSM_FD_MAX_RESULT_BUFS, NULL);
+ if (!ret)
+ atomic_set(&ctx->subscribed_for_event, 1);
+
+ return ret;
+}
+
+/*
+ * msm_fd_unsubscribe_event - V4l2 ioctl unsubscribe from event handler.
+ * @fh: V4l2 File handle.
+ * @sub: Pointer to v4l2_event_subscription containing event information.
+ */
+static int msm_fd_unsubscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int ret;
+
+ ret = v4l2_event_unsubscribe(fh, sub);
+ if (!ret)
+ atomic_set(&ctx->subscribed_for_event, 0);
+
+ return ret;
+}
+
+/*
+ * msm_fd_guery_ctrl - V4l2 ioctl query control.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @sub: Pointer to v4l2_queryctrl struct info need to be filled based on id.
+ */
+static int msm_fd_guery_ctrl(struct file *file, void *fh,
+ struct v4l2_queryctrl *a)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ switch (a->id) {
+ case V4L2_CID_FD_SPEED:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = ctx->fd_device->clk_rates_num - 1;
+ a->minimum = 0;
+ a->maximum = ctx->fd_device->clk_rates_num - 1;
+ a->step = 1;
+ strlcpy(a->name, "msm fd face speed idx",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_FACE_ANGLE:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = msm_fd_angle[MSM_FD_DEF_ANGLE_IDX];
+ a->minimum = msm_fd_angle[0];
+ a->maximum = msm_fd_angle[ARRAY_SIZE(msm_fd_angle) - 1];
+ a->step = 1;
+ strlcpy(a->name, "msm fd face angle ctrl",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_FACE_DIRECTION:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = msm_fd_dir[MSM_FD_DEF_DIR_IDX];
+ a->minimum = msm_fd_dir[0];
+ a->maximum = msm_fd_dir[ARRAY_SIZE(msm_fd_dir) - 1];
+ a->step = 1;
+ strlcpy(a->name, "msm fd face direction ctrl",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_MIN_FACE_SIZE:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = msm_fd_min_size[MSM_FD_DEF_MIN_SIZE_IDX];
+ a->minimum = msm_fd_min_size[0];
+ a->maximum = msm_fd_min_size[ARRAY_SIZE(msm_fd_min_size) - 1];
+ a->step = 1;
+ strlcpy(a->name, "msm fd minimum face size (pixels)",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_DETECTION_THRESHOLD:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = MSM_FD_DEF_THRESHOLD;
+ a->minimum = 0;
+ a->maximum = MSM_FD_MAX_THRESHOLD_VALUE;
+ a->step = 1;
+ strlcpy(a->name, "msm fd detection threshold",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_SIZE:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = fd_size[0].work_size;
+ a->minimum = fd_size[0].work_size;
+ a->maximum = fd_size[(ARRAY_SIZE(fd_size) - 1)].work_size;
+ a->step = 1;
+ strlcpy(a->name, "msm fd working memory size",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_FD:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = -1;
+ a->minimum = 0;
+ a->maximum = INT_MAX;
+ a->step = 1;
+ strlcpy(a->name, "msm fd ion fd of working memory",
+ sizeof(a->name));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_fd_g_ctrl - V4l2 ioctl get control.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @sub: Pointer to v4l2_queryctrl struct need to be filled.
+ */
+static int msm_fd_g_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ switch (a->id) {
+ case V4L2_CID_FD_SPEED:
+ a->value = ctx->settings.speed;
+ break;
+ case V4L2_CID_FD_FACE_ANGLE:
+ a->value = msm_fd_angle[ctx->settings.angle_index];
+ break;
+ case V4L2_CID_FD_FACE_DIRECTION:
+ a->value = msm_fd_dir[ctx->settings.direction_index];
+ break;
+ case V4L2_CID_FD_MIN_FACE_SIZE:
+ a->value = msm_fd_min_size[ctx->settings.min_size_index];
+ break;
+ case V4L2_CID_FD_DETECTION_THRESHOLD:
+ a->value = ctx->settings.threshold;
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_SIZE:
+ if (!ctx->format.size)
+ return -EINVAL;
+
+ a->value = ctx->format.size->work_size;
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_FD:
+ if (ctx->work_buf.fd == -1)
+ return -EINVAL;
+
+ a->value = ctx->work_buf.fd;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_fd_s_ctrl - V4l2 ioctl set control.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @sub: Pointer to v4l2_queryctrl struct need to be set.
+ */
+static int msm_fd_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int idx;
+ int ret;
+
+ switch (a->id) {
+ case V4L2_CID_FD_SPEED:
+ if (a->value > ctx->fd_device->clk_rates_num - 1)
+ a->value = ctx->fd_device->clk_rates_num - 1;
+ else if (a->value < 0)
+ a->value = 0;
+
+ ctx->settings.speed = a->value;
+ break;
+ case V4L2_CID_FD_FACE_ANGLE:
+ idx = msm_fd_get_idx_from_value(a->value, msm_fd_angle,
+ ARRAY_SIZE(msm_fd_angle));
+
+ ctx->settings.angle_index = idx;
+ a->value = msm_fd_angle[ctx->settings.angle_index];
+ break;
+ case V4L2_CID_FD_FACE_DIRECTION:
+ idx = msm_fd_get_idx_from_value(a->value, msm_fd_dir,
+ ARRAY_SIZE(msm_fd_dir));
+
+ ctx->settings.direction_index = idx;
+ a->value = msm_fd_dir[ctx->settings.direction_index];
+ break;
+ case V4L2_CID_FD_MIN_FACE_SIZE:
+ idx = msm_fd_get_idx_from_value(a->value, msm_fd_min_size,
+ ARRAY_SIZE(msm_fd_min_size));
+
+ ctx->settings.min_size_index = idx;
+ a->value = msm_fd_min_size[ctx->settings.min_size_index];
+ break;
+ case V4L2_CID_FD_DETECTION_THRESHOLD:
+ if (a->value > MSM_FD_MAX_THRESHOLD_VALUE)
+ a->value = MSM_FD_MAX_THRESHOLD_VALUE;
+ else if (a->value < 0)
+ a->value = 0;
+
+ ctx->settings.threshold = a->value;
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_SIZE:
+ if (!ctx->format.size)
+ return -EINVAL;
+
+ if (a->value < ctx->format.size->work_size)
+ a->value = ctx->format.size->work_size;
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_FD:
+ if (ctx->work_buf.fd != -1)
+ msm_fd_hw_unmap_buffer(&ctx->work_buf);
+ if (a->value >= 0) {
+ ret = msm_fd_hw_map_buffer(&ctx->mem_pool,
+ a->value, &ctx->work_buf);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_fd_cropcap - V4l2 ioctl crop capabilities.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @sub: Pointer to v4l2_cropcap struct need to be set.
+ */
+static int msm_fd_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ if (!ctx->format.size) {
+ dev_err(ctx->fd_device->dev, "Cropcap fails format missing\n");
+ return -EINVAL;
+ }
+
+ a->bounds.top = 0;
+ a->bounds.left = 0;
+ a->bounds.width = ctx->format.size->width;
+ a->bounds.height = ctx->format.size->height;
+
+ a->defrect = ctx->format.crop;
+
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+/*
+ * msm_fd_g_crop - V4l2 ioctl get crop.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @sub: Pointer to v4l2_crop struct need to be set.
+ */
+static int msm_fd_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ if (!ctx->format.size) {
+ dev_err(ctx->fd_device->dev, "Get crop, format missing!\n");
+ return -EINVAL;
+ }
+
+ crop->c = ctx->format.crop;
+
+ return 0;
+}
+
+/*
+ * msm_fd_s_crop - V4l2 ioctl set crop.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @sub: Pointer to v4l2_crop struct need to be set.
+ */
+static int msm_fd_s_crop(struct file *file, void *fh,
+ const struct v4l2_crop *crop)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int min_face_size;
+
+ if (!ctx->format.size) {
+ dev_err(ctx->fd_device->dev, "Set crop, format missing!\n");
+ return -EINVAL;
+ }
+
+ /* First check that crop is valid */
+ min_face_size = msm_fd_min_size[ctx->settings.min_size_index];
+
+ if (crop->c.width < min_face_size || crop->c.height < min_face_size)
+ return -EINVAL;
+
+ if (crop->c.width + crop->c.left > ctx->format.size->width)
+ return -EINVAL;
+
+ if (crop->c.height + crop->c.top > ctx->format.size->height)
+ return -EINVAL;
+
+ ctx->format.crop = crop->c;
+
+ return 0;
+}
+
+/* V4l2 ioctl handlers */
+static const struct v4l2_ioctl_ops fd_ioctl_ops = {
+ .vidioc_querycap = msm_fd_querycap,
+ .vidioc_enum_fmt_vid_out = msm_fd_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = msm_fd_g_fmt,
+ .vidioc_try_fmt_vid_out = msm_fd_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = msm_fd_s_fmt_vid_out,
+ .vidioc_reqbufs = msm_fd_reqbufs,
+ .vidioc_qbuf = msm_fd_qbuf,
+ .vidioc_dqbuf = msm_fd_dqbuf,
+ .vidioc_streamon = msm_fd_streamon,
+ .vidioc_streamoff = msm_fd_streamoff,
+ .vidioc_queryctrl = msm_fd_guery_ctrl,
+ .vidioc_s_ctrl = msm_fd_s_ctrl,
+ .vidioc_g_ctrl = msm_fd_g_ctrl,
+ .vidioc_cropcap = msm_fd_cropcap,
+ .vidioc_g_crop = msm_fd_g_crop,
+ .vidioc_s_crop = msm_fd_s_crop,
+ .vidioc_subscribe_event = msm_fd_subscribe_event,
+ .vidioc_unsubscribe_event = msm_fd_unsubscribe_event,
+ .vidioc_default = msm_fd_private_ioctl,
+};
+
+/*
+ * msm_fd_fill_results - Read and fill face detection result.
+ * @fd: Pointer to fd device.
+ * @face: Pointer of face data which information need to be stored.
+ * @idx: Face number index need to be filled.
+ */
+static void msm_fd_fill_results(struct msm_fd_device *fd,
+ struct msm_fd_face_data *face, int idx)
+{
+ int half_face_size;
+
+ msm_fd_hw_get_result_angle_pose(fd, idx, &face->angle, &face->pose);
+
+ msm_fd_hw_get_result_conf_size(fd, idx, &face->confidence,
+ &face->face.width);
+ face->face.height = face->face.width;
+
+ face->face.left = msm_fd_hw_get_result_x(fd, idx);
+ face->face.top = msm_fd_hw_get_result_y(fd, idx);
+
+ half_face_size = (face->face.width >> 1);
+ if (face->face.left > half_face_size)
+ face->face.left -= half_face_size;
+ else
+ face->face.left = 0;
+
+ half_face_size = (face->face.height >> 1);
+ if (face->face.top > half_face_size)
+ face->face.top -= half_face_size;
+ else
+ face->face.top = 0;
+}
+
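+/*
+ * Example of the offset handling above: for a reported face of width 40 at
+ * (left, top) = (100, 60), the result is shifted by half the face size to
+ * (80, 40); values that would go negative are clamped to 0.
+ */
+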
+/*
+ * msm_fd_wq_handler - Fd device workqueue handler.
+ * @work: Pointer to work struct.
+ *
+ * This function is the bottom half of the fd irq. What it does:
+ *
+ * - Stop the fd engine.
+ * - Get the fd result and store it in the stats buffer.
+ * - If a buffer is available, schedule it for processing.
+ * - Send the event to v4l2.
+ * - Release the buffer from the v4l2 queue.
+ */
+static void msm_fd_wq_handler(struct work_struct *work)
+{
+ struct msm_fd_buffer *active_buf;
+ struct msm_fd_stats *stats;
+ struct msm_fd_event *fd_event;
+ struct msm_fd_device *fd;
+ struct fd_ctx *ctx;
+ struct v4l2_event event;
+ int i;
+
+ fd = container_of(work, struct msm_fd_device, work);
+
+ active_buf = msm_fd_hw_get_active_buffer(fd);
+ if (!active_buf) {
+ /* This should never happen, something completely wrong */
+ dev_err(fd->dev, "No active buffer, queue is empty\n");
+ return;
+ }
+ ctx = vb2_get_drv_priv(active_buf->vb.vb2_queue);
+
+ /* Increment sequence number, 0 means sequence is not valid */
+ ctx->sequence++;
+ if (unlikely(!ctx->sequence))
+ ctx->sequence = 1;
+
+ /* Fill face detection statistics */
+ stats = &ctx->stats[ctx->sequence % MSM_FD_MAX_RESULT_BUFS];
+
+ /* First mark stats as invalid */
+ atomic_set(&stats->frame_id, 0);
+
+ stats->face_cnt = msm_fd_hw_get_face_count(fd);
+ for (i = 0; i < stats->face_cnt; i++)
+ msm_fd_fill_results(fd, &stats->face_data[i], i);
+
+ /* Stats are ready, set correct frame id */
+ atomic_set(&stats->frame_id, ctx->sequence);
+
+ /* If Recovery mode is on, we got IRQ after recovery, reset it */
+ if (fd->recovery_mode) {
+ fd->recovery_mode = 0;
+ dev_dbg(fd->dev, "Got IRQ after Recovery\n");
+ }
+
+ /* We have the data from fd hw, we can start next processing */
+ msm_fd_hw_schedule_next_buffer(fd);
+
+ /* Return buffer to vb queue */
+ active_buf->vb.v4l2_buf.sequence = ctx->fh.sequence;
+ vb2_buffer_done(&active_buf->vb, VB2_BUF_STATE_DONE);
+
+ /* Send event */
+ memset(&event, 0x00, sizeof(event));
+ event.type = MSM_EVENT_FD;
+ fd_event = (struct msm_fd_event *)event.u.data;
+ fd_event->face_cnt = stats->face_cnt;
+ fd_event->buf_index = active_buf->vb.v4l2_buf.index;
+ fd_event->frame_id = ctx->sequence;
+ v4l2_event_queue_fh(&ctx->fh, &event);
+
+ /* Release buffer from the device */
+ msm_fd_hw_buffer_done(fd, active_buf);
+}
+
+/*
+ * fd_probe - Fd device probe method.
+ * @pdev: Pointer fd platform device.
+ */
+static int fd_probe(struct platform_device *pdev)
+{
+ struct msm_fd_device *fd;
+ int ret;
+
+ /* Face detection device struct */
+ fd = kzalloc(sizeof(struct msm_fd_device), GFP_KERNEL);
+ if (!fd)
+ return -ENOMEM;
+
+ mutex_init(&fd->lock);
+ spin_lock_init(&fd->slock);
+ mutex_init(&fd->recovery_lock);
+ init_completion(&fd->hw_halt_completion);
+ INIT_LIST_HEAD(&fd->buf_queue);
+ fd->pdev = pdev;
+ fd->dev = &pdev->dev;
+
+ /* Get resources */
+ ret = msm_fd_hw_get_mem_resources(pdev, fd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail get resources\n");
+ ret = -ENODEV;
+ goto error_mem_resources;
+ }
+
+ ret = msm_camera_get_regulator_info(pdev, &fd->vdd_info,
+ &fd->num_reg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail to get regulators\n");
+ goto error_get_regulator;
+ }
+ ret = msm_camera_get_clk_info_and_rates(pdev, &fd->clk_info,
+ &fd->clk, &fd->clk_rates, &fd->clk_rates_num, &fd->clk_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail to get clocks\n");
+ goto error_get_clocks;
+ }
+
+ ret = msm_camera_register_bus_client(pdev, CAM_BUS_CLIENT_FD);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail to get bus\n");
+ goto error_get_bus;
+ }
+
+ /* Get face detect hw before read engine revision */
+ ret = msm_fd_hw_get(fd, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail to get hw\n");
+ goto error_hw_get_request_irq;
+ }
+ fd->hw_revision = msm_fd_hw_get_revision(fd);
+
+ msm_fd_hw_put(fd);
+
+ ret = msm_fd_hw_request_irq(pdev, fd, msm_fd_wq_handler);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail request irq\n");
+ goto error_hw_get_request_irq;
+ }
+
+ /* v4l2 device */
+ ret = v4l2_device_register(&pdev->dev, &fd->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ ret = -ENOENT;
+ goto error_v4l2_register;
+ }
+
+ fd->video.fops = &fd_fops;
+ fd->video.ioctl_ops = &fd_ioctl_ops;
+ fd->video.minor = -1;
+ fd->video.release = video_device_release;
+ fd->video.v4l2_dev = &fd->v4l2_dev;
+ fd->video.vfl_dir = VFL_DIR_TX;
+ fd->video.vfl_type = VFL_TYPE_GRABBER;
+ strlcpy(fd->video.name, MSM_FD_DRV_NAME, sizeof(fd->video.name));
+
+ ret = video_register_device(&fd->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ v4l2_err(&fd->v4l2_dev, "Failed to register video device\n");
+ goto error_video_register;
+ }
+
+ video_set_drvdata(&fd->video, fd);
+
+ platform_set_drvdata(pdev, fd);
+
+ return 0;
+
+error_video_register:
+ v4l2_device_unregister(&fd->v4l2_dev);
+error_v4l2_register:
+ msm_fd_hw_release_irq(fd);
+error_hw_get_request_irq:
+ msm_camera_unregister_bus_client(CAM_BUS_CLIENT_FD);
+error_get_bus:
+ msm_camera_put_clk_info_and_rates(pdev, &fd->clk_info,
+ &fd->clk, &fd->clk_rates, fd->clk_rates_num, fd->clk_num);
+error_get_clocks:
+ msm_camera_put_regulators(pdev, &fd->vdd_info, fd->num_reg);
+error_get_regulator:
+ msm_fd_hw_release_mem_resources(fd);
+error_mem_resources:
+ kfree(fd);
+ return ret;
+}
+
+/*
+ * fd_device_remove - Fd device remove method.
+ * @pdev: Pointer fd platform device.
+ */
+static int fd_device_remove(struct platform_device *pdev)
+{
+ struct msm_fd_device *fd;
+
+ fd = platform_get_drvdata(pdev);
+ if (fd == NULL) {
+ dev_err(&pdev->dev, "Can not get fd drvdata\n");
+ return 0;
+ }
+ video_unregister_device(&fd->video);
+ v4l2_device_unregister(&fd->v4l2_dev);
+ msm_fd_hw_release_irq(fd);
+ msm_camera_unregister_bus_client(CAM_BUS_CLIENT_FD);
+ msm_camera_put_clk_info_and_rates(pdev, &fd->clk_info,
+ &fd->clk, &fd->clk_rates, fd->clk_rates_num, fd->clk_num);
+ msm_camera_put_regulators(pdev, &fd->vdd_info, fd->num_reg);
+ msm_fd_hw_release_mem_resources(fd);
+ kfree(fd);
+
+ return 0;
+}
+
+/* Device tree match struct */
+static const struct of_device_id msm_fd_dt_match[] = {
+ {.compatible = "qcom,face-detection"},
+ {}
+};
+
+/* Fd platform driver definition */
+static struct platform_driver fd_driver = {
+ .probe = fd_probe,
+ .remove = fd_device_remove,
+ .driver = {
+ .name = MSM_FD_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_fd_dt_match,
+ },
+};
+
+static int __init msm_fd_init_module(void)
+{
+ return platform_driver_register(&fd_driver);
+}
+
+static void __exit msm_fd_exit_module(void)
+{
+ platform_driver_unregister(&fd_driver);
+}
+
+module_init(msm_fd_init_module);
+module_exit(msm_fd_exit_module);
+MODULE_DESCRIPTION("MSM FD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_dev.h b/drivers/media/platform/msm/ais/fd/msm_fd_dev.h
new file mode 100644
index 000000000000..c15032256f4d
--- /dev/null
+++ b/drivers/media/platform/msm/ais/fd/msm_fd_dev.h
@@ -0,0 +1,258 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_FD_DEV_H__
+#define __MSM_FD_DEV_H__
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/msm-bus.h>
+#include <media/msm_fd.h>
+#include <linux/dma-buf.h>
+#include <linux/msm_ion.h>
+#include "cam_soc_api.h"
+#include "cam_hw_ops.h"
+#include "msm_cpp.h"
+
+/* Maximum number of result buffers */
+#define MSM_FD_MAX_RESULT_BUFS 5
+/* Max number of clocks defined in device tree */
+#define MSM_FD_MAX_CLK_NUM 15
+/* Max number of clock rates defined in device tree */
+#define MSM_FD_MAX_CLK_RATES 5
+/* Max number of faces which can be detected in one hw processing */
+#define MSM_FD_MAX_FACES_DETECTED 32
+/* Max number of regulators defined in device tree */
+#define MSM_FD_MAX_REGULATOR_NUM 3
+
+/*
+ * struct msm_fd_size - Structure containing FD size-related values.
+ * @width: Image width.
+ * @height: Image height.
+ * @reg_val: Register value for this size.
+ * @work_size: Working buffer size in bytes for this size.
+ */
+struct msm_fd_size {
+ int width;
+ int height;
+ u32 reg_val;
+ int work_size;
+};
+
+/*
+ * struct msm_fd_setings - Structure containing FD settings values.
+ * @min_size_index: Minimum face size array index.
+ * @angle_index: Face detection angle array index.
+ * @direction_index: Face detection direction array index.
+ * @threshold: Face detection threshold value.
+ * @speed: Face detection speed value (it should match with clock rate index).
+ */
+struct msm_fd_setings {
+ unsigned int min_size_index;
+ unsigned int angle_index;
+ unsigned int direction_index;
+ unsigned int threshold;
+ unsigned int speed;
+};
+
+/*
+ * struct msm_fd_format - Structure containing FD format settings.
+ * @size: Pointer to fd size struct used for this format.
+ * @crop: V4l2 crop structure.
+ * @bytesperline: Bytes per line of input image buffer.
+ * @sizeimage: Size of input image buffer.
+ * @pixelformat: Pixel format of input image buffer.
+ */
+struct msm_fd_format {
+ struct msm_fd_size *size;
+ struct v4l2_rect crop;
+ int bytesperline;
+ int sizeimage;
+ u32 pixelformat;
+};
+
+/*
+ * struct msm_fd_mem_pool - Structure containing FD memory pool information.
+ * @fd_device: Pointer to fd device.
+ */
+struct msm_fd_mem_pool {
+ struct msm_fd_device *fd_device;
+};
+
+/*
+ * struct msm_fd_buf_handle - Structure containing FD buffer handle information.
+ * @fd: Ion fd from which this buffer is imported.
+ * @pool: Pointer to FD memory pool struct.
+ * @size: Size of the buffer.
+ * @addr: Address of the FD mmu mapped buffer. This address should be set to FD hw.
+ */
+struct msm_fd_buf_handle {
+ int fd;
+ struct msm_fd_mem_pool *pool;
+ size_t size;
+ ion_phys_addr_t addr;
+};
+
+/*
+ * struct msm_fd_buffer - Vb2 buffer wrapper structure.
+ * @vb: Videobuf 2 buffer structure.
+ * @active: Flag indicating if the buffer is currently used by FD hw.
+ * @completion: Completion to wait on if the buffer is in use by FD hw.
+ * @format: Format information of this buffer.
+ * @settings: Settings value of this buffer.
+ * @work_addr: Working buffer address to be used for this buffer.
+ * @list: List head linking this buffer into the FD device processing queue.
+ */
+struct msm_fd_buffer {
+ struct vb2_buffer vb;
+ atomic_t active;
+ struct completion completion;
+ struct msm_fd_format format;
+ struct msm_fd_setings settings;
+ ion_phys_addr_t work_addr;
+ struct list_head list;
+};
+
+/*
+ * struct msm_fd_stats - Structure containing FD result statistics.
+ * @frame_id: Frame id to which these statistics correspond.
+ * @face_cnt: Number of faces detected and included in face data.
+ * @face_data: Structure containing detected face data information.
+ */
+struct msm_fd_stats {
+ atomic_t frame_id;
+ u32 face_cnt;
+ struct msm_fd_face_data face_data[MSM_FD_MAX_FACES_DETECTED];
+};
+
+/*
+ * struct fd_ctx - Structure containing per-open-file-handle context.
+ * @fd_device: Pointer to fd device.
+ * @fh: V4l2 file handle.
+ * @vb2_q: Videobuf 2 queue.
+ * @sequence: Sequence number for this statistic.
+ * @subscribed_for_event: Flag indicating whether the client has subscribed for events.
+ * @format: Current format.
+ * @settings: Current settings.
+ * @mem_pool: FD hw memory pool.
+ * @stats: Pointer to statistic buffers.
+ * @work_buf: Working memory buffer handle.
+ */
+struct fd_ctx {
+ struct msm_fd_device *fd_device;
+ struct v4l2_fh fh;
+ struct vb2_queue vb2_q;
+ unsigned int sequence;
+ atomic_t subscribed_for_event;
+ struct msm_fd_format format;
+ struct msm_fd_setings settings;
+ struct msm_fd_mem_pool mem_pool;
+ struct msm_fd_stats *stats;
+ struct msm_fd_buf_handle work_buf;
+};
+
+/*
+ * enum msm_fd_device_state - FD device state.
+ * @MSM_FD_DEVICE_IDLE: Device is idle, we can start with processing.
+ * @MSM_FD_DEVICE_RUNNING: Device is running, next processing will be
+ * scheduled from fd irq.
+ */
+enum msm_fd_device_state {
+ MSM_FD_DEVICE_IDLE,
+ MSM_FD_DEVICE_RUNNING,
+};
+
+/*
+ * enum msm_fd_mem_resources - FD device iomem resources.
+ * @MSM_FD_IOMEM_CORE: Index of fd core registers.
+ * @MSM_FD_IOMEM_MISC: Index of fd misc registers.
+ * @MSM_FD_IOMEM_VBIF: Index of fd vbif registers.
+ * @MSM_FD_IOMEM_LAST: Not valid.
+ */
+enum msm_fd_mem_resources {
+ MSM_FD_IOMEM_CORE,
+ MSM_FD_IOMEM_MISC,
+ MSM_FD_IOMEM_VBIF,
+ MSM_FD_IOMEM_LAST
+};
+
+/*
+ * struct msm_fd_device - FD device structure.
+ * @hw_revision: Face detection hw revision.
+ * @lock: Lock used for reference count.
+ * @slock: Spinlock used to protect FD device struct.
+ * @recovery_lock: Lock used for recovery handling.
+ * @ref_count: Device reference count.
+ * @irq_num: Face detection irq number.
+ * @iomem_base: Array of register mappings used by FD device.
+ * @clk_info: Array of clock info used by FD device.
+ * @vdd_info: Array of regulator info used by FD device.
+ * @num_reg: Number of regulators attached to the device.
+ * @irq: Pointer to the fd irq resource.
+ * @clk_num: Number of clocks attached to the device.
+ * @clk_rates_num: Number of clock rate sets defined in device tree.
+ * @clk: Array of clock resources used by fd device.
+ * @clk_rates: Array of clock rate sets.
+ * @bus_client: Memory access bus client.
+ * @iommu_attached_cnt: Iommu attached devices reference count.
+ * @iommu_hdl: Handle for iommu context.
+ * @dev: Pointer to device struct.
+ * @pdev: Pointer to fd platform device.
+ * @v4l2_dev: V4l2 device.
+ * @video: Video device.
+ * @state: FD device state.
+ * @buf_queue: FD device processing queue.
+ * @work_queue: Pointer to FD device IRQ bottom half workqueue.
+ * @work: IRQ bottom half work struct.
+ * @hw_halt_completion: Completes when face detection hw halt completes.
+ * @recovery_mode: Indicates if FD is in recovery mode.
+ * @clk_rate_idx: Index of the currently applied clock rate set.
+ */
+struct msm_fd_device {
+ u32 hw_revision;
+
+ struct mutex lock;
+ spinlock_t slock;
+ struct mutex recovery_lock;
+ int ref_count;
+
+ int irq_num;
+ void __iomem *iomem_base[MSM_FD_IOMEM_LAST];
+ struct msm_cam_clk_info *clk_info;
+ struct msm_cam_regulator *vdd_info;
+ int num_reg;
+ struct resource *irq;
+
+ size_t clk_num;
+ size_t clk_rates_num;
+ struct clk **clk;
+ uint32_t **clk_rates;
+ uint32_t bus_client;
+
+ unsigned int iommu_attached_cnt;
+
+ int iommu_hdl;
+ struct device *dev;
+ struct platform_device *pdev;
+ struct v4l2_device v4l2_dev;
+ struct video_device video;
+
+ enum msm_fd_device_state state;
+ struct list_head buf_queue;
+ struct workqueue_struct *work_queue;
+ struct work_struct work;
+ struct completion hw_halt_completion;
+ int recovery_mode;
+ uint32_t clk_rate_idx;
+};
+
+#endif /* __MSM_FD_DEV_H__ */
diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_hw.c b/drivers/media/platform/msm/ais/fd/msm_fd_hw.c
new file mode 100644
index 000000000000..415658c1c3e8
--- /dev/null
+++ b/drivers/media/platform/msm/ais/fd/msm_fd_hw.c
@@ -0,0 +1,1313 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/msm_ion.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <media/videobuf2-core.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include "msm_fd_dev.h"
+#include "msm_fd_hw.h"
+#include "msm_fd_regs.h"
+#include "cam_smmu_api.h"
+#include "msm_camera_io_util.h"
+
+/* After which revision misc irq for engine is needed */
+#define MSM_FD_MISC_IRQ_FROM_REV 0x10010000
+/* Face detection workqueue name */
+#define MSM_FD_WORQUEUE_NAME "face-detection"
+/* Face detection bus client name */
+#define MSM_FD_BUS_CLIENT_NAME "msm_face_detect"
+/* Face detection processing timeout in ms */
+#define MSM_FD_PROCESSING_TIMEOUT_MS 150
+/* Face detection halt timeout in ms */
+#define MSM_FD_HALT_TIMEOUT_MS 100
+/* Smmu callback name */
+#define MSM_FD_SMMU_CB_NAME "camera_fd"
+/*
+ * enum msm_fd_dt_reg_setting_index - FD register setting entries in DT.
+ * @MSM_FD_REG_ADDR_OFFSET_IDX: Register address offset index.
+ * @MSM_FD_REG_VALUE_IDX: Register value index.
+ * @MSM_FD_REG_MASK_IDX: Register mask index.
+ * @MSM_FD_REG_LAST_IDX: Index count.
+ */
+enum msm_fd_dt_reg_setting_index {
+ MSM_FD_REG_ADDR_OFFSET_IDX,
+ MSM_FD_REG_VALUE_IDX,
+ MSM_FD_REG_MASK_IDX,
+ MSM_FD_REG_LAST_IDX
+};
+
+/*
+ * msm_fd_hw_read_reg - Fd read from register.
+ * @fd: Pointer to fd device.
+ * @base_idx: Fd memory resource index.
+ * @reg: Register address to read from.
+ */
+static inline u32 msm_fd_hw_read_reg(struct msm_fd_device *fd,
+ enum msm_fd_mem_resources base_idx, u32 reg)
+{
+ return msm_camera_io_r(fd->iomem_base[base_idx] + reg);
+}
+
+/*
+ * msm_fd_hw_write_reg - Fd write to register.
+ * @fd: Pointer to fd device.
+ * @base_idx: Fd memory resource index.
+ * @reg: Register address to write to.
+ * @value: Value to be written.
+ */
+static inline void msm_fd_hw_write_reg(struct msm_fd_device *fd,
+ enum msm_fd_mem_resources base_idx, u32 reg, u32 value)
+{
+ msm_camera_io_w(value, fd->iomem_base[base_idx] + reg);
+}
+
+/*
+ * msm_fd_hw_reg_clr - Fd clear register bits.
+ * @fd: Pointer to fd device.
+ * @mmio_range: Fd memory resource index.
+ * @reg: Register address to modify.
+ * @clr_bits: Bits to be cleared in the register.
+ */
+static inline void msm_fd_hw_reg_clr(struct msm_fd_device *fd,
+ enum msm_fd_mem_resources mmio_range, u32 reg, u32 clr_bits)
+{
+ u32 bits = msm_fd_hw_read_reg(fd, mmio_range, reg);
+
+ msm_fd_hw_write_reg(fd, mmio_range, reg, (bits & ~clr_bits));
+}
+
+/*
+ * msm_fd_hw_reg_set - Fd set register bits.
+ * @fd: Pointer to fd device.
+ * @mmio_range: Fd memory resource index.
+ * @reg: Register address to modify.
+ * @set_bits: Bits to be set in the register.
+ */
+static inline void msm_fd_hw_reg_set(struct msm_fd_device *fd,
+ enum msm_fd_mem_resources mmio_range, u32 reg, u32 set_bits)
+{
+ u32 bits = msm_fd_hw_read_reg(fd, mmio_range, reg);
+
+ msm_fd_hw_write_reg(fd, mmio_range, reg, (bits | set_bits));
+}
+
+/*
+ * msm_fd_hw_set_size_mode - Fd set size mode register.
+ * @fd: Pointer to fd device.
+ * @mode: Size mode to be set.
+ */
+static inline void msm_fd_hw_set_size_mode(struct msm_fd_device *fd, u32 mode)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_IMAGE_SIZE, mode);
+}
+
+/*
+ * msm_fd_hw_set_crop - Fd set crop registers.
+ * @fd: Pointer to fd device.
+ * @crop: Pointer to v4l2 crop struct containing the crop information.
+ */
+static inline void msm_fd_hw_set_crop(struct msm_fd_device *fd,
+ struct v4l2_rect *crop)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_START_X,
+ (crop->top & MSM_FD_START_X_MASK));
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_START_Y,
+ (crop->left & MSM_FD_START_Y_MASK));
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_SIZE_X,
+ (crop->width & MSM_FD_SIZE_X_MASK));
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_SIZE_Y,
+ (crop->height & MSM_FD_SIZE_Y_MASK));
+}
+
+/*
+ * msm_fd_hw_set_bytesperline - Fd set bytes per line register.
+ * @fd: Pointer to fd device.
+ * @b: Bytes per line to be set.
+ */
+static inline void msm_fd_hw_set_bytesperline(struct msm_fd_device *fd, u32 b)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_LINE_BYTES,
+ (b & MSM_FD_LINE_BYTES_MASK));
+}
+
+/*
+ * msm_fd_hw_set_image_addr - Fd set image address.
+ * @fd: Pointer to fd device.
+ * @addr: Input image address to be set.
+ */
+static inline void msm_fd_hw_set_image_addr(struct msm_fd_device *fd, u32 addr)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_IMAGE_ADDR, addr);
+}
+
+/*
+ * msm_fd_hw_set_work_addr - Fd set working buffer address.
+ * @fd: Pointer to fd device.
+ * @addr: Working buffer address to be set.
+ */
+static inline void msm_fd_hw_set_work_addr(struct msm_fd_device *fd, u32 addr)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_WORK_ADDR, addr);
+}
+
+/*
+ * msm_fd_hw_set_direction_angle - Fd set face direction and face angle.
+ * @fd: Pointer to fd device.
+ * @direction: Face direction to be set.
+ * @angle: Face angle to be set.
+ */
+static inline void msm_fd_hw_set_direction_angle(struct msm_fd_device *fd,
+ u32 direction, u32 angle)
+{
+ u32 reg;
+ u32 value;
+
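+ /*
+ * Combine the direction bits with the angle, encoded as a single bit
+ * above the direction field, and clamp to the register maximum.
+ */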
+ value = direction | (angle ? 1 << (angle + 1) : 0);
+ if (value > MSM_FD_CONDT_DIR_MAX)
+ value = MSM_FD_CONDT_DIR_MAX;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONDT);
+
+ reg &= ~MSM_FD_CONDT_DIR_MASK;
+ reg |= (value << MSM_FD_CONDT_DIR_SHIFT);
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONDT, reg);
+}
+
+/*
+ * msm_fd_hw_set_min_face - Fd set minimum face size register.
+ * @fd: Pointer to fd device.
+ * @size: Minimum face size to be set.
+ */
+static inline void msm_fd_hw_set_min_face(struct msm_fd_device *fd, u32 size)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONDT);
+
+ reg &= ~MSM_FD_CONDT_MIN_MASK;
+ reg |= (size << MSM_FD_CONDT_MIN_SHIFT);
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONDT, reg);
+}
+
+/*
+ * msm_fd_hw_set_threshold - Fd set detection threshold register.
+ * @fd: Pointer to fd device.
+ * @thr: Detection threshold value to be set.
+ */
+static inline void msm_fd_hw_set_threshold(struct msm_fd_device *fd, u32 thr)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_DHINT,
+ (thr & MSM_FD_DHINT_MASK));
+}
+
+/*
+ * msm_fd_hw_srst - Sw reset control registers.
+ * @fd: Pointer to fd device.
+ *
+ * This bit needs to be toggled before every processing run.
+ * This function sets the sw reset control bit to 1 and then back to 0.
+ */
+static inline void msm_fd_hw_srst(struct msm_fd_device *fd)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL,
+ MSM_FD_CONTROL_SRST);
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL, 0);
+}
+
+/*
+ * msm_fd_hw_get_face_count - Fd read face count register.
+ * @fd: Pointer to fd device.
+ */
+int msm_fd_hw_get_face_count(struct msm_fd_device *fd)
+{
+ u32 reg;
+ u32 value;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_RESULT_CNT);
+
+ value = reg & MSM_FD_RESULT_CNT_MASK;
+ if (value > MSM_FD_MAX_FACES_DETECTED) {
+ dev_warn(fd->dev, "Face count %d out of limit\n", value);
+ value = MSM_FD_MAX_FACES_DETECTED;
+ }
+
+ return value;
+}
+
+/*
+ * msm_fd_hw_run - Starts face detection engine.
+ * @fd: Pointer to fd device.
+ *
+ * Before calling this function make sure that the control sw reset has been
+ * performed (see function msm_fd_hw_srst).
+ * NOTE: The engine needs to be reset before it is started again.
+ */
+static inline void msm_fd_hw_run(struct msm_fd_device *fd)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL,
+ MSM_FD_CONTROL_RUN);
+}
+
+/*
+ * msm_fd_hw_is_finished - Check if fd hw engine is done with processing.
+ * @fd: Pointer to fd device.
+ *
+ * NOTE: If finish bit is not set, we should not read the result.
+ */
+static int msm_fd_hw_is_finished(struct msm_fd_device *fd)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL);
+
+ return reg & MSM_FD_CONTROL_FINISH;
+}
+
+/*
+ * msm_fd_hw_is_runnig - Check if fd hw engine is busy.
+ * @fd: Pointer to fd device.
+ */
+static int msm_fd_hw_is_runnig(struct msm_fd_device *fd)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL);
+
+ return reg & MSM_FD_CONTROL_RUN;
+}
+
+/*
+ * msm_fd_hw_misc_irq_is_core - Check if fd received misc core irq.
+ * @fd: Pointer to fd device.
+ */
+static int msm_fd_hw_misc_irq_is_core(struct msm_fd_device *fd)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_MISC,
+ MSM_FD_MISC_IRQ_STATUS);
+
+ return reg & MSM_FD_MISC_IRQ_STATUS_CORE_IRQ;
+}
+
+/*
+ * msm_fd_hw_misc_irq_is_halt - Check if fd received misc halt irq.
+ * @fd: Pointer to fd device.
+ */
+static int msm_fd_hw_misc_irq_is_halt(struct msm_fd_device *fd)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_MISC,
+ MSM_FD_MISC_IRQ_STATUS);
+
+ return reg & MSM_FD_MISC_IRQ_STATUS_HALT_REQ;
+}
+
+/*
+ * msm_fd_hw_misc_clear_all_irq - Clear all misc irq statuses.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_misc_clear_all_irq(struct msm_fd_device *fd)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_MISC, MSM_FD_MISC_IRQ_CLEAR,
+ MSM_FD_MISC_IRQ_CLEAR_HALT | MSM_FD_MISC_IRQ_CLEAR_CORE);
+}
+
+/*
+ * msm_fd_hw_misc_irq_enable - Enable fd misc core and halt irq.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_misc_irq_enable(struct msm_fd_device *fd)
+{
+ msm_fd_hw_reg_set(fd, MSM_FD_IOMEM_MISC, MSM_FD_MISC_IRQ_MASK,
+ MSM_FD_MISC_IRQ_CLEAR_HALT | MSM_FD_MISC_IRQ_CLEAR_CORE);
+}
+
+/*
+ * msm_fd_hw_misc_irq_disable - Disable fd misc core and halt irq.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_misc_irq_disable(struct msm_fd_device *fd)
+{
+ msm_fd_hw_reg_clr(fd, MSM_FD_IOMEM_MISC, MSM_FD_MISC_IRQ_MASK,
+ MSM_FD_MISC_IRQ_CLEAR_HALT | MSM_FD_MISC_IRQ_CLEAR_CORE);
+}
+
+/*
+ * msm_fd_hw_get_revision - Get hw revision and store in to device.
+ * @fd: Pointer to fd device.
+ */
+int msm_fd_hw_get_revision(struct msm_fd_device *fd)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_MISC,
+ MSM_FD_MISC_HW_VERSION);
+
+ dev_dbg(fd->dev, "Face detection hw revision 0x%x\n", reg);
+
+ return reg;
+}
+
+/*
+ * msm_fd_hw_get_result_x - Get fd result center x coordinate.
+ * @fd: Pointer to fd device.
+ * @idx: Result face index
+ */
+int msm_fd_hw_get_result_x(struct msm_fd_device *fd, int idx)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE,
+ MSM_FD_RESULT_CENTER_X(idx));
+
+ return reg;
+}
+
+/*
+ * msm_fd_hw_get_result_y - Get fd result center y coordinate.
+ * @fd: Pointer to fd device.
+ * @idx: Result face index
+ */
+int msm_fd_hw_get_result_y(struct msm_fd_device *fd, int idx)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE,
+ MSM_FD_RESULT_CENTER_Y(idx));
+
+ return reg;
+}
+
+/*
+ * msm_fd_hw_get_result_conf_size - Get fd result confidence level and size.
+ * @fd: Pointer to fd device.
+ * @idx: Result face index.
+ * @conf: Pointer to confidence value to be filled.
+ * @size: Pointer to size value to be filled.
+ */
+void msm_fd_hw_get_result_conf_size(struct msm_fd_device *fd,
+ int idx, u32 *conf, u32 *size)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE,
+ MSM_FD_RESULT_CONF_SIZE(idx));
+
+ *conf = (reg >> MSM_FD_RESULT_CONF_SHIFT) & MSM_FD_RESULT_CONF_MASK;
+ *size = (reg >> MSM_FD_RESULT_SIZE_SHIFT) & MSM_FD_RESULT_SIZE_MASK;
+}
+
+/*
+ * msm_fd_hw_get_result_angle_pose - Get fd result angle and pose.
+ * @fd: Pointer to fd device.
+ * @idx: Result face index.
+ * @angle: Pointer to angle value to be filled.
+ * @pose: Pointer to pose value to be filled.
+ */
+void msm_fd_hw_get_result_angle_pose(struct msm_fd_device *fd, int idx,
+ u32 *angle, u32 *pose)
+{
+ u32 reg;
+ u32 pose_reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE,
+ MSM_FD_RESULT_ANGLE_POSE(idx));
+ *angle = (reg >> MSM_FD_RESULT_ANGLE_SHIFT) & MSM_FD_RESULT_ANGLE_MASK;
+ pose_reg = (reg >> MSM_FD_RESULT_POSE_SHIFT) & MSM_FD_RESULT_POSE_MASK;
+
+ switch (pose_reg) {
+ case MSM_FD_RESULT_POSE_FRONT:
+ *pose = MSM_FD_POSE_FRONT;
+ break;
+ case MSM_FD_RESULT_POSE_RIGHT_DIAGONAL:
+ *pose = MSM_FD_POSE_RIGHT_DIAGONAL;
+ break;
+ case MSM_FD_RESULT_POSE_RIGHT:
+ *pose = MSM_FD_POSE_RIGHT;
+ break;
+ case MSM_FD_RESULT_POSE_LEFT_DIAGONAL:
+ *pose = MSM_FD_POSE_LEFT_DIAGONAL;
+ break;
+ case MSM_FD_RESULT_POSE_LEFT:
+ *pose = MSM_FD_POSE_LEFT;
+ break;
+ default:
+ dev_err(fd->dev, "Invalid pose from the engine\n");
+ *pose = MSM_FD_POSE_FRONT;
+ break;
+ }
+}
+
+/*
+ * msm_fd_hw_misc_irq_supported - Check if misc irq is supported.
+ * @fd: Pointer to fd device.
+ */
+static int msm_fd_hw_misc_irq_supported(struct msm_fd_device *fd)
+{
+ return fd->hw_revision >= MSM_FD_MISC_IRQ_FROM_REV;
+}
+
+/*
+ * msm_fd_hw_halt - Halt fd core.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_halt(struct msm_fd_device *fd)
+{
+ unsigned long time;
+
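+ /*
+ * If the misc block is present, request a halt and wait for the halt
+ * irq before running the reset sequence.
+ */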
+ if (msm_fd_hw_misc_irq_supported(fd)) {
+ init_completion(&fd->hw_halt_completion);
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_MISC, MSM_FD_HW_STOP, 1);
+
+ time = wait_for_completion_timeout(&fd->hw_halt_completion,
+ msecs_to_jiffies(MSM_FD_HALT_TIMEOUT_MS));
+ if (!time)
+ dev_err(fd->dev, "Face detection halt timeout\n");
+
+ /* Reset sequence after halt */
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_MISC, MSM_FD_MISC_SW_RESET,
+ MSM_FD_MISC_SW_RESET_SET);
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL,
+ MSM_FD_CONTROL_SRST);
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_MISC,
+ MSM_FD_MISC_SW_RESET, 0);
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL, 0);
+ }
+}
+
+/*
+ * msm_fd_hw_core_irq - Face detection core irq handler.
+ * @irq: Irq number.
+ * @dev_id: Pointer to fd device.
+ */
+static irqreturn_t msm_fd_hw_core_irq(int irq, void *dev_id)
+{
+ struct msm_fd_device *fd = dev_id;
+
+ if (msm_fd_hw_is_finished(fd))
+ queue_work(fd->work_queue, &fd->work);
+ else
+ dev_err(fd->dev, "Something wrong! FD still running\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * msm_fd_hw_misc_irq - Face detection misc irq handler.
+ * @irq: Irq number.
+ * @dev_id: Pointer to fd device.
+ */
+static irqreturn_t msm_fd_hw_misc_irq(int irq, void *dev_id)
+{
+ struct msm_fd_device *fd = dev_id;
+
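+ /* The misc irq multiplexes the core finish irq and the halt ack */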
+ if (msm_fd_hw_misc_irq_is_core(fd))
+ msm_fd_hw_core_irq(irq, dev_id);
+
+ if (msm_fd_hw_misc_irq_is_halt(fd))
+ complete_all(&fd->hw_halt_completion);
+
+ msm_fd_hw_misc_clear_all_irq(fd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * msm_fd_hw_request_irq - Request fd irq and create the irq bottom half workqueue.
+ * @pdev: Pointer to platform device.
+ * @fd: Pointer to fd device.
+ * @work_func: Pointer to work func used for irq bottom half.
+ */
+int msm_fd_hw_request_irq(struct platform_device *pdev,
+ struct msm_fd_device *fd, work_func_t work_func)
+{
+ int ret;
+
+ fd->irq = msm_camera_get_irq(pdev, "fd");
+ if (!fd->irq) {
+ dev_err(fd->dev, "Can not get fd core irq resource\n");
+ ret = -ENODEV;
+ goto error_irq;
+ }
+
+ /* If vbif is shared we will need wrapper irq for releasing vbif */
+ if (msm_fd_hw_misc_irq_supported(fd)) {
+ ret = msm_camera_register_irq(pdev,
+ fd->irq, msm_fd_hw_misc_irq,
+ IRQF_TRIGGER_RISING, "fd", fd);
+ if (ret) {
+ dev_err(fd->dev, "Can not claim wrapper IRQ\n");
+ goto error_irq;
+ }
+ } else {
+ ret = msm_camera_register_irq(pdev,
+ fd->irq, msm_fd_hw_core_irq,
+ IRQF_TRIGGER_RISING, "fd", fd);
+ if (ret) {
+ dev_err(&pdev->dev, "Can not claim core IRQ\n");
+ goto error_irq;
+ }
+
+ }
+
+ fd->work_queue = alloc_workqueue(MSM_FD_WORQUEUE_NAME,
+ WQ_HIGHPRI | WQ_UNBOUND, 0);
+ if (!fd->work_queue) {
+ dev_err(fd->dev, "Can not register workqueue\n");
+ ret = -ENOMEM;
+ goto error_alloc_workqueue;
+ }
+ INIT_WORK(&fd->work, work_func);
+
+ return 0;
+
+error_alloc_workqueue:
+ msm_camera_unregister_irq(pdev, fd->irq, fd);
+error_irq:
+ return ret;
+}
+
+/*
+ * msm_fd_hw_release_irq - Free core/wrapper irq and destroy the workqueue.
+ * @fd: Pointer to fd device.
+ */
+void msm_fd_hw_release_irq(struct msm_fd_device *fd)
+{
+ if (fd->irq)
+ msm_camera_unregister_irq(fd->pdev, fd->irq, fd);
+
+ if (fd->work_queue) {
+ destroy_workqueue(fd->work_queue);
+ fd->work_queue = NULL;
+ }
+}
+
+/*
+ * msm_fd_hw_set_dt_parms_by_name() - read DT params and write to registers.
+ * @fd: Pointer to fd device.
+ * @dt_prop_name: Name of the device tree property to read.
+ * @base_idx: Fd memory resource index.
+ *
+ * This function reads register offset and value pairs from dtsi based on
+ * device tree property name and writes to FD registers.
+ *
+ * Return: 0 on success and negative error on failure.
+ */
+int32_t msm_fd_hw_set_dt_parms_by_name(struct msm_fd_device *fd,
+ const char *dt_prop_name,
+ enum msm_fd_mem_resources base_idx)
+{
+ struct device_node *of_node;
+ int32_t i = 0, rc = 0;
+ uint32_t *dt_reg_settings = NULL;
+ uint32_t dt_count = 0;
+
+ of_node = fd->dev->of_node;
+ pr_debug("%s:%d E\n", __func__, __LINE__);
+
+ if (!of_get_property(of_node, dt_prop_name, &dt_count)) {
+ pr_err("%s: Error property does not exist\n", __func__);
+ return -ENOENT;
+ }
+ if (dt_count % (sizeof(int32_t) * MSM_FD_REG_LAST_IDX)) {
+ pr_err("%s: Error invalid entries\n", __func__);
+ return -EINVAL;
+ }
+ dt_count /= sizeof(int32_t);
+ if (dt_count != 0) {
+ dt_reg_settings = kcalloc(dt_count,
+ sizeof(uint32_t),
+ GFP_KERNEL);
+
+ if (!dt_reg_settings)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(of_node,
+ dt_prop_name,
+ dt_reg_settings,
+ dt_count);
+ if (rc < 0) {
+ pr_err("%s: No reg info\n", __func__);
+ kfree(dt_reg_settings);
+ return -EINVAL;
+ }
+
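+ /*
+ * Each DT entry is an (offset, value, mask) triplet: clear the
+ * masked bits first, then set the masked value.
+ */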
+ for (i = 0; i < dt_count; i = i + MSM_FD_REG_LAST_IDX) {
+ msm_fd_hw_reg_clr(fd, base_idx,
+ dt_reg_settings[i + MSM_FD_REG_ADDR_OFFSET_IDX],
+ dt_reg_settings[i + MSM_FD_REG_MASK_IDX]);
+ msm_fd_hw_reg_set(fd, base_idx,
+ dt_reg_settings[i + MSM_FD_REG_ADDR_OFFSET_IDX],
+ dt_reg_settings[i + MSM_FD_REG_VALUE_IDX] &
+ dt_reg_settings[i + MSM_FD_REG_MASK_IDX]);
+ pr_debug("%s:%d] %pK %08x\n", __func__, __LINE__,
+ fd->iomem_base[base_idx] +
+ dt_reg_settings[i + MSM_FD_REG_ADDR_OFFSET_IDX],
+ dt_reg_settings[i + MSM_FD_REG_VALUE_IDX] &
+ dt_reg_settings[i + MSM_FD_REG_MASK_IDX]);
+ }
+ kfree(dt_reg_settings);
+ }
+ return 0;
+}
+
+/*
+ * msm_fd_hw_set_dt_parms() - set FD device tree configuration.
+ * @fd: Pointer to fd device.
+ *
+ * This function holds an array of device tree property names and calls
+ * msm_fd_hw_set_dt_parms_by_name() for each property.
+ *
+ * Return: 0 on success and negative error on failure.
+ */
+int msm_fd_hw_set_dt_parms(struct msm_fd_device *fd)
+{
+ int rc = 0;
+ uint8_t dt_prop_cnt = MSM_FD_IOMEM_LAST;
+ char *dt_prop_name[MSM_FD_IOMEM_LAST] = {"qcom,fd-core-reg-settings",
+ "qcom,fd-misc-reg-settings", "qcom,fd-vbif-reg-settings"};
+
+ while (dt_prop_cnt) {
+ dt_prop_cnt--;
+ rc = msm_fd_hw_set_dt_parms_by_name(fd,
+ dt_prop_name[dt_prop_cnt],
+ dt_prop_cnt);
+ if (rc == -ENOENT) {
+ pr_debug("%s: No %s property\n", __func__,
+ dt_prop_name[dt_prop_cnt]);
+ rc = 0;
+ } else if (rc < 0) {
+ pr_err("%s: %s params set fail\n", __func__,
+ dt_prop_name[dt_prop_cnt]);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+/*
+ * msm_fd_hw_release_mem_resources - Releases memory resources.
+ * @fd: Pointer to fd device.
+ */
+void msm_fd_hw_release_mem_resources(struct msm_fd_device *fd)
+{
+ msm_camera_put_reg_base(fd->pdev,
+ fd->iomem_base[MSM_FD_IOMEM_MISC], "fd_misc", true);
+ msm_camera_put_reg_base(fd->pdev,
+ fd->iomem_base[MSM_FD_IOMEM_CORE], "fd_core", true);
+ msm_camera_put_reg_base(fd->pdev,
+ fd->iomem_base[MSM_FD_IOMEM_VBIF], "fd_vbif", false);
+}
+
+/*
+ * msm_fd_hw_get_mem_resources - Get memory resources.
+ * @pdev: Pointer to fd platform device.
+ * @fd: Pointer to fd device.
+ *
+ * Get and ioremap platform memory resources.
+ */
+int msm_fd_hw_get_mem_resources(struct platform_device *pdev,
+ struct msm_fd_device *fd)
+{
+ int ret = 0;
+
+ /* Prepare memory resources */
+ fd->iomem_base[MSM_FD_IOMEM_CORE] =
+ msm_camera_get_reg_base(pdev, "fd_core", true);
+ if (!fd->iomem_base[MSM_FD_IOMEM_CORE]) {
+ dev_err(fd->dev, "%s can not map fd_core region\n", __func__);
+ ret = -ENODEV;
+ goto fd_core_base_failed;
+ }
+
+ fd->iomem_base[MSM_FD_IOMEM_MISC] =
+ msm_camera_get_reg_base(pdev, "fd_misc", true);
+ if (!fd->iomem_base[MSM_FD_IOMEM_MISC]) {
+ dev_err(fd->dev, "%s can not map fd_misc region\n", __func__);
+ ret = -ENODEV;
+ goto fd_misc_base_failed;
+ }
+
+ fd->iomem_base[MSM_FD_IOMEM_VBIF] =
+ msm_camera_get_reg_base(pdev, "fd_vbif", false);
+ if (!fd->iomem_base[MSM_FD_IOMEM_VBIF]) {
+ dev_err(fd->dev, "%s can not map fd_vbif region\n", __func__);
+ ret = -ENODEV;
+ goto fd_vbif_base_failed;
+ }
+
+ return ret;
+fd_vbif_base_failed:
+ msm_camera_put_reg_base(pdev,
+ fd->iomem_base[MSM_FD_IOMEM_MISC], "fd_misc", true);
+fd_misc_base_failed:
+ msm_camera_put_reg_base(pdev,
+ fd->iomem_base[MSM_FD_IOMEM_CORE], "fd_core", true);
+fd_core_base_failed:
+ return ret;
+}
+
+/*
+ * msm_fd_hw_bus_request - Request bus for memory access.
+ * @fd: Pointer to fd device.
+ * @idx: Bus bandwidth array index described in device tree.
+ */
+static int msm_fd_hw_bus_request(struct msm_fd_device *fd, unsigned int idx)
+{
+ int ret;
+
+ ret = msm_camera_update_bus_vector(CAM_BUS_CLIENT_FD, idx);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail bus scale update %d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_fd_hw_set_clock_rate_idx - Set clock rate based on the index.
+ * @fd: Pointer to fd device.
+ * @idx: Clock Array index described in device tree.
+ */
+static int msm_fd_hw_set_clock_rate_idx(struct msm_fd_device *fd,
+ unsigned int idx)
+{
+ int ret;
+ int i;
+
+ if (idx >= fd->clk_rates_num) {
+ dev_err(fd->dev, "Invalid clock index %u\n", idx);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < fd->clk_num; i++) {
+ ret = msm_camera_clk_set_rate(&fd->pdev->dev,
+ fd->clk[i], fd->clk_rates[idx][i]);
+ if (ret < 0) {
+ dev_err(fd->dev, "fail set rate on idx[%u][%u]\n",
+ idx, i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * msm_fd_hw_update_settings() - API to set clock rate and bus settings
+ * @fd: Pointer to fd device.
+ * @buf: fd buffer
+ */
+static int msm_fd_hw_update_settings(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buf)
+{
+ int ret = 0;
+ uint32_t clk_rate_idx;
+
+ if (!buf)
+ return 0;
+
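+ /*
+ * The buffer's requested speed doubles as an index into the clock
+ * rate and bus vector tables; skip the update if it is unchanged.
+ */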
+ clk_rate_idx = buf->settings.speed;
+ if (fd->clk_rate_idx == clk_rate_idx)
+ return 0;
+
+ if (fd->bus_client) {
+ ret = msm_fd_hw_bus_request(fd, clk_rate_idx);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail bus scale update %d\n", ret);
+ return -EINVAL;
+ }
+ }
+
+ ret = msm_fd_hw_set_clock_rate_idx(fd, clk_rate_idx);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail to set clock rate idx\n");
+ goto end;
+ }
+ dev_dbg(fd->dev, "set clk %d %d", fd->clk_rate_idx, clk_rate_idx);
+ fd->clk_rate_idx = clk_rate_idx;
+
+end:
+ return ret;
+}
+
+/*
+ * msm_fd_hw_get - Get fd hw for performing any hw operation.
+ * @fd: Pointer to fd device.
+ * @clock_rate_idx: Clock rate index.
+ *
+ * Prepare fd hw for operation. The reference count is protected by the
+ * fd device mutex.
+ */
+int msm_fd_hw_get(struct msm_fd_device *fd, unsigned int clock_rate_idx)
+{
+ int ret;
+
+ mutex_lock(&fd->lock);
+
+ if (fd->ref_count == 0) {
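+ /*
+ * First user: power up the block - regulators, bus vote, clock
+ * rates, clock enable, misc irq and device tree register settings.
+ */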
+ ret =
+ msm_camera_regulator_enable(fd->vdd_info,
+ fd->num_reg, true);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail to enable vdd\n");
+ goto error;
+ }
+
+ ret = msm_fd_hw_bus_request(fd, clock_rate_idx);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail bus request\n");
+ goto error_bus_request;
+ }
+ ret = msm_fd_hw_set_clock_rate_idx(fd, clock_rate_idx);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail to set clock rate idx\n");
+ goto error_clocks;
+ }
+ ret = msm_camera_clk_enable(&fd->pdev->dev, fd->clk_info,
+ fd->clk, fd->clk_num, true);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail clk enable request\n");
+ goto error_clocks;
+ }
+
+ if (msm_fd_hw_misc_irq_supported(fd))
+ msm_fd_hw_misc_irq_enable(fd);
+
+ ret = msm_fd_hw_set_dt_parms(fd);
+ if (ret < 0)
+ goto error_set_dt;
+
+ fd->clk_rate_idx = clock_rate_idx;
+ }
+
+ fd->ref_count++;
+ mutex_unlock(&fd->lock);
+
+ return 0;
+
+error_set_dt:
+ if (msm_fd_hw_misc_irq_supported(fd))
+ msm_fd_hw_misc_irq_disable(fd);
+ msm_camera_clk_enable(&fd->pdev->dev, fd->clk_info,
+ fd->clk, fd->clk_num, false);
+error_clocks:
+error_bus_request:
+ msm_camera_regulator_enable(fd->vdd_info, fd->num_reg, false);
+error:
+ mutex_unlock(&fd->lock);
+ return ret;
+}
+
+/*
+ * msm_fd_hw_put - Put fd hw.
+ * @fd: Pointer to fd device.
+ *
+ * Release fd hw. The reference count is protected by the
+ * fd device mutex.
+ */
+void msm_fd_hw_put(struct msm_fd_device *fd)
+{
+ mutex_lock(&fd->lock);
+ WARN_ON(fd->ref_count == 0);
+
+ if (--fd->ref_count == 0) {
+ msm_fd_hw_halt(fd);
+
+ if (msm_fd_hw_misc_irq_supported(fd))
+ msm_fd_hw_misc_irq_disable(fd);
+
+ /* vector index 0 is 0 ab and 0 ib */
+ msm_fd_hw_bus_request(fd, 0);
+ msm_camera_clk_enable(&fd->pdev->dev, fd->clk_info,
+ fd->clk, fd->clk_num, false);
+ msm_camera_regulator_enable(fd->vdd_info, fd->num_reg, false);
+ }
+ mutex_unlock(&fd->lock);
+}
+
+/*
+ * msm_fd_hw_attach_iommu - Attach iommu to face detection engine.
+ * @fd: Pointer to fd device.
+ *
+ * The iommu attach reference count is protected by the
+ * fd device mutex.
+ */
+static int msm_fd_hw_attach_iommu(struct msm_fd_device *fd)
+{
+ int ret = -EINVAL;
+
+ mutex_lock(&fd->lock);
+
+ if (fd->iommu_attached_cnt == UINT_MAX) {
+ dev_err(fd->dev, "Max count reached! can not attach iommu\n");
+ goto error;
+ }
+
+ if (fd->iommu_attached_cnt == 0) {
+ ret = cam_smmu_get_handle(MSM_FD_SMMU_CB_NAME, &fd->iommu_hdl);
+ if (ret < 0) {
+ dev_err(fd->dev, "get handle failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ ret = cam_smmu_ops(fd->iommu_hdl, CAM_SMMU_ATTACH);
+ if (ret < 0) {
+ dev_err(fd->dev, "Can not attach iommu domain.\n");
+ goto error_attach;
+ }
+ }
+ fd->iommu_attached_cnt++;
+ mutex_unlock(&fd->lock);
+
+ return 0;
+
+error_attach:
+ cam_smmu_destroy_handle(fd->iommu_hdl);
+error:
+ mutex_unlock(&fd->lock);
+ return ret;
+}
+
+/*
+ * msm_fd_hw_detach_iommu - Detach iommu from face detection engine.
+ * @fd: Pointer to fd device.
+ *
+ * The iommu detach reference count is protected by the
+ * fd device mutex.
+ */
+static void msm_fd_hw_detach_iommu(struct msm_fd_device *fd)
+{
+ mutex_lock(&fd->lock);
+ if (fd->iommu_attached_cnt == 0) {
+ dev_err(fd->dev, "There is no attached device\n");
+ mutex_unlock(&fd->lock);
+ return;
+ }
+ if (--fd->iommu_attached_cnt == 0) {
+ cam_smmu_ops(fd->iommu_hdl, CAM_SMMU_DETACH);
+ cam_smmu_destroy_handle(fd->iommu_hdl);
+ }
+ mutex_unlock(&fd->lock);
+}
+
+/*
+ * msm_fd_hw_map_buffer - Map buffer to fd hw mmu.
+ * @pool: Pointer to fd memory pool.
+ * @fd: Ion fd.
+ * @buf: Fd buffer handle, for storing mapped buffer information.
+ *
+ * It will map ion fd to fd hw mmu.
+ */
+int msm_fd_hw_map_buffer(struct msm_fd_mem_pool *pool, int fd,
+ struct msm_fd_buf_handle *buf)
+{
+ int ret;
+
+ if (!pool || fd < 0)
+ return -EINVAL;
+
+ ret = msm_fd_hw_attach_iommu(pool->fd_device);
+ if (ret < 0)
+ return -ENOMEM;
+
+ buf->pool = pool;
+ buf->fd = fd;
+ ret = cam_smmu_get_phy_addr(pool->fd_device->iommu_hdl,
+ buf->fd, CAM_SMMU_MAP_RW,
+ &buf->addr, &buf->size);
+ if (ret < 0) {
+ pr_err("Error: cannot get phy addr\n");
+ return -ENOMEM;
+ }
+ return buf->size;
+}
+
+/*
+ * msm_fd_hw_unmap_buffer - Unmap buffer from fd hw mmu.
+ * @buf: Fd buffer handle, for storing mapped buffer information.
+ */
+void msm_fd_hw_unmap_buffer(struct msm_fd_buf_handle *buf)
+{
+ if (buf->size) {
+ cam_smmu_put_phy_addr(buf->pool->fd_device->iommu_hdl,
+ buf->fd);
+ msm_fd_hw_detach_iommu(buf->pool->fd_device);
+ }
+
+ buf->fd = -1;
+ buf->pool = NULL;
+}
+
+/*
+ * msm_fd_hw_enable - Configure and enable fd hw.
+ * @fd: Fd device.
+ * @buffer: Buffer to be processed.
+ *
+ * Configures and starts fd processing with the given buffer.
+ * NOTE: Fd will not be enabled if engine is in running state.
+ */
+static int msm_fd_hw_enable(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer)
+{
+ struct msm_fd_buf_handle *buf_handle =
+ buffer->vb.planes[0].mem_priv;
+
+ if (msm_fd_hw_is_runnig(fd)) {
+ dev_err(fd->dev, "Device is busy we can not enable\n");
+ return 0;
+ }
+
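+ /*
+ * Program one frame: soft reset, image geometry, buffer addresses,
+ * detection conditions, then start the run.
+ */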
+ msm_fd_hw_srst(fd);
+ msm_fd_hw_set_size_mode(fd, buffer->format.size->reg_val);
+ msm_fd_hw_set_crop(fd, &buffer->format.crop);
+ msm_fd_hw_set_bytesperline(fd, buffer->format.bytesperline);
+ msm_fd_hw_set_image_addr(fd, buf_handle->addr);
+ msm_fd_hw_set_work_addr(fd, buffer->work_addr);
+ msm_fd_hw_set_min_face(fd, buffer->settings.min_size_index);
+ msm_fd_hw_set_threshold(fd, buffer->settings.threshold);
+ msm_fd_hw_set_direction_angle(fd, buffer->settings.direction_index,
+ buffer->settings.angle_index);
+ msm_fd_hw_run(fd);
+ if (fd->recovery_mode)
+ dev_err(fd->dev, "Scheduled buffer in recovery mode\n");
+ return 1;
+}
+
+/*
+ * msm_fd_hw_try_enable - Try to enable fd hw.
+ * @fd: Fd device.
+ * @buffer: Buffer to be processed.
+ * @state: Device state required for enabling.
+ *
+ * It will enable the fd hw if the current device state matches the state
+ * argument.
+ */
+static int msm_fd_hw_try_enable(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer, enum msm_fd_device_state state)
+{
+ int enabled = 0;
+
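+ /*
+ * Start the engine only if the device is in the expected state;
+ * mark the buffer active and move the device to RUNNING.
+ */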
+ if (state == fd->state) {
+
+ fd->state = MSM_FD_DEVICE_RUNNING;
+ atomic_set(&buffer->active, 1);
+
+ msm_fd_hw_enable(fd, buffer);
+ enabled = 1;
+ }
+ return enabled;
+}
+
+/*
+ * msm_fd_hw_next_buffer - Get next buffer from fd device processing queue.
+ * @fd: Fd device.
+ */
+static struct msm_fd_buffer *msm_fd_hw_next_buffer(struct msm_fd_device *fd)
+{
+ struct msm_fd_buffer *buffer = NULL;
+
+ if (!list_empty(&fd->buf_queue))
+ buffer = list_first_entry(&fd->buf_queue,
+ struct msm_fd_buffer, list);
+
+ return buffer;
+}
+
+/*
+ * msm_fd_hw_add_buffer - Add buffer to fd device processing queue.
+ * @fd: Fd device.
+ * @buffer: Fd buffer to be added.
+ */
+void msm_fd_hw_add_buffer(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer)
+{
+ spin_lock(&fd->slock);
+
+ atomic_set(&buffer->active, 0);
+ init_completion(&buffer->completion);
+
+ INIT_LIST_HEAD(&buffer->list);
+ list_add_tail(&buffer->list, &fd->buf_queue);
+ spin_unlock(&fd->slock);
+}
+
+/*
+ * msm_fd_hw_remove_buffers_from_queue - Remove buffers belonging to a vb2
+ * queue from the fd device processing queue.
+ * @fd: Fd device.
+ * @vb2_q: Vb2 queue whose buffers should be removed.
+ */
+void msm_fd_hw_remove_buffers_from_queue(struct msm_fd_device *fd,
+ struct vb2_queue *vb2_q)
+{
+ struct msm_fd_buffer *curr_buff;
+ struct msm_fd_buffer *temp;
+ struct msm_fd_buffer *active_buffer;
+ unsigned long time;
+
+ spin_lock(&fd->slock);
+
+ active_buffer = NULL;
+ list_for_each_entry_safe(curr_buff, temp, &fd->buf_queue, list) {
+ if (curr_buff->vb.vb2_queue == vb2_q) {
+
+ if (atomic_read(&curr_buff->active))
+ active_buffer = curr_buff;
+ else {
+ /* Do a Buffer done on all the other buffers */
+ vb2_buffer_done(&curr_buff->vb,
+ VB2_BUF_STATE_DONE);
+ list_del(&curr_buff->list);
+ }
+ }
+ }
+ spin_unlock(&fd->slock);
+
+ /* We need to wait active buffer to finish */
+ if (active_buffer) {
+ time = wait_for_completion_timeout(&active_buffer->completion,
+ msecs_to_jiffies(MSM_FD_PROCESSING_TIMEOUT_MS));
+ if (!time) {
+ /* Do a vb2 buffer done since it timed out */
+ vb2_buffer_done(&active_buffer->vb, VB2_BUF_STATE_DONE);
+ /* Remove active buffer */
+ msm_fd_hw_get_active_buffer(fd);
+ /* Schedule if other buffers are present in device */
+ msm_fd_hw_schedule_next_buffer(fd);
+ }
+ }
+}
+
+/*
+ * msm_fd_hw_buffer_done - Mark as done and removes from processing queue.
+ * @fd: Fd device.
+ * @buffer: Fd buffer.
+ */
+int msm_fd_hw_buffer_done(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer)
+{
+ int ret = 0;
+
+ spin_lock(&fd->slock);
+
+ if (atomic_read(&buffer->active)) {
+ atomic_set(&buffer->active, 0);
+ complete_all(&buffer->completion);
+ } else {
+ dev_err(fd->dev, "Buffer is not active\n");
+ ret = -1;
+ }
+
+ spin_unlock(&fd->slock);
+
+ return ret;
+}
+
+/*
+ * msm_fd_hw_get_active_buffer - Get active buffer from fd processing queue.
+ * @fd: Fd device.
+ */
+struct msm_fd_buffer *msm_fd_hw_get_active_buffer(struct msm_fd_device *fd)
+{
+ struct msm_fd_buffer *buffer = NULL;
+
+ spin_lock(&fd->slock);
+ if (!list_empty(&fd->buf_queue)) {
+ buffer = list_first_entry(&fd->buf_queue,
+ struct msm_fd_buffer, list);
+ list_del(&buffer->list);
+ }
+ spin_unlock(&fd->slock);
+
+ return buffer;
+}
+
+/*
+ * msm_fd_hw_schedule_and_start - Schedule active buffer and start processing.
+ * @fd: Fd device.
+ *
+ * This can be executed only when device is in idle state.
+ */
+int msm_fd_hw_schedule_and_start(struct msm_fd_device *fd)
+{
+ struct msm_fd_buffer *buf;
+
+ spin_lock(&fd->slock);
+ buf = msm_fd_hw_next_buffer(fd);
+ if (buf)
+ msm_fd_hw_try_enable(fd, buf, MSM_FD_DEVICE_IDLE);
+
+ spin_unlock(&fd->slock);
+
+ msm_fd_hw_update_settings(fd, buf);
+
+ return 0;
+}
+
+/*
+ * msm_fd_hw_schedule_next_buffer - Schedule next buffer and start processing.
+ * @fd: Fd device.
+ *
+ * NOTE: This can be executed only when device is in running state.
+ */
+int msm_fd_hw_schedule_next_buffer(struct msm_fd_device *fd)
+{
+ struct msm_fd_buffer *buf;
+ int ret;
+
+ spin_lock(&fd->slock);
+
+ /* We can schedule next buffer only in running state */
+ if (fd->state != MSM_FD_DEVICE_RUNNING) {
+ dev_err(fd->dev, "Can not schedule next buffer\n");
+ spin_unlock(&fd->slock);
+ return -EBUSY;
+ }
+
+ buf = msm_fd_hw_next_buffer(fd);
+ if (buf) {
+ ret = msm_fd_hw_try_enable(fd, buf, MSM_FD_DEVICE_RUNNING);
+ if (ret == 0) {
+ dev_err(fd->dev, "Ouch can not process next buffer\n");
+ spin_unlock(&fd->slock);
+ return -EBUSY;
+ }
+ } else {
+ fd->state = MSM_FD_DEVICE_IDLE;
+ if (fd->recovery_mode)
+ dev_err(fd->dev, "No Buffer in recovery mode.Device Idle\n");
+ }
+ spin_unlock(&fd->slock);
+
+ msm_fd_hw_update_settings(fd, buf);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_hw.h b/drivers/media/platform/msm/ais/fd/msm_fd_hw.h
new file mode 100644
index 000000000000..c022230d3dc6
--- /dev/null
+++ b/drivers/media/platform/msm/ais/fd/msm_fd_hw.h
@@ -0,0 +1,82 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_FD_HW_H__
+#define __MSM_FD_HW_H__
+
+#include "msm_fd_dev.h"
+
+int msm_fd_hw_get_face_count(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_result_x(struct msm_fd_device *fd, int idx);
+
+int msm_fd_hw_get_result_y(struct msm_fd_device *fd, int idx);
+
+void msm_fd_hw_get_result_conf_size(struct msm_fd_device *fd,
+ int idx, u32 *conf, u32 *size);
+
+void msm_fd_hw_get_result_angle_pose(struct msm_fd_device *fd, int idx,
+ u32 *angle, u32 *pose);
+
+int msm_fd_hw_request_irq(struct platform_device *pdev,
+ struct msm_fd_device *fd, work_func_t work_func);
+
+void msm_fd_hw_release_irq(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_revision(struct msm_fd_device *fd);
+
+void msm_fd_hw_release_mem_resources(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_mem_resources(struct platform_device *pdev,
+ struct msm_fd_device *fd);
+
+int msm_fd_hw_get_iommu(struct msm_fd_device *fd);
+
+void msm_fd_hw_put_iommu(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_regulators(struct msm_fd_device *fd);
+
+int msm_fd_hw_put_regulators(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_clocks(struct msm_fd_device *fd);
+
+int msm_fd_hw_put_clocks(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_bus(struct msm_fd_device *fd);
+
+void msm_fd_hw_put_bus(struct msm_fd_device *fd);
+
+int msm_fd_hw_get(struct msm_fd_device *fd, unsigned int clock_rate_idx);
+
+void msm_fd_hw_put(struct msm_fd_device *fd);
+
+int msm_fd_hw_map_buffer(struct msm_fd_mem_pool *pool, int fd,
+ struct msm_fd_buf_handle *buf);
+
+void msm_fd_hw_unmap_buffer(struct msm_fd_buf_handle *buf);
+
+void msm_fd_hw_add_buffer(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer);
+
+void msm_fd_hw_remove_buffers_from_queue(struct msm_fd_device *fd,
+ struct vb2_queue *vb2_q);
+
+int msm_fd_hw_buffer_done(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer);
+
+struct msm_fd_buffer *msm_fd_hw_get_active_buffer(struct msm_fd_device *fd);
+
+int msm_fd_hw_schedule_and_start(struct msm_fd_device *fd);
+
+int msm_fd_hw_schedule_next_buffer(struct msm_fd_device *fd);
+
+#endif /* __MSM_FD_HW_H__ */
diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_regs.h b/drivers/media/platform/msm/ais/fd/msm_fd_regs.h
new file mode 100644
index 000000000000..69f2af75c4c7
--- /dev/null
+++ b/drivers/media/platform/msm/ais/fd/msm_fd_regs.h
@@ -0,0 +1,169 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_FD_REGS_H__
+#define __MSM_FD_REGS_H__
+
+/* FD core registers */
+#define MSM_FD_CONTROL (0x00)
+#define MSM_FD_CONTROL_SRST (1 << 0)
+#define MSM_FD_CONTROL_RUN (1 << 1)
+#define MSM_FD_CONTROL_FINISH (1 << 2)
+
+#define MSM_FD_RESULT_CNT (0x04)
+#define MSM_FD_RESULT_CNT_MASK (0x3F)
+
+#define MSM_FD_CONDT (0x08)
+#define MSM_FD_CONDT_MIN_MASK (0x03)
+#define MSM_FD_CONDT_MIN_SHIFT (0x00)
+#define MSM_FD_CONDT_DIR_MAX (0x08)
+#define MSM_FD_CONDT_DIR_MASK (0x3C)
+#define MSM_FD_CONDT_DIR_SHIFT (0x02)
+
+#define MSM_FD_START_X (0x0C)
+#define MSM_FD_START_X_MASK (0x3FF)
+
+#define MSM_FD_START_Y (0x10)
+#define MSM_FD_START_Y_MASK (0x1FF)
+
+#define MSM_FD_SIZE_X (0x14)
+#define MSM_FD_SIZE_X_MASK (0x3FF)
+
+#define MSM_FD_SIZE_Y (0x18)
+#define MSM_FD_SIZE_Y_MASK (0x1FF)
+
+#define MSM_FD_DHINT (0x1C)
+#define MSM_FD_DHINT_MASK (0xF)
+
+#define MSM_FD_IMAGE_ADDR (0x24)
+#define MSM_FD_IMAGE_ADDR_ALIGN (0x8)
+
+#define MSM_FD_WORK_ADDR (0x28)
+#define MSM_FD_WORK_ADDR_ALIGN (0x8)
+
+#define MSM_FD_IMAGE_SIZE (0x2C)
+#define MSM_FD_IMAGE_SIZE_QVGA (0x0)
+#define MSM_FD_IMAGE_SIZE_VGA (0x1)
+#define MSM_FD_IMAGE_SIZE_WQVGA (0x2)
+#define MSM_FD_IMAGE_SIZE_WVGA (0x3)
+
+#define MSM_FD_LINE_BYTES (0x30)
+#define MSM_FD_LINE_BYTES_MASK (0x1FFF)
+#define MSM_FD_LINE_BYTES_ALIGN (0x8)
+
+#define MSM_FD_RESULT_CENTER_X(x) (0x400 + (0x10 * (x)))
+
+#define MSM_FD_RESULT_CENTER_Y(x) (0x404 + (0x10 * (x)))
+
+#define MSM_FD_RESULT_CONF_SIZE(x) (0x408 + (0x10 * (x)))
+#define MSM_FD_RESULT_SIZE_MASK (0x1FF)
+#define MSM_FD_RESULT_SIZE_SHIFT (0x000)
+#define MSM_FD_RESULT_CONF_MASK (0xF)
+#define MSM_FD_RESULT_CONF_SHIFT (0x9)
+
+#define MSM_FD_RESULT_ANGLE_POSE(x) (0x40C + (0x10 * (x)))
+#define MSM_FD_RESULT_ANGLE_MASK (0x1FF)
+#define MSM_FD_RESULT_ANGLE_SHIFT (0x000)
+#define MSM_FD_RESULT_POSE_MASK (0x7)
+#define MSM_FD_RESULT_POSE_SHIFT (0x9)
+#define MSM_FD_RESULT_POSE_FRONT (0x1)
+#define MSM_FD_RESULT_POSE_RIGHT_DIAGONAL (0x2)
+#define MSM_FD_RESULT_POSE_RIGHT (0x3)
+#define MSM_FD_RESULT_POSE_LEFT_DIAGONAL (0x4)
+#define MSM_FD_RESULT_POSE_LEFT (0x5)
+
+/* FD misc registers */
+#define MSM_FD_MISC_HW_VERSION (0x00)
+#define MSM_FD_MISC_CGC_DISABLE (0x04)
+#define MSM_FD_HW_STOP (0x08)
+
+#define MSM_FD_MISC_SW_RESET (0x10)
+#define MSM_FD_MISC_SW_RESET_SET (1 << 0)
+
+#define MSM_FD_MISC_FIFO_STATUS (0x14)
+#define MSM_FD_MISC_FIFO_STATUS_RFIFO_DCNT_MAST (0x1F)
+#define MSM_FD_MISC_FIFO_STATUS_RFIFO_DCNT_SHIFT (0)
+#define MSM_FD_MISC_FIFO_STATUS_RFIFO_FULL (1 << 13)
+#define MSM_FD_MISC_FIFO_STATUS_RFIFO_EMPTY (1 << 14)
+#define MSM_FD_MISC_FIFO_STATUS_WFIFO_DCNT_MAST (0x1F)
+#define MSM_FD_MISC_FIFO_STATUS_WFIFO_DCNT_SHIFT (16)
+#define MSM_FD_MISC_FIFO_STATUS_WFIFO_EMPTY (1 << 29)
+#define MSM_FD_MISC_FIFO_STATUS_WFIFO_FULL (1 << 30)
+
+#define MSM_FD_MISC_DATA_ENDIAN (0x18)
+#define MSM_FD_MISC_DATA_ENDIAN_BYTE_SWAP_SET (1 << 0)
+
+#define MSM_FD_MISC_VBIF_REQ_PRIO (0x20)
+#define MSM_FD_MISC_VBIF_REQ_PRIO_MASK (0x3)
+
+#define MSM_FD_MISC_VBIF_PRIO_LEVEL (0x24)
+#define MSM_FD_MISC_VBIF_PRIO_LEVEL_MASK (0x3)
+
+#define MSM_FD_MISC_VBIF_MMU_PDIRECT (0x28)
+#define MSM_FD_MISC_VBIF_MMU_PDIRECT_INCREMENT (1 << 0)
+
+#define MSM_FD_MISC_VBIF_IRQ_CLR (0x30)
+#define MSM_FD_MISC_VBIF_IRQ_CLR_ALL (1 << 0)
+
+#define MSM_FD_MISC_VBIF_DONE_STATUS (0x34)
+#define MSM_FD_MISC_VBIF_DONE_STATUS_WRITE (1 << 0)
+#define MSM_FD_MISC_VBIF_DONE_STATUS_READ (1 << 1)
+
+#define MSM_FD_MISC_IRQ_MASK (0x50)
+#define MSM_FD_MISC_IRQ_MASK_HALT_REQ (1 << 1)
+#define MSM_FD_MISC_IRQ_MASK_CORE_IRQ (1 << 0)
+
+#define MSM_FD_MISC_IRQ_STATUS (0x54)
+#define MSM_FD_MISC_IRQ_STATUS_HALT_REQ (1 << 1)
+#define MSM_FD_MISC_IRQ_STATUS_CORE_IRQ (1 << 0)
+
+#define MSM_FD_MISC_IRQ_CLEAR (0x58)
+#define MSM_FD_MISC_IRQ_CLEAR_HALT (1 << 1)
+#define MSM_FD_MISC_IRQ_CLEAR_CORE (1 << 0)
+
+#define MSM_FD_MISC_TEST_BUS_SEL (0x40)
+#define MSM_FD_MISC_TEST_BUS_SEL_TEST_MODE_MASK (0xF)
+#define MSM_FD_MISC_TEST_BUS_SEL_TEST_MODE_SHIFT (0)
+#define MSM_FD_MISC_TEST_BUS_SEL_7_0_MASK (0x3)
+#define MSM_FD_MISC_TEST_BUS_SEL_7_0_SHIFT (16)
+#define MSM_FD_MISC_TEST_BUS_SEL_15_8_MASK (0x3)
+#define MSM_FD_MISC_TEST_BUS_SEL_15_8_SHIFT (18)
+#define MSM_FD_MISC_TEST_BUS_SEL_23_16_MASK (0x3)
+#define MSM_FD_MISC_TEST_BUS_SEL_23_16_SHIFT (20)
+#define MSM_FD_MISC_TEST_BUS_SEL_31_24_MASK (0x3)
+#define MSM_FD_MISC_TEST_BUS_SEL_31_24_SHIFT (22)
+
+#define MSM_FD_MISC_AHB_TEST_EN (0x44)
+#define MSM_FD_MISC_AHB_TEST_EN_MASK (0x3)
+
+#define MSM_FD_MISC_FD2VBIF_INT_TEST_SEL (0x48)
+#define MSM_FD_MISC_FD2VBIF_INT_TEST_MASK (0xF)
+
+#define MSM_FD_MISC_TEST_BUS (0x4C)
+
+/* FD vbif registers */
+#define MSM_FD_VBIF_CLKON (0x04)
+#define MSM_FD_VBIF_QOS_OVERRIDE_EN (0x10)
+#define MSM_FD_VBIF_QOS_OVERRIDE_REQPRI (0x18)
+#define MSM_FD_VBIF_QOS_OVERRIDE_PRILVL (0x1C)
+#define MSM_FD_VBIF_IN_RD_LIM_CONF0 (0xB0)
+#define MSM_FD_VBIF_IN_WR_LIM_CONF0 (0xC0)
+#define MSM_FD_VBIF_OUT_RD_LIM_CONF0 (0xD0)
+#define MSM_FD_VBIF_OUT_WR_LIM_CONF0 (0xD4)
+#define MSM_FD_VBIF_DDR_OUT_MAX_BURST (0xD8)
+#define MSM_FD_VBIF_ARB_CTL (0xF0)
+#define MSM_FD_VBIF_OUT_AXI_AMEMTYPE_CONF0 (0x160)
+#define MSM_FD_VBIF_OUT_AXI_AOOO_EN (0x178)
+#define MSM_FD_VBIF_OUT_AXI_AOOO (0x17c)
+#define MSM_FD_VBIF_ROUND_ROBIN_QOS_ARB (0x124)
+
+#endif /* __MSM_FD_REGS_H__ */
diff --git a/drivers/media/platform/msm/ais/isp/Makefile b/drivers/media/platform/msm/ais/isp/Makefile
new file mode 100644
index 000000000000..c7202ee4ffa5
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/common/
+obj-$(CONFIG_MSM_AIS) += msm_buf_mgr.o msm_isp_util.o msm_isp_axi_util.o msm_isp_stats_util.o
+obj-$(CONFIG_MSM_AIS) += msm_isp47.o msm_isp.o
diff --git a/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c
new file mode 100644
index 000000000000..fcd803d4b138
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c
@@ -0,0 +1,1531 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <linux/proc_fs.h>
+#include <linux/videodev2.h>
+#include <linux/vmalloc.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-core.h>
+
+#include "msm.h"
+#include "msm_buf_mgr.h"
+#include "cam_smmu_api.h"
+#include "msm_isp_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define BUF_DEBUG_FULL 0
+#define MAX_LIST_COUNT 100
+
+static int msm_buf_check_head_sanity(struct msm_isp_bufq *bufq)
+{
+ int rc = 0;
+ struct list_head *prev = NULL;
+ struct list_head *next = NULL;
+
+ if (!bufq) {
+ pr_err("%s: Error! Invalid bufq\n", __func__);
+ return -EINVAL;
+ }
+
+ prev = bufq->head.prev;
+ next = bufq->head.next;
+
+ if (!prev) {
+ pr_err("%s: Error! bufq->head.prev is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!next) {
+ pr_err("%s: Error! bufq->head.next is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (prev->next != &bufq->head) {
+ pr_err("%s: Error! head prev->next is %pK should be %pK\n",
+ __func__, prev->next, &bufq->head);
+ return -EINVAL;
+ }
+
+ if (next->prev != &bufq->head) {
+ pr_err("%s: Error! head next->prev is %pK should be %pK\n",
+ __func__, next->prev, &bufq->head);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+struct msm_isp_bufq *msm_isp_get_bufq(
+ struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle)
+{
+ struct msm_isp_bufq *bufq = NULL;
+ uint32_t bufq_index = bufq_handle & 0xFF;
+
+ /* bufq_handle cannot be 0 */
+ if ((bufq_handle == 0) ||
+ bufq_index >= BUF_MGR_NUM_BUF_Q ||
+ (bufq_index > buf_mgr->num_buf_q))
+ return NULL;
+
+ bufq = &buf_mgr->bufq[bufq_index];
+ if (bufq->bufq_handle == bufq_handle)
+ return bufq;
+
+ return NULL;
+}
+
+static struct msm_isp_buffer *msm_isp_get_buf_ptr(
+ struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index)
+{
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return buf_info;
+ }
+
+ if (bufq->num_bufs <= buf_index) {
+ pr_err("%s: Invalid buf index\n", __func__);
+ return buf_info;
+ }
+ buf_info = &bufq->bufs[buf_index];
+ return buf_info;
+}
+
+static uint32_t msm_isp_get_buf_handle(
+ struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t vfe_id,
+ enum msm_vfe_axi_stream_src output_id)
+{
+ int i;
+ uint32_t embedded_stream_id = 0;
+
+ for (i = 0; i < buf_mgr->num_buf_q; i++) {
+ if (buf_mgr->bufq[i].bufq_handle != 0 &&
+ buf_mgr->bufq[i].vfe_id == vfe_id &&
+ buf_mgr->bufq[i].output_id == output_id) {
+ pr_err("%s - existing handle for output_id=%d",
+ __func__, output_id);
+ return 0;
+ }
+ }
+
+ /* Put the stream id in the handle; if it is stats, use 0xFFFF */
+ if (vfe_id & (1 << 31))
+ embedded_stream_id = 0xFFFF;
+ else
+ embedded_stream_id = (vfe_id << 4) | output_id;
+
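+	/*
+	 * The handle built below keeps the bufq array index in its low byte
+	 * (msm_isp_get_bufq masks with 0xFF to find the slot), while the
+	 * embedded stream id and the 0xA15B0000 tag let msm_isp_get_bufq
+	 * reject handles that no longer match what is stored in the slot.
+	 */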
+ for (i = 0; i < buf_mgr->num_buf_q; i++) {
+ if (buf_mgr->bufq[i].bufq_handle == 0) {
+ buf_mgr->bufq[i].bufq_handle = 0xA15B0000 |
+ (embedded_stream_id << 8) | i;
+ return buf_mgr->bufq[i].bufq_handle;
+ }
+ }
+ return 0;
+}
+
+static int msm_isp_free_bufq_handle(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle)
+{
+ struct msm_isp_bufq *bufq =
+ msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq)
+ return -EINVAL;
+
+ /* Set everything except lock to 0 */
+ bufq->bufq_handle = 0;
+ bufq->bufs = 0;
+ bufq->vfe_id = 0;
+ bufq->output_id = 0;
+ bufq->num_bufs = 0;
+ bufq->buf_type = 0;
+ INIT_LIST_HEAD(&bufq->head);
+
+ return 0;
+}
+
+static void msm_isp_copy_planes_from_v4l2_buffer(
+ struct msm_isp_qbuf_buffer *qbuf_buf,
+ const struct vb2_buffer *vb2_buf)
+{
+ int i;
+
+ qbuf_buf->num_planes = vb2_buf->num_planes;
+ for (i = 0; i < qbuf_buf->num_planes; i++) {
+ qbuf_buf->planes[i].addr = vb2_buf->planes[i].m.userptr;
+ qbuf_buf->planes[i].offset = vb2_buf->planes[i].data_offset;
+ qbuf_buf->planes[i].length = vb2_buf->planes[i].length;
+ }
+}
+
+static int msm_isp_prepare_v4l2_buf(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buffer *buf_info,
+ struct msm_isp_qbuf_buffer *qbuf_buf,
+ uint32_t output_id)
+{
+ int i, rc = -1;
+ int ret;
+ struct msm_isp_buffer_mapped_info *mapped_info;
+ uint32_t accu_length = 0;
+
+ for (i = 0; i < qbuf_buf->num_planes; i++) {
+ mapped_info = &buf_info->mapped_info[i];
+ mapped_info->buf_fd = qbuf_buf->planes[i].addr;
+ ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
+ mapped_info->buf_fd,
+ CAM_SMMU_MAP_RW,
+ &(mapped_info->paddr),
+ &(mapped_info->len));
+ if (ret) {
+ rc = -EINVAL;
+ pr_err_ratelimited("%s: cannot map address", __func__);
+ goto get_phy_err;
+ }
+
+ mapped_info->paddr += accu_length;
+ accu_length += qbuf_buf->planes[i].length;
+
+ CDBG("%s: plane: %d addr:%lu\n",
+ __func__, i, (unsigned long)mapped_info->paddr);
+
+ }
+ buf_info->num_planes = qbuf_buf->num_planes;
+ return 0;
+get_phy_err:
+	/* Unmap any planes that were mapped before the failing one */
+	for (i--; i >= 0; i--)
+		cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+			buf_info->mapped_info[i].buf_fd);
+	return rc;
+}
+
+static void msm_isp_unprepare_v4l2_buf(
+ struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buffer *buf_info,
+ uint32_t output_id)
+{
+ int i;
+ struct msm_isp_buffer_mapped_info *mapped_info;
+ struct msm_isp_bufq *bufq = NULL;
+
+ if (!buf_mgr || !buf_info) {
+ pr_err("%s: NULL ptr %pK %pK\n", __func__,
+ buf_mgr, buf_info);
+ return;
+ }
+
+ bufq = msm_isp_get_bufq(buf_mgr, buf_info->bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq, output_id %x\n",
+ __func__, output_id);
+ return;
+ }
+
+ for (i = 0; i < buf_info->num_planes; i++) {
+ mapped_info = &buf_info->mapped_info[i];
+
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl, mapped_info->buf_fd);
+ }
+}
+
+static int msm_isp_map_buf(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buffer_mapped_info *mapped_info, uint32_t fd)
+{
+ int rc = 0;
+ int ret;
+
+ if (!buf_mgr || !mapped_info) {
+ pr_err_ratelimited("%s: %d] NULL ptr buf_mgr %pK mapped_info %pK\n",
+ __func__, __LINE__, buf_mgr, mapped_info);
+ return -EINVAL;
+ }
+ ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
+ fd,
+ CAM_SMMU_MAP_RW,
+ &(mapped_info->paddr),
+ &(mapped_info->len));
+
+ if (ret) {
+ rc = -EINVAL;
+ pr_err_ratelimited("%s: cannot map address", __func__);
+ goto smmu_map_error;
+ }
+ CDBG("%s: addr:%lu\n",
+ __func__, (unsigned long)mapped_info->paddr);
+
+ return rc;
+smmu_map_error:
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+ fd);
+ return rc;
+}
+
+static int msm_isp_unmap_buf(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t fd)
+{
+ if (!buf_mgr) {
+ pr_err_ratelimited("%s: %d] NULL ptr buf_mgr\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+ fd);
+
+ return 0;
+}
+
+static int msm_isp_buf_prepare(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_qbuf_info *info, struct vb2_v4l2_buffer *vb2_v4l2_buf)
+{
+ int rc = -1;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ struct msm_isp_qbuf_buffer buf;
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr,
+ info->handle, info->buf_idx);
+ if (!buf_info) {
+ pr_err("Invalid buffer prepare\n");
+ return rc;
+ }
+
+ bufq = msm_isp_get_bufq(buf_mgr, buf_info->bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n",
+ __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ rc = buf_info->state;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+
+ if (buf_info->state != MSM_ISP_BUFFER_STATE_INITIALIZED) {
+ pr_err("%s: Invalid buffer state: %d bufq %x buf-id %d\n",
+ __func__, buf_info->state, bufq->bufq_handle,
+ buf_info->buf_idx);
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+
+ if (vb2_v4l2_buf) {
+ msm_isp_copy_planes_from_v4l2_buffer(&buf,
+ &vb2_v4l2_buf->vb2_buf);
+ buf_info->vb2_v4l2_buf = vb2_v4l2_buf;
+ } else {
+ buf = info->buffer;
+ }
+
+ rc = msm_isp_prepare_v4l2_buf(buf_mgr, buf_info, &buf, bufq->output_id);
+ if (rc < 0) {
+ pr_err_ratelimited("%s: Prepare buffer error\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ buf_info->state = MSM_ISP_BUFFER_STATE_PREPARED;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+}
+
+static int msm_isp_buf_unprepare_all(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t buf_handle)
+{
+ int rc = -1, i;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, buf_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ for (i = 0; i < bufq->num_bufs; i++) {
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, buf_handle, i);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return rc;
+ }
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_UNUSED ||
+ buf_info->state ==
+ MSM_ISP_BUFFER_STATE_INITIALIZED)
+ continue;
+
+ if (BUF_SRC(bufq->flags) == MSM_ISP_BUFFER_SRC_HAL) {
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DEQUEUED ||
+ buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED)
+ buf_mgr->vb2_ops->put_buf(
+ buf_info->vb2_v4l2_buf,
+ bufq->session_id, bufq->stream_id);
+ }
+ msm_isp_unprepare_v4l2_buf(buf_mgr, buf_info, bufq->stream_id);
+ }
+ return 0;
+}
+
+int msm_isp_flush_queue(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle)
+{
+ uint32_t rc = 0;
+ int i = 0;
+ struct msm_isp_bufq *bufq = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err_ratelimited("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ rc = msm_isp_buf_unprepare_all(buf_mgr, bufq_handle);
+
+ for (i = 0; i < ISP_NUM_BUF_MASK; i++)
+ bufq->put_buf_mask[i] = 0;
+ INIT_LIST_HEAD(&bufq->head);
+
+ memset(bufq->bufs, 0x0, bufq->num_bufs*sizeof(bufq->bufs[0]));
+ for (i = 0; i < bufq->num_bufs; i++) {
+ bufq->bufs[i].state = MSM_ISP_BUFFER_STATE_INITIALIZED;
+ bufq->bufs[i].buf_debug.put_state[0] =
+ MSM_ISP_BUFFER_STATE_PUT_PREPARED;
+ bufq->bufs[i].buf_debug.put_state[1] =
+ MSM_ISP_BUFFER_STATE_PUT_PREPARED;
+ bufq->bufs[i].buf_debug.put_state_last = 0;
+ bufq->bufs[i].bufq_handle = bufq->bufq_handle;
+ bufq->bufs[i].buf_idx = i;
+ INIT_LIST_HEAD(&bufq->bufs[i].list);
+ }
+
+ return rc;
+
+}
+
+static int msm_isp_get_buf_by_index(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct msm_isp_buffer **buf_info)
+{
+ int rc = -EINVAL;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *temp_buf_info;
+ uint32_t i = 0;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ if (buf_index >= bufq->num_bufs) {
+ pr_err("%s: Invalid buf index: %d max: %d\n", __func__,
+ buf_index, bufq->num_bufs);
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+
+ *buf_info = NULL;
+	for (i = 0; i < bufq->num_bufs; i++) {
+ temp_buf_info = &bufq->bufs[i];
+ if (temp_buf_info && temp_buf_info->buf_idx == buf_index) {
+ *buf_info = temp_buf_info;
+ break;
+ }
+ }
+
+ if (*buf_info) {
+ pr_debug("Found buf in isp buf mgr");
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+}
+
+static int msm_isp_buf_unprepare(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t buf_handle, int32_t buf_idx)
+{
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, buf_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return -EINVAL;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, buf_handle, buf_idx);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return -EINVAL;
+ }
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_UNUSED ||
+ buf_info->state == MSM_ISP_BUFFER_STATE_INITIALIZED)
+ return 0;
+
+ if (BUF_SRC(bufq->flags) == MSM_ISP_BUFFER_SRC_HAL) {
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DEQUEUED ||
+ buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED)
+ buf_mgr->vb2_ops->put_buf(buf_info->vb2_v4l2_buf,
+ bufq->session_id, bufq->stream_id);
+ }
+ msm_isp_unprepare_v4l2_buf(buf_mgr, buf_info, bufq->stream_id);
+
+ return 0;
+}
+
+static int msm_isp_get_buf(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct msm_isp_buffer **buf_info)
+{
+ int rc = -1;
+ unsigned long flags;
+ struct msm_isp_buffer *temp_buf_info = NULL;
+ struct msm_isp_bufq *bufq = NULL;
+ struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
+
+ if (buf_mgr->open_count == 0) {
+ pr_err_ratelimited("%s: bug mgr open cnt = 0\n",
+ __func__);
+ return 0;
+ }
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err_ratelimited("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ if (!bufq->bufq_handle) {
+ pr_err_ratelimited("%s: Invalid bufq handle\n", __func__);
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+
+ *buf_info = NULL;
+
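+	/*
+	 * The buffer source decides where the next buffer comes from:
+	 * NATIVE pulls the first QUEUED entry off the kernel-owned list,
+	 * HAL asks the vb2 layer (optionally by index), and SCRATCH always
+	 * hands back the single pre-mapped scratch buffer.
+	 */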
+ switch (BUF_SRC(bufq->flags)) {
+ case MSM_ISP_BUFFER_SRC_NATIVE:
+ list_for_each_entry(temp_buf_info, &bufq->head, list) {
+ if (temp_buf_info->state ==
+ MSM_ISP_BUFFER_STATE_QUEUED) {
+ list_del_init(&temp_buf_info->list);
+ if (msm_buf_check_head_sanity(bufq) < 0) {
+ spin_unlock_irqrestore(
+ &bufq->bufq_lock, flags);
+ WARN(1, "%s buf_handle 0x%x buf_idx %d\n",
+ __func__,
+ bufq->bufq_handle,
+ temp_buf_info->buf_idx);
+ return -EFAULT;
+ }
+ *buf_info = temp_buf_info;
+ break;
+ }
+ }
+ break;
+ case MSM_ISP_BUFFER_SRC_HAL:
+ if (buf_index == MSM_ISP_INVALID_BUF_INDEX)
+ vb2_v4l2_buf = buf_mgr->vb2_ops->get_buf(
+ bufq->session_id, bufq->stream_id);
+ else
+ vb2_v4l2_buf = buf_mgr->vb2_ops->get_buf_by_idx(
+ bufq->session_id, bufq->stream_id, buf_index);
+ if (vb2_v4l2_buf) {
+ if (vb2_v4l2_buf->vb2_buf.index < bufq->num_bufs) {
+ *buf_info = &bufq->bufs[vb2_v4l2_buf
+ ->vb2_buf.index];
+ (*buf_info)->vb2_v4l2_buf = vb2_v4l2_buf;
+ } else {
+ pr_err("%s: Incorrect buf index %d\n",
+ __func__, vb2_v4l2_buf->vb2_buf.index);
+ rc = -EINVAL;
+ }
+ if ((*buf_info) == NULL) {
+ buf_mgr->vb2_ops->put_buf(vb2_v4l2_buf,
+ bufq->session_id, bufq->stream_id);
+ pr_err("%s: buf index %d not found!\n",
+ __func__, vb2_v4l2_buf->vb2_buf.index);
+ rc = -EINVAL;
+
+ }
+ } else {
+ CDBG("%s: No HAL Buffer session_id: %d stream_id: %d\n",
+ __func__, bufq->session_id, bufq->stream_id);
+ rc = -EINVAL;
+ }
+ break;
+ case MSM_ISP_BUFFER_SRC_SCRATCH:
+		/* In the scratch buffer case there is only one buffer in the
+		 * queue; the same buffer is returned every time.
+		 */
+ *buf_info = list_entry(bufq->head.next, typeof(**buf_info),
+ list);
+ break;
+ default:
+ pr_err("%s: Incorrect buf source.\n", __func__);
+ rc = -EINVAL;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+
+ if (!(*buf_info)) {
+ rc = -ENOMEM;
+ } else {
+ (*buf_info)->state = MSM_ISP_BUFFER_STATE_DEQUEUED;
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+}
+
+static int msm_isp_put_buf_unsafe(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index)
+{
+ int rc = -1;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return rc;
+ }
+
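+	/*
+	 * State transitions handled here: PREPARED/DEQUEUED buffers are
+	 * requeued (to the native list or back to vb2 depending on the
+	 * source), DISPATCHED buffers are simply marked QUEUED again, and
+	 * any other state (e.g. a double put of a QUEUED buffer) is
+	 * treated as a fault.
+	 */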
+ switch (buf_info->state) {
+ case MSM_ISP_BUFFER_STATE_PREPARED:
+ case MSM_ISP_BUFFER_STATE_DEQUEUED:
+ if (BUF_SRC(bufq->flags)) {
+ if (!list_empty(&buf_info->list)) {
+ WARN(1, "%s: buf %x/%x double add\n",
+ __func__, bufq_handle, buf_index);
+ return -EFAULT;
+ }
+ list_add_tail(&buf_info->list, &bufq->head);
+ if (msm_buf_check_head_sanity(bufq) < 0) {
+ WARN(1, "%s buf_handle 0x%x buf_idx %d\n",
+ __func__,
+ bufq->bufq_handle,
+ buf_info->buf_idx);
+ return -EFAULT;
+ }
+ } else {
+ buf_mgr->vb2_ops->put_buf(buf_info->vb2_v4l2_buf,
+ bufq->session_id, bufq->stream_id);
+ }
+ buf_info->state = MSM_ISP_BUFFER_STATE_QUEUED;
+ rc = 0;
+ break;
+ case MSM_ISP_BUFFER_STATE_DISPATCHED:
+ buf_info->state = MSM_ISP_BUFFER_STATE_QUEUED;
+ rc = 0;
+ break;
+ case MSM_ISP_BUFFER_STATE_QUEUED:
+ case MSM_ISP_BUFFER_STATE_DIVERTED:
+ default:
+ WARN(1, "%s: bufq 0x%x, buf idx 0x%x, incorrect state = %d",
+ __func__, bufq_handle, buf_index, buf_info->state);
+ return -EFAULT;
+ }
+
+ return rc;
+}
+
+static int msm_isp_put_buf(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index)
+{
+ int rc = -1;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+
+ rc = msm_isp_put_buf_unsafe(buf_mgr, bufq_handle, buf_index);
+
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+
+ return rc;
+}
+
+static int msm_isp_update_put_buf_cnt_unsafe(
+ struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t id, uint32_t bufq_handle, int32_t buf_index,
+ struct timeval *tv, uint32_t frame_id, uint32_t pingpong_bit)
+{
+ int rc = -1;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ uint8_t *put_buf_mask = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq\n");
+ return rc;
+ }
+
+ put_buf_mask = &bufq->put_buf_mask[pingpong_bit];
+
+ if (buf_index >= 0) {
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return -EFAULT;
+ }
+ if (buf_info->state != MSM_ISP_BUFFER_STATE_DEQUEUED) {
+ pr_err(
+ "%s: Invalid state, bufq_handle %x output_id %x, state %d\n",
+ __func__, bufq_handle,
+ bufq->output_id, buf_info->state);
+ return -EFAULT;
+ }
+ if (buf_info->pingpong_bit != pingpong_bit) {
+ pr_err("%s: Pingpong bit mismatch\n", __func__);
+ return -EFAULT;
+ }
+ }
+
+ if (bufq->buf_type != ISP_SHARE_BUF ||
+ (*put_buf_mask == 0)) {
+ if (buf_info)
+ buf_info->frame_id = frame_id;
+ }
+
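+	/*
+	 * For shared buffers (ISP_SHARE_BUF, typically dual VFE) each VFE
+	 * sets its bit in put_buf_mask for this pingpong slot: a positive
+	 * return means the other VFE has not put yet, 0 means all VFEs have
+	 * put and the buffer can really be released, and -ENOTEMPTY flags a
+	 * duplicate put from the same VFE.
+	 */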
+ if (bufq->buf_type == ISP_SHARE_BUF &&
+ ((*put_buf_mask & (1 << id)) == 0)) {
+ *put_buf_mask |= (1 << id);
+ if (*put_buf_mask != ISP_SHARE_BUF_MASK) {
+ rc = *put_buf_mask;
+ return 1;
+ }
+ *put_buf_mask = 0;
+ rc = 0;
+ } else if (bufq->buf_type == ISP_SHARE_BUF &&
+ (*put_buf_mask & (1 << id)) != 0) {
+ return -ENOTEMPTY;
+ }
+
+ if (buf_info &&
+ BUF_SRC(bufq->flags) == MSM_ISP_BUFFER_SRC_NATIVE) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
+ buf_info->tv = tv;
+ }
+ return 0;
+}
+
+static int msm_isp_update_put_buf_cnt(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t id, uint32_t bufq_handle, int32_t buf_index,
+ struct timeval *tv, uint32_t frame_id, uint32_t pingpong_bit)
+{
+ int rc = -1;
+ struct msm_isp_bufq *bufq = NULL;
+ unsigned long flags;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq\n");
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ rc = msm_isp_update_put_buf_cnt_unsafe(buf_mgr, id, bufq_handle,
+ buf_index, tv, frame_id, pingpong_bit);
+ if (-ENOTEMPTY == rc) {
+ pr_err("%s: Error! Uncleared put_buf_mask for pingpong(%d) from vfe %d bufq 0x%x buf_idx %d\n",
+ __func__, pingpong_bit, id, bufq_handle, buf_index);
+ msm_isp_dump_ping_pong_mismatch();
+ rc = -EFAULT;
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+}
+
+static int msm_isp_buf_done(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id, uint32_t output_format)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ enum msm_isp_buffer_state state;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq\n");
+ return -EINVAL;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ state = buf_info->state;
+
+ if (BUF_SRC(bufq->flags) == MSM_ISP_BUFFER_SRC_HAL) {
+ if (state == MSM_ISP_BUFFER_STATE_DEQUEUED) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ buf_mgr->vb2_ops->buf_done(buf_info->vb2_v4l2_buf,
+ bufq->session_id, bufq->stream_id,
+ frame_id, tv, output_format);
+ } else {
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ }
+ goto done;
+ }
+
+	/*
+	 * For a native buffer, put the diverted buffer back on the queue
+	 * since the caller is not going to send it to CPP; this is an error
+	 * case such as drop_frame/empty_buffer.
+	 */
+ if (state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_PREPARED;
+ rc = msm_isp_put_buf_unsafe(buf_mgr, buf_info->bufq_handle,
+ buf_info->buf_idx);
+ if (rc < 0)
+ pr_err("%s: Buf put failed\n", __func__);
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+done:
+ return rc;
+}
+
+static int msm_isp_flush_buf(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
+ uint32_t bufq_handle, enum msm_isp_buffer_flush_t flush_type,
+ struct timeval *tv, uint32_t frame_id)
+{
+ int rc = 0, i;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ unsigned long flags;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ for (i = 0; i < bufq->num_bufs; i++) {
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, i);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ continue;
+ }
+ if ((flush_type == MSM_ISP_BUFFER_FLUSH_DIVERTED) &&
+ (buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED)) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_PREPARED;
+ msm_isp_put_buf_unsafe(buf_mgr,
+ bufq_handle, buf_info->buf_idx);
+ } else if (flush_type == MSM_ISP_BUFFER_FLUSH_ALL) {
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ CDBG("%s: no need to queue Diverted buffer\n",
+ __func__);
+ } else if (buf_info->state ==
+ MSM_ISP_BUFFER_STATE_DEQUEUED) {
+ rc = msm_isp_update_put_buf_cnt_unsafe(buf_mgr,
+ id, bufq_handle, buf_info->buf_idx, tv,
+ frame_id, buf_info->pingpong_bit);
+ if (-ENOTEMPTY == rc) {
+ rc = 0;
+ continue;
+ }
+
+ if (rc == 0) {
+ buf_info->buf_debug.put_state[
+ buf_info->buf_debug.
+ put_state_last]
+ = MSM_ISP_BUFFER_STATE_FLUSH;
+ buf_info->buf_debug.put_state_last ^= 1;
+ buf_info->state =
+ MSM_ISP_BUFFER_STATE_PREPARED;
+ rc = msm_isp_put_buf_unsafe(buf_mgr,
+ bufq_handle, buf_info->buf_idx);
+ if (rc == -EFAULT) {
+ spin_unlock_irqrestore(
+ &bufq->bufq_lock,
+ flags);
+ return rc;
+ }
+ }
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return 0;
+}
+
+static int msm_isp_buf_enqueue(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_qbuf_info *info)
+{
+ int rc = 0, buf_state;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, info->handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq, handle 0x%x, stream id %x num_plane %d\n"
+ , __func__, info->handle, (info->handle >> 8),
+ info->buffer.num_planes);
+ return -EINVAL;
+ }
+
+ buf_state = msm_isp_buf_prepare(buf_mgr, info, NULL);
+ if (buf_state < 0) {
+ pr_err_ratelimited("%s: Buf prepare failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (buf_state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ buf_info = msm_isp_get_buf_ptr(buf_mgr,
+ info->handle, info->buf_idx);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return -EINVAL;
+ }
+ if (info->dirty_buf) {
+ buf_info->buf_debug.put_state[
+ buf_info->buf_debug.put_state_last]
+ = MSM_ISP_BUFFER_STATE_PUT_BUF;
+ buf_info->buf_debug.put_state_last ^= 1;
+ buf_info->state = MSM_ISP_BUFFER_STATE_PREPARED;
+ rc = msm_isp_put_buf(buf_mgr,
+ info->handle, info->buf_idx);
+ } else {
+ if (BUF_SRC(bufq->flags))
+ pr_err("%s: Invalid native buffer state\n",
+ __func__);
+ else {
+ buf_info->buf_debug.put_state[
+ buf_info->buf_debug.put_state_last] =
+ MSM_ISP_BUFFER_STATE_PUT_BUF;
+ buf_info->buf_debug.put_state_last ^= 1;
+ rc = msm_isp_buf_done(buf_mgr,
+ info->handle, info->buf_idx,
+ buf_info->tv, buf_info->frame_id, 0);
+ }
+ }
+ } else {
+ if (BUF_SRC(bufq->flags) != MSM_ISP_BUFFER_SRC_HAL) {
+ buf_info = msm_isp_get_buf_ptr(buf_mgr,
+ info->handle, info->buf_idx);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return -EINVAL;
+ }
+
+ buf_info->buf_debug.put_state[
+ buf_info->buf_debug.put_state_last] =
+ MSM_ISP_BUFFER_STATE_PUT_PREPARED;
+ buf_info->buf_debug.put_state_last ^= 1;
+ rc = msm_isp_put_buf(buf_mgr,
+ info->handle, info->buf_idx);
+ if (rc < 0) {
+ pr_err("%s: Buf put failed stream %x\n",
+ __func__, bufq->stream_id);
+ return rc;
+ }
+ }
+ }
+ return 0;
+}
+
+static int msm_isp_buf_dequeue(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_qbuf_info *info)
+{
+ struct msm_isp_buffer *buf_info = NULL;
+ int rc = 0;
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, info->handle, info->buf_idx);
+ if (!buf_info) {
+ pr_err("Invalid buffer dequeue\n");
+ return -EINVAL;
+ }
+
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DEQUEUED ||
+ buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ pr_err("%s: Invalid state %d\n", __func__, buf_info->state);
+ return -EINVAL;
+ }
+ msm_isp_buf_unprepare(buf_mgr, info->handle, info->buf_idx);
+
+ buf_info->state = MSM_ISP_BUFFER_STATE_INITIALIZED;
+
+ return rc;
+}
+
+static int msm_isp_get_bufq_handle(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t vfe_id, uint32_t output_id)
+{
+ int i;
+
+ for (i = 0; i < buf_mgr->num_buf_q; i++) {
+ if ((buf_mgr->bufq[i].vfe_id == vfe_id) &&
+ (buf_mgr->bufq[i].output_id == output_id)) {
+ return buf_mgr->bufq[i].bufq_handle;
+ }
+ }
+ pr_err("%s: No match found 0x%x 0x%x\n", __func__,
+ vfe_id, output_id);
+ return 0;
+}
+
+static int msm_isp_get_buf_src(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t *buf_src)
+{
+ struct msm_isp_bufq *bufq = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n",
+ __func__);
+ return -EINVAL;
+ }
+ *buf_src = BUF_SRC(bufq->flags);
+
+ return 0;
+}
+
+static int msm_isp_request_bufq(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buf_request *buf_request)
+{
+ int i;
+ struct msm_isp_bufq *bufq = NULL;
+
+ CDBG("%s: E\n", __func__);
+
+ if (!buf_request->num_buf || buf_request->num_buf > VB2_MAX_FRAME) {
+ pr_err("Invalid buffer request\n");
+ return -EINVAL;
+ }
+
+ buf_request->handle = msm_isp_get_buf_handle(buf_mgr,
+ buf_request->vfe_id, buf_request->output_id);
+ if (!buf_request->handle) {
+ pr_err("%s: Invalid buffer handle\n", __func__);
+ return -EINVAL;
+ }
+
+ bufq = msm_isp_get_bufq(buf_mgr, buf_request->handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq output_id %x\n",
+ __func__, buf_request->output_id);
+
+ return -EINVAL;
+ }
+
+ bufq->bufs = kzalloc(sizeof(struct msm_isp_buffer) *
+ buf_request->num_buf, GFP_KERNEL);
+ if (!bufq->bufs) {
+ msm_isp_free_bufq_handle(buf_mgr, buf_request->handle);
+ return -ENOMEM;
+ }
+ spin_lock_init(&bufq->bufq_lock);
+ bufq->bufq_handle = buf_request->handle;
+ bufq->vfe_id = buf_request->vfe_id;
+ bufq->flags = buf_request->flags;
+ bufq->output_id = buf_request->output_id;
+ bufq->num_bufs = buf_request->num_buf;
+ bufq->buf_type = buf_request->buf_type;
+ for (i = 0; i < ISP_NUM_BUF_MASK; i++)
+ bufq->put_buf_mask[i] = 0;
+ INIT_LIST_HEAD(&bufq->head);
+
+ for (i = 0; i < buf_request->num_buf; i++) {
+ bufq->bufs[i].state = MSM_ISP_BUFFER_STATE_INITIALIZED;
+ bufq->bufs[i].buf_debug.put_state[0] =
+ MSM_ISP_BUFFER_STATE_PUT_PREPARED;
+ bufq->bufs[i].buf_debug.put_state[1] =
+ MSM_ISP_BUFFER_STATE_PUT_PREPARED;
+ bufq->bufs[i].buf_debug.put_state_last = 0;
+ bufq->bufs[i].bufq_handle = bufq->bufq_handle;
+ bufq->bufs[i].buf_idx = i;
+ INIT_LIST_HEAD(&bufq->bufs[i].list);
+ }
+
+ return 0;
+}
+
+static int msm_isp_release_bufq(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle)
+{
+ struct msm_isp_bufq *bufq = NULL;
+ unsigned long flags;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq release\n");
+ return -EINVAL;
+ }
+
+ msm_isp_buf_unprepare_all(buf_mgr, bufq_handle);
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ kfree(bufq->bufs);
+ msm_isp_free_bufq_handle(buf_mgr, bufq_handle);
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+
+ return 0;
+}
+
+static void msm_isp_release_all_bufq(
+ struct msm_isp_buf_mgr *buf_mgr)
+{
+ struct msm_isp_bufq *bufq = NULL;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < buf_mgr->num_buf_q; i++) {
+ bufq = &buf_mgr->bufq[i];
+ if (!bufq->bufq_handle)
+ continue;
+
+ msm_isp_buf_unprepare_all(buf_mgr, bufq->bufq_handle);
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ kfree(bufq->bufs);
+ msm_isp_free_bufq_handle(buf_mgr, bufq->bufq_handle);
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ }
+}
+
+
+/**
+ * msm_isp_buf_put_scratch() - Release scratch buffers
+ * @buf_mgr: The buffer structure for h/w
+ *
+ * Returns 0 on success else error code
+ */
+static int msm_isp_buf_put_scratch(struct msm_isp_buf_mgr *buf_mgr)
+{
+ int rc;
+
+ if (!buf_mgr->scratch_buf_addr)
+ return 0;
+
+ rc = cam_smmu_put_phy_addr_scratch(buf_mgr->iommu_hdl,
+ buf_mgr->scratch_buf_addr);
+ if (rc)
+ pr_err("%s: failed to put scratch buffer to img iommu: %d\n",
+ __func__, rc);
+
+ if (!rc)
+ buf_mgr->scratch_buf_addr = 0;
+
+ return rc;
+}
+
+/**
+ * msm_isp_buf_get_scratch() - Create scratch buffers
+ * @buf_mgr: The buffer structure for h/w
+ *
+ * Create and map scratch buffers for all IOMMU's under the buffer
+ * manager.
+ *
+ * Returns 0 on success else error code
+ */
+static int msm_isp_buf_get_scratch(struct msm_isp_buf_mgr *buf_mgr)
+{
+ int rc;
+
+ if (buf_mgr->scratch_buf_addr || !buf_mgr->scratch_buf_range)
+ /* already mapped or not supported */
+ return 0;
+
+ rc = cam_smmu_get_phy_addr_scratch(
+ buf_mgr->iommu_hdl,
+ CAM_SMMU_MAP_RW,
+ &buf_mgr->scratch_buf_addr,
+ buf_mgr->scratch_buf_range,
+ SZ_4K);
+ if (rc) {
+ pr_err("%s: failed to map scratch buffer to img iommu: %d\n",
+ __func__, rc);
+ return rc;
+ }
+ return rc;
+}
+
+int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
+ void *arg)
+{
+ struct msm_vfe_smmu_attach_cmd *cmd = arg;
+ int rc = 0;
+
+ pr_debug("%s: cmd->security_mode : %d\n", __func__, cmd->security_mode);
+ mutex_lock(&buf_mgr->lock);
+ if (cmd->iommu_attach_mode == IOMMU_ATTACH) {
+ buf_mgr->secure_enable = cmd->security_mode;
+
+		/*
+		 * Notify the hypervisor of secure or non-secure mode
+		 * through an SCM call
+		 */
+ if (buf_mgr->attach_ref_cnt == 0) {
+ rc = cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_ATTACH);
+ if (rc < 0) {
+ pr_err("%s: img smmu attach error, rc :%d\n",
+ __func__, rc);
+ goto err1;
+ }
+ }
+ buf_mgr->attach_ref_cnt++;
+ rc = msm_isp_buf_get_scratch(buf_mgr);
+ if (rc)
+ goto err2;
+ } else {
+ if (buf_mgr->attach_ref_cnt > 0)
+ buf_mgr->attach_ref_cnt--;
+ else
+ pr_err("%s: Error! Invalid ref_cnt %d\n",
+ __func__, buf_mgr->attach_ref_cnt);
+
+ if (buf_mgr->attach_ref_cnt == 0) {
+ rc = msm_isp_buf_put_scratch(buf_mgr);
+ rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_DETACH);
+ if (rc < 0) {
+ pr_err("%s: img/stats smmu detach error, rc :%d\n",
+ __func__, rc);
+ }
+ }
+ }
+
+ mutex_unlock(&buf_mgr->lock);
+ return rc;
+
+err2:
+ if (cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH))
+ pr_err("%s: img smmu detach error\n", __func__);
+err1:
+ mutex_unlock(&buf_mgr->lock);
+ return rc;
+}
+
+
+static int msm_isp_init_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
+ const char *ctx_name)
+{
+ int rc = -1;
+ int i = 0;
+
+ mutex_lock(&buf_mgr->lock);
+ if (buf_mgr->open_count++) {
+ mutex_unlock(&buf_mgr->lock);
+ return 0;
+ }
+
+ CDBG("%s: E\n", __func__);
+ buf_mgr->attach_ref_cnt = 0;
+
+ buf_mgr->num_buf_q = BUF_MGR_NUM_BUF_Q;
+ memset(buf_mgr->bufq, 0, sizeof(buf_mgr->bufq));
+
+ rc = cam_smmu_get_handle("vfe", &buf_mgr->iommu_hdl);
+ if (rc < 0) {
+ pr_err("vfe get handle failed\n");
+ goto get_handle_error;
+ }
+
+ for (i = 0; i < BUF_MGR_NUM_BUF_Q; i++)
+ spin_lock_init(&buf_mgr->bufq[i].bufq_lock);
+
+ buf_mgr->pagefault_debug_disable = 0;
+ buf_mgr->frameId_mismatch_recovery = 0;
+ mutex_unlock(&buf_mgr->lock);
+ return 0;
+
+get_handle_error:
+ mutex_unlock(&buf_mgr->lock);
+ return rc;
+}
+
+static int msm_isp_deinit_isp_buf_mgr(
+ struct msm_isp_buf_mgr *buf_mgr)
+{
+ mutex_lock(&buf_mgr->lock);
+ if (buf_mgr->open_count > 0)
+ buf_mgr->open_count--;
+
+ if (buf_mgr->open_count) {
+ mutex_unlock(&buf_mgr->lock);
+ return 0;
+ }
+ msm_isp_release_all_bufq(buf_mgr);
+ buf_mgr->num_buf_q = 0;
+ buf_mgr->pagefault_debug_disable = 0;
+
+ msm_isp_buf_put_scratch(buf_mgr);
+ cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH);
+ cam_smmu_destroy_handle(buf_mgr->iommu_hdl);
+
+ buf_mgr->attach_ref_cnt = 0;
+ mutex_unlock(&buf_mgr->lock);
+ return 0;
+}
+
+int msm_isp_proc_buf_cmd(struct msm_isp_buf_mgr *buf_mgr,
+ unsigned int cmd, void *arg)
+{
+ int rc = -EINVAL;
+
+ switch (cmd) {
+ case VIDIOC_MSM_ISP_REQUEST_BUFQ: {
+ struct msm_isp_buf_request *buf_req = arg;
+
+ rc = buf_mgr->ops->request_bufq(buf_mgr, buf_req);
+ break;
+ }
+ case VIDIOC_MSM_ISP_RELEASE_BUFQ: {
+ struct msm_isp_buf_request *buf_req = arg;
+
+ rc = buf_mgr->ops->release_bufq(buf_mgr, buf_req->handle);
+ break;
+ }
+ case VIDIOC_MSM_ISP_ENQUEUE_BUF: {
+ struct msm_isp_qbuf_info *qbuf_info = arg;
+
+ rc = buf_mgr->ops->enqueue_buf(buf_mgr, qbuf_info);
+ break;
+ }
+ case VIDIOC_MSM_ISP_DEQUEUE_BUF: {
+ struct msm_isp_qbuf_info *qbuf_info = arg;
+
+ rc = buf_mgr->ops->dequeue_buf(buf_mgr, qbuf_info);
+ break;
+ }
+ case VIDIOC_MSM_ISP_UNMAP_BUF: {
+ struct msm_isp_unmap_buf_req *unmap_req = arg;
+
+ rc = buf_mgr->ops->unmap_buf(buf_mgr, unmap_req->fd);
+ break;
+ }
+ }
+ return rc;
+}
+
+static int msm_isp_buf_mgr_debug(struct msm_isp_buf_mgr *buf_mgr,
+ unsigned long fault_addr)
+{
+ struct msm_isp_buffer *bufs = NULL;
+ uint32_t i = 0, j = 0, k = 0, rc = 0;
+ char *print_buf = NULL, temp_buf[100];
+ uint32_t start_addr = 0, end_addr = 0, print_buf_size = 2000;
+ int buf_addr_delta = -1;
+ int temp_delta = 0;
+ uint32_t debug_output_id = 0;
+ uint32_t debug_buf_idx = 0;
+ uint32_t debug_buf_plane = 0;
+ uint32_t debug_start_addr = 0;
+ uint32_t debug_end_addr = 0;
+ uint32_t debug_frame_id = 0;
+ enum msm_isp_buffer_state debug_state = MSM_ISP_BUFFER_STATE_UNUSED;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+
+ if (!buf_mgr) {
+ pr_err_ratelimited("%s: %d] NULL buf_mgr\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
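+	/*
+	 * Walk every mapped plane and remember the one whose start address
+	 * lies closest below the faulting address; that buffer is the most
+	 * likely culprit and is reported below.
+	 */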
+ for (i = 0; i < BUF_MGR_NUM_BUF_Q; i++) {
+ bufq = &buf_mgr->bufq[i];
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ if (!bufq->bufq_handle) {
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ continue;
+ }
+
+ for (j = 0; j < bufq->num_bufs; j++) {
+ bufs = &bufq->bufs[j];
+ if (!bufs)
+ continue;
+
+ for (k = 0; k < bufs->num_planes; k++) {
+ start_addr = bufs->
+ mapped_info[k].paddr;
+ end_addr = bufs->mapped_info[k].paddr +
+ bufs->mapped_info[k].len - 1;
+ temp_delta = fault_addr - start_addr;
+ if (temp_delta < 0)
+ continue;
+
+ if (buf_addr_delta == -1 ||
+ temp_delta < buf_addr_delta) {
+ buf_addr_delta = temp_delta;
+ debug_output_id = bufq->output_id;
+ debug_buf_idx = j;
+ debug_buf_plane = k;
+ debug_start_addr = start_addr;
+ debug_end_addr = end_addr;
+ debug_frame_id = bufs->frame_id;
+ debug_state = bufs->state;
+ }
+ }
+ }
+ start_addr = 0;
+ end_addr = 0;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ }
+
+ pr_err("%s: ==== SMMU page fault addr %lx ====\n", __func__,
+ fault_addr);
+ pr_err("%s: nearby output id %x, frame_id %d\n", __func__,
+ debug_output_id, debug_frame_id);
+ pr_err("%s: nearby buf index %d, plane %d, state %d\n", __func__,
+ debug_buf_idx, debug_buf_plane, debug_state);
+ pr_err("%s: buf address 0x%x -- 0x%x\n", __func__,
+ debug_start_addr, debug_end_addr);
+
+ if (BUF_DEBUG_FULL) {
+ print_buf = kzalloc(print_buf_size, GFP_ATOMIC);
+ if (!print_buf) {
+ pr_err("%s failed: No memory", __func__);
+ return -ENOMEM;
+ }
+ snprintf(print_buf, print_buf_size, "%s\n", __func__);
+ for (i = 0; i < BUF_MGR_NUM_BUF_Q; i++) {
+ if (i % 2 == 0 && i > 0) {
+ pr_err("%s\n", print_buf);
+ print_buf[0] = 0;
+ }
+ if (buf_mgr->bufq[i].bufq_handle != 0) {
+ snprintf(temp_buf, sizeof(temp_buf),
+ "handle %x output %x num_bufs %d\n",
+ buf_mgr->bufq[i].bufq_handle,
+ buf_mgr->bufq[i].output_id,
+ buf_mgr->bufq[i].num_bufs);
+ strlcat(print_buf, temp_buf, print_buf_size);
+ for (j = 0; j < buf_mgr->bufq[i].num_bufs;
+ j++) {
+ bufs = &buf_mgr->bufq[i].bufs[j];
+ if (!bufs)
+ break;
+
+ for (k = 0; k < bufs->num_planes; k++) {
+ start_addr = bufs->
+ mapped_info[k].paddr;
+ end_addr = bufs->mapped_info[k].
+ paddr + bufs->
+ mapped_info[k].len;
+ snprintf(temp_buf,
+ sizeof(temp_buf),
+ " buf %d plane %d start_addr %x end_addr %x\n",
+ j, k, start_addr,
+ end_addr);
+ strlcat(print_buf, temp_buf,
+ print_buf_size);
+ }
+ }
+ start_addr = 0;
+ end_addr = 0;
+ }
+ }
+ pr_err("%s\n", print_buf);
+ kfree(print_buf);
+ }
+ return rc;
+}
+
+static struct msm_isp_buf_ops isp_buf_ops = {
+ .request_bufq = msm_isp_request_bufq,
+ .release_bufq = msm_isp_release_bufq,
+ .enqueue_buf = msm_isp_buf_enqueue,
+ .dequeue_buf = msm_isp_buf_dequeue,
+ .get_bufq_handle = msm_isp_get_bufq_handle,
+ .get_buf_src = msm_isp_get_buf_src,
+ .get_buf = msm_isp_get_buf,
+ .get_buf_by_index = msm_isp_get_buf_by_index,
+ .map_buf = msm_isp_map_buf,
+ .unmap_buf = msm_isp_unmap_buf,
+ .put_buf = msm_isp_put_buf,
+ .flush_buf = msm_isp_flush_buf,
+ .buf_done = msm_isp_buf_done,
+ .buf_mgr_init = msm_isp_init_isp_buf_mgr,
+ .buf_mgr_deinit = msm_isp_deinit_isp_buf_mgr,
+ .buf_mgr_debug = msm_isp_buf_mgr_debug,
+ .get_bufq = msm_isp_get_bufq,
+ .update_put_buf_cnt = msm_isp_update_put_buf_cnt,
+};
+
+int msm_isp_create_isp_buf_mgr(
+ struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_sd_req_vb2_q *vb2_ops,
+ struct device *dev,
+ uint32_t scratch_buf_range)
+{
+ int rc = 0;
+
+ if (buf_mgr->init_done)
+ return rc;
+
+ buf_mgr->ops = &isp_buf_ops;
+ buf_mgr->vb2_ops = vb2_ops;
+ buf_mgr->open_count = 0;
+ buf_mgr->pagefault_debug_disable = 0;
+ buf_mgr->secure_enable = NON_SECURE_MODE;
+ buf_mgr->attach_state = MSM_ISP_BUF_MGR_DETACH;
+ buf_mgr->scratch_buf_range = scratch_buf_range;
+ mutex_init(&buf_mgr->lock);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/ais/isp/msm_buf_mgr.h b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.h
new file mode 100644
index 000000000000..4794771d3213
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.h
@@ -0,0 +1,230 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_ISP_BUF_H_
+#define _MSM_ISP_BUF_H_
+
+#include <media/ais/msm_ais_isp.h>
+#include "msm_sd.h"
+
+/* Buffer source could be userspace / HAL.
+ * Userspace could provide a native or scratch buffer.
+ */
+#define BUF_SRC(id) ( \
+ (id & ISP_SCRATCH_BUF_BIT) ? MSM_ISP_BUFFER_SRC_SCRATCH : \
+ (id & ISP_NATIVE_BUF_BIT) ? MSM_ISP_BUFFER_SRC_NATIVE : \
+ MSM_ISP_BUFFER_SRC_HAL)
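+/*
+ * A bufq whose flags have ISP_NATIVE_BUF_BIT set is managed entirely
+ * through the kernel-side list in msm_isp_bufq, while a plain HAL bufq
+ * goes through the vb2 ops registered with the buffer manager.
+ */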
+
+/*
+ * This mask can be set dynamically if there are more than 2 VFE
+ * and 2 of those are used
+ */
+#define ISP_SHARE_BUF_MASK 0x3
+#define ISP_NUM_BUF_MASK 2
+#define BUF_MGR_NUM_BUF_Q 28
+#define MAX_IOMMU_CTX 2
+
+#define MSM_ISP_INVALID_BUF_INDEX 0xFFFFFFFF
+
+struct msm_isp_buf_mgr;
+
+enum msm_isp_buffer_src_t {
+ MSM_ISP_BUFFER_SRC_HAL,
+ MSM_ISP_BUFFER_SRC_NATIVE,
+ MSM_ISP_BUFFER_SRC_SCRATCH,
+ MSM_ISP_BUFFER_SRC_MAX,
+};
+
+enum msm_isp_buffer_state {
+ MSM_ISP_BUFFER_STATE_UNUSED, /* not used */
+ MSM_ISP_BUFFER_STATE_INITIALIZED, /* REQBUF done */
+ MSM_ISP_BUFFER_STATE_PREPARED, /* BUF mapped */
+ MSM_ISP_BUFFER_STATE_QUEUED, /* buf queued */
+ MSM_ISP_BUFFER_STATE_DEQUEUED, /* in use in VFE */
+	MSM_ISP_BUFFER_STATE_DIVERTED, /* Sent to other hardware */
+	MSM_ISP_BUFFER_STATE_DISPATCHED, /* Sent to HAL */
+};
+
+enum msm_isp_buffer_put_state {
+ MSM_ISP_BUFFER_STATE_PUT_PREPARED, /* on init */
+ MSM_ISP_BUFFER_STATE_PUT_BUF, /* on rotation */
+ MSM_ISP_BUFFER_STATE_FLUSH, /* on recovery */
+ MSM_ISP_BUFFER_STATE_DROP_REG, /* on drop frame for reg_update */
+ MSM_ISP_BUFFER_STATE_DROP_SKIP, /* on drop frame for sw skip */
+ MSM_ISP_BUFFER_STATE_RETURN_EMPTY, /* for return empty */
+};
+
+enum msm_isp_buffer_flush_t {
+ MSM_ISP_BUFFER_FLUSH_DIVERTED,
+ MSM_ISP_BUFFER_FLUSH_ALL,
+};
+
+enum msm_isp_buf_mgr_state {
+ MSM_ISP_BUF_MGR_ATTACH,
+ MSM_ISP_BUF_MGR_DETACH,
+};
+
+struct msm_isp_buffer_mapped_info {
+ size_t len;
+ dma_addr_t paddr;
+ int buf_fd;
+};
+
+struct buffer_cmd {
+ struct list_head list;
+ struct msm_isp_buffer_mapped_info *mapped_info;
+};
+
+struct msm_isp_buffer_debug_t {
+ enum msm_isp_buffer_put_state put_state[2];
+ uint8_t put_state_last;
+};
+
+struct msm_isp_buffer {
+ /*Common Data structure*/
+ int num_planes;
+ struct msm_isp_buffer_mapped_info mapped_info[VIDEO_MAX_PLANES];
+ int buf_idx;
+ uint32_t bufq_handle;
+ uint32_t frame_id;
+ struct timeval *tv;
+	/* Indicates whether the buffer is used as the ping or pong buffer */
+ uint32_t pingpong_bit;
+
+ /*Native buffer*/
+ struct list_head list;
+ enum msm_isp_buffer_state state;
+
+ struct msm_isp_buffer_debug_t buf_debug;
+
+ /*Vb2 buffer data*/
+ struct vb2_v4l2_buffer *vb2_v4l2_buf;
+};
+
+struct msm_isp_bufq {
+ uint32_t vfe_id;
+ enum msm_vfe_axi_stream_src output_id;
+ uint32_t flags;
+ uint32_t num_bufs;
+ uint32_t bufq_handle;
+ enum msm_isp_buf_type buf_type;
+ struct msm_isp_buffer *bufs;
+ spinlock_t bufq_lock;
+ uint8_t put_buf_mask[ISP_NUM_BUF_MASK];
+ /*Native buffer queue*/
+ struct list_head head;
+ /*deprecated params*/
+ uint32_t session_id;
+ uint32_t stream_id;
+};
+
+struct msm_isp_buf_ops {
+ int (*request_bufq)(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buf_request *buf_request);
+
+ int (*release_bufq)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle);
+
+ int (*enqueue_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_qbuf_info *info);
+
+ int (*dequeue_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_qbuf_info *info);
+
+ int (*get_bufq_handle)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t vfe_id, uint32_t output_id);
+
+ int (*get_buf_src)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t *buf_src);
+
+ int (*get_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct msm_isp_buffer **buf_info);
+
+ int (*get_buf_by_index)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct msm_isp_buffer **buf_info);
+
+ int (*map_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buffer_mapped_info *mapped_info, uint32_t fd);
+
+ int (*unmap_buf)(struct msm_isp_buf_mgr *buf_mgr, uint32_t fd);
+
+ int (*put_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index);
+
+ int (*flush_buf)(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
+ uint32_t bufq_handle, enum msm_isp_buffer_flush_t flush_type,
+ struct timeval *tv, uint32_t frame_id);
+
+ int (*buf_done)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id, uint32_t output_format);
+ void (*register_ctx)(struct msm_isp_buf_mgr *buf_mgr,
+ struct device **iommu_ctx1, struct device **iommu_ctx2,
+ int num_iommu_ctx1, int num_iommu_ctx2);
+ int (*buf_mgr_init)(struct msm_isp_buf_mgr *buf_mgr,
+ const char *ctx_name);
+ int (*buf_mgr_deinit)(struct msm_isp_buf_mgr *buf_mgr);
+ int (*buf_mgr_debug)(struct msm_isp_buf_mgr *buf_mgr,
+ unsigned long fault_addr);
+ struct msm_isp_bufq * (*get_bufq)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle);
+ int (*update_put_buf_cnt)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t id, uint32_t bufq_handle, int32_t buf_index,
+ struct timeval *tv, uint32_t frame_id, uint32_t pingpong_bit);
+};
+
+struct msm_isp_buf_mgr {
+ int init_done;
+ uint32_t open_count;
+ uint32_t pagefault_debug_disable;
+ uint32_t frameId_mismatch_recovery;
+ uint16_t num_buf_q;
+ struct msm_isp_bufq bufq[BUF_MGR_NUM_BUF_Q];
+
+ struct ion_client *client;
+ struct msm_isp_buf_ops *ops;
+
+ struct msm_sd_req_vb2_q *vb2_ops;
+
+ /*IOMMU driver*/
+ int iommu_hdl;
+
+ /*Add secure mode*/
+ int secure_enable;
+
+ int num_iommu_ctx;
+ int num_iommu_secure_ctx;
+ int attach_ref_cnt;
+ enum msm_isp_buf_mgr_state attach_state;
+ struct device *isp_dev;
+ struct mutex lock;
+ /* Scratch buffer */
+ dma_addr_t scratch_buf_addr;
+ uint32_t scratch_buf_range;
+};
+
+int msm_isp_create_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_sd_req_vb2_q *vb2_ops, struct device *dev,
+ uint32_t scratch_addr_range);
+
+int msm_isp_proc_buf_cmd(struct msm_isp_buf_mgr *buf_mgr,
+ unsigned int cmd, void *arg);
+
+int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
+ void *arg);
+
+int msm_isp_flush_queue(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle);
+
+#endif /* _MSM_ISP_BUF_H_ */
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.c b/drivers/media/platform/msm/ais/isp/msm_isp.c
new file mode 100644
index 000000000000..97c0f779cf73
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_isp.c
@@ -0,0 +1,658 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/of_device.h>
+#include <linux/sched_clock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+
+#include "msm_isp.h"
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+#include "msm_sd.h"
+
+static struct msm_sd_req_vb2_q vfe_vb2_ops;
+static struct msm_isp_buf_mgr vfe_buf_mgr;
+static struct msm_vfe_common_dev_data vfe_common_data;
+static struct dual_vfe_resource dualvfe;
+
+static const struct of_device_id msm_vfe_dt_match[] = {
+ {
+ .compatible = "qcom,vfe",
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe_dt_match);
+
+#define MAX_OVERFLOW_COUNTERS 29
+#define OVERFLOW_LENGTH 1024
+#define OVERFLOW_BUFFER_LENGTH 64
+static char stat_line[OVERFLOW_LENGTH];
+
+struct msm_isp_statistics stats;
+struct msm_isp_ub_info ub_info;
+
+static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
+ struct msm_isp_bw_req_info *isp_req_hist);
+
+static char *stats_str[MAX_OVERFLOW_COUNTERS] = {
+ "imgmaster0_overflow_cnt",
+ "imgmaster1_overflow_cnt",
+ "imgmaster2_overflow_cnt",
+ "imgmaster3_overflow_cnt",
+ "imgmaster4_overflow_cnt",
+ "imgmaster5_overflow_cnt",
+ "imgmaster6_overflow_cnt",
+ "be_overflow_cnt",
+ "bg_overflow_cnt",
+ "bf_overflow_cnt",
+ "awb_overflow_cnt",
+ "rs_overflow_cnt",
+ "cs_overflow_cnt",
+ "ihist_overflow_cnt",
+ "skinbhist_overflow_cnt",
+ "bfscale_overflow_cnt",
+ "ISP_VFE0_client_info.active",
+ "ISP_VFE0_client_info.ab",
+ "ISP_VFE0_client_info.ib",
+ "ISP_VFE1_client_info.active",
+ "ISP_VFE1_client_info.ab",
+ "ISP_VFE1_client_info.ib",
+ "ISP_CPP_client_info.active",
+ "ISP_CPP_client_info.ab",
+ "ISP_CPP_client_info.ib",
+ "ISP_last_overflow.ab",
+ "ISP_last_overflow.ib",
+ "ISP_VFE_CLK_RATE",
+ "ISP_CPP_CLK_RATE",
+};
+
+#define MAX_DEPTH_BW_REQ_HISTORY 25
+#define MAX_BW_HISTORY_BUFF_LEN 6144
+#define MAX_BW_HISTORY_LINE_BUFF_LEN 512
+
+#define MAX_UB_INFO_BUFF_LEN 1024
+#define MAX_UB_INFO_LINE_BUFF_LEN 256
+
+static struct msm_isp_bw_req_info
+ msm_isp_bw_request_history[MAX_DEPTH_BW_REQ_HISTORY];
+static int msm_isp_bw_request_history_idx;
+static char bw_request_history_buff[MAX_BW_HISTORY_BUFF_LEN];
+static char ub_info_buffer[MAX_UB_INFO_BUFF_LEN];
+static spinlock_t req_history_lock;
+
+static int vfe_debugfs_statistics_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t vfe_debugfs_statistics_read(struct file *t_file, char *t_char,
+ size_t t_size_t, loff_t *t_loff_t)
+{
+ int i;
+ uint64_t *ptr;
+ char buffer[OVERFLOW_BUFFER_LENGTH] = {0};
+ struct vfe_device *vfe_dev = (struct vfe_device *)
+ t_file->private_data;
+ struct msm_isp_statistics *stats = vfe_dev->stats;
+
+ memset(stat_line, 0, sizeof(stat_line));
+ msm_isp_util_get_bandwidth_stats(vfe_dev, stats);
+ ptr = (uint64_t *)(stats);
+ for (i = 0; i < MAX_OVERFLOW_COUNTERS; i++) {
+ strlcat(stat_line, stats_str[i], sizeof(stat_line));
+ strlcat(stat_line, " ", sizeof(stat_line));
+ snprintf(buffer, sizeof(buffer), "%llu", ptr[i]);
+ strlcat(stat_line, buffer, sizeof(stat_line));
+ strlcat(stat_line, "\r\n", sizeof(stat_line));
+ }
+ return simple_read_from_buffer(t_char, t_size_t,
+ t_loff_t, stat_line, strlen(stat_line));
+}
+
+static ssize_t vfe_debugfs_statistics_write(struct file *t_file,
+ const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ struct vfe_device *vfe_dev = (struct vfe_device *)
+ t_file->private_data;
+ struct msm_isp_statistics *stats = vfe_dev->stats;
+
+ memset(stats, 0, sizeof(struct msm_isp_statistics));
+
+ return sizeof(struct msm_isp_statistics);
+}
+
+static int bw_history_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t bw_history_read(struct file *t_file, char *t_char,
+ size_t t_size_t, loff_t *t_loff_t)
+{
+ int i;
+ char *out_buffer = bw_request_history_buff;
+ char line_buffer[MAX_BW_HISTORY_LINE_BUFF_LEN] = {0};
+ struct msm_isp_bw_req_info *isp_req_hist =
+ (struct msm_isp_bw_req_info *) t_file->private_data;
+
+ memset(out_buffer, 0, MAX_BW_HISTORY_BUFF_LEN);
+
+ snprintf(line_buffer, sizeof(line_buffer),
+ "Bus bandwidth request history in chronological order:\n");
+ strlcat(out_buffer, line_buffer, sizeof(bw_request_history_buff));
+
+ snprintf(line_buffer, sizeof(line_buffer),
+ "MSM_ISP_MIN_AB = %u, MSM_ISP_MIN_IB = %u\n\n",
+ MSM_ISP_MIN_AB, MSM_ISP_MIN_IB);
+ strlcat(out_buffer, line_buffer, sizeof(bw_request_history_buff));
+
+ for (i = 0; i < MAX_DEPTH_BW_REQ_HISTORY; i++) {
+ snprintf(line_buffer, sizeof(line_buffer),
+ "idx = %d, client = %u, timestamp = %llu, ab = %llu, ib = %llu\n"
+ "ISP0.active = %x, ISP0.ab = %llu, ISP0.ib = %llu\n"
+ "ISP1.active = %x, ISP1.ab = %llu, ISP1.ib = %llu\n"
+ "CPP.active = %x, CPP.ab = %llu, CPP.ib = %llu\n\n",
+ i, isp_req_hist[i].client, isp_req_hist[i].timestamp,
+ isp_req_hist[i].total_ab, isp_req_hist[i].total_ib,
+ isp_req_hist[i].client_info[0].active,
+ isp_req_hist[i].client_info[0].ab,
+ isp_req_hist[i].client_info[0].ib,
+ isp_req_hist[i].client_info[1].active,
+ isp_req_hist[i].client_info[1].ab,
+ isp_req_hist[i].client_info[1].ib,
+ isp_req_hist[i].client_info[2].active,
+ isp_req_hist[i].client_info[2].ab,
+ isp_req_hist[i].client_info[2].ib);
+ strlcat(out_buffer, line_buffer,
+ sizeof(bw_request_history_buff));
+ }
+ return simple_read_from_buffer(t_char, t_size_t,
+ t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t bw_history_write(struct file *t_file,
+ const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ struct msm_isp_bw_req_info *isp_req_hist =
+ (struct msm_isp_bw_req_info *) t_file->private_data;
+
+ memset(isp_req_hist, 0, sizeof(msm_isp_bw_request_history));
+ msm_isp_bw_request_history_idx = 0;
+ return sizeof(msm_isp_bw_request_history);
+}
+
+static int ub_info_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t ub_info_read(struct file *t_file, char *t_char,
+ size_t t_size_t, loff_t *t_loff_t)
+{
+ int i;
+ char *out_buffer = ub_info_buffer;
+ char line_buffer[MAX_UB_INFO_LINE_BUFF_LEN] = {0};
+ struct vfe_device *vfe_dev =
+ (struct vfe_device *) t_file->private_data;
+ struct msm_isp_ub_info *ub_info = vfe_dev->ub_info;
+
+	memset(out_buffer, 0, MAX_UB_INFO_BUFF_LEN);
+ snprintf(line_buffer, sizeof(line_buffer),
+ "wm_ub_policy_type = %d\n"
+ "num_wm = %d\n"
+ "wm_ub = %d\n",
+ ub_info->policy, ub_info->num_wm, ub_info->wm_ub);
+ strlcat(out_buffer, line_buffer,
+ sizeof(ub_info_buffer));
+ for (i = 0; i < ub_info->num_wm; i++) {
+ snprintf(line_buffer, sizeof(line_buffer),
+ "data[%d] = 0x%x, addr[%d] = 0x%llx\n",
+ i, ub_info->data[i], i, ub_info->addr[i]);
+ strlcat(out_buffer, line_buffer,
+ sizeof(ub_info_buffer));
+ }
+
+ return simple_read_from_buffer(t_char, t_size_t,
+ t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t ub_info_write(struct file *t_file,
+ const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ struct vfe_device *vfe_dev =
+ (struct vfe_device *) t_file->private_data;
+ struct msm_isp_ub_info *ub_info = vfe_dev->ub_info;
+
+ memset(ub_info, 0, sizeof(struct msm_isp_ub_info));
+
+ return sizeof(struct msm_isp_ub_info);
+}
+
+static const struct file_operations vfe_debugfs_error = {
+ .open = vfe_debugfs_statistics_open,
+ .read = vfe_debugfs_statistics_read,
+ .write = vfe_debugfs_statistics_write,
+};
+
+static const struct file_operations bw_history_ops = {
+ .open = bw_history_open,
+ .read = bw_history_read,
+ .write = bw_history_write,
+};
+
+static const struct file_operations ub_info_ops = {
+ .open = ub_info_open,
+ .read = ub_info_read,
+ .write = ub_info_write,
+};
+
+static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
+ struct msm_isp_bw_req_info *isp_req_hist)
+{
+ struct dentry *debugfs_base;
+ char dirname[32] = {0};
+
+ snprintf(dirname, sizeof(dirname), "msm_isp%d", vfe_dev->pdev->id);
+ debugfs_base = debugfs_create_dir(dirname, NULL);
+ if (!debugfs_base)
+ return -ENOMEM;
+ if (!debugfs_create_file("stats", S_IRUGO | S_IWUSR, debugfs_base,
+ vfe_dev, &vfe_debugfs_error))
+ return -ENOMEM;
+
+ if (!debugfs_create_file("bw_req_history", S_IRUGO | S_IWUSR,
+ debugfs_base, isp_req_hist, &bw_history_ops))
+ return -ENOMEM;
+
+ if (!debugfs_create_file("ub_info", S_IRUGO | S_IWUSR,
+ debugfs_base, vfe_dev, &ub_info_ops))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void msm_isp_update_req_history(uint32_t client, uint64_t ab,
+ uint64_t ib,
+ struct msm_isp_bandwidth_info *client_info,
+ unsigned long long ts)
+{
+ int i;
+
+ spin_lock(&req_history_lock);
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].client =
+ client;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].timestamp =
+ ts;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ab =
+ ab;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ib =
+ ib;
+
+ for (i = 0; i < MAX_ISP_CLIENT; i++) {
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+ client_info[i].active = client_info[i].active;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+ client_info[i].ab = client_info[i].ab;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+ client_info[i].ib = client_info[i].ib;
+ }
+
+ msm_isp_bw_request_history_idx = (msm_isp_bw_request_history_idx + 1)
+ % MAX_DEPTH_BW_REQ_HISTORY;
+ spin_unlock(&req_history_lock);
+}
+
+void msm_isp_update_last_overflow_ab_ib(struct vfe_device *vfe_dev)
+{
+ spin_lock(&req_history_lock);
+ vfe_dev->msm_isp_last_overflow_ab =
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ab;
+ vfe_dev->msm_isp_last_overflow_ib =
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ib;
+ spin_unlock(&req_history_lock);
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
+{
+ long rc;
+
+ if (is_compat_task()) {
+ struct msm_isp_event_data32 *event_data32;
+ struct msm_isp_event_data *event_data;
+ struct v4l2_event isp_event;
+ struct v4l2_event *isp_event_user;
+
+ memset(&isp_event, 0, sizeof(isp_event));
+ rc = v4l2_event_dequeue(vfh, &isp_event,
+ file->f_flags & O_NONBLOCK);
+ if (rc)
+ return rc;
+ event_data = (struct msm_isp_event_data *)
+ isp_event.u.data;
+ isp_event_user = (struct v4l2_event *)arg;
+ memcpy(isp_event_user, &isp_event,
+ sizeof(*isp_event_user));
+ event_data32 = (struct msm_isp_event_data32 *)
+ isp_event_user->u.data;
+ memset(event_data32, 0,
+ sizeof(struct msm_isp_event_data32));
+ event_data32->timestamp.tv_sec =
+ event_data->timestamp.tv_sec;
+ event_data32->timestamp.tv_usec =
+ event_data->timestamp.tv_usec;
+ event_data32->mono_timestamp.tv_sec =
+ event_data->mono_timestamp.tv_sec;
+ event_data32->mono_timestamp.tv_usec =
+ event_data->mono_timestamp.tv_usec;
+ event_data32->frame_id = event_data->frame_id;
+ memcpy(&(event_data32->u), &(event_data->u),
+ sizeof(event_data32->u));
+ } else {
+ rc = v4l2_event_dequeue(vfh, arg,
+ file->f_flags & O_NONBLOCK);
+ }
+ return rc;
+}
+#else
+static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
+{
+ return v4l2_event_dequeue(vfh, arg,
+ file->f_flags & O_NONBLOCK);
+}
+#endif
+
+static long msm_isp_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+
+ switch (cmd) {
+ case VIDIOC_DQEVENT: {
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+ return msm_isp_dqevent(file, vfh, arg);
+ }
+ break;
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+ default:
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ }
+}
+
+static struct v4l2_subdev_core_ops msm_vfe_v4l2_subdev_core_ops = {
+ .ioctl = msm_isp_ioctl,
+ .subscribe_event = msm_isp_subscribe_event,
+ .unsubscribe_event = msm_isp_unsubscribe_event,
+};
+
+static struct v4l2_subdev_ops msm_vfe_v4l2_subdev_ops = {
+ .core = &msm_vfe_v4l2_subdev_core_ops,
+};
+
+static struct v4l2_subdev_internal_ops msm_vfe_subdev_internal_ops = {
+ .open = msm_isp_open_node,
+ .close = msm_isp_close_node,
+};
+
+static long msm_isp_v4l2_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_isp_subdev_do_ioctl);
+}
+
+static struct v4l2_file_operations msm_isp_v4l2_fops = {
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = msm_isp_v4l2_fops_ioctl,
+#endif
+ .unlocked_ioctl = msm_isp_v4l2_fops_ioctl
+};
+
+static int vfe_set_common_data(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct vfe_device *vfe_dev = NULL;
+
+ sd = (struct v4l2_subdev *)platform_get_drvdata(pdev);
+ if (!sd) {
+ pr_err("%s: Error! Cannot find subdev\n", __func__);
+ return -EPERM;
+ }
+ vfe_dev = (struct vfe_device *)v4l2_get_subdevdata(sd);
+ if (!vfe_dev) {
+ pr_err("%s: Error! Cannot find vfe_dev\n", __func__);
+ return -EPERM;
+ }
+
+ vfe_dev->common_data = (struct msm_vfe_common_dev_data *)
+ pdev->dev.platform_data;
+
+ vfe_dev->common_data->dual_vfe_res = &dualvfe;
+ vfe_dev->common_data->dual_vfe_res->axi_data[vfe_dev->pdev->id] =
+ &vfe_dev->axi_data;
+ vfe_dev->common_data->dual_vfe_res->stats_data[vfe_dev->pdev->id] =
+ &vfe_dev->stats_data;
+ vfe_dev->common_data->dual_vfe_res->vfe_dev[vfe_dev->pdev->id] =
+ vfe_dev;
+ return 0;
+}
+
+static int vfe_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct device_node *node;
+ struct platform_device *new_dev = NULL;
+ uint32_t i = 0;
+ uint32_t num_hw_sd = 0;
+ char name[10] = "\0";
+
+ memset(&vfe_common_data, 0, sizeof(vfe_common_data));
+ spin_lock_init(&vfe_common_data.common_dev_data_lock);
+
+ of_property_read_u32(pdev->dev.of_node,
+ "num_child", &num_hw_sd);
+
+ for (i = 0; i < num_hw_sd; i++) {
+ node = NULL;
+ snprintf(name, sizeof(name), "qcom,vfe%d", i);
+ node = of_find_node_by_name(NULL, name);
+ if (!node) {
+ pr_err("%s: Error! Cannot find node in dtsi %s\n",
+ __func__, name);
+ break;
+ }
+ new_dev = of_find_device_by_node(node);
+ if (!new_dev) {
+ pr_err("%s: Failed to find device on bus %s\n",
+ __func__, node->name);
+ break;
+ }
+
+ new_dev->dev.platform_data =
+ (void *)&vfe_common_data;
+ rc = vfe_set_common_data(new_dev);
+ if (rc < 0)
+ break;
+ }
+
+ return rc;
+}
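+
+/*
+ * Illustrative devicetree layout consumed by vfe_probe()/vfe_hw_probe()
+ * (a sketch based only on the properties read here; anything not read
+ * by this code is a placeholder):
+ *
+ *   parent node : num_child = <2>;
+ *   child nodes : qcom,vfe0 { cell-index = <0>; ... };
+ *                 qcom,vfe1 { cell-index = <1>; ... };
+ *
+ * vfe_probe() looks the children up by the "qcom,vfe%d" node name and
+ * hands each one the shared vfe_common_data as platform data, while
+ * vfe_hw_probe() later reads "cell-index" into pdev->id.
+ */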
+
+int vfe_hw_probe(struct platform_device *pdev)
+{
+ struct vfe_device *vfe_dev;
+ /*struct msm_cam_subdev_info sd_info;*/
+ const struct of_device_id *match_dev;
+ int rc = 0;
+
+ vfe_dev = kzalloc(sizeof(struct vfe_device), GFP_KERNEL);
+ if (!vfe_dev) {
+ rc = -ENOMEM;
+ goto end;
+ }
+ vfe_dev->stats = kzalloc(sizeof(struct msm_isp_statistics), GFP_KERNEL);
+ if (!vfe_dev->stats) {
+ rc = -ENOMEM;
+ goto probe_fail1;
+ }
+
+ vfe_dev->ub_info = kzalloc(sizeof(struct msm_isp_ub_info), GFP_KERNEL);
+ if (!vfe_dev->ub_info) {
+ rc = -ENOMEM;
+ goto probe_fail2;
+ }
+
+ if (pdev->dev.of_node) {
+ of_property_read_u32(pdev->dev.of_node,
+ "cell-index", &pdev->id);
+
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev) {
+ pr_err("%s: No vfe hardware info\n", __func__);
+ rc = -EINVAL;
+ goto probe_fail3;
+ }
+ vfe_dev->hw_info =
+ (struct msm_vfe_hardware_info *) match_dev->data;
+ } else {
+ vfe_dev->hw_info = (struct msm_vfe_hardware_info *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+
+ if (!vfe_dev->hw_info) {
+ pr_err("%s: No vfe hardware info\n", __func__);
+ rc = -EINVAL;
+ goto probe_fail3;
+ }
+ ISP_DBG("%s: device id = %d\n", __func__, pdev->id);
+
+ vfe_dev->pdev = pdev;
+
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_platform_data(vfe_dev);
+ if (rc < 0) {
+ pr_err("%s: failed to get platform resources\n", __func__);
+ rc = -ENOMEM;
+ goto probe_fail3;
+ }
+
+ INIT_LIST_HEAD(&vfe_dev->tasklet_q);
+ tasklet_init(&vfe_dev->vfe_tasklet,
+ msm_isp_do_tasklet, (unsigned long)vfe_dev);
+
+ v4l2_subdev_init(&vfe_dev->subdev.sd, &msm_vfe_v4l2_subdev_ops);
+ vfe_dev->subdev.sd.internal_ops =
+ &msm_vfe_subdev_internal_ops;
+ snprintf(vfe_dev->subdev.sd.name,
+ ARRAY_SIZE(vfe_dev->subdev.sd.name),
+ "vfe");
+ vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+ v4l2_set_subdevdata(&vfe_dev->subdev.sd, vfe_dev);
+ platform_set_drvdata(pdev, &vfe_dev->subdev.sd);
+ mutex_init(&vfe_dev->realtime_mutex);
+ mutex_init(&vfe_dev->core_mutex);
+ spin_lock_init(&vfe_dev->tasklet_lock);
+ spin_lock_init(&vfe_dev->shared_data_lock);
+ spin_lock_init(&vfe_dev->reg_update_lock);
+ spin_lock_init(&req_history_lock);
+ media_entity_init(&vfe_dev->subdev.sd.entity, 0, NULL, 0);
+ vfe_dev->subdev.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ vfe_dev->subdev.sd.entity.group_id = MSM_CAMERA_SUBDEV_VFE;
+ vfe_dev->subdev.sd.entity.name = pdev->name;
+ vfe_dev->subdev.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x2;
+ rc = msm_sd_register(&vfe_dev->subdev);
+ if (rc != 0) {
+ pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+ goto probe_fail3;
+ }
+ msm_cam_copy_v4l2_subdev_fops(&msm_isp_v4l2_fops);
+ msm_isp_v4l2_fops.unlocked_ioctl = msm_isp_v4l2_fops_ioctl;
+#ifdef CONFIG_COMPAT
+ msm_isp_v4l2_fops.compat_ioctl32 =
+ msm_isp_v4l2_fops_ioctl;
+#endif
+ vfe_dev->subdev.sd.devnode->fops = &msm_isp_v4l2_fops;
+
+ vfe_dev->buf_mgr = &vfe_buf_mgr;
+ v4l2_subdev_notify(&vfe_dev->subdev.sd,
+ MSM_SD_NOTIFY_REQ_CB, &vfe_vb2_ops);
+ rc = msm_isp_create_isp_buf_mgr(vfe_dev->buf_mgr,
+ &vfe_vb2_ops, &pdev->dev,
+ vfe_dev->hw_info->axi_hw_info->scratch_buf_range);
+ if (rc < 0) {
+ pr_err("%s: Unable to create buffer manager\n", __func__);
+ rc = -EINVAL;
+ goto probe_fail3;
+ }
+ msm_isp_enable_debugfs(vfe_dev, msm_isp_bw_request_history);
+ vfe_dev->buf_mgr->num_iommu_secure_ctx =
+ vfe_dev->hw_info->num_iommu_secure_ctx;
+ vfe_dev->buf_mgr->init_done = 1;
+ vfe_dev->vfe_open_cnt = 0;
+ return rc;
+
+probe_fail3:
+ kfree(vfe_dev->ub_info);
+probe_fail2:
+ kfree(vfe_dev->stats);
+probe_fail1:
+ kfree(vfe_dev);
+end:
+ return rc;
+}
+
+static struct platform_driver vfe_driver = {
+ .probe = vfe_probe,
+ .driver = {
+ .name = "msm_vfe",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_vfe_dt_match,
+ },
+};
+
+static int __init msm_vfe_init_module(void)
+{
+ return platform_driver_register(&vfe_driver);
+}
+
+static void __exit msm_vfe_exit_module(void)
+{
+ platform_driver_unregister(&vfe_driver);
+}
+
+late_initcall(msm_vfe_init_module);
+module_exit(msm_vfe_exit_module);
+MODULE_DESCRIPTION("MSM VFE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.h b/drivers/media/platform/msm/ais/isp/msm_isp.h
new file mode 100644
index 000000000000..72a76d178aa8
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_isp.h
@@ -0,0 +1,813 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VFE_H__
+#define __MSM_VFE_H__
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/avtimer_kernel.h>
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais_isp.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+
+#include "msm_buf_mgr.h"
+#include "cam_hw_ops.h"
+
+#define VFE40_8974V1_VERSION 0x10000018
+#define VFE40_8974V2_VERSION 0x1001001A
+#define VFE40_8974V3_VERSION 0x1001001B
+#define VFE40_8x26_VERSION 0x20000013
+#define VFE40_8x26V2_VERSION 0x20010014
+#define VFE40_8916_VERSION 0x10030000
+#define VFE40_8939_VERSION 0x10040000
+#define VFE40_8952_VERSION 0x10060000
+#define VFE40_8976_VERSION 0x10050000
+#define VFE40_8937_VERSION 0x10080000
+#define VFE40_8917_VERSION 0x10080001
+#define VFE40_8953_VERSION 0x10090000
+#define VFE32_8909_VERSION 0x30600
+
+#define MAX_IOMMU_CTX 2
+#define MAX_NUM_WM 7
+#define MAX_NUM_RDI 3
+#define MAX_NUM_RDI_MASTER 3
+#define MAX_NUM_COMPOSITE_MASK 4
+#define MAX_NUM_STATS_COMP_MASK 2
+#define MAX_INIT_FRAME_DROP 31
+#define MAX_REG_UPDATE_THRESHOLD 10
+#define ISP_Q2 (1 << 2)
+
+#define VFE_PING_FLAG 0xFFFFFFFF
+#define VFE_PONG_FLAG 0x0
+
+#define VFE_MAX_CFG_TIMEOUT 3000
+#define VFE_CLK_INFO_MAX 16
+#define STATS_COMP_BIT_MASK 0x1FF
+
+#define MSM_ISP_MIN_AB 100000000
+#define MSM_ISP_MIN_IB 100000000
+#define MAX_BUFFERS_IN_HW 2
+
+#define MAX_VFE 2
+#define MAX_RECOVERY_THRESHOLD 5
+
+struct vfe_device;
+struct msm_vfe_axi_stream;
+struct msm_vfe_stats_stream;
+
+#define VFE_SD_HW_MAX VFE_SD_COMMON
+
+/* Irq operations to perform on the irq mask register */
+enum msm_isp_irq_operation {
+ /* enable the irq bits in given parameters */
+ MSM_ISP_IRQ_ENABLE = 1,
+ /* disable the irq bits in the given parameters */
+ MSM_ISP_IRQ_DISABLE = 2,
+ /* set the irq bits to the given parameters */
+ MSM_ISP_IRQ_SET = 3,
+};
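+
+/*
+ * Illustrative use of these operations (a sketch, not a call made in
+ * this header): callers go through the per-HW config_irq op, e.g.
+ *
+ *   vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ *           irq0_bits, irq1_bits, MSM_ISP_IRQ_ENABLE);
+ *
+ * ENABLE ORs and DISABLE clears the given bits in the cached masks,
+ * while SET overwrites them; see msm_vfe47_config_irq() for the VFE47
+ * implementation.
+ */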
+
+/* This struct is used to save/track SOF info for an INTF,
+ * e.g. when running in Master-Slave mode.
+ */
+struct msm_vfe_sof_info {
+ uint32_t timestamp_ms;
+ uint32_t mono_timestamp_ms;
+ uint32_t frame_id;
+};
+
+/* Each INTF in Master-Slave mode uses this struct. */
+struct msm_vfe_dual_hw_ms_info {
+ /* type is Master/Slave */
+ enum msm_vfe_dual_hw_ms_type dual_hw_ms_type;
+ /* sof_info is a resource from common_data. If NULL, the SOF of this
+ * INTF does not need to be saved.
+ */
+ struct msm_vfe_sof_info *sof_info;
+ /* slave_id is the index into the common_data sof_info array of slaves */
+ uint8_t slave_id;
+};
+
+struct vfe_subscribe_info {
+ struct v4l2_fh *vfh;
+ uint32_t active;
+};
+
+enum msm_isp_pack_fmt {
+ QCOM,
+ MIPI,
+ DPCM6,
+ DPCM8,
+ PLAIN8,
+ PLAIN16,
+ DPCM10,
+ MAX_ISP_PACK_FMT,
+};
+
+enum msm_isp_camif_update_state {
+ NO_UPDATE,
+ ENABLE_CAMIF,
+ DISABLE_CAMIF,
+ DISABLE_CAMIF_IMMEDIATELY
+};
+
+struct msm_isp_timestamp {
+ /* Monotonic clock for v4l2 buffer */
+ struct timeval buf_time;
+ /* Monotonic clock for VT */
+ struct timeval vt_time;
+ /* Wall clock for userspace event */
+ struct timeval event_time;
+};
+
+struct msm_vfe_irq_ops {
+ void (*read_irq_status_and_clear)(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1);
+ void (*read_irq_status)(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1);
+ void (*process_reg_update)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_epoch_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_reset_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1);
+ void (*process_halt_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1);
+ void (*process_camif_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_axi_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t pingpong_status,
+ struct msm_isp_timestamp *ts);
+ void (*process_stats_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t pingpong_status,
+ struct msm_isp_timestamp *ts);
+ void (*config_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ enum msm_isp_irq_operation oper);
+ void (*process_eof_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0);
+};
+
+struct msm_vfe_axi_ops {
+ void (*reload_wm)(struct vfe_device *vfe_dev, void __iomem *vfe_base,
+ uint32_t reload_mask);
+ void (*enable_wm)(void __iomem *vfe_base,
+ uint8_t wm_idx, uint8_t enable);
+ int32_t (*cfg_io_format)(struct vfe_device *vfe_dev,
+ enum msm_vfe_axi_stream_src stream_src,
+ uint32_t io_format);
+ void (*cfg_framedrop)(void __iomem *vfe_base,
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t framedrop_pattern, uint32_t framedrop_period);
+ void (*clear_framedrop)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*cfg_comp_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*clear_comp_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*cfg_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*clear_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*clear_irq_mask)(struct vfe_device *vfe_dev);
+
+ void (*cfg_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx);
+ void (*clear_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+
+ void (*cfg_wm_xbar_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx);
+ void (*clear_wm_xbar_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+ void (*cfg_ub)(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+ void (*read_wm_ping_pong_addr)(struct vfe_device *vfe_dev);
+ void (*update_ping_pong_addr)(void __iomem *vfe_base,
+ uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
+ int32_t buf_size);
+ uint32_t (*get_wm_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
+ int (*halt)(struct vfe_device *vfe_dev, uint32_t blocking);
+ int (*restart)(struct vfe_device *vfe_dev, uint32_t blocking,
+ uint32_t enable_camif);
+ void (*update_cgc_override)(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint8_t cgc_override);
+ uint32_t (*ub_reg_offset)(struct vfe_device *vfe_dev, int idx);
+ uint32_t (*get_ub_size)(struct vfe_device *vfe_dev);
+};
+
+struct msm_vfe_core_ops {
+ void (*reg_update)(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+ long (*reset_hw)(struct vfe_device *vfe_dev, uint32_t first_start,
+ uint32_t blocking_call);
+ int (*init_hw)(struct vfe_device *vfe_dev);
+ void (*init_hw_reg)(struct vfe_device *vfe_dev);
+ void (*clear_status_reg)(struct vfe_device *vfe_dev);
+ void (*release_hw)(struct vfe_device *vfe_dev);
+ void (*cfg_input_mux)(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg);
+ int (*start_fetch_eng)(struct vfe_device *vfe_dev,
+ void *arg);
+ void (*update_camif_state)(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state update_state);
+ void (*cfg_rdi_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_rdi_cfg *rdi_cfg,
+ enum msm_vfe_input_src input_src);
+ void (*get_error_mask)(uint32_t *error_mask0, uint32_t *error_mask1);
+ void (*process_error_status)(struct vfe_device *vfe_dev);
+ void (*get_overflow_mask)(uint32_t *overflow_mask);
+ void (*get_irq_mask)(struct vfe_device *vfe_dev,
+ uint32_t *irq0_mask, uint32_t *irq1_mask);
+ void (*get_halt_restart_mask)(uint32_t *irq0_mask,
+ uint32_t *irq1_mask);
+ void (*get_rdi_wm_mask)(struct vfe_device *vfe_dev,
+ uint32_t *rdi_wm_mask);
+ bool (*is_module_cfg_lock_needed)(uint32_t reg_offset);
+ int (*ahb_clk_cfg)(struct vfe_device *vfe_dev,
+ struct msm_isp_ahb_clk_cfg *ahb_cfg);
+ void (*set_halt_restart_mask)(struct vfe_device *vfe_dev);
+ int (*start_fetch_eng_multi_pass)(struct vfe_device *vfe_dev,
+ void *arg);
+};
+
+struct msm_vfe_stats_ops {
+ int (*get_stats_idx)(enum msm_isp_stats_type stats_type);
+ int (*check_streams)(struct msm_vfe_stats_stream *stream_info);
+ void (*cfg_framedrop)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info,
+ uint32_t framedrop_pattern, uint32_t framedrop_period);
+ void (*clear_framedrop)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+ void (*cfg_comp_mask)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t comp_index,
+ uint8_t enable);
+ void (*cfg_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+ void (*clear_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+
+ void (*cfg_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+ void (*clear_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+
+ void (*cfg_ub)(struct vfe_device *vfe_dev);
+
+ void (*enable_module)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
+
+ void (*update_ping_pong_addr)(void __iomem *vfe_base,
+ struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status, dma_addr_t paddr);
+
+ uint32_t (*get_frame_id)(struct vfe_device *vfe_dev);
+ uint32_t (*get_wm_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
+
+ void (*update_cgc_override)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
+ void (*enable_stats_wm)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
+};
+
+enum msm_isp_hw_client {
+ ISP_VFE0,
+ ISP_VFE1,
+ ISP_CPP,
+ MAX_ISP_CLIENT,
+};
+
+struct msm_isp_bandwidth_info {
+ uint32_t active;
+ uint64_t ab;
+ uint64_t ib;
+};
+
+struct msm_isp_bandwidth_mgr {
+ uint32_t bus_client;
+ uint32_t bus_vector_active_idx;
+ uint32_t use_count;
+ struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+ int (*update_bw)(struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr);
+ void (*deinit_bw_mgr)(struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr);
+};
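+
+/*
+ * Illustrative usage of the bandwidth manager (an assumption drawn from
+ * the ops above, not a fixed API): each client updates its own slot and
+ * then asks the manager to re-vote the aggregate, e.g.
+ *
+ *   mgr->client_info[ISP_VFE0].ab = ab;
+ *   mgr->client_info[ISP_VFE0].ib = ib;
+ *   mgr->client_info[ISP_VFE0].active = 1;
+ *   mgr->update_bw(mgr);
+ */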
+
+struct msm_vfe_platform_ops {
+ int (*get_platform_data)(struct vfe_device *vfe_dev);
+ int (*enable_clks)(struct vfe_device *vfe_dev, int enable);
+ int (*get_clks)(struct vfe_device *vfe_dev);
+ void (*put_clks)(struct vfe_device *vfe_dev);
+ int (*get_clk_rates)(struct vfe_device *vfe_dev,
+ struct msm_isp_clk_rates *rates);
+ int (*get_max_clk_rate)(struct vfe_device *vfe_dev, long *rate);
+ int (*set_clk_rate)(struct vfe_device *vfe_dev, long *rate);
+ int (*enable_regulators)(struct vfe_device *vfe_dev, int enable);
+ int (*get_regulators)(struct vfe_device *vfe_dev);
+ void (*put_regulators)(struct vfe_device *vfe_dev);
+ int (*init_bw_mgr)(struct vfe_device *vfe_dev,
+ struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr);
+ int (*update_bw)(struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr);
+ void (*deinit_bw_mgr)(struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr);
+};
+
+struct msm_vfe_ops {
+ struct msm_vfe_irq_ops irq_ops;
+ struct msm_vfe_axi_ops axi_ops;
+ struct msm_vfe_core_ops core_ops;
+ struct msm_vfe_stats_ops stats_ops;
+ struct msm_vfe_platform_ops platform_ops;
+};
+
+struct msm_vfe_hardware_info {
+ int num_iommu_ctx;
+ /* secure iommu ctx nums */
+ int num_iommu_secure_ctx;
+ int vfe_clk_idx;
+ int runtime_axi_update;
+ struct msm_vfe_ops vfe_ops;
+ struct msm_vfe_axi_hardware_info *axi_hw_info;
+ struct msm_vfe_stats_hardware_info *stats_hw_info;
+ uint32_t dmi_reg_offset;
+ uint32_t min_ab;
+ uint32_t min_ib;
+ const char *regulator_names[];
+};
+
+struct msm_vfe_axi_hardware_info {
+ uint8_t num_wm;
+ uint8_t num_rdi;
+ uint8_t num_rdi_master;
+ uint8_t num_comp_mask;
+ uint32_t min_wm_ub;
+ uint32_t scratch_buf_range;
+};
+
+enum msm_vfe_axi_state {
+ AVAILABLE,
+ INACTIVE,
+ ACTIVE,
+ PAUSED,
+ START_PENDING,
+ STOP_PENDING,
+ PAUSE_PENDING,
+ RESUME_PENDING,
+ STARTING,
+ STOPPING,
+ PAUSING,
+ RESUMING,
+ UPDATING,
+};
+
+enum msm_vfe_axi_cfg_update_state {
+ NO_AXI_CFG_UPDATE,
+ APPLYING_UPDATE_RESUME,
+ UPDATE_REQUESTED,
+};
+
+#define VFE_NO_DROP 0xFFFFFFFF
+#define VFE_DROP_EVERY_2FRAME 0x55555555
+#define VFE_DROP_EVERY_4FRAME 0x11111111
+#define VFE_DROP_EVERY_8FRAME 0x01010101
+#define VFE_DROP_EVERY_16FRAME 0x00010001
+#define VFE_DROP_EVERY_32FRAME 0x00000001
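+
+/*
+ * Reading of the patterns above (an interpretation, not spelled out
+ * elsewhere in this header): the 32-bit pattern is applied one bit per
+ * frame across the framedrop period, a set bit meaning the frame is
+ * written out. So VFE_DROP_EVERY_2FRAME (0x55555555) keeps one frame in
+ * two, VFE_DROP_EVERY_32FRAME keeps one in thirty-two, and VFE_NO_DROP
+ * keeps every frame.
+ */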
+
+enum msm_vfe_axi_stream_type {
+ CONTINUOUS_STREAM,
+ BURST_STREAM,
+};
+
+struct msm_vfe_frame_request_queue {
+ struct list_head list;
+ enum msm_vfe_buff_queue_id buff_queue_id;
+ uint32_t buf_index;
+ uint8_t cmd_used;
+};
+
+#define MSM_VFE_REQUESTQ_SIZE 8
+
+struct msm_vfe_axi_stream {
+ uint32_t frame_id;
+ enum msm_vfe_axi_state state;
+ enum msm_vfe_axi_stream_src stream_src;
+ uint8_t num_planes;
+ uint8_t wm[MAX_PLANES_PER_STREAM];
+ uint32_t output_format;/*Planar/RAW/Misc*/
+ struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+ struct msm_vfe_axi_output_plane_cfg
+ vfe_plane_cfg[MAX_PLANES_PER_STREAM];
+
+ uint8_t comp_mask_index;
+ struct msm_isp_buffer *buf[2];
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t bufq_handle[VFE_BUF_QUEUE_MAX];
+ uint8_t controllable_output;
+ uint8_t undelivered_request_cnt;
+ uint8_t request_q_idx;
+ uint32_t request_q_cnt;
+ struct list_head request_q;
+ struct msm_vfe_frame_request_queue
+ request_queue_cmd[MSM_VFE_REQUESTQ_SIZE];
+ uint32_t stream_handle;
+ uint8_t buf_divert;
+ enum msm_vfe_axi_stream_type stream_type;
+ uint32_t frame_based;
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+ uint32_t current_framedrop_period; /* user requested period*/
+ uint32_t requested_framedrop_period; /* requested period*/
+ uint32_t activated_framedrop_period; /* active hw period */
+ uint32_t num_burst_capture;/*number of frames to capture*/
+ uint32_t init_frame_drop;
+ spinlock_t lock;
+
+ /* Bandwidth calculation info */
+ uint32_t max_width;
+ /* Based on format plane size in Q2, e.g. NV12 = 1.5 */
+ uint32_t format_factor;
+ uint32_t bandwidth;
+
+ uint32_t runtime_num_burst_capture;
+ uint32_t runtime_output_format;
+ enum msm_stream_memory_input_t memory_input;
+ struct msm_isp_sw_framskip sw_skip;
+ uint8_t sw_ping_pong_bit;
+};
+
+struct msm_vfe_axi_composite_info {
+ uint32_t stream_handle;
+ uint32_t stream_composite_mask;
+};
+
+enum msm_vfe_camif_state {
+ CAMIF_STOPPED,
+ CAMIF_ENABLE,
+ CAMIF_DISABLE,
+ CAMIF_STOPPING,
+};
+
+struct msm_vfe_src_info {
+ uint32_t frame_id;
+ uint32_t reg_update_frame_id;
+ uint8_t active;
+ uint8_t flag;
+ uint8_t pix_stream_count;
+ uint8_t raw_stream_count;
+ enum msm_vfe_inputmux input_mux;
+ enum ISP_START_PIXEL_PATTERN pixel_pattern;
+ enum msm_vfe_camif_input camif_input;
+ uint32_t width;
+ long pixel_clock;
+ uint32_t input_format;/*V4L2 pix format with bayer pattern*/
+ uint32_t last_updt_frm_id;
+ uint32_t sof_counter_step;
+ struct timeval time_stamp;
+ enum msm_vfe_dual_hw_type dual_hw_type;
+ struct msm_vfe_dual_hw_ms_info dual_hw_ms_info;
+ uint32_t eof_id;
+};
+
+struct msm_vfe_fetch_engine_info {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t bufq_handle;
+ uint32_t buf_idx;
+ uint8_t is_busy;
+ uint8_t offline_mode;
+ uint32_t fd;
+};
+
+enum msm_wm_ub_cfg_type {
+ MSM_WM_UB_CFG_DEFAULT,
+ MSM_WM_UB_EQUAL_SLICING,
+ MSM_WM_UB_CFG_MAX_NUM
+};
+
+struct msm_vfe_axi_shared_data {
+ struct msm_vfe_axi_hardware_info *hw_info;
+ struct msm_vfe_axi_stream stream_info[VFE_AXI_SRC_MAX];
+ uint32_t free_wm[MAX_NUM_WM];
+ uint32_t wm_image_size[MAX_NUM_WM];
+ enum msm_wm_ub_cfg_type wm_ub_cfg_policy;
+ uint8_t num_used_wm;
+ uint8_t num_active_stream;
+ uint8_t num_rdi_stream;
+ uint8_t num_pix_stream;
+ uint32_t rdi_wm_mask;
+ struct msm_vfe_axi_composite_info
+ composite_info[MAX_NUM_COMPOSITE_MASK];
+ uint8_t num_used_composite_mask;
+ uint32_t stream_update[VFE_SRC_MAX];
+ atomic_t axi_cfg_update[VFE_SRC_MAX];
+ enum msm_isp_camif_update_state pipeline_update;
+ struct msm_vfe_src_info src_info[VFE_SRC_MAX];
+ uint16_t stream_handle_cnt;
+ uint32_t event_mask;
+ uint8_t enable_frameid_recovery;
+ enum msm_vfe_camif_state camif_state;
+ uint32_t recovery_count;
+};
+
+struct msm_vfe_stats_hardware_info {
+ uint32_t stats_capability_mask;
+ uint8_t *stats_ping_pong_offset;
+ uint8_t *stats_wm_index;
+ uint8_t num_stats_type;
+ uint8_t num_stats_comp_mask;
+};
+
+enum msm_vfe_stats_state {
+ STATS_AVAILABLE,
+ STATS_INACTIVE,
+ STATS_ACTIVE,
+ STATS_START_PENDING,
+ STATS_STOP_PENDING,
+ STATS_STARTING,
+ STATS_STOPPING,
+};
+
+struct msm_vfe_stats_stream {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t stream_handle;
+ uint32_t composite_flag;
+ enum msm_isp_stats_type stats_type;
+ enum msm_vfe_stats_state state;
+ uint32_t framedrop_pattern;
+ uint32_t framedrop_period;
+ uint32_t irq_subsample_pattern;
+ uint32_t init_stats_frame_drop;
+ struct msm_isp_sw_framskip sw_skip;
+
+ uint32_t buffer_offset;
+ struct msm_isp_buffer *buf[2];
+ uint32_t bufq_handle;
+};
+
+struct msm_vfe_stats_shared_data {
+ struct msm_vfe_stats_stream stream_info[MSM_ISP_STATS_MAX];
+ uint8_t num_active_stream;
+ atomic_t stats_comp_mask[MAX_NUM_STATS_COMP_MASK];
+ uint16_t stream_handle_cnt;
+ atomic_t stats_update;
+};
+
+struct msm_vfe_tasklet_queue_cmd {
+ struct list_head list;
+ uint32_t vfeInterruptStatus0;
+ uint32_t vfeInterruptStatus1;
+ uint32_t vfePingPongStatus;
+ struct msm_isp_timestamp ts;
+ uint8_t cmd_used;
+};
+
+#define MSM_VFE_TASKLETQ_SIZE 200
+
+enum msm_vfe_overflow_state {
+ NO_OVERFLOW,
+ OVERFLOW_DETECTED,
+ HALT_ENFORCED,
+};
+
+struct msm_vfe_error_info {
+ atomic_t overflow_state;
+ uint32_t error_mask0;
+ uint32_t error_mask1;
+ uint32_t violation_status;
+ uint32_t camif_status;
+ uint8_t stream_framedrop_count[BUF_MGR_NUM_BUF_Q];
+ uint8_t stats_framedrop_count[MSM_ISP_STATS_MAX];
+ uint32_t info_dump_frame_count;
+ uint32_t error_count;
+ uint32_t framedrop_flag;
+};
+
+struct msm_isp_statistics {
+ int64_t imagemaster0_overflow;
+ int64_t imagemaster1_overflow;
+ int64_t imagemaster2_overflow;
+ int64_t imagemaster3_overflow;
+ int64_t imagemaster4_overflow;
+ int64_t imagemaster5_overflow;
+ int64_t imagemaster6_overflow;
+ int64_t be_overflow;
+ int64_t bg_overflow;
+ int64_t bf_overflow;
+ int64_t awb_overflow;
+ int64_t rs_overflow;
+ int64_t cs_overflow;
+ int64_t ihist_overflow;
+ int64_t skinbhist_overflow;
+ int64_t bfscale_overflow;
+
+ int64_t isp_vfe0_active;
+ int64_t isp_vfe0_ab;
+ int64_t isp_vfe0_ib;
+
+ int64_t isp_vfe1_active;
+ int64_t isp_vfe1_ab;
+ int64_t isp_vfe1_ib;
+
+ int64_t isp_cpp_active;
+ int64_t isp_cpp_ab;
+ int64_t isp_cpp_ib;
+
+ int64_t last_overflow_ab;
+ int64_t last_overflow_ib;
+
+ int64_t vfe_clk_rate;
+ int64_t cpp_clk_rate;
+};
+
+struct msm_isp_bw_req_info {
+ uint32_t client;
+ unsigned long long timestamp;
+ uint64_t total_ab;
+ uint64_t total_ib;
+ struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+};
+
+#define MSM_ISP_MAX_WM 7
+struct msm_isp_ub_info {
+ enum msm_wm_ub_cfg_type policy;
+ uint8_t num_wm;
+ uint32_t wm_ub;
+ uint32_t data[MSM_ISP_MAX_WM];
+ uint64_t addr[MSM_ISP_MAX_WM];
+};
+
+struct msm_vfe_hw_init_parms {
+ const char *entries;
+ const char *regs;
+ const char *settings;
+};
+
+struct dual_vfe_resource {
+ struct vfe_device *vfe_dev[MAX_VFE];
+ void __iomem *vfe_base[MAX_VFE];
+ uint32_t reg_update_mask[MAX_VFE];
+ struct msm_vfe_stats_shared_data *stats_data[MAX_VFE];
+ struct msm_vfe_axi_shared_data *axi_data[MAX_VFE];
+ uint32_t wm_reload_mask[MAX_VFE];
+ uint32_t epoch_sync_mask;
+};
+
+struct master_slave_resource_info {
+ enum msm_vfe_dual_hw_type dual_hw_type;
+ struct msm_vfe_sof_info master_sof_info;
+ uint8_t master_active;
+ uint32_t sof_delta_threshold; /* Updated by Master */
+ uint32_t num_slave;
+ uint32_t reserved_slave_mask;
+ uint32_t slave_active_mask;
+ struct msm_vfe_sof_info slave_sof_info[MS_NUM_SLAVE_MAX];
+};
+
+struct msm_vfe_irq_debug_info {
+ uint32_t vfe_id;
+ struct msm_isp_timestamp ts;
+ uint32_t core_id;
+ uint32_t irq_status0[MAX_VFE];
+ uint32_t irq_status1[MAX_VFE];
+ uint32_t ping_pong_status[MAX_VFE];
+};
+
+struct msm_vfe_common_dev_data {
+ spinlock_t common_dev_data_lock;
+ struct dual_vfe_resource *dual_vfe_res;
+ struct master_slave_resource_info ms_resource;
+};
+
+struct msm_vfe_common_subdev {
+ /* parent reference */
+ struct vfe_parent_device *parent;
+
+ /* Media Subdevice */
+ struct msm_sd_subdev *subdev;
+
+ /* Buf Mgr */
+ struct msm_isp_buf_mgr *buf_mgr;
+
+ /* Common Data */
+ struct msm_vfe_common_dev_data *common_data;
+};
+
+struct vfe_device {
+ /* Driver private data */
+ struct platform_device *pdev;
+ struct msm_vfe_common_dev_data *common_data;
+ struct msm_sd_subdev subdev;
+ struct msm_isp_buf_mgr *buf_mgr;
+
+ /* Resource info */
+ struct resource *vfe_irq;
+ void __iomem *vfe_base;
+ uint32_t vfe_base_size;
+ void __iomem *vfe_vbif_base;
+ uint32_t vfe_vbif_base_size;
+ struct device *iommu_ctx[MAX_IOMMU_CTX];
+ struct msm_cam_regulator *regulator_info;
+ uint32_t vfe_num_regulators;
+ struct clk **vfe_clk;
+ struct msm_cam_clk_info *vfe_clk_info;
+ uint32_t **vfe_clk_rates;
+ size_t num_clk;
+ size_t num_rates;
+ enum cam_ahb_clk_vote ahb_vote;
+
+ /* Sync variables*/
+ struct completion reset_complete;
+ struct completion halt_complete;
+ struct completion stream_config_complete;
+ struct completion stats_config_complete;
+ struct mutex realtime_mutex;
+ struct mutex core_mutex;
+ spinlock_t shared_data_lock;
+ spinlock_t reg_update_lock;
+ spinlock_t tasklet_lock;
+
+ /* Tasklet info */
+ atomic_t irq_cnt;
+ uint8_t taskletq_idx;
+ struct list_head tasklet_q;
+ struct tasklet_struct vfe_tasklet;
+ struct msm_vfe_tasklet_queue_cmd
+ tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
+
+ /* Data structures */
+ struct msm_vfe_hardware_info *hw_info;
+ struct msm_vfe_axi_shared_data axi_data;
+ struct msm_vfe_stats_shared_data stats_data;
+ struct msm_vfe_error_info error_info;
+ struct msm_vfe_fetch_engine_info fetch_engine_info;
+ enum msm_vfe_hvx_streaming_cmd hvx_cmd;
+
+ /* State variables */
+ uint32_t vfe_hw_version;
+ int vfe_clk_idx;
+ uint32_t vfe_open_cnt;
+ uint8_t vt_enable;
+ uint32_t vfe_ub_policy;
+ uint8_t reset_pending;
+ uint8_t reg_update_requested;
+ uint8_t reg_updated;
+ uint32_t is_split;
+ uint32_t dual_vfe_enable;
+ unsigned long page_fault_addr;
+
+ /* Debug variables */
+ int dump_reg;
+ struct msm_isp_statistics *stats;
+ uint64_t msm_isp_last_overflow_ab;
+ uint64_t msm_isp_last_overflow_ib;
+ uint64_t msm_isp_vfe_clk_rate;
+ struct msm_isp_ub_info *ub_info;
+ uint32_t isp_sof_debug;
+ uint32_t isp_raw0_debug;
+ uint32_t isp_raw1_debug;
+ uint32_t isp_raw2_debug;
+
+ /* irq info */
+ uint32_t irq0_mask;
+ uint32_t irq1_mask;
+ /* before halt irq info */
+ uint32_t recovery_irq0_mask;
+ uint32_t recovery_irq1_mask;
+};
+
+struct vfe_parent_device {
+ struct platform_device *pdev;
+ uint32_t num_sd;
+ uint32_t num_hw_sd;
+ struct platform_device *child_list[VFE_SD_HW_MAX];
+ struct msm_vfe_common_subdev *common_sd;
+};
+
+int vfe_hw_probe(struct platform_device *pdev);
+void msm_isp_update_last_overflow_ab_ib(struct vfe_device *vfe_dev);
+
+#endif
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c
new file mode 100644
index 000000000000..8991433b2c67
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c
@@ -0,0 +1,2851 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+#include "msm_isp.h"
+#include "msm.h"
+#include "msm_camera_io_util.h"
+#include "cam_hw_ops.h"
+#include "msm_isp47.h"
+#include "cam_soc_api.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define VFE47_8996V1_VERSION 0x70000000
+
+#define VFE47_BURST_LEN 3
+#define VFE47_FETCH_BURST_LEN 3
+#define VFE47_STATS_BURST_LEN 3
+#define VFE47_UB_SIZE_VFE0 2048
+#define VFE47_UB_SIZE_VFE1 1536
+#define VFE47_UB_STATS_SIZE 144
+#define MSM_ISP47_TOTAL_IMAGE_UB_VFE0 (VFE47_UB_SIZE_VFE0 - VFE47_UB_STATS_SIZE)
+#define MSM_ISP47_TOTAL_IMAGE_UB_VFE1 (VFE47_UB_SIZE_VFE1 - VFE47_UB_STATS_SIZE)
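+/*
+ * i.e. 2048 - 144 = 1904 UB units remain for image write masters on
+ * VFE0 and 1536 - 144 = 1392 on VFE1; the 144 units reserved by
+ * VFE47_UB_STATS_SIZE are left for the stats write masters.
+ */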
+#define VFE47_WM_BASE(idx) (0xA0 + 0x2C * (idx))
+#define VFE47_RDI_BASE(idx) (0x46C + 0x4 * (idx))
+#define VFE47_XBAR_BASE(idx) (0x90 + 0x4 * ((idx) / 2))
+#define VFE47_XBAR_SHIFT(idx) (((idx) % 2) ? 16 : 0)
+/*add ping MAX and Pong MAX*/
+#define VFE47_PING_PONG_BASE(wm, ping_pong) \
+ (VFE47_WM_BASE(wm) + 0x4 * (1 + (((~ping_pong) & 0x1) * 2)))
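+/*
+ * Derivation of the macro above: each write master exposes two buffer
+ * address registers; the macro resolves to VFE47_WM_BASE(wm) + 0x4 when
+ * the relevant pingpong status bit is 1 and to VFE47_WM_BASE(wm) + 0xC
+ * when it is 0, which lets software program the buffer opposite to the
+ * one the status bit reports.
+ */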
+#define SHIFT_BF_SCALE_BIT 1
+
+#define VFE47_BUS_RD_CGC_OVERRIDE_BIT 16
+
+#define VFE47_VBIF_CLK_OFFSET 0x4
+
+#define UB_CFG_POLICY MSM_WM_UB_EQUAL_SLICING
+
+static uint32_t stats_base_addr[] = {
+ 0x1D4, /* HDR_BE */
+ 0x254, /* BG(AWB_BG) */
+ 0x214, /* BF */
+ 0x1F4, /* HDR_BHIST */
+ 0x294, /* RS */
+ 0x2B4, /* CS */
+ 0x2D4, /* IHIST */
+ 0x274, /* BHIST (SKIN_BHIST) */
+ 0x234, /* AEC_BG */
+};
+
+static uint8_t stats_pingpong_offset_map[] = {
+ 8, /* HDR_BE */
+ 12, /* BG(AWB_BG) */
+ 10, /* BF */
+ 9, /* HDR_BHIST */
+ 14, /* RS */
+ 15, /* CS */
+ 16, /* IHIST */
+ 13, /* BHIST (SKIN_BHIST) */
+ 11, /* AEC_BG */
+};
+
+static uint8_t stats_irq_map_comp_mask[] = {
+ 16, /* HDR_BE */
+ 17, /* BG(AWB_BG) */
+ 18, /* BF EARLY DONE/ BF */
+ 19, /* HDR_BHIST */
+ 20, /* RS */
+ 21, /* CS */
+ 22, /* IHIST */
+ 23, /* BHIST (SKIN_BHIST) */
+ 15, /* AEC_BG */
+};
+#define VFE47_STATS_BASE(idx) (stats_base_addr[idx])
+#define VFE47_STATS_PING_PONG_BASE(idx, ping_pong) \
+ (VFE47_STATS_BASE(idx) + 0x4 * \
+ (~(ping_pong >> (stats_pingpong_offset_map[idx])) & 0x1) * 2)
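+/*
+ * Similarly to VFE47_PING_PONG_BASE: stats_pingpong_offset_map[] gives
+ * the position of each stats write master's bit in the pingpong status
+ * register, and the macro resolves to VFE47_STATS_BASE(idx) + 0x0 when
+ * that bit is 1 and + 0x8 when it is 0.
+ */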
+
+#define VFE47_SRC_CLK_DTSI_IDX 5
+#define HANDLE_TO_IDX(handle) ((handle) & 0xFF)
+
+static struct msm_bus_vectors msm_isp_init_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+/*
+ * During node open, request the minimum ab/ib bus bandwidth that is
+ * needed to successfully enable the bus clocks.
+ */
+static struct msm_bus_vectors msm_isp_ping_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = MSM_ISP_MIN_AB,
+ .ib = MSM_ISP_MIN_IB,
+ },
+};
+
+static struct msm_bus_vectors msm_isp_pong_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_paths msm_isp_bus_client_config[] = {
+ {
+ ARRAY_SIZE(msm_isp_init_vectors),
+ msm_isp_init_vectors,
+ },
+ {
+ ARRAY_SIZE(msm_isp_ping_vectors),
+ msm_isp_ping_vectors,
+ },
+ {
+ ARRAY_SIZE(msm_isp_pong_vectors),
+ msm_isp_pong_vectors,
+ },
+};
+
+static struct msm_bus_scale_pdata msm_isp_bus_client_pdata = {
+ msm_isp_bus_client_config,
+ ARRAY_SIZE(msm_isp_bus_client_config),
+ .name = "msm_camera_isp",
+};
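+
+/*
+ * Sketch of how these three usecases are meant to be used (an
+ * assumption from their layout, not shown in this file): index 0 is the
+ * idle/init vote, and the driver alternates between the "ping" (1) and
+ * "pong" (2) usecases when its ab/ib request changes, rewriting the
+ * inactive vector and then switching to it with
+ * msm_bus_scale_client_update_request().
+ */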
+
+uint32_t msm_vfe47_ub_reg_offset(struct vfe_device *vfe_dev, int wm_idx)
+{
+ return (VFE47_WM_BASE(wm_idx) + 0x18);
+}
+
+uint32_t msm_vfe47_get_ub_size(struct vfe_device *vfe_dev)
+{
+ if (vfe_dev->pdev->id == ISP_VFE0)
+ return MSM_ISP47_TOTAL_IMAGE_UB_VFE0;
+ return MSM_ISP47_TOTAL_IMAGE_UB_VFE1;
+}
+
+void msm_vfe47_config_irq(struct vfe_device *vfe_dev,
+ uint32_t irq0_mask, uint32_t irq1_mask,
+ enum msm_isp_irq_operation oper)
+{
+ switch (oper) {
+ case MSM_ISP_IRQ_ENABLE:
+ vfe_dev->irq0_mask |= irq0_mask;
+ vfe_dev->irq1_mask |= irq1_mask;
+ break;
+ case MSM_ISP_IRQ_DISABLE:
+ vfe_dev->irq0_mask &= ~irq0_mask;
+ vfe_dev->irq1_mask &= ~irq1_mask;
+ break;
+ case MSM_ISP_IRQ_SET:
+ vfe_dev->irq0_mask = irq0_mask;
+ vfe_dev->irq1_mask = irq1_mask;
+ break;
+ }
+ msm_camera_io_w_mb(vfe_dev->irq0_mask,
+ vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(vfe_dev->irq1_mask,
+ vfe_dev->vfe_base + 0x60);
+}
+
+static int32_t msm_vfe47_init_dt_parms(struct vfe_device *vfe_dev,
+ struct msm_vfe_hw_init_parms *dt_parms, void __iomem *dev_mem_base)
+{
+ struct device_node *of_node;
+ int32_t i = 0, rc = 0;
+ uint32_t *dt_settings = NULL, *dt_regs = NULL, num_dt_entries = 0;
+
+ of_node = vfe_dev->pdev->dev.of_node;
+
+ rc = of_property_read_u32(of_node, dt_parms->entries,
+ &num_dt_entries);
+ if (rc < 0 || !num_dt_entries) {
+ pr_err("%s: NO QOS entries found\n", __func__);
+ return -EINVAL;
+ }
+ dt_settings = kcalloc(num_dt_entries, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dt_settings)
+ return -ENOMEM;
+ dt_regs = kcalloc(num_dt_entries, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dt_regs) {
+ kfree(dt_settings);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, dt_parms->regs,
+ dt_regs, num_dt_entries);
+ if (rc < 0) {
+ pr_err("%s: NO QOS BUS BDG info\n", __func__);
+ kfree(dt_settings);
+ kfree(dt_regs);
+ return -EINVAL;
+ }
+ if (dt_parms->settings) {
+ rc = of_property_read_u32_array(of_node,
+ dt_parms->settings,
+ dt_settings, num_dt_entries);
+ if (rc < 0) {
+ pr_err("%s: NO QOS settings\n", __func__);
+ kfree(dt_settings);
+ kfree(dt_regs);
+ } else {
+ for (i = 0; i < num_dt_entries; i++) {
+ msm_camera_io_w(dt_settings[i],
+ dev_mem_base +
+ dt_regs[i]);
+ }
+ kfree(dt_settings);
+ kfree(dt_regs);
+ }
+ } else {
+ kfree(dt_settings);
+ kfree(dt_regs);
+ }
+ return 0;
+}
+
+static enum cam_ahb_clk_vote msm_isp47_get_cam_clk_vote(
+ enum msm_vfe_ahb_clk_vote vote)
+{
+ switch (vote) {
+ case MSM_ISP_CAMERA_AHB_SVS_VOTE:
+ return CAM_AHB_SVS_VOTE;
+ case MSM_ISP_CAMERA_AHB_TURBO_VOTE:
+ return CAM_AHB_TURBO_VOTE;
+ case MSM_ISP_CAMERA_AHB_NOMINAL_VOTE:
+ return CAM_AHB_NOMINAL_VOTE;
+ case MSM_ISP_CAMERA_AHB_SUSPEND_VOTE:
+ return CAM_AHB_SUSPEND_VOTE;
+ }
+ return 0;
+}
+
+static int msm_isp47_ahb_clk_cfg(struct vfe_device *vfe_dev,
+ struct msm_isp_ahb_clk_cfg *ahb_cfg)
+{
+ int rc = 0;
+ enum cam_ahb_clk_vote vote;
+
+ vote = msm_isp47_get_cam_clk_vote(ahb_cfg->vote);
+
+ if (vote && vfe_dev->ahb_vote != vote) {
+ rc = cam_config_ahb_clk(NULL, 0,
+ (vfe_dev->pdev->id == ISP_VFE0 ?
+ CAM_AHB_CLIENT_VFE0 : CAM_AHB_CLIENT_VFE1), vote);
+ if (rc)
+ pr_err("%s: failed to set ahb vote to %x\n",
+ __func__, vote);
+ else
+ vfe_dev->ahb_vote = vote;
+ }
+ return rc;
+}
+
+int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
+{
+ int rc = -1;
+ enum cam_ahb_clk_client id;
+
+ if (vfe_dev->pdev->id == 0)
+ id = CAM_AHB_CLIENT_VFE0;
+ else
+ id = CAM_AHB_CLIENT_VFE1;
+
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(
+ vfe_dev, 1);
+ if (rc)
+ goto enable_regulators_failed;
+
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(
+ vfe_dev, 1);
+ if (rc)
+ goto clk_enable_failed;
+
+ rc = cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto ahb_vote_fail;
+ }
+ vfe_dev->ahb_vote = CAM_AHB_SVS_VOTE;
+
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
+ vfe_dev->vfe_base;
+
+ rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+ MSM_ISP_MIN_AB, MSM_ISP_MIN_IB);
+ if (rc)
+ goto bw_enable_fail;
+
+ rc = msm_camera_enable_irq(vfe_dev->vfe_irq, 1);
+ if (rc < 0)
+ goto irq_enable_fail;
+
+ return rc;
+irq_enable_fail:
+ msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id, 0, 0);
+bw_enable_fail:
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
+ if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
+ahb_vote_fail:
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 0);
+clk_enable_failed:
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
+enable_regulators_failed:
+ return rc;
+}
+
+void msm_vfe47_release_hardware(struct vfe_device *vfe_dev)
+{
+ enum cam_ahb_clk_client id;
+
+ /* when closing node, disable all irq */
+ vfe_dev->irq0_mask = 0;
+ vfe_dev->irq1_mask = 0;
+ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev,
+ vfe_dev->irq0_mask, vfe_dev->irq1_mask,
+ MSM_ISP_IRQ_SET);
+ msm_camera_enable_irq(vfe_dev->vfe_irq, 0);
+ tasklet_kill(&vfe_dev->vfe_tasklet);
+ msm_isp_flush_tasklet(vfe_dev);
+
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
+
+ msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id, 0, 0);
+
+ if (vfe_dev->pdev->id == 0)
+ id = CAM_AHB_CLIENT_VFE0;
+ else
+ id = CAM_AHB_CLIENT_VFE1;
+
+ if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to vote for AHB\n", __func__);
+
+ vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
+
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(
+ vfe_dev, 0);
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
+}
+
+void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_hw_init_parms qos_parms;
+ struct msm_vfe_hw_init_parms vbif_parms;
+ struct msm_vfe_hw_init_parms ds_parms;
+
+ memset(&qos_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
+ memset(&vbif_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
+ memset(&ds_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
+
+ qos_parms.entries = "qos-entries";
+ qos_parms.regs = "qos-regs";
+ qos_parms.settings = "qos-settings";
+ vbif_parms.entries = "vbif-entries";
+ vbif_parms.regs = "vbif-regs";
+ vbif_parms.settings = "vbif-settings";
+ ds_parms.entries = "ds-entries";
+ ds_parms.regs = "ds-regs";
+ ds_parms.settings = "ds-settings";
+
+ msm_vfe47_init_dt_parms(vfe_dev, &qos_parms, vfe_dev->vfe_base);
+ msm_vfe47_init_dt_parms(vfe_dev, &ds_parms, vfe_dev->vfe_base);
+ msm_vfe47_init_dt_parms(vfe_dev, &vbif_parms, vfe_dev->vfe_vbif_base);
+
+ /* BUS_CFG */
+ msm_camera_io_w(0x00000101, vfe_dev->vfe_base + 0x84);
+ /* IRQ_MASK/CLEAR */
+ msm_vfe47_config_irq(vfe_dev, 0x810000E0, 0xFFFFFF7E,
+ MSM_ISP_IRQ_ENABLE);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
+}
+
+void msm_vfe47_clear_status_reg(struct vfe_device *vfe_dev)
+{
+ msm_vfe47_config_irq(vfe_dev, 0x80000000, 0x0,
+ MSM_ISP_IRQ_SET);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
+}
+
+void msm_vfe47_process_reset_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ if (irq_status0 & (1 << 31)) {
+ complete(&vfe_dev->reset_complete);
+ vfe_dev->reset_pending = 0;
+ }
+}
+
+void msm_vfe47_process_halt_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ uint32_t val = 0;
+
+ if (irq_status1 & (1 << 8)) {
+ complete(&vfe_dev->halt_complete);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x400);
+ }
+
+ val = msm_camera_io_r(vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
+ val &= ~(0x1);
+ msm_camera_io_w(val, vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
+}
+
+void msm_vfe47_process_input_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0x1000003))
+ return;
+
+ if (irq_status0 & (1 << 0)) {
+ ISP_DBG("vfe %d: SOF IRQ, frame id %d\n",
+ vfe_dev->pdev->id,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+ msm_isp_increment_frame_id(vfe_dev, VFE_PIX_0, ts);
+ }
+
+ if (irq_status0 & (1 << 24)) {
+ ISP_DBG("%s: Fetch Engine Read IRQ\n", __func__);
+ msm_isp_fetch_engine_done_notify(vfe_dev,
+ &vfe_dev->fetch_engine_info);
+ }
+
+ if (irq_status0 & (1 << 1))
+ ISP_DBG("%s: EOF IRQ\n", __func__);
+}
+
+void msm_vfe47_process_violation_status(
+ struct vfe_device *vfe_dev)
+{
+ uint32_t violation_status = vfe_dev->error_info.violation_status;
+
+ if (violation_status > 39) {
+ pr_err("%s: invalid violation status %d\n",
+ __func__, violation_status);
+ return;
+ }
+
+ pr_err("%s: VFE pipeline violation status %d\n", __func__,
+ violation_status);
+}
+
+void msm_vfe47_process_error_status(struct vfe_device *vfe_dev)
+{
+ uint32_t error_status1 = vfe_dev->error_info.error_mask1;
+
+ if (error_status1 & (1 << 0)) {
+ pr_err("%s: camif error status: 0x%x\n",
+ __func__, vfe_dev->error_info.camif_status);
+ /* dump camif registers on camif error */
+ msm_camera_io_dump(vfe_dev->vfe_base + 0x478, 0x3C, 1);
+ /* testgen */
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_dump(vfe_dev->vfe_base + 0xC58, 0x28, 1);
+ }
+ if (error_status1 & (1 << 1))
+ pr_err("%s: stats bhist overwrite\n", __func__);
+ if (error_status1 & (1 << 2))
+ pr_err("%s: stats cs overwrite\n", __func__);
+ if (error_status1 & (1 << 3))
+ pr_err("%s: stats ihist overwrite\n", __func__);
+ if (error_status1 & (1 << 4))
+ pr_err("%s: realign buf y overflow\n", __func__);
+ if (error_status1 & (1 << 5))
+ pr_err("%s: realign buf cb overflow\n", __func__);
+ if (error_status1 & (1 << 6))
+ pr_err("%s: realign buf cr overflow\n", __func__);
+ if (error_status1 & (1 << 7))
+ msm_vfe47_process_violation_status(vfe_dev);
+ if (error_status1 & (1 << 9))
+ pr_err("%s: image master 0 bus overflow\n", __func__);
+ if (error_status1 & (1 << 10))
+ pr_err("%s: image master 1 bus overflow\n", __func__);
+ if (error_status1 & (1 << 11))
+ pr_err("%s: image master 2 bus overflow\n", __func__);
+ if (error_status1 & (1 << 12))
+ pr_err("%s: image master 3 bus overflow\n", __func__);
+ if (error_status1 & (1 << 13))
+ pr_err("%s: image master 4 bus overflow\n", __func__);
+ if (error_status1 & (1 << 14))
+ pr_err("%s: image master 5 bus overflow\n", __func__);
+ if (error_status1 & (1 << 15))
+ pr_err("%s: image master 6 bus overflow\n", __func__);
+ if (error_status1 & (1 << 16))
+ pr_err("%s: status hdr be bus overflow\n", __func__);
+ if (error_status1 & (1 << 17))
+ pr_err("%s: status bg bus overflow\n", __func__);
+ if (error_status1 & (1 << 18))
+ pr_err("%s: status bf bus overflow\n", __func__);
+ if (error_status1 & (1 << 19))
+ pr_err("%s: status hdr bhist bus overflow\n", __func__);
+ if (error_status1 & (1 << 20))
+ pr_err("%s: status rs bus overflow\n", __func__);
+ if (error_status1 & (1 << 21))
+ pr_err("%s: status cs bus overflow\n", __func__);
+ if (error_status1 & (1 << 22))
+ pr_err("%s: status ihist bus overflow\n", __func__);
+ if (error_status1 & (1 << 23))
+ pr_err("%s: status skin bhist bus overflow\n", __func__);
+ if (error_status1 & (1 << 24))
+ pr_err("%s: status aec bg bus overflow\n", __func__);
+ if (error_status1 & (1 << 25))
+ pr_err("%s: status dsp error\n", __func__);
+}
+
+void msm_vfe47_read_irq_status_and_clear(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1)
+{
+ *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x6C);
+ *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x70);
+ /* Mask off bits that are not enabled */
+ msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(*irq_status1, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58);
+ *irq_status0 &= vfe_dev->irq0_mask;
+ *irq_status1 &= vfe_dev->irq1_mask;
+
+ if (*irq_status1 & (1 << 0)) {
+ vfe_dev->error_info.camif_status =
+ msm_camera_io_r(vfe_dev->vfe_base + 0x4A4);
+ /* mask off camif error after first occurrence */
+ msm_vfe47_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
+ }
+
+ if (*irq_status1 & (1 << 7))
+ vfe_dev->error_info.violation_status =
+ msm_camera_io_r(vfe_dev->vfe_base + 0x7C);
+
+}
+
+void msm_vfe47_read_irq_status(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1)
+{
+ *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x6C);
+ *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x70);
+}
+
+void msm_vfe47_process_reg_update(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ enum msm_vfe_input_src i;
+ uint32_t shift_irq;
+ uint8_t reg_updated = 0;
+ unsigned long flags;
+
+ if (!(irq_status0 & 0xF0))
+ return;
+ /* Shift status bits so that PIX SOF is 1st bit */
+ shift_irq = ((irq_status0 & 0xF0) >> 4);
+
+ for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
+ if (shift_irq & BIT(i)) {
+ reg_updated |= BIT(i);
+ ISP_DBG("%s REG_UPDATE IRQ %x\n", __func__,
+ (uint32_t)BIT(i));
+ switch (i) {
+ case VFE_PIX_0:
+ msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
+ VFE_PIX_0, ts);
+ if (atomic_read(
+ &vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ if (vfe_dev->axi_data.camif_state ==
+ CAMIF_STOPPING)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, i);
+ break;
+ case VFE_RAW_0:
+ case VFE_RAW_1:
+ case VFE_RAW_2:
+ msm_isp_increment_frame_id(vfe_dev, i, ts);
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
+ msm_isp_update_framedrop_reg(vfe_dev, i);
+ /*
+ * Reg Update is pseudo SOF for RDI,
+ * so request every frame
+ */
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, i);
+ break;
+ default:
+ pr_err("%s: Error case\n", __func__);
+ return;
+ }
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ msm_isp_save_framedrop_values(vfe_dev, i);
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ if (atomic_read(
+ &vfe_dev->axi_data.axi_cfg_update[i]) ==
+ 0)
+ msm_isp_notify(vfe_dev,
+ ISP_EVENT_STREAM_UPDATE_DONE,
+ i, ts);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
+ if (reg_updated & BIT(VFE_PIX_0))
+ vfe_dev->reg_updated = 1;
+
+ vfe_dev->reg_update_requested &= ~reg_updated;
+ spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
+}
+
+void msm_vfe47_process_epoch_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0xc))
+ return;
+
+ if (irq_status0 & BIT(2)) {
+ ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
+ msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
+ msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_update_error_frame_count(vfe_dev);
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
+ && vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count == 0) {
+ if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
+ msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev, VFE_PIX_0);
+ }
+ }
+}
+
+void msm_isp47_process_eof_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0)
+{
+ if (irq_status0 & BIT(1))
+ vfe_dev->axi_data.src_info[VFE_PIX_0].eof_id++;
+}
+
+void msm_vfe47_reg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ uint32_t update_mask = 0;
+ unsigned long flags;
+
+ /* This HW supports up to VFE_RAW_2 */
+ if (frame_src > VFE_RAW_2 && frame_src != VFE_SRC_MAX) {
+ pr_err("%s Error case\n", __func__);
+ return;
+ }
+
+ /*
+ * If frame_src == VFE_SRC_MAX request reg_update on all
+ * supported INTF
+ */
+ if (frame_src == VFE_SRC_MAX)
+ update_mask = 0xF;
+ else
+ update_mask = BIT((uint32_t)frame_src);
+ ISP_DBG("%s update_mask %x\n", __func__, update_mask);
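+ /*
+ * Assumed bit layout of the reg update command register at 0x4AC
+ * (mirrors the SOF status bits handled in
+ * msm_vfe47_process_reg_update()): bit 0 = VFE_PIX_0 and bits 1-3 =
+ * VFE_RAW_0..VFE_RAW_2, hence 0xF for VFE_SRC_MAX.
+ */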
+
+ spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].reg_update_frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ vfe_dev->reg_update_requested |= update_mask;
+ vfe_dev->common_data->dual_vfe_res->reg_update_mask[vfe_dev->pdev->id] =
+ vfe_dev->reg_update_requested;
+ if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) &&
+ ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) {
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->common_data->dual_vfe_res->
+ vfe_base[ISP_VFE0] + 0x4AC);
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->vfe_base + 0x4AC);
+ } else if (!vfe_dev->is_split ||
+ ((frame_src == VFE_PIX_0) &&
+ (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->vfe_base + 0x4AC);
+ }
+ spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
+}
+
+long msm_vfe47_reset_hardware(struct vfe_device *vfe_dev,
+ uint32_t first_start, uint32_t blocking_call)
+{
+ long rc = 0;
+
+ init_completion(&vfe_dev->reset_complete);
+
+ if (blocking_call)
+ vfe_dev->reset_pending = 1;
+
+ if (first_start) {
+ msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x18);
+ } else {
+ msm_camera_io_w_mb(0x3EF, vfe_dev->vfe_base + 0x18);
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ reload_wm(vfe_dev, vfe_dev->vfe_base, 0x0001FFFF);
+ }
+
+ if (blocking_call) {
+ rc = wait_for_completion_timeout(
+ &vfe_dev->reset_complete, msecs_to_jiffies(50));
+ if (rc <= 0) {
+ pr_err("%s:%d failed: reset timeout\n", __func__,
+ __LINE__);
+ vfe_dev->reset_pending = 0;
+ }
+ }
+
+ return rc;
+}
+
+void msm_vfe47_axi_reload_wm(struct vfe_device *vfe_dev,
+ void __iomem *vfe_base, uint32_t reload_mask)
+{
+ msm_camera_io_w_mb(reload_mask, vfe_base + 0x80);
+}
+
+void msm_vfe47_axi_update_cgc_override(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint8_t enable)
+{
+ uint32_t val;
+
+ /* Change CGC override */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
+ if (enable)
+ val |= (1 << wm_idx);
+ else
+ val &= ~(1 << wm_idx);
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x3C);
+}
+
+static void msm_vfe47_axi_enable_wm(void __iomem *vfe_base,
+ uint8_t wm_idx, uint8_t enable)
+{
+ uint32_t val;
+
+ val = msm_camera_io_r(vfe_base + VFE47_WM_BASE(wm_idx));
+ if (enable)
+ val |= 0x1;
+ else
+ val &= ~0x1;
+ msm_camera_io_w_mb(val,
+ vfe_base + VFE47_WM_BASE(wm_idx));
+}
+
+void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t comp_mask, comp_mask_index =
+ stream_info->comp_mask_index;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ comp_mask |= (axi_data->composite_info[comp_mask_index].
+ stream_composite_mask << (comp_mask_index * 8));
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
+
+ msm_vfe47_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_ENABLE);
+}
+
+void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
+
+ msm_vfe47_config_irq(vfe_dev, (1 << (comp_mask_index + 25)), 0,
+ MSM_ISP_IRQ_DISABLE);
+}
+
+void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ msm_vfe47_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ MSM_ISP_IRQ_ENABLE);
+}
+
+void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ msm_vfe47_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
+ MSM_ISP_IRQ_DISABLE);
+}
+
+void msm_vfe47_axi_clear_irq_mask(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x60);
+}
+
+void msm_vfe47_cfg_framedrop(void __iomem *vfe_base,
+ struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
+ uint32_t framedrop_period)
+{
+ uint32_t i, temp;
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ msm_camera_io_w(framedrop_pattern, vfe_base +
+ VFE47_WM_BASE(stream_info->wm[i]) + 0x24);
+ temp = msm_camera_io_r(vfe_base +
+ VFE47_WM_BASE(stream_info->wm[i]) + 0x14);
+ temp &= 0xFFFFFF83;
+ msm_camera_io_w(temp | (framedrop_period - 1) << 2,
+ vfe_base + VFE47_WM_BASE(stream_info->wm[i]) + 0x14);
+ }
+}
+
+void msm_vfe47_clear_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t i;
+
+ for (i = 0; i < stream_info->num_planes; i++)
+ msm_camera_io_w(0, vfe_dev->vfe_base +
+ VFE47_WM_BASE(stream_info->wm[i]) + 0x24);
+}
+
+static int32_t msm_vfe47_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
+{
+ int rc = 0;
+
+ switch (bpp) {
+ case 8:
+ *bpp_reg = 0;
+ break;
+ case 10:
+ *bpp_reg = 1;
+ break;
+ case 12:
+ *bpp_reg = 2;
+ break;
+ case 14:
+ *bpp_reg = 3;
+ break;
+ default:
+ pr_err("%s:%d invalid bpp %d", __func__, __LINE__, bpp);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int32_t msm_vfe47_convert_io_fmt_to_reg(
+ enum msm_isp_pack_fmt pack_format, uint32_t *pack_reg)
+{
+ int rc = 0;
+
+ switch (pack_format) {
+ case QCOM:
+ *pack_reg = 0x0;
+ break;
+ case MIPI:
+ *pack_reg = 0x1;
+ break;
+ case DPCM6:
+ *pack_reg = 0x2;
+ break;
+ case DPCM8:
+ *pack_reg = 0x3;
+ break;
+ case PLAIN8:
+ *pack_reg = 0x4;
+ break;
+ case PLAIN16:
+ *pack_reg = 0x5;
+ break;
+ case DPCM10:
+ *pack_reg = 0x6;
+ break;
+ default:
+ pr_err("%s: invalid pack fmt %d!\n", __func__, pack_format);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+int32_t msm_vfe47_cfg_io_format(struct vfe_device *vfe_dev,
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
+{
+ int rc = 0;
+ int bpp = 0, read_bpp = 0;
+ enum msm_isp_pack_fmt pack_fmt = 0, read_pack_fmt = 0;
+ uint32_t bpp_reg = 0, pack_reg = 0;
+ uint32_t read_bpp_reg = 0, read_pack_reg = 0;
+ uint32_t io_format_reg = 0; /*io format register bit*/
+
+ io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x88);
+
+ /*input config*/
+ if ((stream_src < RDI_INTF_0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux ==
+ EXTERNAL_READ)) {
+ read_bpp = msm_isp_get_bit_per_pixel(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+ rc = msm_vfe47_convert_bpp_to_reg(read_bpp, &read_bpp_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_bpp_to_reg err! in_bpp %d rc %d\n",
+ __func__, read_bpp, rc);
+ return rc;
+ }
+
+ read_pack_fmt = msm_isp_get_pack_format(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+ rc = msm_vfe47_convert_io_fmt_to_reg(
+ read_pack_fmt, &read_pack_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ /*use input format(v4l2_pix_fmt) to get pack format*/
+ io_format_reg &= 0xFFC8FFFF;
+ io_format_reg |= (read_bpp_reg << 20 | read_pack_reg << 16);
+ }
+
+ bpp = msm_isp_get_bit_per_pixel(io_format);
+ rc = msm_vfe47_convert_bpp_to_reg(bpp, &bpp_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_bpp_to_reg err! bpp %d rc = %d\n",
+ __func__, bpp, rc);
+ return rc;
+ }
+
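+ /* PIX/CAMIF outputs carry bpp in bits [13:12]; IDEAL_RAW packs bpp into
+ * bits [5:4] and the pack format into bits [2:0] of the same register
+ */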
+ switch (stream_src) {
+ case PIX_VIDEO:
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
+ case CAMIF_RAW:
+ io_format_reg &= 0xFFFFCFFF;
+ io_format_reg |= bpp_reg << 12;
+ break;
+ case IDEAL_RAW:
+ /*use output format(v4l2_pix_fmt) to get pack format*/
+ pack_fmt = msm_isp_get_pack_format(io_format);
+ rc = msm_vfe47_convert_io_fmt_to_reg(pack_fmt, &pack_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ io_format_reg &= 0xFFFFFFC8;
+ io_format_reg |= bpp_reg << 4 | pack_reg;
+ break;
+ case RDI_INTF_0:
+ case RDI_INTF_1:
+ case RDI_INTF_2:
+ default:
+ pr_err("%s: Invalid stream source\n", __func__);
+ return -EINVAL;
+ }
+ msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x88);
+ return 0;
+}
+
+int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev,
+ void *arg)
+{
+ int rc = 0;
+ uint32_t bufq_handle = 0;
+ struct msm_isp_buffer *buf = NULL;
+ struct msm_vfe_fetch_eng_start *fe_cfg = arg;
+ struct msm_isp_buffer_mapped_info mapped_info;
+
+ if (vfe_dev->fetch_engine_info.is_busy == 1) {
+ pr_err("%s: fetch engine busy\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(&mapped_info, 0, sizeof(struct msm_isp_buffer_mapped_info));
+
+ /* Alternatively, the buffer address can be passed from user space;
+ * in that case the driver needs to map the buffer before using it
+ */
+ vfe_dev->fetch_engine_info.session_id = fe_cfg->session_id;
+ vfe_dev->fetch_engine_info.stream_id = fe_cfg->stream_id;
+ vfe_dev->fetch_engine_info.offline_mode = fe_cfg->offline_mode;
+ vfe_dev->fetch_engine_info.fd = fe_cfg->fd;
+
+ if (!fe_cfg->offline_mode) {
+ bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, fe_cfg->session_id,
+ fe_cfg->stream_id);
+ vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
+
+ rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
+ vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
+ if (rc < 0 || !buf) {
+ pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
+ __func__, rc, buf);
+ return -EINVAL;
+ }
+ mapped_info = buf->mapped_info[0];
+ buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ } else {
+ rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
+ &mapped_info, fe_cfg->fd);
+ if (rc < 0) {
+ pr_err("%s: can not map buffer\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ vfe_dev->fetch_engine_info.buf_idx = fe_cfg->buf_idx;
+ vfe_dev->fetch_engine_info.is_busy = 1;
+
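+ /* program the fetch buffer address, then issue the two-stage trigger
+ * writes that kick the fetch engine
+ */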
+ msm_camera_io_w(mapped_info.paddr, vfe_dev->vfe_base + 0x2F4);
+
+ msm_camera_io_w_mb(0x100000, vfe_dev->vfe_base + 0x80);
+ msm_camera_io_w_mb(0x200000, vfe_dev->vfe_base + 0x80);
+
+ ISP_DBG("%s:VFE%d Fetch Engine ready\n", __func__, vfe_dev->pdev->id);
+
+ return 0;
+}
+
+int msm_vfe47_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
+ void *arg)
+{
+ int rc = 0;
+ uint32_t bufq_handle = 0;
+ struct msm_isp_buffer *buf = NULL;
+ struct msm_vfe_fetch_eng_multi_pass_start *fe_cfg = arg;
+ struct msm_isp_buffer_mapped_info mapped_info;
+
+ if (vfe_dev->fetch_engine_info.is_busy == 1) {
+ pr_err("%s: fetch engine busy\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(&mapped_info, 0, sizeof(struct msm_isp_buffer_mapped_info));
+
+ vfe_dev->fetch_engine_info.session_id = fe_cfg->session_id;
+ vfe_dev->fetch_engine_info.stream_id = fe_cfg->stream_id;
+ vfe_dev->fetch_engine_info.offline_mode = fe_cfg->offline_mode;
+ vfe_dev->fetch_engine_info.fd = fe_cfg->fd;
+
+ if (!fe_cfg->offline_mode) {
+ bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, fe_cfg->session_id,
+ fe_cfg->stream_id);
+ vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
+
+ rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
+ vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
+ if (rc < 0 || !buf) {
+ pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
+ __func__, rc, buf);
+ return -EINVAL;
+ }
+ mapped_info = buf->mapped_info[0];
+ buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ } else {
+ rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
+ &mapped_info, fe_cfg->fd);
+ if (rc < 0) {
+ pr_err("%s: can not map buffer\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ vfe_dev->fetch_engine_info.buf_idx = fe_cfg->buf_idx;
+ vfe_dev->fetch_engine_info.is_busy = 1;
+
+ msm_camera_io_w(mapped_info.paddr + fe_cfg->input_buf_offset,
+ vfe_dev->vfe_base + 0x2F4);
+ msm_camera_io_w_mb(0x100000, vfe_dev->vfe_base + 0x80);
+ msm_camera_io_w_mb(0x200000, vfe_dev->vfe_base + 0x80);
+
+ ISP_DBG("%s:VFE%d Fetch Engine ready\n", __func__, vfe_dev->pdev->id);
+
+ return 0;
+}
+
+void msm_vfe47_cfg_fetch_engine(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint32_t x_size_word, temp;
+ struct msm_vfe_fetch_engine_cfg *fe_cfg = NULL;
+
+ if (pix_cfg->input_mux == EXTERNAL_READ) {
+ fe_cfg = &pix_cfg->fetch_engine_cfg;
+ pr_debug("%s:VFE%d wd x ht buf = %d x %d, fe = %d x %d\n",
+ __func__, vfe_dev->pdev->id, fe_cfg->buf_width,
+ fe_cfg->buf_height,
+ fe_cfg->fetch_width, fe_cfg->fetch_height);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_cgc_override(vfe_dev,
+ VFE47_BUS_RD_CGC_OVERRIDE_BIT, 1);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x84);
+ temp &= 0xFFFFFFFD;
+ temp |= (1 << 1);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x84);
+
+ msm_vfe47_config_irq(vfe_dev, (1 << 24), 0,
+ MSM_ISP_IRQ_ENABLE);
+
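+ /* program the fetch geometry: zero-based image height, buffer and fetch
+ * widths in words per line, and the buffer size in pixels
+ */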
+ temp = fe_cfg->fetch_height - 1;
+ msm_camera_io_w(temp & 0x3FFF, vfe_dev->vfe_base + 0x308);
+
+ x_size_word = msm_isp_cal_word_per_line(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format,
+ fe_cfg->buf_width);
+ msm_camera_io_w((x_size_word - 1) << 16,
+ vfe_dev->vfe_base + 0x30c);
+
+ x_size_word = msm_isp_cal_word_per_line(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format,
+ fe_cfg->fetch_width);
+ msm_camera_io_w(x_size_word << 16 |
+ (temp & 0x3FFF) << 2 | VFE47_FETCH_BURST_LEN,
+ vfe_dev->vfe_base + 0x310);
+
+ temp = ((fe_cfg->buf_width - 1) & 0x3FFF) << 16 |
+ ((fe_cfg->buf_height - 1) & 0x3FFF);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x314);
+
+ /* need to use a formula to calculate MAIN_UNPACK_PATTERN */
+ msm_camera_io_w(0xF6543210, vfe_dev->vfe_base + 0x318);
+ msm_camera_io_w(0xF, vfe_dev->vfe_base + 0x334);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ temp |= 2 << 5;
+ temp |= 128 << 8;
+ temp |= (pix_cfg->pixel_pattern & 0x3);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
+
+ } else {
+ pr_err("%s: Invalid mux configuration - mux: %d", __func__,
+ pix_cfg->input_mux);
+ }
+}
+
+void msm_vfe47_cfg_testgen(struct vfe_device *vfe_dev,
+ struct msm_vfe_testgen_cfg *testgen_cfg)
+{
+ uint32_t temp;
+ uint32_t bit_per_pixel = 0;
+ uint32_t bpp_reg = 0;
+ uint32_t bayer_pix_pattern_reg = 0;
+ uint32_t unicolorbar_reg = 0;
+ uint32_t unicolor_enb = 0;
+
+ bit_per_pixel = msm_isp_get_bit_per_pixel(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+
+ switch (bit_per_pixel) {
+ case 8:
+ bpp_reg = 0x0;
+ break;
+ case 10:
+ bpp_reg = 0x1;
+ break;
+ case 12:
+ bpp_reg = 0x10;
+ break;
+ case 14:
+ bpp_reg = 0x11;
+ break;
+ default:
+ pr_err("%s: invalid bpp %d\n", __func__, bit_per_pixel);
+ break;
+ }
+
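+ /* program bpp and burst frame count, then the zero-based frame geometry */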
+ msm_camera_io_w(bpp_reg << 16 | testgen_cfg->burst_num_frame,
+ vfe_dev->vfe_base + 0xC5C);
+
+ msm_camera_io_w(((testgen_cfg->lines_per_frame - 1) << 16) |
+ (testgen_cfg->pixels_per_line - 1), vfe_dev->vfe_base + 0xC60);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ temp |= (((testgen_cfg->h_blank) & 0x3FFF) << 8);
+ temp |= (1 << 22);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
+
+ msm_camera_io_w((1 << 16) | testgen_cfg->v_blank,
+ vfe_dev->vfe_base + 0xC70);
+
+ switch (testgen_cfg->pixel_bayer_pattern) {
+ case ISP_BAYER_RGRGRG:
+ bayer_pix_pattern_reg = 0x0;
+ break;
+ case ISP_BAYER_GRGRGR:
+ bayer_pix_pattern_reg = 0x1;
+ break;
+ case ISP_BAYER_BGBGBG:
+ bayer_pix_pattern_reg = 0x10;
+ break;
+ case ISP_BAYER_GBGBGB:
+ bayer_pix_pattern_reg = 0x11;
+ break;
+ default:
+ pr_err("%s: invalid pix pattern %d\n",
+ __func__, bit_per_pixel);
+ break;
+ }
+
+ if (testgen_cfg->color_bar_pattern == COLOR_BAR_8_COLOR) {
+ unicolor_enb = 0x0;
+ } else {
+ unicolor_enb = 0x1;
+ switch (testgen_cfg->color_bar_pattern) {
+ case UNICOLOR_WHITE:
+ unicolorbar_reg = 0x0;
+ break;
+ case UNICOLOR_YELLOW:
+ unicolorbar_reg = 0x1;
+ break;
+ case UNICOLOR_CYAN:
+ unicolorbar_reg = 0x10;
+ break;
+ case UNICOLOR_GREEN:
+ unicolorbar_reg = 0x11;
+ break;
+ case UNICOLOR_MAGENTA:
+ unicolorbar_reg = 0x100;
+ break;
+ case UNICOLOR_RED:
+ unicolorbar_reg = 0x101;
+ break;
+ case UNICOLOR_BLUE:
+ unicolorbar_reg = 0x110;
+ break;
+ case UNICOLOR_BLACK:
+ unicolorbar_reg = 0x111;
+ break;
+ default:
+ pr_err("%s: invalid colorbar %d\n",
+ __func__, testgen_cfg->color_bar_pattern);
+ break;
+ }
+ }
+
+ msm_camera_io_w((testgen_cfg->rotate_period << 8) |
+ (bayer_pix_pattern_reg << 6) | (unicolor_enb << 4) |
+ (unicolorbar_reg), vfe_dev->vfe_base + 0xC78);
+}
+
+void msm_vfe47_cfg_camif(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint16_t first_pixel, last_pixel, first_line, last_line;
+ struct msm_vfe_camif_cfg *camif_cfg = &pix_cfg->camif_cfg;
+ uint32_t val, subsample_period, subsample_pattern;
+ uint32_t irq_sub_period = camif_cfg->irq_subsample_period;
+ uint32_t frame_drop_period = camif_cfg->frame_drop_Period;
+ uint32_t frame_drop_pattern = camif_cfg->frame_drop_pattern;
+ struct msm_vfe_camif_subsample_cfg *subsample_cfg =
+ &pix_cfg->camif_cfg.subsample_cfg;
+ uint16_t bus_sub_en = 0;
+
+ bus_sub_en = camif_cfg->bus_subsample_en;
+
+ vfe_dev->dual_vfe_enable = camif_cfg->is_split;
+
+ msm_camera_io_w(pix_cfg->input_mux << 5 | pix_cfg->pixel_pattern,
+ vfe_dev->vfe_base + 0x50);
+
+ first_pixel = camif_cfg->first_pixel;
+ last_pixel = camif_cfg->last_pixel;
+ first_line = camif_cfg->first_line;
+ last_line = camif_cfg->last_line;
+
+ msm_camera_io_w((camif_cfg->lines_per_frame) << 16 |
+ (camif_cfg->pixels_per_line), vfe_dev->vfe_base + 0x484);
+ if (bus_sub_en) {
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
+ val &= 0xFFFFFFDF;
+ val = val | bus_sub_en << 5;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x47C);
+ subsample_cfg->pixel_skip &= 0x0000FFFF;
+ subsample_cfg->line_skip &= 0x0000FFFF;
+ msm_camera_io_w((subsample_cfg->line_skip << 16) |
+ subsample_cfg->pixel_skip, vfe_dev->vfe_base + 0x490);
+ } else {
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x490);
+ }
+
+
+ msm_camera_io_w(first_pixel << 16 | last_pixel,
+ vfe_dev->vfe_base + 0x488);
+
+ msm_camera_io_w(first_line << 16 | last_line,
+ vfe_dev->vfe_base + 0x48C);
+
+
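+ /* program the irq subsample and frame drop periods, then the drop pattern */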
+ msm_camera_io_w((irq_sub_period << 8) | 0 << 5 |
+ frame_drop_period, vfe_dev->vfe_base + 0x494);
+ msm_camera_io_w(frame_drop_pattern, vfe_dev->vfe_base + 0x498);
+ if (bus_sub_en) {
+ subsample_period =
+ camif_cfg->subsample_cfg.irq_subsample_period;
+ subsample_pattern =
+ camif_cfg->subsample_cfg.irq_subsample_pattern;
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x494);
+ val &= 0xFFFFE0FF;
+ val |= subsample_period << 8;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x494);
+ ISP_DBG("%s:camif PERIOD %x PATTERN %x\n",
+ __func__, subsample_period, subsample_pattern);
+
+ val = subsample_pattern;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x49C);
+
+ msm_camera_io_w(
+ subsample_cfg->first_pixel << 16 |
+ subsample_cfg->last_pixel,
+ vfe_dev->vfe_base + 0xCE4);
+ msm_camera_io_w(
+ subsample_cfg->first_line << 16 |
+ subsample_cfg->last_line,
+ vfe_dev->vfe_base + 0xCE8);
+ val = msm_camera_io_r(
+ vfe_dev->vfe_base + 0x47C);
+ ISP_DBG("%s: camif raw crop enabled\n", __func__);
+ val |= 1 << 22;
+ msm_camera_io_w(val,
+ vfe_dev->vfe_base + 0x47C);
+ } else {
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x49C);
+ }
+
+ ISP_DBG("%s: camif raw op fmt %d\n",
+ __func__, subsample_cfg->output_format);
+ /* PDAF output can be sent in the formats below */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x88);
+ switch (subsample_cfg->output_format) {
+ case CAMIF_PLAIN_8:
+ val |= PLAIN8 << 9;
+ break;
+ case CAMIF_PLAIN_16:
+ val |= PLAIN16 << 9;
+ break;
+ case CAMIF_MIPI_RAW:
+ val |= MIPI << 9;
+ break;
+ case CAMIF_QCOM_RAW:
+ val |= QCOM << 9;
+ break;
+ default:
+ break;
+ }
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x88);
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x46C);
+ val |= camif_cfg->camif_input;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x46C);
+}
+
+void msm_vfe47_cfg_input_mux(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint32_t core_cfg = 0;
+ uint32_t val = 0;
+
+ core_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ core_cfg &= 0xFFFFFF9F;
+
+ switch (pix_cfg->input_mux) {
+ case CAMIF:
+ core_cfg |= 0x0 << 5;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
+ msm_vfe47_cfg_camif(vfe_dev, pix_cfg);
+ break;
+ case TESTGEN:
+ /* Change CGC override */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
+ val |= (1 << 31);
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x3C);
+
+ /* CAMIF and TESTGEN will both go through CAMIF */
+ core_cfg |= 0x1 << 5;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
+ msm_vfe47_cfg_camif(vfe_dev, pix_cfg);
+ msm_vfe47_cfg_testgen(vfe_dev, &pix_cfg->testgen_cfg);
+ break;
+ case EXTERNAL_READ:
+ core_cfg |= 0x2 << 5;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
+ msm_vfe47_cfg_fetch_engine(vfe_dev, pix_cfg);
+ break;
+ default:
+ pr_err("%s: Unsupported input mux %d\n",
+ __func__, pix_cfg->input_mux);
+ break;
+ }
+}
+
+void msm_vfe47_configure_hvx(struct vfe_device *vfe_dev,
+ uint8_t is_stream_on)
+{
+ uint32_t val;
+
+ if (is_stream_on == 1) {
+ /* Enable HVX */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ val |= (1 << 3);
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x50);
+ val &= 0xFF7FFFFF;
+ if (vfe_dev->hvx_cmd == HVX_ROUND_TRIP)
+ val |= (1 << 23);
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x50);
+ } else {
+ /* Disable HVX */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ val &= 0xFFFFFFF7;
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x50);
+ }
+}
+
+void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state update_state)
+{
+ uint32_t val;
+ bool bus_en, vfe_en;
+
+ if (update_state == NO_UPDATE)
+ return;
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
+ if (update_state == ENABLE_CAMIF) {
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0x81, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+ msm_vfe47_config_irq(vfe_dev, 0x17, 0x81,
+ MSM_ISP_IRQ_ENABLE);
+
+ if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
+ (vfe_dev->hvx_cmd <= HVX_ROUND_TRIP))
+ msm_vfe47_configure_hvx(vfe_dev, 1);
+ else
+ msm_vfe47_configure_hvx(vfe_dev, 0);
+
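+ /* enable the raw/pixel output paths (bits 7:6 of 0x47C) based on which
+ * stream types are active on VFE_PIX_0
+ */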
+ bus_en =
+ ((vfe_dev->axi_data.
+ src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
+ vfe_en =
+ ((vfe_dev->axi_data.
+ src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
+ val &= 0xFFFFFF3F;
+ val = val | bus_en << 7 | vfe_en << 6;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x47C);
+ msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x478);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x478);
+ /* configure EPOCH0 for 20 lines */
+ msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x4A0);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
+ /* testgen GO*/
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1, vfe_dev->vfe_base + 0xC58);
+ } else if (update_state == DISABLE_CAMIF ||
+ update_state == DISABLE_CAMIF_IMMEDIATELY) {
+ /* turn off camif violation and error irqs */
+ msm_vfe47_config_irq(vfe_dev, 0, 0x81,
+ MSM_ISP_IRQ_DISABLE);
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x464);
+ /* disable danger signal */
+ msm_camera_io_w_mb(val & ~(1 << 8), vfe_dev->vfe_base + 0x464);
+ msm_camera_io_w_mb((update_state == DISABLE_CAMIF ? 0x0 : 0x6),
+ vfe_dev->vfe_base + 0x478);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ /* testgen OFF*/
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0xC58);
+
+ if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
+ (vfe_dev->hvx_cmd <= HVX_ROUND_TRIP))
+ msm_vfe47_configure_hvx(vfe_dev, 0);
+ }
+}
+
+void msm_vfe47_cfg_rdi_reg(
+ struct vfe_device *vfe_dev, struct msm_vfe_rdi_cfg *rdi_cfg,
+ enum msm_vfe_input_src input_src)
+{
+ uint8_t rdi = input_src - VFE_RAW_0;
+ uint32_t rdi_reg_cfg;
+
+ rdi_reg_cfg = (rdi == 0) ? 0x3 : 0x0;
+
+ rdi_reg_cfg |= (rdi * 3) << 28 | rdi_cfg->cid << 4 | 1 << 2;
+ msm_camera_io_w(
+ rdi_reg_cfg, vfe_dev->vfe_base + VFE47_RDI_BASE(rdi));
+}
+
+void msm_vfe47_axi_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ uint32_t val;
+ uint32_t wm_base =
+ VFE47_WM_BASE(stream_info->vfe_plane_cfg[plane_idx].wmIndex);
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + wm_base + 0x14);
+ val &= ~0x2;
+ if (stream_info->frame_based)
+ val |= 0x2;
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+
+ if (!stream_info->frame_based) {
+ /* WR_IMAGE_SIZE */
+ val =
+ ((stream_info->vfe_plane_cfg[plane_idx].
+ image_qwords_per_line << 16)
+ | (stream_info->vfe_plane_cfg[plane_idx].
+ image_height - 1));
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x1C);
+ /* WR_BUFFER_CFG */
+ val = VFE47_BURST_LEN |
+ ((stream_info->vfe_plane_cfg[plane_idx].
+ output_scan_lines - 1)
+ << 2) |
+ (stream_info->vfe_plane_cfg[plane_idx].
+ output_stride << 16);
+ }
+
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(0xFFFFFFFF,
+ vfe_dev->vfe_base + wm_base + 0x28);
+}
+
+void msm_vfe47_axi_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint32_t val = 0;
+ uint32_t wm_base =
+ VFE47_WM_BASE(stream_info->vfe_plane_cfg[plane_idx].wmIndex);
+
+ /* WR_ADDR_CFG */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ /* WR_IMAGE_SIZE */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x1C);
+ /* WR_BUFFER_CFG */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x28);
+}
+
+void msm_vfe47_axi_cfg_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ struct msm_vfe_axi_output_plane_cfg *plane_cfg =
+ &stream_info->vfe_plane_cfg[plane_idx];
+ uint8_t wm = plane_cfg->wmIndex;
+ uint32_t xbar_cfg = 0;
+ uint32_t xbar_reg_cfg = 0;
+
+ switch (stream_info->stream_src) {
+ case PIX_VIDEO:
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER: {
+ if (plane_cfg->plane_fmt != CRCB_PLANE &&
+ plane_cfg->plane_fmt != CBCR_PLANE) {
+ /* SINGLE_STREAM_SEL */
+ xbar_cfg |= plane_cfg->output_plane_format << 8;
+ } else {
+ switch (stream_info->output_format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ /* PAIR_STREAM_SWAP_CTRL */
+ xbar_cfg |= 0x3 << 4;
+ break;
+ }
+ xbar_cfg |= 0x1 << 2; /* PAIR_STREAM_EN */
+ }
+ if (stream_info->stream_src == PIX_VIEWFINDER)
+ xbar_cfg |= 0x1; /* VIEW_STREAM_EN */
+ else if (stream_info->stream_src == PIX_VIDEO)
+ xbar_cfg |= 0x2;
+ break;
+ }
+ case CAMIF_RAW:
+ xbar_cfg = 0x300;
+ break;
+ case IDEAL_RAW:
+ xbar_cfg = 0x400;
+ break;
+ case RDI_INTF_0:
+ xbar_cfg = 0xC00;
+ break;
+ case RDI_INTF_1:
+ xbar_cfg = 0xD00;
+ break;
+ case RDI_INTF_2:
+ xbar_cfg = 0xE00;
+ break;
+ default:
+ pr_err("%s: Invalid stream src\n", __func__);
+ break;
+ }
+
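+ /* each write master owns a 16-bit field in its XBAR register; merge the
+ * new source routing into that field only
+ */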
+ xbar_reg_cfg =
+ msm_camera_io_r(vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFFFF << VFE47_XBAR_SHIFT(wm));
+ xbar_reg_cfg |= (xbar_cfg << VFE47_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg,
+ vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
+}
+
+void msm_vfe47_axi_clear_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_reg_cfg = 0;
+
+ xbar_reg_cfg =
+ msm_camera_io_r(vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFFFF << VFE47_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg,
+ vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
+}
+
+
+void msm_vfe47_cfg_axi_ub_equal_default(
+ struct vfe_device *vfe_dev, enum msm_vfe_input_src frame_src)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data =
+ &vfe_dev->axi_data;
+ uint32_t total_image_size = 0;
+ uint8_t num_used_wms = 0;
+ uint32_t prop_size = 0;
+ uint32_t wm_ub_size;
+ uint64_t delta;
+ uint32_t rdi_ub_offset;
+ int plane;
+ struct msm_vfe_axi_stream *stream_info;
+
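+ /* reserve the minimum UB for each RDI write master up front, then share
+ * the remaining UB among PIX write masters in proportion to image size
+ */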
+ if (frame_src == VFE_PIX_0) {
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i] &&
+ SRC_TO_INTF(
+ HANDLE_TO_IDX(axi_data->free_wm[i])) ==
+ VFE_PIX_0) {
+ num_used_wms++;
+ total_image_size +=
+ axi_data->wm_image_size[i];
+ }
+ }
+ ub_offset = (axi_data->hw_info->num_rdi * 2) *
+ axi_data->hw_info->min_wm_ub;
+ prop_size = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_ub_size(vfe_dev) -
+ axi_data->hw_info->min_wm_ub * (num_used_wms +
+ axi_data->hw_info->num_rdi * 2);
+ }
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (!axi_data->free_wm[i]) {
+ msm_camera_io_w(0,
+ vfe_dev->vfe_base +
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ ub_reg_offset(vfe_dev, i));
+ }
+ if (frame_src != SRC_TO_INTF(
+ HANDLE_TO_IDX(axi_data->free_wm[i])))
+ continue;
+
+ if (frame_src == VFE_PIX_0) {
+ delta = (uint64_t)axi_data->wm_image_size[i] *
+ (uint64_t)prop_size;
+ do_div(delta, total_image_size);
+ wm_ub_size = axi_data->hw_info->min_wm_ub +
+ (uint32_t)delta;
+ msm_camera_io_w(ub_offset << 16 | (wm_ub_size - 1),
+ vfe_dev->vfe_base +
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ ub_reg_offset(vfe_dev, i));
+ ub_offset += wm_ub_size;
+ } else {
+
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(axi_data->free_wm[i])];
+ for (plane = 0; plane < stream_info->num_planes;
+ plane++)
+ if (stream_info->wm[plane] ==
+ axi_data->free_wm[i])
+ break;
+
+ rdi_ub_offset = ((SRC_TO_INTF(
+ HANDLE_TO_IDX(axi_data->free_wm[i])) -
+ VFE_RAW_0) * 2 + plane) *
+ axi_data->hw_info->min_wm_ub;
+ wm_ub_size = axi_data->hw_info->min_wm_ub * 2;
+ msm_camera_io_w((rdi_ub_offset << 16 |
+ (wm_ub_size - 1)),
+ vfe_dev->vfe_base +
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ ub_reg_offset(vfe_dev, i));
+ }
+ }
+}
+
+void msm_vfe47_cfg_axi_ub_equal_slicing(
+ struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t ub_equal_slice = 0;
+
+ ub_equal_slice = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_ub_size(vfe_dev) /
+ axi_data->hw_info->num_wm;
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ msm_camera_io_w(ub_offset << 16 | (ub_equal_slice - 1),
+ vfe_dev->vfe_base +
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ ub_reg_offset(vfe_dev, i));
+ ub_offset += ub_equal_slice;
+ }
+}
+
+void msm_vfe47_cfg_axi_ub(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ axi_data->wm_ub_cfg_policy = UB_CFG_POLICY;
+ if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
+ msm_vfe47_cfg_axi_ub_equal_slicing(vfe_dev);
+ else
+ msm_vfe47_cfg_axi_ub_equal_default(vfe_dev, frame_src);
+}
+
+void msm_vfe47_read_wm_ping_pong_addr(
+ struct vfe_device *vfe_dev)
+{
+ msm_camera_io_dump(vfe_dev->vfe_base +
+ (VFE47_WM_BASE(0) & 0xFFFFFFF0), 0x200, 1);
+}
+
+void msm_vfe47_update_ping_pong_addr(
+ void __iomem *vfe_base,
+ uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
+ int32_t buf_size)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ uint32_t paddr32_max = 0;
+
+ if (buf_size < 0)
+ buf_size = 0;
+
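+ /* the low 32 bits of the address go to the ping/pong register; the
+ * 64-byte-aligned end address goes to the max-address register after it
+ */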
+ paddr32_max = (paddr + buf_size) & 0xFFFFFFC0;
+
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE47_PING_PONG_BASE(wm_idx, pingpong_bit));
+ msm_camera_io_w(paddr32_max, vfe_base +
+ VFE47_PING_PONG_BASE(wm_idx, pingpong_bit) + 0x4);
+
+}
+
+static void msm_vfe47_set_halt_restart_mask(struct vfe_device *vfe_dev)
+{
+ msm_vfe47_config_irq(vfe_dev, BIT(31), BIT(8), MSM_ISP_IRQ_SET);
+}
+
+int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
+ uint32_t blocking)
+{
+ int rc = 0;
+ enum msm_vfe_input_src i;
+ uint32_t val = 0;
+
+ val = msm_camera_io_r(vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
+ val |= 0x1;
+ msm_camera_io_w(val, vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
+
+ /* Keep only halt and reset mask */
+ msm_vfe47_set_halt_restart_mask(vfe_dev);
+
+ /*Clear IRQ Status0, only leave reset irq mask*/
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
+
+ /*Clear IRQ Status1, only leave halt irq mask*/
+ msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
+
+ /*push clear cmd*/
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+
+
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ == OVERFLOW_DETECTED)
+ pr_err_ratelimited("%s: VFE%d halt for recovery, blocking %d\n",
+ __func__, vfe_dev->pdev->id, blocking);
+
+ if (blocking) {
+ init_completion(&vfe_dev->halt_complete);
+ /* Halt AXI Bus Bridge */
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x400);
+ rc = wait_for_completion_timeout(
+ &vfe_dev->halt_complete, msecs_to_jiffies(500));
+ if (rc <= 0)
+ pr_err("%s:VFE%d halt timeout rc=%d\n", __func__,
+ vfe_dev->pdev->id, rc);
+
+ } else {
+ /* Halt AXI Bus Bridge */
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x400);
+ }
+
+ for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
+ /* if any stream is waiting for update, signal complete */
+ if (vfe_dev->axi_data.stream_update[i]) {
+ ISP_DBG("%s: complete stream update\n", __func__);
+ msm_isp_axi_stream_update(vfe_dev, i);
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ }
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
+ ISP_DBG("%s: complete on axi config update\n",
+ __func__);
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ }
+ }
+
+ if (atomic_read(&vfe_dev->stats_data.stats_update)) {
+ ISP_DBG("%s: complete on stats update\n", __func__);
+ msm_isp_stats_stream_update(vfe_dev);
+ if (atomic_read(&vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ }
+
+ return rc;
+}
+
+int msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
+ uint32_t blocking, uint32_t enable_camif)
+{
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+
+ /* Start AXI */
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x400);
+
+ memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+ atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+
+ /* reset the irq masks without camif violation and errors */
+ msm_vfe47_config_irq(vfe_dev, vfe_dev->recovery_irq0_mask,
+ vfe_dev->recovery_irq1_mask, MSM_ISP_IRQ_SET);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_SRC_MAX);
+
+ if (enable_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, ENABLE_CAMIF);
+ }
+
+ return 0;
+}
+
+uint32_t msm_vfe47_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 8) & 0x7F;
+}
+
+uint32_t msm_vfe47_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 25) & 0xF;
+}
+
+uint32_t msm_vfe47_get_pingpong_status(
+ struct vfe_device *vfe_dev)
+{
+ return msm_camera_io_r(vfe_dev->vfe_base + 0x338);
+}
+
+int msm_vfe47_get_stats_idx(enum msm_isp_stats_type stats_type)
+{
+ /* idx used for composite; needs to map to irq status */
+ switch (stats_type) {
+ case MSM_ISP_STATS_HDR_BE:
+ return STATS_COMP_IDX_HDR_BE;
+ case MSM_ISP_STATS_BG:
+ return STATS_COMP_IDX_BG;
+ case MSM_ISP_STATS_BF:
+ return STATS_COMP_IDX_BF;
+ case MSM_ISP_STATS_HDR_BHIST:
+ return STATS_COMP_IDX_HDR_BHIST;
+ case MSM_ISP_STATS_RS:
+ return STATS_COMP_IDX_RS;
+ case MSM_ISP_STATS_CS:
+ return STATS_COMP_IDX_CS;
+ case MSM_ISP_STATS_IHIST:
+ return STATS_COMP_IDX_IHIST;
+ case MSM_ISP_STATS_BHIST:
+ return STATS_COMP_IDX_BHIST;
+ case MSM_ISP_STATS_AEC_BG:
+ return STATS_COMP_IDX_AEC_BG;
+ default:
+ pr_err("%s: Invalid stats type\n", __func__);
+ return -EINVAL;
+ }
+}
+
+int msm_vfe47_stats_check_streams(
+ struct msm_vfe_stats_stream *stream_info)
+{
+ return 0;
+}
+
+void msm_vfe47_stats_cfg_comp_mask(
+ struct vfe_device *vfe_dev, uint32_t stats_mask,
+ uint8_t request_comp_index, uint8_t enable)
+{
+ uint32_t comp_mask_reg;
+ atomic_t *stats_comp_mask;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask < 1)
+ return;
+
+ if (request_comp_index >= MAX_NUM_STATS_COMP_MASK) {
+ pr_err("%s: num of comp masks %d exceed max %d\n",
+ __func__, request_comp_index,
+ MAX_NUM_STATS_COMP_MASK);
+ return;
+ }
+
+ if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask >
+ MAX_NUM_STATS_COMP_MASK) {
+ pr_err("%s: num of comp masks %d exceed max %d\n",
+ __func__,
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask,
+ MAX_NUM_STATS_COMP_MASK);
+ return;
+ }
+
+ stats_mask = stats_mask & 0x1FF;
+
+ stats_comp_mask = &stats_data->stats_comp_mask[request_comp_index];
+ comp_mask_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x78);
+
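+ /* each composite index owns a 16-bit field of the stats comp mask at
+ * 0x78; irq0 bits 29/30 report the matching composite done
+ */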
+ if (enable) {
+ comp_mask_reg |= stats_mask << (request_comp_index * 16);
+ atomic_set(stats_comp_mask, stats_mask |
+ atomic_read(stats_comp_mask));
+ msm_vfe47_config_irq(vfe_dev, 1 << (29 + request_comp_index),
+ 0, MSM_ISP_IRQ_ENABLE);
+ } else {
+ if (!(atomic_read(stats_comp_mask) & stats_mask))
+ return;
+
+ atomic_set(stats_comp_mask,
+ ~stats_mask & atomic_read(stats_comp_mask));
+ comp_mask_reg &= ~(stats_mask << (request_comp_index * 16));
+ msm_vfe47_config_irq(vfe_dev, 1 << (29 + request_comp_index),
+ 0, MSM_ISP_IRQ_DISABLE);
+ }
+
+ msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x78);
+
+ ISP_DBG("%s: comp_mask_reg: %x comp mask0 %x mask1: %x\n",
+ __func__, comp_mask_reg,
+ atomic_read(&stats_data->stats_comp_mask[0]),
+ atomic_read(&stats_data->stats_comp_mask[1]));
+}
+
+void msm_vfe47_stats_cfg_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ switch (STATS_IDX(stream_info->stream_handle)) {
+ case STATS_COMP_IDX_AEC_BG:
+ msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_ENABLE);
+ break;
+ case STATS_COMP_IDX_HDR_BE:
+ msm_vfe47_config_irq(vfe_dev, 1 << 16, 0, MSM_ISP_IRQ_ENABLE);
+ break;
+ case STATS_COMP_IDX_BG:
+ msm_vfe47_config_irq(vfe_dev, 1 << 17, 0, MSM_ISP_IRQ_ENABLE);
+ break;
+ case STATS_COMP_IDX_BF:
+ msm_vfe47_config_irq(vfe_dev, 1 << 18, 1 << 26,
+ MSM_ISP_IRQ_ENABLE);
+ break;
+ case STATS_COMP_IDX_HDR_BHIST:
+ msm_vfe47_config_irq(vfe_dev, 1 << 19, 0, MSM_ISP_IRQ_ENABLE);
+ break;
+ case STATS_COMP_IDX_RS:
+ msm_vfe47_config_irq(vfe_dev, 1 << 20, 0, MSM_ISP_IRQ_ENABLE);
+ break;
+ case STATS_COMP_IDX_CS:
+ msm_vfe47_config_irq(vfe_dev, 1 << 21, 0, MSM_ISP_IRQ_ENABLE);
+ break;
+ case STATS_COMP_IDX_IHIST:
+ msm_vfe47_config_irq(vfe_dev, 1 << 22, 0, MSM_ISP_IRQ_ENABLE);
+ break;
+ case STATS_COMP_IDX_BHIST:
+ msm_vfe47_config_irq(vfe_dev, 1 << 23, 0, MSM_ISP_IRQ_ENABLE);
+ break;
+ default:
+ pr_err("%s: Invalid stats idx %d\n", __func__,
+ STATS_IDX(stream_info->stream_handle));
+ }
+}
+
+void msm_vfe47_stats_clear_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask, irq_mask_1;
+
+ irq_mask = vfe_dev->irq0_mask;
+ irq_mask_1 = vfe_dev->irq1_mask;
+
+ switch (STATS_IDX(stream_info->stream_handle)) {
+ case STATS_COMP_IDX_AEC_BG:
+ msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_DISABLE);
+ break;
+ case STATS_COMP_IDX_HDR_BE:
+ msm_vfe47_config_irq(vfe_dev, 1 << 16, 0, MSM_ISP_IRQ_DISABLE);
+ break;
+ case STATS_COMP_IDX_BG:
+ msm_vfe47_config_irq(vfe_dev, 1 << 17, 0, MSM_ISP_IRQ_DISABLE);
+ break;
+ case STATS_COMP_IDX_BF:
+ msm_vfe47_config_irq(vfe_dev, 1 << 18, 1 << 26,
+ MSM_ISP_IRQ_DISABLE);
+ break;
+ case STATS_COMP_IDX_HDR_BHIST:
+ msm_vfe47_config_irq(vfe_dev, 1 << 19, 0, MSM_ISP_IRQ_DISABLE);
+ break;
+ case STATS_COMP_IDX_RS:
+ msm_vfe47_config_irq(vfe_dev, 1 << 20, 0, MSM_ISP_IRQ_DISABLE);
+ break;
+ case STATS_COMP_IDX_CS:
+ msm_vfe47_config_irq(vfe_dev, 1 << 21, 0, MSM_ISP_IRQ_DISABLE);
+ break;
+ case STATS_COMP_IDX_IHIST:
+ msm_vfe47_config_irq(vfe_dev, 1 << 22, 0, MSM_ISP_IRQ_DISABLE);
+ break;
+ case STATS_COMP_IDX_BHIST:
+ msm_vfe47_config_irq(vfe_dev, 1 << 23, 0, MSM_ISP_IRQ_DISABLE);
+ break;
+ default:
+ pr_err("%s: Invalid stats idx %d\n", __func__,
+ STATS_IDX(stream_info->stream_handle));
+ }
+}
+
+void msm_vfe47_stats_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_base = VFE47_STATS_BASE(stats_idx);
+
+ /* WR_ADDR_CFG */
+ msm_camera_io_w(stream_info->framedrop_period << 2,
+ vfe_dev->vfe_base + stats_base + 0x10);
+ /* WR_IRQ_FRAMEDROP_PATTERN */
+ msm_camera_io_w(stream_info->framedrop_pattern,
+ vfe_dev->vfe_base + stats_base + 0x18);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(0xFFFFFFFF,
+ vfe_dev->vfe_base + stats_base + 0x1C);
+}
+
+void msm_vfe47_stats_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t val = 0;
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_base = VFE47_STATS_BASE(stats_idx);
+
+ /* WR_ADDR_CFG */
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x10);
+ /* WR_IRQ_FRAMEDROP_PATTERN */
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x18);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x1C);
+}
+
+void msm_vfe47_stats_cfg_ub(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ uint32_t ub_size[VFE47_NUM_STATS_TYPE] = {
+ 16, /* MSM_ISP_STATS_HDR_BE */
+ 16, /* MSM_ISP_STATS_BG */
+ 16, /* MSM_ISP_STATS_BF */
+ 16, /* MSM_ISP_STATS_HDR_BHIST */
+ 16, /* MSM_ISP_STATS_RS */
+ 16, /* MSM_ISP_STATS_CS */
+ 16, /* MSM_ISP_STATS_IHIST */
+ 16, /* MSM_ISP_STATS_BHIST */
+ 16, /* MSM_ISP_STATS_AEC_BG */
+ };
+ if (vfe_dev->pdev->id == ISP_VFE1)
+ ub_offset = VFE47_UB_SIZE_VFE1;
+ else if (vfe_dev->pdev->id == ISP_VFE0)
+ ub_offset = VFE47_UB_SIZE_VFE0;
+ else
+ pr_err("%s: incorrect VFE device\n", __func__);
+
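+ /* carve the stats UB downward from the top of the VFE UB in fixed
+ * 16-unit chunks, programming offset/size per stats write master
+ */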
+ for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
+ ub_offset -= ub_size[i];
+ msm_camera_io_w(VFE47_STATS_BURST_LEN << 30 |
+ ub_offset << 16 | (ub_size[i] - 1),
+ vfe_dev->vfe_base + VFE47_STATS_BASE(i) + 0x14);
+ }
+}
+
+void msm_vfe47_stats_update_cgc_override(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+ uint32_t module_cfg, cgc_mask = 0;
+
+ for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case STATS_COMP_IDX_HDR_BE:
+ cgc_mask |= 1;
+ break;
+ case STATS_COMP_IDX_BG:
+ cgc_mask |= (1 << 3);
+ break;
+ case STATS_COMP_IDX_BHIST:
+ cgc_mask |= (1 << 4);
+ break;
+ case STATS_COMP_IDX_RS:
+ cgc_mask |= (1 << 5);
+ break;
+ case STATS_COMP_IDX_CS:
+ cgc_mask |= (1 << 6);
+ break;
+ case STATS_COMP_IDX_IHIST:
+ cgc_mask |= (1 << 7);
+ break;
+ case STATS_COMP_IDX_AEC_BG:
+ cgc_mask |= (1 << 8);
+ break;
+ case STATS_COMP_IDX_BF:
+ cgc_mask |= (1 << 2);
+ break;
+ case STATS_COMP_IDX_HDR_BHIST:
+ cgc_mask |= (1 << 1);
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+ /* CGC override: enforce BAF for DMI */
+ module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
+ if (enable)
+ module_cfg |= cgc_mask;
+ else
+ module_cfg &= ~cgc_mask;
+ msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x30);
+}
+
+bool msm_vfe47_is_module_cfg_lock_needed(
+ uint32_t reg_offset)
+{
+ return false;
+}
+
+void msm_vfe47_stats_enable_module(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+ uint32_t module_cfg, module_cfg_mask = 0;
+
+ /* BF stats involve DMI cfg, ignore */
+ for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case STATS_COMP_IDX_HDR_BE:
+ module_cfg_mask |= 1;
+ break;
+ case STATS_COMP_IDX_HDR_BHIST:
+ module_cfg_mask |= 1 << 1;
+ break;
+ case STATS_COMP_IDX_BF:
+ module_cfg_mask |= 1 << 2;
+ break;
+ case STATS_COMP_IDX_BG:
+ module_cfg_mask |= 1 << 3;
+ break;
+ case STATS_COMP_IDX_BHIST:
+ module_cfg_mask |= 1 << 4;
+ break;
+ case STATS_COMP_IDX_RS:
+ module_cfg_mask |= 1 << 5;
+ break;
+ case STATS_COMP_IDX_CS:
+ module_cfg_mask |= 1 << 6;
+ break;
+ case STATS_COMP_IDX_IHIST:
+ module_cfg_mask |= 1 << 7;
+ break;
+ case STATS_COMP_IDX_AEC_BG:
+ module_cfg_mask |= 1 << 8;
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+ module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x44);
+ if (enable)
+ module_cfg |= module_cfg_mask;
+ else
+ module_cfg &= ~module_cfg_mask;
+
+ msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x44);
+ /* enable wm if needed */
+ if (vfe_dev->hw_info->vfe_ops.stats_ops.enable_stats_wm)
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_stats_wm(vfe_dev,
+ stats_mask, enable);
+}
+
+void msm_vfe47_stats_update_ping_pong_addr(
+ void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status, dma_addr_t paddr)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE47_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
+}
+
+uint32_t msm_vfe47_stats_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ /* TODO: define bf early done irq in status_0 and
+ * bf pingpong done in status_1
+ */
+ uint32_t comp_mapped_irq_mask = 0;
+ int i = 0;
+
+ /*
+ * remove early done and handle separately,
+ * add bf idx on status 1
+ */
+ irq_status0 &= ~(1 << 18);
+
+ for (i = 0; i < VFE47_NUM_STATS_TYPE; i++)
+ if ((irq_status0 >> stats_irq_map_comp_mask[i]) & 0x1)
+ comp_mapped_irq_mask |= (1 << i);
+ if ((irq_status1 >> 26) & 0x1)
+ comp_mapped_irq_mask |= (1 << STATS_COMP_IDX_BF);
+
+ return comp_mapped_irq_mask;
+}
+
+uint32_t msm_vfe47_stats_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 29) & 0x3;
+}
+
+uint32_t msm_vfe47_stats_get_frame_id(
+ struct vfe_device *vfe_dev)
+{
+ return vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+}
+
+void msm_vfe47_deinit_bandwidth_mgr(
+ struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr)
+{
+ msm_bus_scale_client_update_request(
+ isp_bandwidth_mgr->bus_client, 0);
+ msm_bus_scale_unregister_client(isp_bandwidth_mgr->bus_client);
+ isp_bandwidth_mgr->bus_client = 0;
+}
+
+int msm_vfe47_init_bandwidth_mgr(struct vfe_device *vfe_dev,
+ struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr)
+{
+ isp_bandwidth_mgr->bus_client =
+ msm_bus_scale_register_client(&msm_isp_bus_client_pdata);
+ if (!isp_bandwidth_mgr->bus_client) {
+ pr_err("%s: client register failed\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int msm_vfe47_update_bandwidth(
+ struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr)
+{
+ int i;
+ uint64_t ab = 0;
+ uint64_t ib = 0;
+ struct msm_bus_paths *path;
+
+ if (!isp_bandwidth_mgr->bus_vector_active_idx)
+ isp_bandwidth_mgr->bus_vector_active_idx = 1;
+ else
+ ALT_VECTOR_IDX(isp_bandwidth_mgr->bus_vector_active_idx);
+
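+ /* sum ab/ib of all active clients into the alternate bus vector, commit
+ * the request, then log it in the request history
+ */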
+ path = &(msm_isp_bus_client_pdata.usecase[
+ isp_bandwidth_mgr->bus_vector_active_idx]);
+ path->vectors[0].ab = 0;
+ path->vectors[0].ib = 0;
+ for (i = 0; i < MAX_ISP_CLIENT; i++) {
+ if (isp_bandwidth_mgr->client_info[i].active) {
+ path->vectors[0].ab +=
+ isp_bandwidth_mgr->client_info[i].ab;
+ path->vectors[0].ib +=
+ isp_bandwidth_mgr->client_info[i].ib;
+ ab += isp_bandwidth_mgr->client_info[i].ab;
+ ib += isp_bandwidth_mgr->client_info[i].ib;
+ }
+ }
+ msm_bus_scale_client_update_request(isp_bandwidth_mgr->bus_client,
+ isp_bandwidth_mgr->bus_vector_active_idx);
+ /* Insert into circular buffer */
+ msm_isp_update_req_history(isp_bandwidth_mgr->bus_client,
+ ab, ib,
+ isp_bandwidth_mgr->client_info,
+ sched_clock());
+ return 0;
+}
+
+int msm_vfe47_get_clks(struct vfe_device *vfe_dev)
+{
+ int i, rc;
+
+ rc = msm_camera_get_clk_info(vfe_dev->pdev, &vfe_dev->vfe_clk_info,
+ &vfe_dev->vfe_clk, &vfe_dev->num_clk);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < vfe_dev->num_clk; i++) {
+ if (strcmp(vfe_dev->vfe_clk_info[i].clk_name,
+ "vfe_clk_src") == 0)
+ vfe_dev->hw_info->vfe_clk_idx = i;
+ }
+ return 0;
+}
+
+void msm_vfe47_put_clks(struct vfe_device *vfe_dev)
+{
+ msm_camera_put_clk_info(vfe_dev->pdev, &vfe_dev->vfe_clk_info,
+ &vfe_dev->vfe_clk, vfe_dev->num_clk);
+
+ vfe_dev->num_clk = 0;
+}
+
+int msm_vfe47_enable_clks(struct vfe_device *vfe_dev, int enable)
+{
+ return msm_camera_clk_enable(&vfe_dev->pdev->dev,
+ vfe_dev->vfe_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, enable);
+}
+
+int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
+{
+ int rc = 0;
+ int clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+
+ rc = msm_camera_clk_set_rate(&vfe_dev->pdev->dev,
+ vfe_dev->vfe_clk[clk_idx], *rate);
+ if (rc < 0)
+ return rc;
+ *rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
+ vfe_dev->msm_isp_vfe_clk_rate = *rate;
+ return 0;
+}
+
+int msm_vfe47_get_max_clk_rate(struct vfe_device *vfe_dev, long *rate)
+{
+ int clk_idx = 0;
+ unsigned long max_value = ~0;
+ long round_rate = 0;
+
+ if (!vfe_dev || !rate) {
+ pr_err("%s:%d failed: vfe_dev %pK rate %pK\n",
+ __func__, __LINE__, vfe_dev, rate);
+ return -EINVAL;
+ }
+
+ *rate = 0;
+ if (!vfe_dev->hw_info) {
+ pr_err("%s:%d failed: vfe_dev->hw_info %pK\n", __func__,
+ __LINE__, vfe_dev->hw_info);
+ return -EINVAL;
+ }
+
+ clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+ if (clk_idx >= vfe_dev->num_clk) {
+ pr_err("%s:%d failed: clk_idx %d max array size %zd\n",
+ __func__, __LINE__, clk_idx,
+ vfe_dev->num_clk);
+ return -EINVAL;
+ }
+
+ round_rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], max_value);
+ if (round_rate < 0) {
+ pr_err("%s: Invalid vfe clock rate\n", __func__);
+ return -EINVAL;
+ }
+
+ *rate = round_rate;
+ return 0;
+}
+
+int msm_vfe47_get_clk_rates(struct vfe_device *vfe_dev,
+ struct msm_isp_clk_rates *rates)
+{
+ struct device_node *of_node;
+ int32_t rc = 0;
+ uint32_t svs = 0, nominal = 0, turbo = 0;
+
+ if (!vfe_dev || !rates) {
+ pr_err("%s:%d failed: vfe_dev %pK rates %pK\n", __func__,
+ __LINE__, vfe_dev, rates);
+ return -EINVAL;
+ }
+
+ if (!vfe_dev->pdev) {
+ pr_err("%s:%d failed: vfe_dev->pdev %pK\n", __func__,
+ __LINE__, vfe_dev->pdev);
+ return -EINVAL;
+ }
+
+ of_node = vfe_dev->pdev->dev.of_node;
+
+ if (!of_node) {
+ pr_err("%s %d failed: of_node = %pK\n", __func__,
+ __LINE__, of_node);
+ return -EINVAL;
+ }
+
+ /*
+ * Many older targets don't define svs;
+ * return svs=0 for those targets.
+ */
+ rc = of_property_read_u32(of_node, "max-clk-svs",
+ &svs);
+ if (rc < 0)
+ svs = 0;
+
+ rc = of_property_read_u32(of_node, "max-clk-nominal",
+ &nominal);
+ if (rc < 0 || !nominal) {
+ pr_err("%s: nominal rate error\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(of_node, "max-clk-turbo",
+ &turbo);
+ if (rc < 0 || !turbo) {
+ pr_err("%s: turbo rate error\n", __func__);
+ return -EINVAL;
+ }
+ rates->svs_rate = svs;
+ rates->nominal_rate = nominal;
+ rates->high_rate = turbo;
+ return 0;
+}
+
+void msm_vfe47_put_regulators(struct vfe_device *vfe_dev)
+{
+ int i;
+
+ for (i = 0; i < vfe_dev->vfe_num_regulators; i++)
+ regulator_put(vfe_dev->regulator_info[i].vdd);
+
+ vfe_dev->vfe_num_regulators = 0;
+ kfree(vfe_dev->regulator_info);
+ vfe_dev->regulator_info = NULL;
+}
+
+int msm_vfe47_get_regulators(struct vfe_device *vfe_dev)
+{
+ int rc = 0;
+ int i;
+
+ vfe_dev->vfe_num_regulators =
+ sizeof(*vfe_dev->hw_info->regulator_names) / sizeof(char *);
+
+ vfe_dev->regulator_info = kzalloc(sizeof(struct msm_cam_regulator) *
+ vfe_dev->vfe_num_regulators, GFP_KERNEL);
+ if (!vfe_dev->regulator_info)
+ return -ENOMEM;
+
+ for (i = 0; i < vfe_dev->vfe_num_regulators; i++) {
+ vfe_dev->regulator_info[i].vdd = regulator_get(
+ &vfe_dev->pdev->dev,
+ vfe_dev->hw_info->regulator_names[i]);
+ if (IS_ERR(vfe_dev->regulator_info[i].vdd)) {
+ pr_err("%s: Regulator vfe get failed %ld\n", __func__,
+ PTR_ERR(vfe_dev->regulator_info[i].vdd));
+ rc = -ENODEV;
+ goto reg_get_fail;
+ }
+ }
+ return 0;
+
+reg_get_fail:
+ for (i--; i >= 0; i--)
+ regulator_put(vfe_dev->regulator_info[i].vdd);
+ kfree(vfe_dev->regulator_info);
+ vfe_dev->regulator_info = NULL;
+ return rc;
+}
+
+int msm_vfe47_enable_regulators(struct vfe_device *vfe_dev, int enable)
+{
+ return msm_camera_regulator_enable(vfe_dev->regulator_info,
+ vfe_dev->vfe_num_regulators, enable);
+}
+
+int msm_vfe47_get_platform_data(struct vfe_device *vfe_dev)
+{
+ int rc = 0;
+
+ vfe_dev->vfe_base = msm_camera_get_reg_base(vfe_dev->pdev, "vfe", 0);
+ if (!vfe_dev->vfe_base)
+ return -ENOMEM;
+ vfe_dev->vfe_vbif_base = msm_camera_get_reg_base(vfe_dev->pdev,
+ "vfe_vbif", 0);
+ if (!vfe_dev->vfe_vbif_base) {
+ rc = -ENOMEM;
+ goto vbif_base_fail;
+ }
+
+ vfe_dev->vfe_irq = msm_camera_get_irq(vfe_dev->pdev, "vfe");
+ if (!vfe_dev->vfe_irq) {
+ rc = -ENODEV;
+ goto vfe_irq_fail;
+ }
+
+ vfe_dev->vfe_base_size = msm_camera_get_res_size(vfe_dev->pdev, "vfe");
+ vfe_dev->vfe_vbif_base_size = msm_camera_get_res_size(vfe_dev->pdev,
+ "vfe_vbif");
+ if (!vfe_dev->vfe_base_size || !vfe_dev->vfe_vbif_base_size) {
+ rc = -ENOMEM;
+ goto get_res_fail;
+ }
+
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_regulators(vfe_dev);
+ if (rc)
+ goto get_regulator_fail;
+
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_clks(vfe_dev);
+ if (rc)
+ goto get_clkcs_fail;
+
+ rc = msm_camera_register_irq(vfe_dev->pdev, vfe_dev->vfe_irq,
+ msm_isp_process_irq,
+ IRQF_TRIGGER_RISING, "vfe", vfe_dev);
+ if (rc < 0)
+ goto irq_register_fail;
+
+ msm_camera_enable_irq(vfe_dev->vfe_irq, 0);
+
+ rc = msm_isp_init_bandwidth_mgr(vfe_dev, ISP_VFE0 + vfe_dev->pdev->id);
+ if (rc)
+ goto init_bw_fail;
+
+ return 0;
+
+init_bw_fail:
+ msm_camera_unregister_irq(vfe_dev->pdev, vfe_dev->vfe_irq, "vfe");
+irq_register_fail:
+ vfe_dev->hw_info->vfe_ops.platform_ops.put_clks(vfe_dev);
+get_clkcs_fail:
+ vfe_dev->hw_info->vfe_ops.platform_ops.put_regulators(vfe_dev);
+get_regulator_fail:
+get_res_fail:
+ vfe_dev->vfe_vbif_base_size = 0;
+ vfe_dev->vfe_base_size = 0;
+vfe_irq_fail:
+ msm_camera_put_reg_base(vfe_dev->pdev, vfe_dev->vfe_base,
+ "vfe_vbif", 0);
+vbif_base_fail:
+ msm_camera_put_reg_base(vfe_dev->pdev, vfe_dev->vfe_base, "vfe", 0);
+ return rc;
+}
+
+void msm_vfe47_get_error_mask(
+ uint32_t *error_mask0, uint32_t *error_mask1)
+{
+ *error_mask0 = 0x00000000;
+ *error_mask1 = 0x0BFFFEFF;
+}
+
+void msm_vfe47_get_overflow_mask(uint32_t *overflow_mask)
+{
+ *overflow_mask = 0x09FFFE7E;
+}
+
+void msm_vfe47_get_rdi_wm_mask(struct vfe_device *vfe_dev,
+ uint32_t *rdi_wm_mask)
+{
+ *rdi_wm_mask = vfe_dev->axi_data.rdi_wm_mask;
+}
+
+void msm_vfe47_get_irq_mask(struct vfe_device *vfe_dev,
+ uint32_t *irq0_mask, uint32_t *irq1_mask)
+{
+ *irq0_mask = vfe_dev->irq0_mask;
+ *irq1_mask = vfe_dev->irq1_mask;
+}
+
+void msm_vfe47_get_halt_restart_mask(uint32_t *irq0_mask,
+ uint32_t *irq1_mask)
+{
+ *irq0_mask = BIT(31);
+ *irq1_mask = BIT(8);
+}
+
+static struct msm_vfe_axi_hardware_info msm_vfe47_axi_hw_info = {
+ .num_wm = 7,
+ .num_comp_mask = 3,
+ .num_rdi = 3,
+ .num_rdi_master = 3,
+ .min_wm_ub = 96,
+ .scratch_buf_range = SZ_32M + SZ_4M,
+};
+
+static struct msm_vfe_stats_hardware_info msm_vfe47_stats_hw_info = {
+ .stats_capability_mask =
+ 1 << MSM_ISP_STATS_HDR_BE | 1 << MSM_ISP_STATS_BF |
+ 1 << MSM_ISP_STATS_BG | 1 << MSM_ISP_STATS_BHIST |
+ 1 << MSM_ISP_STATS_HDR_BHIST | 1 << MSM_ISP_STATS_IHIST |
+ 1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS |
+ 1 << MSM_ISP_STATS_AEC_BG,
+ .stats_ping_pong_offset = stats_pingpong_offset_map,
+ .num_stats_type = VFE47_NUM_STATS_TYPE,
+ .num_stats_comp_mask = VFE47_NUM_STATS_COMP,
+};
+
+struct msm_vfe_hardware_info vfe47_hw_info = {
+ .num_iommu_ctx = 1,
+ .num_iommu_secure_ctx = 0,
+ .vfe_clk_idx = VFE47_SRC_CLK_DTSI_IDX,
+ .runtime_axi_update = 1,
+ .min_ib = 100000000,
+ .min_ab = 100000000,
+ .vfe_ops = {
+ .irq_ops = {
+ .read_irq_status = msm_vfe47_read_irq_status,
+ .read_irq_status_and_clear =
+ msm_vfe47_read_irq_status_and_clear,
+ .process_camif_irq = msm_vfe47_process_input_irq,
+ .process_reset_irq = msm_vfe47_process_reset_irq,
+ .process_halt_irq = msm_vfe47_process_halt_irq,
+ .process_reg_update = msm_vfe47_process_reg_update,
+ .process_axi_irq = msm_isp_process_axi_irq,
+ .process_stats_irq = msm_isp_process_stats_irq,
+ .process_epoch_irq = msm_vfe47_process_epoch_irq,
+ .config_irq = msm_vfe47_config_irq,
+ .process_eof_irq = msm_isp47_process_eof_irq,
+ },
+ .axi_ops = {
+ .reload_wm = msm_vfe47_axi_reload_wm,
+ .enable_wm = msm_vfe47_axi_enable_wm,
+ .cfg_io_format = msm_vfe47_cfg_io_format,
+ .cfg_comp_mask = msm_vfe47_axi_cfg_comp_mask,
+ .clear_comp_mask = msm_vfe47_axi_clear_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe47_axi_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe47_axi_clear_wm_irq_mask,
+ .clear_irq_mask =
+ msm_vfe47_axi_clear_irq_mask,
+ .cfg_framedrop = msm_vfe47_cfg_framedrop,
+ .clear_framedrop = msm_vfe47_clear_framedrop,
+ .cfg_wm_reg = msm_vfe47_axi_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe47_axi_clear_wm_reg,
+ .cfg_wm_xbar_reg = msm_vfe47_axi_cfg_wm_xbar_reg,
+ .clear_wm_xbar_reg = msm_vfe47_axi_clear_wm_xbar_reg,
+ .cfg_ub = msm_vfe47_cfg_axi_ub,
+ .read_wm_ping_pong_addr =
+ msm_vfe47_read_wm_ping_pong_addr,
+ .update_ping_pong_addr =
+ msm_vfe47_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe47_get_comp_mask,
+ .get_wm_mask = msm_vfe47_get_wm_mask,
+ .get_pingpong_status = msm_vfe47_get_pingpong_status,
+ .halt = msm_vfe47_axi_halt,
+ .restart = msm_vfe47_axi_restart,
+ .update_cgc_override =
+ msm_vfe47_axi_update_cgc_override,
+ .ub_reg_offset = msm_vfe47_ub_reg_offset,
+ .get_ub_size = msm_vfe47_get_ub_size,
+ },
+ .core_ops = {
+ .reg_update = msm_vfe47_reg_update,
+ .cfg_input_mux = msm_vfe47_cfg_input_mux,
+ .update_camif_state = msm_vfe47_update_camif_state,
+ .start_fetch_eng = msm_vfe47_start_fetch_engine,
+ .cfg_rdi_reg = msm_vfe47_cfg_rdi_reg,
+ .reset_hw = msm_vfe47_reset_hardware,
+ .init_hw = msm_vfe47_init_hardware,
+ .init_hw_reg = msm_vfe47_init_hardware_reg,
+ .clear_status_reg = msm_vfe47_clear_status_reg,
+ .release_hw = msm_vfe47_release_hardware,
+ .get_error_mask = msm_vfe47_get_error_mask,
+ .get_overflow_mask = msm_vfe47_get_overflow_mask,
+ .get_rdi_wm_mask = msm_vfe47_get_rdi_wm_mask,
+ .get_irq_mask = msm_vfe47_get_irq_mask,
+ .get_halt_restart_mask =
+ msm_vfe47_get_halt_restart_mask,
+ .process_error_status = msm_vfe47_process_error_status,
+ .is_module_cfg_lock_needed =
+ msm_vfe47_is_module_cfg_lock_needed,
+ .ahb_clk_cfg = msm_isp47_ahb_clk_cfg,
+ .set_halt_restart_mask =
+ msm_vfe47_set_halt_restart_mask,
+ .start_fetch_eng_multi_pass =
+ msm_vfe47_start_fetch_engine_multi_pass,
+ },
+ .stats_ops = {
+ .get_stats_idx = msm_vfe47_get_stats_idx,
+ .check_streams = msm_vfe47_stats_check_streams,
+ .cfg_comp_mask = msm_vfe47_stats_cfg_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe47_stats_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe47_stats_clear_wm_irq_mask,
+ .cfg_wm_reg = msm_vfe47_stats_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe47_stats_clear_wm_reg,
+ .cfg_ub = msm_vfe47_stats_cfg_ub,
+ .enable_module = msm_vfe47_stats_enable_module,
+ .update_ping_pong_addr =
+ msm_vfe47_stats_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe47_stats_get_comp_mask,
+ .get_wm_mask = msm_vfe47_stats_get_wm_mask,
+ .get_frame_id = msm_vfe47_stats_get_frame_id,
+ .get_pingpong_status = msm_vfe47_get_pingpong_status,
+ .update_cgc_override =
+ msm_vfe47_stats_update_cgc_override,
+ .enable_stats_wm = NULL,
+ },
+ .platform_ops = {
+ .get_platform_data = msm_vfe47_get_platform_data,
+ .enable_regulators = msm_vfe47_enable_regulators,
+ .get_regulators = msm_vfe47_get_regulators,
+ .put_regulators = msm_vfe47_put_regulators,
+ .enable_clks = msm_vfe47_enable_clks,
+ .get_clks = msm_vfe47_get_clks,
+ .put_clks = msm_vfe47_put_clks,
+ .get_clk_rates = msm_vfe47_get_clk_rates,
+ .get_max_clk_rate = msm_vfe47_get_max_clk_rate,
+ .set_clk_rate = msm_vfe47_set_clk_rate,
+ .init_bw_mgr = msm_vfe47_init_bandwidth_mgr,
+ .deinit_bw_mgr = msm_vfe47_deinit_bandwidth_mgr,
+ .update_bw = msm_vfe47_update_bandwidth,
+ }
+ },
+ .dmi_reg_offset = 0xC2C,
+ .axi_hw_info = &msm_vfe47_axi_hw_info,
+ .stats_hw_info = &msm_vfe47_stats_hw_info,
+ .regulator_names = {"vdd", "camss-vdd", "mmagic-vdd"},
+};
+EXPORT_SYMBOL(vfe47_hw_info);
+
+static const struct of_device_id msm_vfe47_dt_match[] = {
+ {
+ .compatible = "qcom,vfe47",
+ .data = &vfe47_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe47_dt_match);
+
+static struct platform_driver vfe47_driver = {
+ .probe = vfe_hw_probe,
+ .driver = {
+ .name = "msm_vfe47",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_vfe47_dt_match,
+ },
+};
+
+static int __init msm_vfe47_init_module(void)
+{
+ return platform_driver_register(&vfe47_driver);
+}
+
+static void __exit msm_vfe47_exit_module(void)
+{
+ platform_driver_unregister(&vfe47_driver);
+}
+
+module_init(msm_vfe47_init_module);
+module_exit(msm_vfe47_exit_module);
+MODULE_DESCRIPTION("MSM VFE47 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.h b/drivers/media/platform/msm/ais/isp/msm_isp47.h
new file mode 100644
index 000000000000..b29fca61ce7c
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_isp47.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISP47_H__
+#define __MSM_ISP47_H__
+
+#define VFE47_NUM_STATS_COMP 2
+#define VFE47_NUM_STATS_TYPE 9
+/* composite mask order */
+enum msm_vfe47_stats_comp_idx {
+ STATS_COMP_IDX_HDR_BE = 0,
+ STATS_COMP_IDX_BG,
+ STATS_COMP_IDX_BF,
+ STATS_COMP_IDX_HDR_BHIST,
+ STATS_COMP_IDX_RS,
+ STATS_COMP_IDX_CS,
+ STATS_COMP_IDX_IHIST,
+ STATS_COMP_IDX_BHIST,
+ STATS_COMP_IDX_AEC_BG,
+};
+
+extern struct msm_vfe_hardware_info vfe47_hw_info;
+
+void msm_vfe47_read_irq_status(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1);
+void msm_vfe47_read_irq_status_and_clear(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1);
+void msm_vfe47_enable_camif_error(struct vfe_device *vfe_dev,
+ int enable);
+void msm_vfe47_process_reg_update(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+void msm_vfe47_process_epoch_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+void msm_isp47_process_eof_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0);
+void msm_vfe47_reg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+long msm_vfe47_reset_hardware(struct vfe_device *vfe_dev,
+ uint32_t first_start, uint32_t blocking_call);
+void msm_vfe47_axi_reload_wm(struct vfe_device *vfe_dev,
+ void __iomem *vfe_base, uint32_t reload_mask);
+void msm_vfe47_axi_update_cgc_override(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint8_t enable);
+void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+void msm_vfe47_axi_clear_irq_mask(struct vfe_device *vfe_dev);
+void msm_vfe47_cfg_framedrop(void __iomem *vfe_base,
+ struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
+ uint32_t framedrop_period);
+void msm_vfe47_clear_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+int32_t msm_vfe47_cfg_io_format(struct vfe_device *vfe_dev,
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format);
+int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev,
+ void *arg);
+void msm_vfe47_cfg_fetch_engine(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg);
+void msm_vfe47_cfg_testgen(struct vfe_device *vfe_dev,
+ struct msm_vfe_testgen_cfg *testgen_cfg);
+void msm_vfe47_cfg_camif(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg);
+void msm_vfe47_cfg_input_mux(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg);
+void msm_vfe47_configure_hvx(struct vfe_device *vfe_dev,
+ uint8_t is_stream_on);
+void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state update_state);
+void msm_vfe47_cfg_rdi_reg(
+ struct vfe_device *vfe_dev, struct msm_vfe_rdi_cfg *rdi_cfg,
+ enum msm_vfe_input_src input_src);
+void msm_vfe47_axi_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx);
+void msm_vfe47_axi_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+void msm_vfe47_axi_cfg_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx);
+void msm_vfe47_axi_clear_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+void msm_vfe47_cfg_axi_ub_equal_default(
+ struct vfe_device *vfe_dev, enum msm_vfe_input_src frame_src);
+void msm_vfe47_cfg_axi_ub_equal_slicing(
+ struct vfe_device *vfe_dev);
+void msm_vfe47_cfg_axi_ub(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+void msm_vfe47_read_wm_ping_pong_addr(
+ struct vfe_device *vfe_dev);
+void msm_vfe47_update_ping_pong_addr(
+ void __iomem *vfe_base,
+ uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
+ int32_t buf_size);
+int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
+ uint32_t blocking);
+int msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
+ uint32_t blocking, uint32_t enable_camif);
+uint32_t msm_vfe47_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1);
+uint32_t msm_vfe47_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1);
+uint32_t msm_vfe47_get_pingpong_status(
+ struct vfe_device *vfe_dev);
+int msm_vfe47_get_stats_idx(enum msm_isp_stats_type stats_type);
+int msm_vfe47_stats_check_streams(
+ struct msm_vfe_stats_stream *stream_info);
+void msm_vfe47_stats_cfg_comp_mask(
+ struct vfe_device *vfe_dev, uint32_t stats_mask,
+ uint8_t request_comp_index, uint8_t enable);
+void msm_vfe47_stats_cfg_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+void msm_vfe47_stats_clear_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+void msm_vfe47_stats_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+void msm_vfe47_stats_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+void msm_vfe47_stats_cfg_ub(struct vfe_device *vfe_dev);
+void msm_vfe47_stats_update_cgc_override(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
+bool msm_vfe47_is_module_cfg_lock_needed(
+ uint32_t reg_offset);
+void msm_vfe47_stats_enable_module(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
+void msm_vfe47_stats_update_ping_pong_addr(
+ void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status, dma_addr_t paddr);
+uint32_t msm_vfe47_stats_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1);
+uint32_t msm_vfe47_stats_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1);
+uint32_t msm_vfe47_stats_get_frame_id(
+ struct vfe_device *vfe_dev);
+void msm_vfe47_get_error_mask(
+ uint32_t *error_mask0, uint32_t *error_mask1);
+void msm_vfe47_get_overflow_mask(uint32_t *overflow_mask);
+void msm_vfe47_get_rdi_wm_mask(struct vfe_device *vfe_dev,
+ uint32_t *rdi_wm_mask);
+void msm_vfe47_get_irq_mask(struct vfe_device *vfe_dev,
+ uint32_t *irq0_mask, uint32_t *irq1_mask);
+void msm_vfe47_restore_irq_mask(struct vfe_device *vfe_dev);
+void msm_vfe47_get_halt_restart_mask(uint32_t *irq0_mask,
+ uint32_t *irq1_mask);
+int msm_vfe47_init_hardware(struct vfe_device *vfe_dev);
+void msm_vfe47_release_hardware(struct vfe_device *vfe_dev);
+void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev);
+void msm_vfe47_process_reset_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1);
+void msm_vfe47_process_halt_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1);
+void msm_vfe47_process_input_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+void msm_vfe47_process_violation_status(
+ struct vfe_device *vfe_dev);
+void msm_vfe47_process_error_status(struct vfe_device *vfe_dev);
+void msm_vfe47_clear_status_reg(struct vfe_device *vfe_dev);
+int msm_vfe47_get_platform_data(struct vfe_device *vfe_dev);
+int msm_vfe47_enable_regulators(struct vfe_device *vfe_dev, int enable);
+int msm_vfe47_get_regulators(struct vfe_device *vfe_dev);
+void msm_vfe47_put_regulators(struct vfe_device *vfe_dev);
+int msm_vfe47_enable_clks(struct vfe_device *vfe_dev, int enable);
+int msm_vfe47_get_clks(struct vfe_device *vfe_dev);
+void msm_vfe47_put_clks(struct vfe_device *vfe_dev);
+int msm_vfe47_get_clk_rates(struct vfe_device *vfe_dev,
+ struct msm_isp_clk_rates *rates);
+int msm_vfe47_get_max_clk_rate(struct vfe_device *vfe_dev, long *rate);
+int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate);
+int msm_vfe47_init_bandwidth_mgr(struct vfe_device *vfe_dev,
+ struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr);
+void msm_vfe47_deinit_bandwidth_mgr(
+ struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr);
+int msm_vfe47_update_bandwidth(
+ struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr);
+void msm_vfe47_config_irq(struct vfe_device *vfe_dev,
+ uint32_t irq0_mask, uint32_t irq1_mask,
+ enum msm_isp_irq_operation oper);
+#endif /* __MSM_ISP47_H__ */
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c
new file mode 100644
index 000000000000..373a963f75aa
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c
@@ -0,0 +1,4169 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <asm/div64.h>
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "trace/events/msm_cam.h"
+
+#define ISP_SOF_DEBUG_COUNT 0
+static int msm_isp_update_dual_HW_ms_info_at_start(
+ struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src stream_src,
+ struct msm_isp_timestamp *ts);
+
+static int msm_isp_update_dual_HW_axi(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+
+#define DUAL_VFE_AND_VFE1(s, v) ((s->stream_src < RDI_INTF_0) && \
+ v->is_split && vfe_dev->pdev->id == ISP_VFE1)
+
+#define RDI_OR_NOT_DUAL_VFE(v, s) (!v->is_split || \
+ ((s->stream_src >= RDI_INTF_0) && \
+ (stream_info->stream_src <= RDI_INTF_2)))
+
+static inline struct msm_vfe_axi_stream *msm_isp_vfe_get_stream(
+ struct dual_vfe_resource *dual_vfe_res,
+ int vfe_id, uint32_t index)
+{
+ struct msm_vfe_axi_shared_data *axi_data =
+ dual_vfe_res->axi_data[vfe_id];
+ return &axi_data->stream_info[index];
+}
+
+static inline struct msm_vfe_axi_stream *msm_isp_get_controllable_stream(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ if (vfe_dev->is_split && stream_info->stream_src < RDI_INTF_0 &&
+ stream_info->controllable_output)
+ return msm_isp_vfe_get_stream(
+ vfe_dev->common_data->dual_vfe_res,
+ ISP_VFE1,
+ HANDLE_TO_IDX(
+ stream_info->stream_handle));
+ return stream_info;
+}
+
+int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+{
+ uint32_t i = stream_cfg_cmd->stream_src;
+
+ if (i >= VFE_AXI_SRC_MAX) {
+ pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
+ stream_cfg_cmd->stream_src);
+ return -EINVAL;
+ }
+
+ if (axi_data->stream_info[i].state != AVAILABLE) {
+ pr_err("%s:%d invalid state %d expected %d for src %d\n",
+ __func__, __LINE__, axi_data->stream_info[i].state,
+ AVAILABLE, i);
+ return -EINVAL;
+ }
+
+ if ((axi_data->stream_handle_cnt << 8) == 0)
+ axi_data->stream_handle_cnt++;
+
+ stream_cfg_cmd->axi_stream_handle =
+ (++axi_data->stream_handle_cnt) << 8 | i;
+
+ ISP_DBG("%s: vfe %d handle %x\n", __func__, vfe_dev->pdev->id,
+ stream_cfg_cmd->axi_stream_handle);
+
+ memset(&axi_data->stream_info[i], 0,
+ sizeof(struct msm_vfe_axi_stream));
+ spin_lock_init(&axi_data->stream_info[i].lock);
+ axi_data->stream_info[i].session_id = stream_cfg_cmd->session_id;
+ axi_data->stream_info[i].stream_id = stream_cfg_cmd->stream_id;
+ axi_data->stream_info[i].buf_divert = stream_cfg_cmd->buf_divert;
+ axi_data->stream_info[i].state = INACTIVE;
+ axi_data->stream_info[i].stream_handle =
+ stream_cfg_cmd->axi_stream_handle;
+ axi_data->stream_info[i].controllable_output =
+ stream_cfg_cmd->controllable_output;
+ axi_data->stream_info[i].activated_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ if (stream_cfg_cmd->controllable_output)
+ stream_cfg_cmd->frame_skip_pattern = SKIP_ALL;
+ INIT_LIST_HEAD(&axi_data->stream_info[i].request_q);
+ return 0;
+}
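For reference, the handle packing used in msm_isp_axi_create_stream() above can be shown in isolation. The snippet below is a minimal sketch only: HANDLE_TO_IDX() is assumed to mask the low 8 bits (its real definition lives in the driver headers), and make_stream_handle() with its local counter is a hypothetical stand-in for axi_data->stream_handle_cnt, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed definition; the driver's real macro lives in its headers. */
#define HANDLE_TO_IDX(handle) ((handle) & 0xFF)

/* Hypothetical helper mirroring msm_isp_axi_create_stream(): the low byte
 * carries the stream source index and the upper bits carry a rolling
 * counter, with the same wrap check as the driver.
 */
static uint32_t make_stream_handle(uint32_t *handle_cnt, uint32_t src_idx)
{
	if ((*handle_cnt << 8) == 0)
		(*handle_cnt)++;
	return (++(*handle_cnt)) << 8 | src_idx;
}

int main(void)
{
	uint32_t cnt = 0;
	uint32_t handle = make_stream_handle(&cnt, 3);

	printf("handle=0x%x idx=%u\n", handle, HANDLE_TO_IDX(handle));
	return 0;
}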
+
+void msm_isp_axi_destroy_stream(
+ struct msm_vfe_axi_shared_data *axi_data, int stream_idx)
+{
+ if (axi_data->stream_info[stream_idx].state != AVAILABLE) {
+ axi_data->stream_info[stream_idx].state = AVAILABLE;
+ axi_data->stream_info[stream_idx].stream_handle = 0;
+ } else {
+ pr_err("%s: stream does not exist\n", __func__);
+ }
+}
+
+
+int msm_isp_axi_get_num_planes(uint32_t output_format,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ if (!stream_info)
+ return -EINVAL;
+
+ switch (output_format) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR10DPCM6:
+ case V4L2_PIX_FMT_SGBRG10DPCM6:
+ case V4L2_PIX_FMT_SGRBG10DPCM6:
+ case V4L2_PIX_FMT_SRGGB10DPCM6:
+ case V4L2_PIX_FMT_SBGGR10DPCM8:
+ case V4L2_PIX_FMT_SGBRG10DPCM8:
+ case V4L2_PIX_FMT_SGRBG10DPCM8:
+ case V4L2_PIX_FMT_SRGGB10DPCM8:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_META:
+ case V4L2_PIX_FMT_META10:
+ case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
+ stream_info->num_planes = 1;
+ stream_info->format_factor = ISP_Q2;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV41:
+ stream_info->num_planes = 2;
+ stream_info->format_factor = 1.5 * ISP_Q2;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ stream_info->num_planes = 2;
+ stream_info->format_factor = 2 * ISP_Q2;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ stream_info->num_planes = 2;
+ stream_info->format_factor = 3 * ISP_Q2;
+ break;
+ /* TODO: Add more image formats */
+ default:
+ msm_isp_print_fourcc_error(__func__,
+ output_format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+{
+ int rc = -1, i;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+
+ if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)
+ < VFE_AXI_SRC_MAX) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ } else {
+ pr_err("%s: Invalid axi_stream_handle\n", __func__);
+ return rc;
+ }
+
+ if (!stream_info) {
+ pr_err("%s: Stream info is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_isp_axi_get_num_planes
+ (stream_cfg_cmd->output_format, stream_info);
+ if (rc < 0) {
+ pr_err("%s: output_format error %d\n", __func__,
+ stream_cfg_cmd->output_format);
+ return rc;
+ }
+
+ if (axi_data->hw_info->num_wm - axi_data->num_used_wm <
+ stream_info->num_planes) {
+ pr_err("%s: No free write masters\n", __func__);
+ return rc;
+ }
+
+ if ((stream_info->num_planes > 1) &&
+ (axi_data->hw_info->num_comp_mask -
+ axi_data->num_used_composite_mask < 1)) {
+ pr_err("%s: No free composite mask\n", __func__);
+ return rc;
+ }
+
+ if (stream_cfg_cmd->init_frame_drop >= MAX_INIT_FRAME_DROP) {
+ pr_err("%s: Invalid skip pattern\n", __func__);
+ return rc;
+ }
+
+ if (stream_cfg_cmd->frame_skip_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid skip pattern\n", __func__);
+ return rc;
+ }
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ stream_info->plane_cfg[i] = stream_cfg_cmd->plane_cfg[i];
+ stream_info->max_width = max(stream_info->max_width,
+ stream_cfg_cmd->plane_cfg[i].output_width);
+ }
+
+ stream_info->output_format = stream_cfg_cmd->output_format;
+ stream_info->runtime_output_format = stream_info->output_format;
+ stream_info->stream_src = stream_cfg_cmd->stream_src;
+ stream_info->frame_based = stream_cfg_cmd->frame_base;
+ return 0;
+}
+
+static uint32_t msm_isp_axi_get_plane_size(
+ struct msm_vfe_axi_stream *stream_info, int plane_idx)
+{
+ uint32_t size = 0;
+ struct msm_vfe_axi_plane_cfg *plane_cfg = stream_info->plane_cfg;
+
+ switch (stream_info->output_format) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_META:
+ case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR10DPCM6:
+ case V4L2_PIX_FMT_SGBRG10DPCM6:
+ case V4L2_PIX_FMT_SGRBG10DPCM6:
+ case V4L2_PIX_FMT_SRGGB10DPCM6:
+ case V4L2_PIX_FMT_SBGGR10DPCM8:
+ case V4L2_PIX_FMT_SGBRG10DPCM8:
+ case V4L2_PIX_FMT_SGRBG10DPCM8:
+ case V4L2_PIX_FMT_SRGGB10DPCM8:
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ case V4L2_PIX_FMT_META10:
+ /* TODO: fix me */
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ /* TODO: fix me */
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ else
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV41:
+ if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ else
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ /* TODO: Add more image formats */
+ default:
+ msm_isp_print_fourcc_error(__func__,
+ stream_info->output_format);
+ break;
+ }
+ return size;
+}
+
+void msm_isp_axi_reserve_wm(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i, j;
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ for (j = 0; j < axi_data->hw_info->num_wm; j++) {
+ if (!axi_data->free_wm[j]) {
+ axi_data->free_wm[j] =
+ stream_info->stream_handle;
+ axi_data->wm_image_size[j] =
+ msm_isp_axi_get_plane_size(
+ stream_info, i);
+ axi_data->num_used_wm++;
+ break;
+ }
+ }
+ ISP_DBG("%s vfe %d stream_handle %x wm %d\n", __func__,
+ vfe_dev->pdev->id,
+ stream_info->stream_handle, j);
+ stream_info->wm[i] = j;
+ }
+}
+
+void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i;
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ axi_data->free_wm[stream_info->wm[i]] = 0;
+ axi_data->num_used_wm--;
+ }
+ if (stream_info->stream_src <= IDEAL_RAW)
+ axi_data->num_pix_stream++;
+ else if (stream_info->stream_src < VFE_AXI_SRC_MAX)
+ axi_data->num_rdi_stream++;
+}
+
+void msm_isp_axi_reserve_comp_mask(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i;
+ uint8_t comp_mask = 0;
+
+ for (i = 0; i < stream_info->num_planes; i++)
+ comp_mask |= 1 << stream_info->wm[i];
+
+ for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
+ if (!axi_data->composite_info[i].stream_handle) {
+ axi_data->composite_info[i].stream_handle =
+ stream_info->stream_handle;
+ axi_data->composite_info[i].
+ stream_composite_mask = comp_mask;
+ axi_data->num_used_composite_mask++;
+ break;
+ }
+ }
+ stream_info->comp_mask_index = i;
+}
+
+void msm_isp_axi_free_comp_mask(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ axi_data->composite_info[stream_info->comp_mask_index].
+ stream_composite_mask = 0;
+ axi_data->composite_info[stream_info->comp_mask_index].
+ stream_handle = 0;
+ axi_data->num_used_composite_mask--;
+}
+
+int msm_isp_axi_check_stream_state(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int rc = 0, i;
+ unsigned long flags;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+ enum msm_vfe_axi_state valid_state =
+ (stream_cfg_cmd->cmd == START_STREAM) ? INACTIVE : ACTIVE;
+
+ pr_debug("%s: entry %d\n", __func__, __LINE__);
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (stream_info->state == AVAILABLE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state != valid_state) {
+ if ((stream_info->state == PAUSING ||
+ stream_info->state == PAUSED ||
+ stream_info->state == RESUME_PENDING ||
+ stream_info->state == RESUMING ||
+ stream_info->state == UPDATING) &&
+ (stream_cfg_cmd->cmd == STOP_STREAM ||
+ stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
+ stream_info->state = ACTIVE;
+ } else {
+ pr_err("%s: Invalid stream state: %d\n",
+ __func__, stream_info->state);
+ spin_unlock_irqrestore(
+ &stream_info->lock, flags);
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ rc = -EINVAL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+
+ pr_debug("%s: exit %d\n", __func__, __LINE__);
+
+ return rc;
+}
+
+/**
+ * msm_isp_cfg_framedrop_reg() - Program the period and pattern
+ * @vfe_dev: The device for which the period and pattern are programmed
+ * @stream_info: The stream for which programming is done
+ *
+ * This function calculates the period and pattern to be configured
+ * for the stream based on the current frame id of the stream's input
+ * source and the initial framedrops.
+ *
+ * Returns void.
+ */
+static void msm_isp_cfg_framedrop_reg(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ struct msm_vfe_axi_stream *vfe0_stream_info = NULL;
+ uint32_t runtime_init_frame_drop;
+
+ uint32_t framedrop_pattern = 0;
+ uint32_t framedrop_period = MSM_VFE_STREAM_STOP_PERIOD;
+ enum msm_vfe_input_src frame_src = SRC_TO_INTF(stream_info->stream_src);
+
+ if (vfe_dev->axi_data.src_info[frame_src].frame_id >=
+ stream_info->init_frame_drop)
+ runtime_init_frame_drop = 0;
+ else
+ runtime_init_frame_drop = stream_info->init_frame_drop -
+ vfe_dev->axi_data.src_info[frame_src].frame_id;
+
+ if (!runtime_init_frame_drop)
+ framedrop_period = stream_info->current_framedrop_period;
+
+ if (framedrop_period != MSM_VFE_STREAM_STOP_PERIOD)
+ framedrop_pattern = 0x1;
+
+ ISP_DBG("%s: stream %x framedrop pattern %x period %u\n", __func__,
+ stream_info->stream_handle, framedrop_pattern,
+ framedrop_period);
+
+ WARN_ON(framedrop_period == 0);
+ if (DUAL_VFE_AND_VFE1(stream_info, vfe_dev)) {
+ vfe0_stream_info = msm_isp_vfe_get_stream(
+ vfe_dev->common_data->dual_vfe_res,
+ ISP_VFE0,
+ HANDLE_TO_IDX(
+ stream_info->stream_handle));
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
+ vfe_dev->common_data->dual_vfe_res->
+ vfe_base[ISP_VFE0],
+ vfe0_stream_info, framedrop_pattern,
+ framedrop_period);
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
+ vfe_dev->vfe_base, stream_info,
+ framedrop_pattern,
+ framedrop_period);
+
+ stream_info->requested_framedrop_period =
+ framedrop_period;
+ vfe0_stream_info->requested_framedrop_period =
+ framedrop_period;
+
+ } else if (RDI_OR_NOT_DUAL_VFE(vfe_dev, stream_info)) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
+ vfe_dev->vfe_base, stream_info, framedrop_pattern,
+ framedrop_period);
+ stream_info->requested_framedrop_period = framedrop_period;
+ }
+}
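As a worked illustration of the choice made in msm_isp_cfg_framedrop_reg() above, the snippet below sketches only the period/pattern selection. It is not the driver's API: pick_framedrop() is a hypothetical helper, and the MSM_VFE_STREAM_STOP_PERIOD value shown is illustrative (the real sentinel is defined in the driver headers).

#include <stdint.h>

#define MSM_VFE_STREAM_STOP_PERIOD 15	/* illustrative; real value is in the driver headers */

/* Sketch of the selection above: while initial frame drops are still
 * pending the period stays at the stop value; after that the stream's
 * current period takes effect and the pattern becomes 0x1 (emit one
 * frame per period).
 */
static void pick_framedrop(uint32_t frame_id, uint32_t init_frame_drop,
			   uint32_t current_period,
			   uint32_t *pattern, uint32_t *period)
{
	uint32_t remaining = (frame_id >= init_frame_drop) ?
				0 : init_frame_drop - frame_id;

	*period = MSM_VFE_STREAM_STOP_PERIOD;
	if (!remaining)
		*period = current_period;

	*pattern = (*period != MSM_VFE_STREAM_STOP_PERIOD) ? 0x1 : 0;
}

For example, with init_frame_drop = 4 and frame_id = 2, two drops remain and the stream stays stopped; once frame_id reaches 4, the configured period and a pattern of 0x1 are programmed.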
+
+/**
+ * msm_isp_check_epoch_status() - Check the epoch signal for framedrop
+ *
+ * @vfe_dev: The h/w on which the epoch signal is received
+ * @frame_src: The source of the epoch signal for this frame
+ *
+ * For the dual VFE case with a pixel stream, this function returns
+ * success only once both VFEs have received their epoch signal, and
+ * it updates *vfe_dev to point at VFE1 for further processing.
+ * For a non-dual VFE stream or a non-pixel source, this
+ * function simply returns success.
+ *
+ * Returns 1 - epoch reception is complete.
+ *         0 - epoch reception is not complete.
+ */
+static int msm_isp_check_epoch_status(struct vfe_device **vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ struct vfe_device *vfe_dev_cur = *vfe_dev;
+ struct vfe_device *vfe_dev_other = NULL;
+ uint32_t vfe_id_other = 0;
+ uint32_t vfe_id_cur = 0;
+ uint32_t epoch_mask = 0;
+ unsigned long flags;
+ int completed = 0;
+
+ spin_lock_irqsave(
+ &vfe_dev_cur->common_data->common_dev_data_lock, flags);
+
+ if (vfe_dev_cur->is_split &&
+ frame_src == VFE_PIX_0) {
+ if (vfe_dev_cur->pdev->id == ISP_VFE0) {
+ vfe_id_cur = ISP_VFE0;
+ vfe_id_other = ISP_VFE1;
+ } else {
+ vfe_id_cur = ISP_VFE1;
+ vfe_id_other = ISP_VFE0;
+ }
+ vfe_dev_other = vfe_dev_cur->common_data->dual_vfe_res->
+ vfe_dev[vfe_id_other];
+
+ if (vfe_dev_cur->common_data->dual_vfe_res->
+ epoch_sync_mask & (1 << vfe_id_cur)) {
+ /* serious scheduling delay */
+ pr_err("Missing epoch: vfe %d, epoch mask 0x%x\n",
+ vfe_dev_cur->pdev->id,
+ vfe_dev_cur->common_data->dual_vfe_res->
+ epoch_sync_mask);
+ msm_isp_dump_ping_pong_mismatch();
+ goto fatal;
+ }
+
+ vfe_dev_cur->common_data->dual_vfe_res->
+ epoch_sync_mask |= (1 << vfe_id_cur);
+
+ epoch_mask = (1 << vfe_id_cur) | (1 << vfe_id_other);
+ if ((vfe_dev_cur->common_data->dual_vfe_res->
+ epoch_sync_mask & epoch_mask) == epoch_mask) {
+
+ if (vfe_id_other == ISP_VFE0)
+ *vfe_dev = vfe_dev_cur;
+ else
+ *vfe_dev = vfe_dev_other;
+
+ vfe_dev_cur->common_data->dual_vfe_res->
+ epoch_sync_mask &= ~epoch_mask;
+ completed = 1;
+ }
+ } else
+ completed = 1;
+
+ spin_unlock_irqrestore(
+ &vfe_dev_cur->common_data->common_dev_data_lock, flags);
+
+ return completed;
+fatal:
+ spin_unlock_irqrestore(
+ &vfe_dev_cur->common_data->common_dev_data_lock, flags);
+ /* new error event code will be added later */
+ msm_isp_halt_send_error(vfe_dev_cur, ISP_EVENT_PING_PONG_MISMATCH);
+ return 0;
+}
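The dual-VFE handshake above reduces to a small amount of bit bookkeeping on epoch_sync_mask. The snippet below is a minimal model of that logic only; epoch_arrived() is a hypothetical helper and the ISP_VFE0/ISP_VFE1 values are stand-ins for the driver's enums and dual_vfe_res state.

#include <stdint.h>
#include <stdbool.h>

enum { ISP_VFE0 = 0, ISP_VFE1 = 1 };

/* Returns true once both VFEs have reported their epoch for this frame.
 * *fatal is set if the same VFE reports twice before its peer, which the
 * driver treats as a serious scheduling delay.
 */
static bool epoch_arrived(uint32_t *epoch_sync_mask, int vfe_id, bool *fatal)
{
	uint32_t both = (1u << ISP_VFE0) | (1u << ISP_VFE1);

	*fatal = false;
	if (*epoch_sync_mask & (1u << vfe_id)) {
		*fatal = true;			/* peer's epoch never arrived */
		return false;
	}
	*epoch_sync_mask |= 1u << vfe_id;
	if ((*epoch_sync_mask & both) == both) {
		*epoch_sync_mask &= ~both;	/* consume this frame's pair */
		return true;
	}
	return false;
}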
+
+
+/**
+ * msm_isp_update_framedrop_reg() - Update framedrop period and pattern on h/w
+ * @vfe_dev: The h/w on which the period and pattern are updated.
+ * @frame_src: Input source.
+ *
+ * If the period and pattern need to be updated for a stream, they are
+ * updated here. Updates happen when the initial frame drop reaches 0 or
+ * when burst streams have been given a new skip pattern from user space.
+ *
+ * Returns void
+ */
+void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ int i;
+ struct msm_vfe_axi_shared_data *axi_data = NULL;
+ struct msm_vfe_axi_stream *stream_info;
+ unsigned long flags;
+
+ if (msm_isp_check_epoch_status(&vfe_dev, frame_src) != 1)
+ return;
+
+ axi_data = &vfe_dev->axi_data;
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
+ frame_src) {
+ continue;
+ }
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state != ACTIVE)
+ continue;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+
+ if (stream_info->stream_type == BURST_STREAM) {
+ if (stream_info->runtime_num_burst_capture == 0 ||
+ (stream_info->runtime_num_burst_capture == 1 &&
+ stream_info->activated_framedrop_period == 1))
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ }
+
+ if (stream_info->undelivered_request_cnt > 0)
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+
+ /*
+ * re-configure the period and pattern only if they are not already
+ * set to what we want
+ */
+ if (stream_info->current_framedrop_period !=
+ stream_info->requested_framedrop_period) {
+ msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+}
+
+/**
+ * msm_isp_reset_framedrop() - Compute the framedrop period and pattern
+ * @vfe_dev: Device for which the period and pattern are computed
+ * @stream_info: The stream for which the period and pattern are generated
+ *
+ * This function is called when a stream starts or is reset. Its main
+ * purpose is to set up the runtime framedrop parameters required
+ * for the stream.
+ *
+ * Returns void
+ */
+void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ stream_info->runtime_num_burst_capture = stream_info->num_burst_capture;
+
+ /*
+ * Only reset non-controllable output streams, since the
+ * controllable stream framedrop period is controlled
+ * by the request frame API
+ */
+ if (!stream_info->controllable_output) {
+ stream_info->current_framedrop_period =
+ msm_isp_get_framedrop_period(
+ stream_info->frame_skip_pattern);
+ }
+
+ msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ ISP_DBG("%s: init frame drop: %d\n", __func__,
+ stream_info->init_frame_drop);
+ ISP_DBG("%s: num_burst_capture: %d\n", __func__,
+ stream_info->runtime_num_burst_capture);
+}
+
+void msm_isp_check_for_output_error(struct vfe_device *vfe_dev,
+ struct msm_isp_timestamp *ts, struct msm_isp_sof_info *sof_info)
+{
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data;
+ int i;
+ uint32_t stream_idx;
+
+ if (!vfe_dev || !sof_info) {
+ pr_err("%s %d failed: vfe_dev %pK sof_info %pK\n", __func__,
+ __LINE__, vfe_dev, sof_info);
+ return;
+ }
+ sof_info->regs_not_updated = 0;
+ sof_info->reg_update_fail_mask = 0;
+ sof_info->stream_get_buf_fail_mask = 0;
+
+ axi_data = &vfe_dev->axi_data;
+ /* report that registers are not updated and return empty buffer for
+ * controllable outputs
+ */
+ if (!vfe_dev->reg_updated) {
+ sof_info->regs_not_updated =
+ vfe_dev->reg_update_requested;
+ }
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ struct msm_vfe_axi_stream *temp_stream_info;
+
+ stream_info = &axi_data->stream_info[i];
+ stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+
+ /*
+ * Process drop only if controllable ACTIVE PIX stream &&
+ * reg_not_updated
+ * OR stream is in RESUMING state.
+ * Other cases there is no drop to report, so continue.
+ */
+ if (!((stream_info->state == ACTIVE &&
+ stream_info->controllable_output &&
+ (SRC_TO_INTF(stream_info->stream_src) ==
+ VFE_PIX_0)) ||
+ stream_info->state == RESUMING))
+ continue;
+
+ if (stream_info->controllable_output &&
+ !vfe_dev->reg_updated) {
+ temp_stream_info =
+ msm_isp_get_controllable_stream(vfe_dev,
+ stream_info);
+ if (temp_stream_info->undelivered_request_cnt) {
+ if (msm_isp_drop_frame(vfe_dev, stream_info, ts,
+ sof_info)) {
+ pr_err("drop frame failed\n");
+ }
+ }
+ }
+
+ if (stream_info->state == RESUMING &&
+ !stream_info->controllable_output) {
+ ISP_DBG("%s: axi_updating_mask stream_id %x\n",
+ __func__, stream_idx);
+ ISP_DBG("%s: axi_updating_mask frame_id %d\n",
+ __func__, vfe_dev->axi_data.
+ src_info[SRC_TO_INTF(stream_info->stream_src)]
+ .frame_id);
+ sof_info->axi_updating_mask |=
+ 1 << stream_idx;
+ }
+ }
+
+ vfe_dev->reg_updated = 0;
+
+ /* report frame drop per stream */
+ if (vfe_dev->error_info.framedrop_flag) {
+ for (i = 0; i < BUF_MGR_NUM_BUF_Q; i++) {
+ if (vfe_dev->error_info.stream_framedrop_count[i]) {
+ ISP_DBG("%s: get buf failed i %d\n", __func__,
+ i);
+ sof_info->stream_get_buf_fail_mask |= (1 << i);
+ vfe_dev->error_info.
+ stream_framedrop_count[i] = 0;
+ }
+ }
+ vfe_dev->error_info.framedrop_flag = 0;
+ }
+}
+
+void msm_isp_increment_frame_id(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts)
+{
+ struct msm_vfe_src_info *src_info = NULL;
+ struct msm_vfe_sof_info *sof_info = NULL;
+ enum msm_vfe_dual_hw_type dual_hw_type;
+ enum msm_vfe_dual_hw_ms_type ms_type;
+ struct msm_vfe_sof_info *master_sof_info = NULL;
+ int32_t time, master_time, delta;
+ uint32_t sof_incr = 0;
+ unsigned long flags;
+
+ if (vfe_dev->axi_data.src_info[frame_src].frame_id == 0)
+ msm_isp_update_dual_HW_ms_info_at_start(vfe_dev, frame_src,
+ ts);
+
+ spin_lock_irqsave(&vfe_dev->common_data->common_dev_data_lock, flags);
+ dual_hw_type =
+ vfe_dev->axi_data.src_info[frame_src].dual_hw_type;
+ ms_type =
+ vfe_dev->axi_data.src_info[frame_src].
+ dual_hw_ms_info.dual_hw_ms_type;
+ /*
+ * Increment frame_id if
+ * 1. Not Master Slave
+ * 2. Master
+ * 3. Slave and Master is Inactive
+ *
+ * OR
+ * (in other words)
+ * If SLAVE and Master active, don't increment slave frame_id.
+ * Instead use Master frame_id for Slave.
+ */
+ if ((dual_hw_type == DUAL_HW_MASTER_SLAVE) &&
+ (ms_type == MS_TYPE_SLAVE) &&
+ (vfe_dev->common_data->ms_resource.master_active == 1)) {
+ /* DUAL_HW_MS_SLAVE && MASTER active */
+ time = ts->buf_time.tv_sec * 1000 +
+ ts->buf_time.tv_usec / 1000;
+ master_sof_info = &vfe_dev->common_data->ms_resource.
+ master_sof_info;
+ master_time = master_sof_info->mono_timestamp_ms;
+ delta = vfe_dev->common_data->ms_resource.sof_delta_threshold;
+ ISP_DBG("%s: vfe %d frame_src %d frame %d", __func__,
+ vfe_dev->pdev->id, frame_src,
+ vfe_dev->axi_data.src_info[frame_src].frame_id);
+ ISP_DBG("%s: Slave time %d Master time %d delta %d\n", __func__,
+ time, master_time, time - master_time);
+
+ if (time - master_time > delta)
+ sof_incr = 1;
+
+ /*
+ * If delta < 5ms, slave frame_id = master frame_id
+ * If delta > 5ms, slave frame_id = master frame_id + 1
+ * CANNOT support Batch Mode with this logic currently.
+ */
+ vfe_dev->axi_data.src_info[frame_src].frame_id =
+ master_sof_info->frame_id + sof_incr;
+ } else {
+ if (frame_src == VFE_PIX_0) {
+ vfe_dev->axi_data.src_info[frame_src].frame_id +=
+ vfe_dev->axi_data.src_info[frame_src].
+ sof_counter_step;
+ ISP_DBG("%s: vfe %d sof_step %d\n", __func__,
+ vfe_dev->pdev->id,
+ vfe_dev->axi_data.src_info[frame_src].
+ sof_counter_step);
+ src_info = &vfe_dev->axi_data.src_info[frame_src];
+
+ if (!src_info->frame_id &&
+ !src_info->reg_update_frame_id &&
+ ((src_info->frame_id -
+ src_info->reg_update_frame_id) >
+ (MAX_REG_UPDATE_THRESHOLD *
+ src_info->sof_counter_step))) {
+ pr_err("%s:%d reg_update not received for %d frames\n",
+ __func__, __LINE__,
+ src_info->frame_id -
+ src_info->reg_update_frame_id);
+
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_REG_UPDATE_MISSING);
+ }
+
+ } else
+ vfe_dev->axi_data.src_info[frame_src].frame_id++;
+ }
+
+ sof_info = vfe_dev->axi_data.src_info[frame_src].
+ dual_hw_ms_info.sof_info;
+ if (dual_hw_type == DUAL_HW_MASTER_SLAVE &&
+ sof_info != NULL) {
+ sof_info->frame_id = vfe_dev->axi_data.src_info[frame_src].
+ frame_id;
+ sof_info->timestamp_ms = ts->event_time.tv_sec * 1000 +
+ ts->event_time.tv_usec / 1000;
+ sof_info->mono_timestamp_ms = ts->buf_time.tv_sec * 1000 +
+ ts->buf_time.tv_usec / 1000;
+ }
+ spin_unlock_irqrestore(&vfe_dev->common_data->common_dev_data_lock,
+ flags);
+}
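The slave frame_id derivation in msm_isp_increment_frame_id() above can be summarized as a one-line rule. The sketch below is illustrative only: slave_frame_id() is a hypothetical helper, and the threshold corresponds to ms_resource.sof_delta_threshold in the code above.

#include <stdint.h>

/* Sketch of the slave-side frame_id selection: the slave copies the
 * master's frame_id and bumps it by one only when its SOF arrives later
 * than the configured delta threshold (in ms).
 */
static uint32_t slave_frame_id(uint32_t master_frame_id,
			       int32_t slave_time_ms, int32_t master_time_ms,
			       int32_t delta_threshold_ms)
{
	uint32_t incr = (slave_time_ms - master_time_ms > delta_threshold_ms) ? 1 : 0;

	return master_frame_id + incr;
}

/* Example: master SOF at t=100 ms on frame 42; a slave SOF at t=107 ms with
 * a 5 ms threshold reports frame 43, while one at t=103 ms reports frame 42.
 */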
+
+void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
+ enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts)
+{
+ struct msm_isp_event_data event_data;
+ struct msm_vfe_sof_info *sof_info = NULL, *self_sof = NULL;
+ enum msm_vfe_dual_hw_ms_type ms_type;
+ int i, j;
+ unsigned long flags;
+
+ memset(&event_data, 0, sizeof(event_data));
+
+ switch (event_type) {
+ case ISP_EVENT_SOF:
+ if (frame_src == VFE_PIX_0) {
+ if (vfe_dev->isp_sof_debug < ISP_SOF_DEBUG_COUNT)
+ pr_err("%s: PIX0 frame id: %u\n", __func__,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+ vfe_dev->isp_sof_debug++;
+ } else if (frame_src == VFE_RAW_0) {
+ if (vfe_dev->isp_raw0_debug < ISP_SOF_DEBUG_COUNT)
+ pr_err("%s: RAW_0 frame id: %u\n", __func__,
+ vfe_dev->axi_data.src_info[VFE_RAW_0].frame_id);
+ vfe_dev->isp_raw0_debug++;
+ } else if (frame_src == VFE_RAW_1) {
+ if (vfe_dev->isp_raw1_debug < ISP_SOF_DEBUG_COUNT)
+ pr_err("%s: RAW_1 frame id: %u\n", __func__,
+ vfe_dev->axi_data.src_info[VFE_RAW_1].frame_id);
+ vfe_dev->isp_raw1_debug++;
+ } else if (frame_src == VFE_RAW_2) {
+ if (vfe_dev->isp_raw2_debug < ISP_SOF_DEBUG_COUNT)
+ pr_err("%s: RAW_2 frame id: %u\n", __func__,
+ vfe_dev->axi_data.src_info[VFE_RAW_2].frame_id);
+ vfe_dev->isp_raw2_debug++;
+ }
+
+ ISP_DBG("%s: vfe %d frame_src %d frame id: %u\n", __func__,
+ vfe_dev->pdev->id, frame_src,
+ vfe_dev->axi_data.src_info[frame_src].frame_id);
+
+ /*
+ * dual_cam and framedrop cannot be supported at the same time here.
+ * If framedrop also needs to be supported, move the delta
+ * calculation to user space.
+ */
+ if (vfe_dev->axi_data.src_info[frame_src].dual_hw_type ==
+ DUAL_HW_MASTER_SLAVE) {
+ spin_lock_irqsave(
+ &vfe_dev->common_data->common_dev_data_lock,
+ flags);
+ self_sof = vfe_dev->axi_data.src_info[frame_src].
+ dual_hw_ms_info.sof_info;
+ if (!self_sof) {
+ spin_unlock_irqrestore(&vfe_dev->common_data->
+ common_dev_data_lock, flags);
+ break;
+ }
+ ms_type = vfe_dev->axi_data.src_info[frame_src].
+ dual_hw_ms_info.dual_hw_ms_type;
+ if (ms_type == MS_TYPE_MASTER) {
+ for (i = 0, j = 0; i < MS_NUM_SLAVE_MAX; i++) {
+ if (!(vfe_dev->common_data->
+ ms_resource.slave_active_mask
+ & (1 << i)))
+ continue;
+ sof_info = &vfe_dev->common_data->
+ ms_resource.slave_sof_info[i];
+ event_data.u.sof_info.ms_delta_info.
+ delta[j] =
+ self_sof->mono_timestamp_ms -
+ sof_info->mono_timestamp_ms;
+ j++;
+ if (j == vfe_dev->common_data->
+ ms_resource.num_slave)
+ break;
+ }
+ event_data.u.sof_info.ms_delta_info.
+ num_delta_info = j;
+ } else {
+ sof_info = &vfe_dev->common_data->ms_resource.
+ master_sof_info;
+ event_data.u.sof_info.ms_delta_info.
+ num_delta_info = 1;
+ event_data.u.sof_info.ms_delta_info.delta[0] =
+ self_sof->mono_timestamp_ms -
+ sof_info->mono_timestamp_ms;
+ }
+ spin_unlock_irqrestore(&vfe_dev->common_data->
+ common_dev_data_lock, flags);
+ } else {
+ msm_isp_check_for_output_error(vfe_dev, ts,
+ &event_data.u.sof_info);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ event_data.frame_id = vfe_dev->axi_data.src_info[frame_src].frame_id;
+ event_data.timestamp = ts->event_time;
+ event_data.mono_timestamp = ts->buf_time;
+ msm_isp_send_event(vfe_dev, event_type | frame_src, &event_data);
+}
+
+/**
+ * msm_isp_calculate_framedrop() - Set up the framedrop period and pattern
+ * @axi_data: Structure describing the h/w streams.
+ * @stream_cfg_cmd: User space input parameters for the period/pattern.
+ *
+ * Initialize the h/w stream framedrop period and pattern sent
+ * by user space.
+ *
+ * Returns 0 on success, else an error code.
+ */
+int msm_isp_calculate_framedrop(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+{
+ uint32_t framedrop_period = 0;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+
+ if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)
+ < VFE_AXI_SRC_MAX) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ } else {
+ pr_err("%s: Invalid stream handle", __func__);
+ return -EINVAL;
+ }
+ if (!stream_info) {
+ pr_err("%s: Stream info is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_cfg_cmd->frame_skip_pattern);
+ stream_info->frame_skip_pattern =
+ stream_cfg_cmd->frame_skip_pattern;
+ if (stream_cfg_cmd->frame_skip_pattern == SKIP_ALL)
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ else
+ stream_info->current_framedrop_period = framedrop_period;
+
+ stream_info->init_frame_drop = stream_cfg_cmd->init_frame_drop;
+
+ if (stream_cfg_cmd->burst_count > 0) {
+ stream_info->stream_type = BURST_STREAM;
+ stream_info->num_burst_capture =
+ stream_cfg_cmd->burst_count;
+ } else {
+ stream_info->stream_type = CONTINUOUS_STREAM;
+ }
+ return 0;
+}
+
+void msm_isp_calculate_bandwidth(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int bpp = 0;
+
+ if (stream_info->stream_src < RDI_INTF_0) {
+ stream_info->bandwidth =
+ (axi_data->src_info[VFE_PIX_0].pixel_clock /
+ axi_data->src_info[VFE_PIX_0].width) *
+ stream_info->max_width;
+ stream_info->bandwidth = (unsigned long)stream_info->bandwidth *
+ stream_info->format_factor / ISP_Q2;
+ } else {
+ int rdi = SRC_TO_INTF(stream_info->stream_src);
+
+ bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+ if (rdi < VFE_SRC_MAX)
+ stream_info->bandwidth =
+ (axi_data->src_info[rdi].pixel_clock / 8) * bpp;
+ else
+ pr_err("%s: Invalid rdi interface\n", __func__);
+ }
+}
+
+#ifdef CONFIG_MSM_AVTIMER
+void msm_isp_start_avtimer(void)
+{
+ avcs_core_open();
+ avcs_core_disable_power_collapse(1);
+}
+
+void msm_isp_get_avtimer_ts(
+ struct msm_isp_timestamp *time_stamp)
+{
+ int rc = 0;
+ uint32_t avtimer_usec = 0;
+ uint64_t avtimer_tick = 0;
+
+ rc = avcs_core_query_timer(&avtimer_tick);
+ if (rc < 0) {
+ pr_err("%s: Error: Invalid AVTimer Tick, rc=%d\n",
+ __func__, rc);
+ /* In case of error return zero AVTimer Tick Value */
+ time_stamp->vt_time.tv_sec = 0;
+ time_stamp->vt_time.tv_usec = 0;
+ } else {
+ avtimer_usec = do_div(avtimer_tick, USEC_PER_SEC);
+ time_stamp->vt_time.tv_sec = (uint32_t)(avtimer_tick);
+ time_stamp->vt_time.tv_usec = avtimer_usec;
+ pr_debug("%s: AVTimer TS = %u:%u\n", __func__,
+ (uint32_t)(avtimer_tick), avtimer_usec);
+ }
+}
+#else
+void msm_isp_start_avtimer(void)
+{
+ pr_err("AV Timer is not supported\n");
+}
+
+void msm_isp_get_avtimer_ts(
+ struct msm_isp_timestamp *time_stamp)
+{
+ pr_err_ratelimited("%s: Error: AVTimer driver not available\n",
+ __func__);
+ time_stamp->vt_time.tv_sec = 0;
+ time_stamp->vt_time.tv_usec = 0;
+}
+#endif
+
+int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i;
+ uint32_t io_format = 0;
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
+ struct msm_vfe_axi_stream *stream_info;
+
+ rc = msm_isp_axi_create_stream(vfe_dev,
+ &vfe_dev->axi_data, stream_cfg_cmd);
+ if (rc) {
+ pr_err("%s: create stream failed\n", __func__);
+ return rc;
+ }
+
+ rc = msm_isp_validate_axi_request(
+ &vfe_dev->axi_data, stream_cfg_cmd);
+ if (rc) {
+ pr_err("%s: Request validation failed\n", __func__);
+ if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) <
+ VFE_AXI_SRC_MAX)
+ msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+ HANDLE_TO_IDX
+ (stream_cfg_cmd->axi_stream_handle));
+ return rc;
+ }
+ stream_info = &vfe_dev->axi_data.
+ stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ if (!stream_info) {
+ pr_err("%s: can not find stream handle %x\n", __func__,
+ stream_cfg_cmd->axi_stream_handle);
+ return -EINVAL;
+ }
+
+ stream_info->memory_input = stream_cfg_cmd->memory_input;
+ vfe_dev->reg_update_requested &=
+ ~(BIT(SRC_TO_INTF(stream_info->stream_src)));
+
+ msm_isp_axi_reserve_wm(vfe_dev, &vfe_dev->axi_data, stream_info);
+
+ if (stream_info->stream_src < RDI_INTF_0) {
+ io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
+ if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == IDEAL_RAW) {
+ if (stream_info->stream_src == CAMIF_RAW &&
+ io_format != stream_info->output_format)
+ pr_debug("%s: Overriding input format\n",
+ __func__);
+
+ io_format = stream_info->output_format;
+ }
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.cfg_io_format(
+ vfe_dev, stream_info->stream_src, io_format);
+ if (rc) {
+ pr_err("%s: cfg io format failed\n", __func__);
+ goto done;
+ }
+ }
+ rc = msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
+ if (rc)
+ goto done;
+ if (stream_cfg_cmd->vt_enable && !vfe_dev->vt_enable) {
+ vfe_dev->vt_enable = stream_cfg_cmd->vt_enable;
+ msm_isp_start_avtimer();
+ }
+ if (stream_info->num_planes > 1)
+ msm_isp_axi_reserve_comp_mask(
+ &vfe_dev->axi_data, stream_info);
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, i);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_xbar_reg(vfe_dev, stream_info, i);
+ }
+ /* initialize the WM ping pong with scratch buffer */
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PONG_FLAG);
+
+done:
+ if (rc) {
+ msm_isp_axi_free_wm(&vfe_dev->axi_data, stream_info);
+ msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
+ }
+ return rc;
+}
+
+int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i;
+ struct msm_vfe_axi_stream_release_cmd *stream_release_cmd = arg;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
+
+
+ if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >=
+ VFE_AXI_SRC_MAX) {
+ pr_err("%s: Invalid stream handle\n", __func__);
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_release_cmd->stream_handle)];
+ if (stream_info->state == AVAILABLE) {
+ pr_err("%s: Stream already released\n", __func__);
+ return -EINVAL;
+ } else if (stream_info->state != INACTIVE) {
+ stream_cfg.cmd = STOP_STREAM;
+ stream_cfg.num_streams = 1;
+ stream_cfg.stream_handle[0] = stream_release_cmd->stream_handle;
+ msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg);
+ }
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_reg(vfe_dev, stream_info, i);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_xbar_reg(vfe_dev, stream_info, i);
+ }
+
+ if (stream_info->num_planes > 1)
+ msm_isp_axi_free_comp_mask(&vfe_dev->axi_data, stream_info);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.clear_framedrop(vfe_dev, stream_info);
+ msm_isp_axi_free_wm(axi_data, stream_info);
+
+ msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+ HANDLE_TO_IDX(stream_release_cmd->stream_handle));
+
+ return rc;
+}
+
+static int msm_isp_axi_stream_enable_cfg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, int32_t dual_vfe_sync)
+{
+ int i, vfe_id = 0, enable_wm = 0;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ struct dual_vfe_resource *dual_vfe_res = NULL;
+
+ if (stream_idx >= VFE_AXI_SRC_MAX) {
+ pr_err("%s: Invalid stream_idx", __func__);
+ goto error;
+ }
+
+ if (stream_info->state == INACTIVE)
+ goto error;
+
+ if (stream_info->state == START_PENDING ||
+ stream_info->state == RESUME_PENDING) {
+ enable_wm = 1;
+ } else {
+ enable_wm = 0;
+ }
+ for (i = 0; i < stream_info->num_planes; i++) {
+ /*
+ * When the sensor is streaming, use dual VFE sync mode
+ * to enable the WMs together and avoid a split.
+ */
+ if ((stream_info->stream_src < RDI_INTF_0) &&
+ vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1 &&
+ dual_vfe_sync) {
+ dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
+ if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
+ !dual_vfe_res->axi_data[ISP_VFE0] ||
+ !dual_vfe_res->vfe_base[ISP_VFE1] ||
+ !dual_vfe_res->axi_data[ISP_VFE1]) {
+ pr_err("%s:%d failed vfe0 %pK %pK vfe %pK %pK\n",
+ __func__, __LINE__,
+ dual_vfe_res->vfe_base[ISP_VFE0],
+ dual_vfe_res->axi_data[ISP_VFE0],
+ dual_vfe_res->vfe_base[ISP_VFE1],
+ dual_vfe_res->axi_data[ISP_VFE1]);
+ goto error;
+ }
+ for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ enable_wm(dual_vfe_res->vfe_base[vfe_id],
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].wm[i],
+ enable_wm);
+ }
+ } else if (!vfe_dev->is_split ||
+ (stream_info->stream_src >= RDI_INTF_0 &&
+ stream_info->stream_src <= RDI_INTF_2) ||
+ !dual_vfe_sync) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ enable_wm(vfe_dev->vfe_base, stream_info->wm[i],
+ enable_wm);
+ }
+ if (!enable_wm) {
+ /* Issue a reg update for the raw snapshot case,
+ * since there is no reg update ack
+ */
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count > 0
+ && vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count == 0) {
+ if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == IDEAL_RAW) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev,
+ VFE_PIX_0);
+ }
+ }
+ }
+ }
+ if (stream_info->state == START_PENDING)
+ axi_data->num_active_stream++;
+ else if (stream_info->state == STOP_PENDING)
+ axi_data->num_active_stream--;
+ return 0;
+error:
+ return -EINVAL;
+}
+
+void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ int i;
+ unsigned long flags;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
+ frame_src) {
+ ISP_DBG("%s stream_src %d frame_src %d\n", __func__,
+ SRC_TO_INTF(
+ axi_data->stream_info[i].stream_src),
+ frame_src);
+ continue;
+ }
+ if (axi_data->stream_info[i].state == UPDATING)
+ axi_data->stream_info[i].state = ACTIVE;
+ else if (axi_data->stream_info[i].state == START_PENDING ||
+ axi_data->stream_info[i].state == STOP_PENDING) {
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, &axi_data->stream_info[i],
+ axi_data->stream_info[i].state ==
+ START_PENDING ? 1 : 0);
+ axi_data->stream_info[i].state =
+ axi_data->stream_info[i].state ==
+ START_PENDING ? STARTING : STOPPING;
+ } else if (axi_data->stream_info[i].state == STARTING ||
+ axi_data->stream_info[i].state == STOPPING) {
+ axi_data->stream_info[i].state =
+ axi_data->stream_info[i].state == STARTING ?
+ ACTIVE : INACTIVE;
+ }
+ }
+
+ spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+ if (vfe_dev->axi_data.stream_update[frame_src])
+ vfe_dev->axi_data.stream_update[frame_src]--;
+ spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
+
+ if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF ||
+ (vfe_dev->axi_data.pipeline_update ==
+ DISABLE_CAMIF_IMMEDIATELY)) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ enable_module(vfe_dev, 0xFF, 0);
+ vfe_dev->axi_data.pipeline_update = NO_UPDATE;
+ }
+
+ if (vfe_dev->axi_data.stream_update[frame_src] == 0)
+ complete(&vfe_dev->stream_config_complete);
+}
+
+static void msm_isp_reload_ping_pong_offset(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i, j;
+ uint32_t bit;
+ struct msm_isp_buffer *buf;
+ int32_t buf_size_byte = 0;
+ int32_t word_per_line = 0;
+
+ for (i = 0; i < 2; i++) {
+ buf = stream_info->buf[i];
+ if (!buf)
+ continue;
+
+ bit = i ? 0 : 1;
+
+ for (j = 0; j < stream_info->num_planes; j++) {
+ word_per_line = msm_isp_cal_word_per_line(
+ stream_info->output_format, stream_info->
+ plane_cfg[j].output_stride);
+ if (word_per_line < 0) {
+ /* 0 means no prefetch*/
+ word_per_line = 0;
+ buf_size_byte = 0;
+ } else {
+ buf_size_byte = (word_per_line * 8 *
+ stream_info->plane_cfg[j].
+ output_scan_lines) - stream_info->
+ plane_cfg[j].plane_addr_offset;
+ }
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+ vfe_dev->vfe_base, stream_info->wm[j], bit,
+ buf->mapped_info[j].paddr +
+ stream_info->plane_cfg[j].plane_addr_offset,
+ buf_size_byte);
+ }
+ }
+}
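The prefetch sizing used when reloading the ping/pong offsets above is simple arithmetic. The helper below is a minimal sketch; prefetch_buf_size() is hypothetical, and the factor of 8 simply mirrors the multiplier in the code above.

#include <stdint.h>

/* Minimal sketch of the prefetch size math: a negative words-per-line
 * result means "no prefetch" (size 0), otherwise
 * size = words_per_line * 8 * scan_lines minus the plane's start offset.
 */
static int32_t prefetch_buf_size(int32_t word_per_line, uint32_t scan_lines,
				 uint32_t plane_addr_offset)
{
	if (word_per_line < 0)
		return 0;
	return (int32_t)(word_per_line * 8 * scan_lines) -
		(int32_t)plane_addr_offset;
}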
+
+void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ int i, j;
+ uint32_t update_state;
+ unsigned long flags, flags1;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+ int num_stream = 0;
+
+ spin_lock_irqsave(&vfe_dev->common_data->common_dev_data_lock, flags);
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
+ frame_src) {
+ continue;
+ }
+ num_stream++;
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state == INACTIVE)
+ continue;
+ if ((stream_info->stream_type == BURST_STREAM &&
+ !stream_info->controllable_output) ||
+ stream_info->state == AVAILABLE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags1);
+ if (stream_info->state == PAUSING) {
+ /*AXI Stopped, apply update*/
+ stream_info->state = PAUSED;
+ msm_isp_reload_ping_pong_offset(vfe_dev, stream_info);
+ for (j = 0; j < stream_info->num_planes; j++)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ /*Resume AXI*/
+ stream_info->state = RESUME_PENDING;
+ msm_isp_update_dual_HW_axi(vfe_dev, stream_info);
+ } else if (stream_info->state == RESUMING) {
+ msm_isp_update_dual_HW_axi(vfe_dev, stream_info);
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags1);
+ }
+ spin_unlock_irqrestore(&vfe_dev->common_data->common_dev_data_lock,
+ flags);
+ if (num_stream)
+ update_state = atomic_dec_return(
+ &axi_data->axi_cfg_update[frame_src]);
+}
+
+static int msm_isp_update_deliver_count(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_bit)
+{
+ struct msm_vfe_axi_stream *temp_stream_info;
+ int rc = 0;
+
+ if (!stream_info->controllable_output)
+ goto done;
+
+ temp_stream_info =
+ msm_isp_get_controllable_stream(vfe_dev, stream_info);
+
+ if (!temp_stream_info->undelivered_request_cnt) {
+ pr_err_ratelimited("%s:%d error undelivered_request_cnt 0\n",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ goto done;
+ } else {
+ temp_stream_info->undelivered_request_cnt--;
+ if (pingpong_bit != temp_stream_info->sw_ping_pong_bit) {
+ pr_err("%s:%d ping pong bit actual %d sw %d\n",
+ __func__, __LINE__, pingpong_bit,
+ temp_stream_info->sw_ping_pong_bit);
+ rc = -EINVAL;
+ goto done;
+ }
+ temp_stream_info->sw_ping_pong_bit ^= 1;
+ }
+done:
+ return rc;
+}
+
+void msm_isp_halt_send_error(struct vfe_device *vfe_dev, uint32_t event)
+{
+ uint32_t i = 0;
+ struct msm_isp_event_data error_event;
+ struct msm_vfe_axi_halt_cmd halt_cmd;
+ uint32_t irq_status0, irq_status1;
+
+ if (atomic_read(&vfe_dev->error_info.overflow_state) !=
+ NO_OVERFLOW)
+ /* Recovery is already in Progress */
+ return;
+
+ if (event == ISP_EVENT_PING_PONG_MISMATCH &&
+ vfe_dev->axi_data.recovery_count < MAX_RECOVERY_THRESHOLD) {
+ vfe_dev->hw_info->vfe_ops.irq_ops.
+ read_irq_status(vfe_dev, &irq_status0, &irq_status1);
+ pr_err("%s:pingpong mismatch from vfe%d, core%d, recovery_count %d\n",
+ __func__, vfe_dev->pdev->id, smp_processor_id(),
+ vfe_dev->axi_data.recovery_count);
+
+ vfe_dev->axi_data.recovery_count++;
+
+ msm_isp_process_overflow_irq(vfe_dev,
+ &irq_status0, &irq_status1, 1);
+ return;
+ }
+
+ memset(&halt_cmd, 0, sizeof(struct msm_vfe_axi_halt_cmd));
+ memset(&error_event, 0, sizeof(struct msm_isp_event_data));
+ halt_cmd.stop_camif = 1;
+ halt_cmd.overflow_detected = 0;
+ halt_cmd.blocking_halt = 0;
+
+ pr_err("%s: vfe%d exiting camera!\n", __func__, vfe_dev->pdev->id);
+
+ atomic_set(&vfe_dev->error_info.overflow_state,
+ HALT_ENFORCED);
+
+ /* heavy spin lock in axi halt, avoid spin lock outside. */
+ msm_isp_axi_halt(vfe_dev, &halt_cmd);
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++)
+ vfe_dev->axi_data.stream_info[i].state =
+ INACTIVE;
+
+ error_event.frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+
+ msm_isp_send_event(vfe_dev, event, &error_event);
+}
+
+int msm_isp_print_ping_pong_address(struct vfe_device *vfe_dev,
+ unsigned long fault_addr)
+{
+ int i, j;
+ struct msm_isp_buffer *buf = NULL;
+ uint32_t pingpong_bit;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+
+ for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
+ stream_info = &vfe_dev->axi_data.stream_info[j];
+ if (stream_info->state == INACTIVE)
+ continue;
+
+ for (pingpong_bit = 0; pingpong_bit < 2; pingpong_bit++) {
+ for (i = 0; i < stream_info->num_planes; i++) {
+ buf = stream_info->buf[pingpong_bit];
+ if (buf == NULL) {
+ pr_err("%s: buf NULL\n", __func__);
+ continue;
+ }
+ pr_debug("%s: stream_id %x ping-pong %d plane %d start_addr %lu addr_offset %x len %zx stride %d scanline %d\n"
+ , __func__, stream_info->stream_id,
+ pingpong_bit, i, (unsigned long)
+ buf->mapped_info[i].paddr,
+ stream_info->
+ plane_cfg[i].plane_addr_offset,
+ buf->mapped_info[i].len,
+ stream_info->
+ plane_cfg[i].output_stride,
+ stream_info->
+ plane_cfg[i].output_scan_lines
+ );
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct msm_isp_buffer *msm_isp_get_stream_buffer(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int rc = 0;
+ uint32_t bufq_handle = 0;
+ struct msm_isp_buffer *buf = NULL;
+ struct msm_vfe_axi_stream *temp_stream_info = NULL;
+ struct msm_vfe_frame_request_queue *queue_req;
+ uint32_t buf_index = MSM_ISP_INVALID_BUF_INDEX;
+
+ if (!stream_info->controllable_output) {
+ bufq_handle = stream_info->bufq_handle
+ [VFE_BUF_QUEUE_DEFAULT];
+ } else {
+ temp_stream_info = msm_isp_get_controllable_stream(
+ vfe_dev, stream_info);
+ queue_req = list_first_entry_or_null(
+ &temp_stream_info->request_q,
+ struct msm_vfe_frame_request_queue, list);
+ if (!queue_req)
+ return buf;
+
+ bufq_handle = temp_stream_info->
+ bufq_handle[queue_req->buff_queue_id];
+
+ if (!bufq_handle ||
+ temp_stream_info->request_q_cnt <= 0) {
+ pr_err_ratelimited("%s: Drop request. Shared stream is stopped.\n",
+ __func__);
+ return buf;
+ }
+ buf_index = queue_req->buf_index;
+ queue_req->cmd_used = 0;
+ list_del(&queue_req->list);
+ temp_stream_info->request_q_cnt--;
+ }
+
+ rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+ bufq_handle, buf_index, &buf);
+
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return buf;
+ }
+ if (rc < 0)
+ return buf;
+
+ if (buf->num_planes != stream_info->num_planes) {
+ pr_err("%s: Invalid buffer\n", __func__);
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ bufq_handle, buf->buf_idx);
+ buf = NULL;
+ }
+
+ return buf;
+}
+
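+/*
+ * Program the ping/pong write master addresses for offline (fetch
+ * engine) processing, using the buffer identified by buf_idx from the
+ * stream's default queue. Only the non-split (single VFE) case is
+ * handled here.
+ */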
+int msm_isp_cfg_offline_ping_pong_address(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+ uint32_t buf_idx)
+{
+ int i, rc = 0;
+ struct msm_isp_buffer *buf = NULL;
+ uint32_t pingpong_bit;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ uint32_t buffer_size_byte = 0;
+ int32_t word_per_line = 0;
+ dma_addr_t paddr;
+ uint32_t bufq_handle = 0;
+
+ if (stream_idx >= VFE_AXI_SRC_MAX) {
+		pr_err("%s: Invalid stream_idx\n", __func__);
+ return -EINVAL;
+ }
+
+ bufq_handle = stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT];
+
+ if (!vfe_dev->is_split) {
+ rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
+ vfe_dev->buf_mgr, bufq_handle, buf_idx, &buf);
+ if (rc < 0 || !buf) {
+ pr_err("%s: No fetch buffer rc= %d\n",
+ __func__, rc);
+ return -EINVAL;
+ }
+
+ if (buf->num_planes != stream_info->num_planes) {
+ pr_err("%s: Invalid buffer\n", __func__);
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ bufq_handle, buf->buf_idx);
+ return -EINVAL;
+ }
+
+ pingpong_bit = ((pingpong_status >>
+ stream_info->wm[0]) & 0x1);
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ word_per_line = msm_isp_cal_word_per_line(
+ stream_info->output_format,
+ stream_info->plane_cfg[i].
+ output_stride);
+ if (word_per_line < 0) {
+				/* 0 means no prefetch */
+ word_per_line = 0;
+ buffer_size_byte = 0;
+ } else {
+ buffer_size_byte = (word_per_line * 8 *
+ stream_info->plane_cfg[i].
+ output_scan_lines) -
+ stream_info->
+ plane_cfg[i].plane_addr_offset;
+ }
+ paddr = buf->mapped_info[i].paddr;
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_ping_pong_addr(
+ vfe_dev->vfe_base, stream_info->wm[i],
+ pingpong_bit, paddr +
+ stream_info->
+ plane_cfg[i].plane_addr_offset,
+ buffer_size_byte);
+
+ if (i == 0) {
+ stream_info->buf[!pingpong_bit] = buf;
+ buf->pingpong_bit = !pingpong_bit;
+ }
+ buf->state = MSM_ISP_BUFFER_STATE_DEQUEUED;
+ }
+ }
+ return rc;
+
+}
+
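+/*
+ * Latch the next buffer, or the scratch buffer when none is available
+ * or scratch is requested, into the ping or pong address registers of
+ * every write master of the stream. Split PIX streams program both
+ * VFEs, taking the peer VFE's stream lock while doing so.
+ */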
+static int msm_isp_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+ int scratch)
+{
+ int i;
+ struct msm_isp_buffer *buf = NULL;
+ uint32_t pingpong_bit;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ uint32_t buffer_size_byte = 0;
+ int32_t word_per_line = 0;
+ dma_addr_t paddr;
+ struct dual_vfe_resource *dual_vfe_res = NULL;
+ uint32_t vfe_id = 0;
+ unsigned long flags;
+
+ if (stream_idx >= VFE_AXI_SRC_MAX) {
+		pr_err("%s: Invalid stream_idx\n", __func__);
+ return -EINVAL;
+ }
+ /* make sure that streams are in right state */
+ if ((stream_info->stream_src < RDI_INTF_0) &&
+ vfe_dev->is_split) {
+ dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
+ if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
+ !dual_vfe_res->axi_data[ISP_VFE0] ||
+ !dual_vfe_res->vfe_base[ISP_VFE1] ||
+ !dual_vfe_res->axi_data[ISP_VFE1]) {
+ pr_err("%s:%d failed vfe0 %pK %pK vfe %pK %pK\n",
+ __func__, __LINE__,
+ dual_vfe_res->vfe_base[ISP_VFE0],
+ dual_vfe_res->axi_data[ISP_VFE0],
+ dual_vfe_res->vfe_base[ISP_VFE1],
+ dual_vfe_res->axi_data[ISP_VFE1]);
+ return -EINVAL;
+ }
+ } else if (!vfe_dev->is_split ||
+ (stream_info->stream_src >= RDI_INTF_0 &&
+ stream_info->stream_src <= RDI_INTF_2)) {
+ dual_vfe_res = NULL;
+ } else {
+ pr_err("%s: Error! Should not reach this case is_split %d stream_src %d\n",
+ __func__, vfe_dev->is_split, stream_info->stream_src);
+ msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
+ return 0;
+ }
+
+ if (!scratch)
+ buf = msm_isp_get_stream_buffer(vfe_dev, stream_info);
+
+ /* Isolate pingpong_bit from pingpong_status */
+ pingpong_bit = ((pingpong_status >>
+ stream_info->wm[0]) & 0x1);
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ if (buf) {
+ word_per_line = msm_isp_cal_word_per_line(
+ stream_info->output_format, stream_info->
+ plane_cfg[i].output_stride);
+ if (word_per_line < 0) {
+				/* 0 means no prefetch */
+ word_per_line = 0;
+ buffer_size_byte = 0;
+ } else {
+ buffer_size_byte = (word_per_line * 8 *
+ stream_info->plane_cfg[i].
+ output_scan_lines) - stream_info->
+ plane_cfg[i].plane_addr_offset;
+ }
+
+ paddr = buf->mapped_info[i].paddr;
+ ISP_DBG(
+ "%s: vfe %d config buf %d to pingpong %d stream %x\n",
+ __func__, vfe_dev->pdev->id,
+ buf->buf_idx, !pingpong_bit,
+ stream_info->stream_id);
+ }
+
+ if (dual_vfe_res) {
+ for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
+ if (vfe_id != vfe_dev->pdev->id)
+ spin_lock_irqsave(
+ &dual_vfe_res->
+ axi_data[vfe_id]->
+ stream_info[stream_idx].
+ lock, flags);
+
+ if (buf)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_ping_pong_addr(
+ dual_vfe_res->vfe_base[vfe_id],
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].wm[i],
+ pingpong_bit, paddr +
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].
+ plane_cfg[i].plane_addr_offset,
+ buffer_size_byte);
+ else
+ msm_isp_cfg_stream_scratch(
+ dual_vfe_res->vfe_dev[vfe_id],
+ &(dual_vfe_res->axi_data
+ [vfe_id]->
+ stream_info[stream_idx]),
+ pingpong_status);
+
+ if (i == 0) {
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].
+ buf[!pingpong_bit] =
+ buf;
+ }
+ if (vfe_id != vfe_dev->pdev->id)
+ spin_unlock_irqrestore(
+ &dual_vfe_res->
+ axi_data[vfe_id]->
+ stream_info[stream_idx].
+ lock, flags);
+ }
+ } else {
+ if (buf)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_ping_pong_addr(
+ vfe_dev->vfe_base, stream_info->wm[i],
+ pingpong_bit, paddr +
+ stream_info->plane_cfg[i].
+ plane_addr_offset,
+ buffer_size_byte);
+ else
+ msm_isp_cfg_stream_scratch(vfe_dev,
+ stream_info, pingpong_status);
+ if (i == 0)
+ stream_info->buf[!pingpong_bit] = buf;
+ }
+ if (i == 0 && buf)
+ buf->pingpong_bit = !pingpong_bit;
+ }
+
+ return 0;
+}
+
+static void msm_isp_handle_done_buf_frame_id_mismatch(
+ struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info,
+ struct msm_isp_buffer *buf, struct timeval *time_stamp,
+ uint32_t frame_id)
+{
+ struct msm_isp_event_data error_event;
+ int ret = 0;
+
+ memset(&error_event, 0, sizeof(error_event));
+ error_event.frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ error_event.u.error_info.err_type =
+ ISP_ERROR_FRAME_ID_MISMATCH;
+ ret = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx, time_stamp,
+ frame_id,
+ stream_info->runtime_output_format);
+ if (ret == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
+ return;
+ }
+ msm_isp_send_event(vfe_dev, ISP_EVENT_ERROR,
+ &error_event);
+ pr_err("%s: Error! frame id mismatch!! 1st buf frame %d,curr frame %d\n",
+ __func__, buf->frame_id, frame_id);
+ vfe_dev->buf_mgr->frameId_mismatch_recovery = 1;
+}
+
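+/*
+ * Handle a buffer the hardware has finished writing: honour any
+ * software skip request by returning the buffer as dropped, otherwise
+ * deliver it to userspace either as a BUF_DIVERT event (diverted,
+ * non-scratch buffers) or as a regular BUF_DONE completion.
+ */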
+static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, struct msm_isp_buffer *buf,
+ struct timeval *time_stamp, uint32_t frame_id)
+{
+ int rc;
+ unsigned long flags;
+ struct msm_isp_event_data buf_event;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ uint32_t buf_src;
+ uint8_t drop_frame = 0;
+ struct msm_isp_bufq *bufq = NULL;
+
+ memset(&buf_event, 0, sizeof(buf_event));
+
+ if (stream_idx >= VFE_AXI_SRC_MAX) {
+		pr_err_ratelimited("%s: Invalid stream_idx\n", __func__);
+ return -EINVAL;
+ }
+
+ if (SRC_TO_INTF(stream_info->stream_src) >= VFE_SRC_MAX) {
+ pr_err_ratelimited("%s: Invalid stream index, put buf back to vb2 queue\n",
+ __func__);
+ rc = vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ return -EINVAL;
+ }
+
+ if (stream_info->stream_type != BURST_STREAM &&
+ (stream_info->sw_skip.stream_src_mask &
+ (1 << stream_info->stream_src))) {
+ /* Hw stream output of this src is requested for drop */
+ if (stream_info->sw_skip.skip_mode == SKIP_ALL) {
+ /* drop all buffers */
+ drop_frame = 1;
+ } else if (stream_info->sw_skip.skip_mode == SKIP_RANGE &&
+ (stream_info->sw_skip.min_frame_id <= frame_id &&
+ stream_info->sw_skip.max_frame_id >= frame_id)) {
+ drop_frame = 1;
+ } else if (frame_id > stream_info->sw_skip.max_frame_id) {
+ spin_lock_irqsave(&stream_info->lock, flags);
+ memset(&stream_info->sw_skip, 0,
+ sizeof(struct msm_isp_sw_framskip));
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+ }
+
+ rc = vfe_dev->buf_mgr->ops->get_buf_src(vfe_dev->buf_mgr,
+ buf->bufq_handle, &buf_src);
+ if (rc != 0) {
+ pr_err_ratelimited("%s: Error getting buf_src\n", __func__);
+ return -EINVAL;
+ }
+
+ if (drop_frame) {
+ buf->buf_debug.put_state[
+ buf->buf_debug.put_state_last] =
+ MSM_ISP_BUFFER_STATE_DROP_SKIP;
+ buf->buf_debug.put_state_last ^= 1;
+ rc = vfe_dev->buf_mgr->ops->buf_done(
+ vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx,
+ time_stamp, frame_id,
+ stream_info->runtime_output_format);
+
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+ if (!rc) {
+ ISP_DBG("%s:%d vfe_id %d Buffer dropped %d\n",
+ __func__, __LINE__, vfe_dev->pdev->id,
+ frame_id);
+ /*
+ * Return rc which is 0 at this point so that
+ * we can cfg ping pong and we can continue
+ * streaming
+ */
+ return rc;
+ }
+ }
+
+ buf_event.frame_id = frame_id;
+ buf_event.timestamp = *time_stamp;
+ buf_event.u.buf_done.session_id = stream_info->session_id;
+ buf_event.u.buf_done.stream_id = stream_info->stream_id;
+ buf_event.u.buf_done.handle = buf->bufq_handle;
+ buf_event.u.buf_done.buf_idx = buf->buf_idx;
+ buf_event.u.buf_done.output_format =
+ stream_info->runtime_output_format;
+ if (vfe_dev->fetch_engine_info.is_busy &&
+ SRC_TO_INTF(stream_info->stream_src) == VFE_PIX_0) {
+ vfe_dev->fetch_engine_info.is_busy = 0;
+ }
+
+ if (stream_info->buf_divert &&
+ buf_src != MSM_ISP_BUFFER_SRC_SCRATCH) {
+
+ bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
+ buf->bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq buf_handle %x\n",
+ __func__, buf->bufq_handle);
+ return -EINVAL;
+ }
+		if (bufq->buf_type == ISP_SHARE_BUF)
+ msm_isp_send_event(vfe_dev->common_data->
+ dual_vfe_res->vfe_dev[ISP_VFE1],
+ ISP_EVENT_BUF_DIVERT, &buf_event);
+ else
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_BUF_DIVERT, &buf_event);
+ } else {
+ ISP_DBG("%s: vfe_id %d send buf done buf-id %d bufq %x\n",
+ __func__, vfe_dev->pdev->id, buf->buf_idx,
+ buf->bufq_handle);
+ msm_isp_send_event(vfe_dev, ISP_EVENT_BUF_DONE,
+ &buf_event);
+ buf->buf_debug.put_state[
+ buf->buf_debug.put_state_last] =
+ MSM_ISP_BUFFER_STATE_PUT_BUF;
+ buf->buf_debug.put_state_last ^= 1;
+ rc = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx, time_stamp,
+ frame_id, stream_info->runtime_output_format);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
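+/*
+ * Account for a frame dropped by the framedrop pattern: record the
+ * affected buffer queue in the SOF info reported to userspace and, if
+ * the stream is fully stopped so no buf done interrupt will follow,
+ * run the AXI IRQ stream processing by hand.
+ */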
+int msm_isp_drop_frame(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, struct msm_isp_timestamp *ts,
+ struct msm_isp_sof_info *sof_info)
+{
+ struct msm_isp_buffer *done_buf = NULL;
+ uint32_t pingpong_status;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ uint32_t pingpong_bit;
+
+ if (!vfe_dev || !stream_info || !ts || !sof_info) {
+		pr_err("%s %d vfe_dev %pK stream_info %pK ts %pK sof_info %pK\n",
+ __func__, __LINE__, vfe_dev, stream_info, ts,
+ sof_info);
+ return -EINVAL;
+ }
+ pingpong_status =
+ ~vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+ pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ done_buf = stream_info->buf[pingpong_bit];
+ if (done_buf) {
+ bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
+ done_buf->bufq_handle);
+ if (!bufq) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: Invalid bufq buf_handle %x\n",
+ __func__, done_buf->bufq_handle);
+ return -EINVAL;
+ }
+ sof_info->reg_update_fail_mask_ext |=
+ (bufq->bufq_handle & 0xFF);
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+ /* if buf done will not come, we need to process it ourself */
+ if (stream_info->activated_framedrop_period ==
+ MSM_VFE_STREAM_STOP_PERIOD) {
+ /* no buf done come */
+ msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
+ pingpong_status, ts);
+ }
+ return 0;
+}
+
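+/*
+ * Work out whether this start/stop command requires CAMIF to be
+ * enabled or disabled and whether the AXI must be halted. Halt is
+ * requested when the command stops the last active streams, in which
+ * case CAMIF is disabled immediately rather than at a frame boundary.
+ */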
+static void msm_isp_get_camif_update_state_and_halt(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state *camif_update,
+ int *halt)
+{
+ int i;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint8_t pix_stream_cnt = 0, cur_pix_stream_cnt;
+
+ pr_debug("%s: entry\n", __func__);
+
+ cur_pix_stream_cnt =
+ axi_data->src_info[VFE_PIX_0].pix_stream_count +
+ axi_data->src_info[VFE_PIX_0].raw_stream_count;
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info =
+ &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+
+ if (stream_info->stream_src < RDI_INTF_0)
+ pix_stream_cnt++;
+ }
+
+ if (vfe_dev->axi_data.num_active_stream == stream_cfg_cmd->num_streams
+ && (stream_cfg_cmd->cmd == STOP_STREAM ||
+ stream_cfg_cmd->cmd == STOP_IMMEDIATELY))
+ *halt = 1;
+ else
+ *halt = 0;
+
+ if ((pix_stream_cnt) &&
+ (axi_data->src_info[VFE_PIX_0].input_mux != EXTERNAL_READ)) {
+ if (cur_pix_stream_cnt == 0 && pix_stream_cnt &&
+ stream_cfg_cmd->cmd == START_STREAM)
+ *camif_update = ENABLE_CAMIF;
+ else if (cur_pix_stream_cnt &&
+ (cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
+ (stream_cfg_cmd->cmd == STOP_STREAM ||
+ stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
+ if (*halt)
+ *camif_update = DISABLE_CAMIF_IMMEDIATELY;
+ else
+ *camif_update = DISABLE_CAMIF;
+ } else
+ *camif_update = NO_UPDATE;
+ } else
+ *camif_update = NO_UPDATE;
+
+ pr_debug("%s: exit\n", __func__);
+
+}
+
+static void msm_isp_update_camif_output_count(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return;
+ }
+ stream_info =
+ &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (stream_info->stream_src == PIX_ENCODER ||
+ stream_info->stream_src == PIX_VIEWFINDER ||
+ stream_info->stream_src == PIX_VIDEO ||
+ stream_info->stream_src == IDEAL_RAW) {
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ vfe_dev->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
+ pix_stream_count++;
+ else
+ vfe_dev->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
+ pix_stream_count--;
+ } else if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == RDI_INTF_0 ||
+ stream_info->stream_src == RDI_INTF_1 ||
+ stream_info->stream_src == RDI_INTF_2) {
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ vfe_dev->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
+ raw_stream_count++;
+ else
+ vfe_dev->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
+ raw_stream_count--;
+
+ }
+ }
+}
+
+/* Factor in Q2 format */
+#define ISP_DEFAULT_FORMAT_FACTOR 6
+#define ISP_BUS_UTILIZATION_FACTOR 6
+int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev,
+ enum msm_vfe_hw_state hw_state)
+{
+ int i, rc = 0, frame_src, ms_type;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint64_t total_pix_bandwidth = 0, total_rdi_bandwidth = 0;
+ uint32_t num_pix_streams = 0;
+ uint64_t total_bandwidth = 0;
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = &axi_data->stream_info[i];
+ frame_src = SRC_TO_INTF(stream_info->stream_src);
+ ms_type = vfe_dev->axi_data.src_info[frame_src].
+ dual_hw_ms_info.dual_hw_ms_type;
+ if (hw_state == HW_STATE_SLEEP) {
+ rc = msm_isp_update_bandwidth(
+ ISP_VFE0 + vfe_dev->pdev->id, 0, 0);
+ return rc;
+ }
+
+ if (stream_info->state == ACTIVE ||
+ stream_info->state == START_PENDING) {
+ if (stream_info->stream_src < RDI_INTF_0) {
+ total_pix_bandwidth += stream_info->bandwidth;
+ num_pix_streams++;
+ } else {
+ total_rdi_bandwidth += stream_info->bandwidth;
+ }
+ }
+ }
+
+ total_bandwidth = total_pix_bandwidth + total_rdi_bandwidth;
+ rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+ (total_bandwidth + vfe_dev->hw_info->min_ab),
+ (total_bandwidth + vfe_dev->hw_info->min_ib));
+ if (rc < 0)
+ pr_err("%s: update failed\n", __func__);
+
+ return rc;
+}
+
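+/*
+ * Arm the per-interface stream update counters and wait, up to
+ * VFE_MAX_CFG_TIMEOUT, for the reg-update interrupts to signal that
+ * the new stream configuration has been latched by the hardware.
+ */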
+static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state camif_update,
+ uint32_t src_mask, int regUpdateCnt)
+{
+ int rc;
+ unsigned long flags;
+ enum msm_vfe_input_src i = 0;
+
+ spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ if (src_mask & (1 << i)) {
+ if (vfe_dev->axi_data.stream_update[i] > 0) {
+ pr_err("%s:Stream Update in progress. cnt %d\n",
+ __func__,
+ vfe_dev->axi_data.stream_update[i]);
+ spin_unlock_irqrestore(
+ &vfe_dev->shared_data_lock, flags);
+ return -EINVAL;
+ }
+ vfe_dev->axi_data.stream_update[i] = regUpdateCnt;
+ }
+ }
+ if (src_mask) {
+ init_completion(&vfe_dev->stream_config_complete);
+ vfe_dev->axi_data.pipeline_update = camif_update;
+ }
+ spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
+ rc = wait_for_completion_timeout(
+ &vfe_dev->stream_config_complete,
+ msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+ if (rc == 0) {
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ if (src_mask & (1 << i)) {
+ spin_lock_irqsave(&vfe_dev->shared_data_lock,
+ flags);
+ vfe_dev->axi_data.stream_update[i] = 0;
+ spin_unlock_irqrestore(&vfe_dev->
+ shared_data_lock, flags);
+ }
+ }
+ pr_err("%s: wait timeout\n", __func__);
+ rc = -EBUSY;
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
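+/*
+ * Prime the ping and pong address registers before a stream starts.
+ * Burst streams program pong only when more than one capture is
+ * requested; continuous streams program both halves. For split PIX
+ * streams only VFE1 runs this, which covers both VFEs.
+ */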
+static int msm_isp_init_stream_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int rc = 0;
+
+ if ((vfe_dev->is_split && vfe_dev->pdev->id == 1 &&
+ stream_info->stream_src < RDI_INTF_0) ||
+ !vfe_dev->is_split || stream_info->stream_src >= RDI_INTF_0) {
+ if (stream_info->stream_type == BURST_STREAM) {
+ /* Set address for both PING & PONG register */
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PING_FLAG, 0);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n",
+ __func__);
+ return rc;
+ }
+
+ if (stream_info->runtime_num_burst_capture > 1)
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PONG_FLAG, 0);
+
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n",
+ __func__);
+ return rc;
+ }
+ } else if (stream_info->stream_type == CONTINUOUS_STREAM) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PING_FLAG, 0);
+			if (rc < 0 && rc != -ENOMEM) {
+ pr_err("%s: config error for ping\n",
+ __func__);
+ return rc;
+ }
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PONG_FLAG, 0);
+			if (rc < 0 && rc != -ENOMEM) {
+ pr_err("%s: config error for pong\n",
+ __func__);
+ return rc;
+ }
+ } else {
+ rc = -1;
+			pr_err("%s:%d failed invalid stream type %d\n", __func__,
+ __LINE__, stream_info->stream_type);
+ }
+ }
+
+ return rc;
+}
+
+static void msm_isp_get_stream_wm_mask(
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t *wm_reload_mask)
+{
+ int i;
+
+ for (i = 0; i < stream_info->num_planes; i++)
+ *wm_reload_mask |= (1 << stream_info->wm[i]);
+}
+
+int msm_isp_axi_halt(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_halt_cmd *halt_cmd)
+{
+ int rc = 0;
+
+ if (halt_cmd->stop_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+ }
+
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev,
+ halt_cmd->blocking_halt);
+
+ return rc;
+}
+
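+/*
+ * Recover the AXI after an error: flush the tasklet queue, reset the
+ * hardware, point every active stream at its scratch buffers, flush
+ * all queued buffers back to the buffer manager, restore the frame id
+ * from the reset command and reset the burst/framedrop state.
+ */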
+int msm_isp_axi_reset(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_reset_cmd *reset_cmd)
+{
+ int rc = 0, i, j;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t bufq_handle = 0, bufq_id = 0;
+ struct msm_isp_timestamp timestamp;
+ unsigned long flags;
+
+ if (!reset_cmd) {
+ pr_err("%s: NULL pointer reset cmd %pK\n", __func__, reset_cmd);
+ rc = -1;
+ return rc;
+ }
+
+ /* flush the tasklet queue */
+ msm_isp_flush_tasklet(vfe_dev);
+
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+ 0, reset_cmd->blocking);
+
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
+
+ for (i = 0, j = 0; j < axi_data->num_active_stream &&
+ i < VFE_AXI_SRC_MAX; i++, j++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
+ rc = -1;
+ pr_err("%s invalid stream src = %d\n", __func__,
+ stream_info->stream_src);
+ break;
+ }
+ if (stream_info->state != ACTIVE) {
+ j--;
+ continue;
+ }
+
+ for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
+ bufq_handle = stream_info->bufq_handle[bufq_id];
+ if (!bufq_handle)
+ continue;
+
+ /* set ping pong address to scratch before flush */
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
+ VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
+ VFE_PONG_FLAG);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ rc = vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr, vfe_dev->pdev->id,
+ bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
+ &timestamp.buf_time, reset_cmd->frame_id);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+
+ axi_data->src_info[SRC_TO_INTF(stream_info->
+ stream_src)].frame_id = reset_cmd->frame_id;
+ msm_isp_reset_burst_count_and_frame_drop(vfe_dev,
+ stream_info);
+ }
+ }
+
+ if (rc < 0)
+ pr_err("%s Error! reset hw Timed out\n", __func__);
+
+ return rc;
+}
+
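+/*
+ * Restart AXI output after a halt/reset: clear the dual-VFE epoch sync
+ * mask and frame id mismatch flag, re-prime the ping/pong registers of
+ * every active stream, reload the write masters and kick the AXI,
+ * optionally re-enabling CAMIF.
+ */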
+int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_restart_cmd *restart_cmd)
+{
+ int rc = 0, i, j;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t wm_reload_mask = 0x0;
+ unsigned long flags;
+
+ /* reset sync mask */
+ spin_lock_irqsave(
+ &vfe_dev->common_data->common_dev_data_lock, flags);
+ vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;
+ spin_unlock_irqrestore(
+ &vfe_dev->common_data->common_dev_data_lock, flags);
+
+ vfe_dev->buf_mgr->frameId_mismatch_recovery = 0;
+
+ for (i = 0, j = 0; j < axi_data->num_active_stream &&
+ i < VFE_AXI_SRC_MAX; i++, j++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state != ACTIVE) {
+ j--;
+ continue;
+ }
+ msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base, wm_reload_mask);
+
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.restart(vfe_dev, 0,
+ restart_cmd->enable_camif);
+ if (rc < 0)
+ pr_err("%s Error restarting vfe %d HW\n",
+ __func__, vfe_dev->pdev->id);
+
+ return rc;
+}
+
+static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ uint8_t cgc_override)
+{
+ int i = 0, j = 0;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ for (j = 0; j < stream_info->num_planes; j++) {
+ if (vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_cgc_override)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_cgc_override(vfe_dev,
+ stream_info->wm[j], cgc_override);
+ }
+ }
+ return 0;
+}
+
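+/*
+ * Update the Master/Slave bookkeeping when a dual-HW source starts.
+ * A starting master inherits the highest frame id seen by the active
+ * slaves (so slaves can later copy the master frame id without
+ * repeating); a starting slave is simply marked active.
+ */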
+static int msm_isp_update_dual_HW_ms_info_at_start(
+ struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src stream_src,
+ struct msm_isp_timestamp *ts)
+{
+ int rc = 0;
+ uint32_t j, k, max_sof = 0, timestamp_ms = 0, cur_timestamp_ms = 0;
+ uint32_t delta;
+ uint8_t slave_id;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_src_info *src_info = NULL;
+ uint32_t vfe_id = 0;
+ unsigned long flags;
+
+ if (stream_src >= VFE_SRC_MAX) {
+ pr_err("%s: Error! Invalid src %u\n", __func__, stream_src);
+ return -EINVAL;
+ }
+
+ src_info = &axi_data->src_info[stream_src];
+ if (src_info->dual_hw_type != DUAL_HW_MASTER_SLAVE)
+ return rc;
+
+ spin_lock_irqsave(&vfe_dev->common_data->common_dev_data_lock, flags);
+ if (src_info->dual_hw_ms_info.dual_hw_ms_type ==
+ MS_TYPE_MASTER) {
+ if (vfe_dev->common_data->ms_resource.master_active == 1) {
+ spin_unlock_irqrestore(&vfe_dev->common_data->
+ common_dev_data_lock, flags);
+ return rc;
+ }
+
+ vfe_dev->common_data->ms_resource.master_active = 1;
+
+ /*
+ * If any slaves are active, then find the max slave
+ * frame_id and set it to Master, so master will start
+ * higher and then the slave can copy master frame_id
+ * without repeating.
+ */
+ if (!vfe_dev->common_data->ms_resource.slave_active_mask) {
+ spin_unlock_irqrestore(&vfe_dev->common_data->
+ common_dev_data_lock, flags);
+ return rc;
+ }
+ cur_timestamp_ms = ts->buf_time.tv_sec * 1000 +
+ ts->buf_time.tv_usec / 1000;
+ for (j = 0, k = 0; k < MS_NUM_SLAVE_MAX; k++) {
+ if (!(vfe_dev->common_data->ms_resource.
+ reserved_slave_mask & (1 << k)))
+ continue;
+
+ if (vfe_dev->common_data->ms_resource.slave_active_mask
+ & (1 << k) &&
+ (vfe_dev->common_data->ms_resource.
+ slave_sof_info[k].frame_id > max_sof)) {
+ max_sof = vfe_dev->common_data->ms_resource.
+ slave_sof_info[k].frame_id;
+ timestamp_ms = vfe_dev->common_data->ms_resource
+ .slave_sof_info[k].mono_timestamp_ms;
+ }
+ j++;
+ if (j == vfe_dev->common_data->ms_resource.num_slave)
+ break;
+ }
+ if (cur_timestamp_ms > timestamp_ms)
+ delta = cur_timestamp_ms - timestamp_ms;
+ else
+ delta = timestamp_ms - cur_timestamp_ms;
+ if (delta > vfe_dev->common_data->ms_resource.
+ sof_delta_threshold) {
+ vfe_dev->axi_data.src_info[stream_src].frame_id =
+ max_sof;
+ } else {
+ vfe_dev->axi_data.src_info[stream_src].frame_id =
+ max_sof - vfe_dev->axi_data.src_info[
+ stream_src].sof_counter_step;
+ }
+ if (vfe_dev->is_split) {
+ vfe_id = vfe_dev->pdev->id;
+ vfe_id = (vfe_id == 0) ? 1 : 0;
+ vfe_dev->common_data->dual_vfe_res->axi_data[vfe_id]->
+ src_info[stream_src].frame_id = max_sof + 1;
+ }
+
+ ISP_DBG("%s: Setting Master frame_id to %u\n", __func__,
+ max_sof + 1);
+ } else {
+ if (src_info->dual_hw_ms_info.sof_info != NULL) {
+ slave_id = src_info->dual_hw_ms_info.slave_id;
+ vfe_dev->common_data->ms_resource.slave_active_mask |=
+ (1 << slave_id);
+ }
+ }
+ spin_unlock_irqrestore(&vfe_dev->common_data->common_dev_data_lock,
+ flags);
+
+ return rc;
+}
+
+static int msm_isp_update_dual_HW_ms_info_at_stop(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int i, rc = 0;
+ uint8_t slave_id;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ enum msm_vfe_input_src stream_src = VFE_SRC_MAX;
+ struct msm_vfe_src_info *src_info = NULL;
+ unsigned long flags;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM ||
+ stream_cfg_cmd->num_streams == 0)
+ return -EINVAL;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_src = SRC_TO_INTF(stream_info->stream_src);
+
+ /* Remove PIX if DISABLE CAMIF */
+ if (stream_src == VFE_PIX_0 && !((camif_update == DISABLE_CAMIF)
+ || (camif_update == DISABLE_CAMIF_IMMEDIATELY)))
+ continue;
+
+ src_info = &axi_data->src_info[stream_src];
+ if (src_info->dual_hw_type != DUAL_HW_MASTER_SLAVE)
+ continue;
+
+ spin_lock_irqsave(
+ &vfe_dev->common_data->common_dev_data_lock,
+ flags);
+ if (src_info->dual_hw_ms_info.dual_hw_ms_type ==
+ MS_TYPE_MASTER) {
+ /*
+ * Once Master is inactive, slave will increment
+ * its own frame_id
+ */
+ vfe_dev->common_data->ms_resource.master_active = 0;
+ } else {
+ slave_id = src_info->dual_hw_ms_info.slave_id;
+ vfe_dev->common_data->ms_resource.reserved_slave_mask &=
+ ~(1 << slave_id);
+ vfe_dev->common_data->ms_resource.slave_active_mask &=
+ ~(1 << slave_id);
+ vfe_dev->common_data->ms_resource.num_slave--;
+ }
+ src_info->dual_hw_ms_info.sof_info = NULL;
+ spin_unlock_irqrestore(
+ &vfe_dev->common_data->common_dev_data_lock,
+ flags);
+ vfe_dev->vfe_ub_policy = 0;
+ }
+
+ return rc;
+}
+
+static int msm_isp_update_dual_HW_axi(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int rc = 0;
+ int vfe_id;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ struct dual_vfe_resource *dual_vfe_res = NULL;
+
+ if (stream_idx >= VFE_AXI_SRC_MAX) {
+ pr_err("%s: Invalid stream idx %d\n", __func__, stream_idx);
+ return -EINVAL;
+ }
+
+ dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
+ if (vfe_dev->is_split) {
+ if (!dual_vfe_res->vfe_dev[ISP_VFE0] ||
+ !dual_vfe_res->vfe_dev[ISP_VFE1] ||
+ !dual_vfe_res->axi_data[ISP_VFE0] ||
+ !dual_vfe_res->axi_data[ISP_VFE1]) {
+ pr_err("%s: Error in dual vfe resource\n", __func__);
+ rc = -EINVAL;
+ } else {
+ if (stream_info->state == RESUME_PENDING &&
+ (dual_vfe_res->axi_data[!vfe_dev->pdev->id]->
+ stream_info[stream_idx].state ==
+ RESUME_PENDING)) {
+ /* Update the AXI only after both ISPs receiving
+ * the Reg update interrupt
+ */
+ for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
+ rc = msm_isp_axi_stream_enable_cfg(
+ dual_vfe_res->vfe_dev[vfe_id],
+ &dual_vfe_res->
+ axi_data[vfe_id]->
+ stream_info[stream_idx], 1);
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].state =
+ RESUMING;
+ }
+ } else if (stream_info->state == RESUMING &&
+ (dual_vfe_res->axi_data[!vfe_dev->pdev->id]->
+ stream_info[stream_idx].state == RESUMING)) {
+ for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].
+ runtime_output_format =
+ stream_info->output_format;
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].state =
+ ACTIVE;
+ }
+ }
+
+ }
+ } else {
+ if (stream_info->state == RESUME_PENDING) {
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, stream_info, 0);
+ stream_info->state = RESUMING;
+ } else if (stream_info->state == RESUMING) {
+ stream_info->runtime_output_format =
+ stream_info->output_format;
+ stream_info->state = ACTIVE;
+ }
+ }
+ return rc;
+}
+
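+/*
+ * Start the streams named in the config command: compute bandwidth,
+ * prime the ping/pong buffers, program the composite/WM IRQ masks and
+ * either activate the stream right away (when its interface is idle)
+ * or mark it START_PENDING and wait for the reg-update handshake.
+ * CAMIF is enabled at the end when requested.
+ */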
+static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int i, rc = 0;
+ uint8_t src_state, wait_for_complete = 0;
+ uint32_t wm_reload_mask = 0x0;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t src_mask = 0;
+ unsigned long flags;
+
+ pr_debug("%s: entry\n", __func__);
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ if (camif_update == ENABLE_CAMIF) {
+ ISP_DBG("%s: vfe %d camif enable\n", __func__,
+ vfe_dev->pdev->id);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id = 0;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].eof_id = 0;
+ }
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+ src_state = axi_data->src_info[
+ SRC_TO_INTF(stream_info->stream_src)].active;
+ else {
+ ISP_DBG("%s: invalid src info index\n", __func__);
+ return -EINVAL;
+ }
+
+ msm_isp_calculate_bandwidth(axi_data, stream_info);
+
+ msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_reset_framedrop(vfe_dev, stream_info);
+ rc = msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+ if (rc < 0) {
+			pr_err("%s: No buffer for stream %d\n", __func__,
+ HANDLE_TO_IDX(
+ stream_cfg_cmd->stream_handle[i]));
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ return rc;
+ }
+
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (stream_info->num_planes > 1) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_comp_mask(vfe_dev, stream_info);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+ }
+
+ stream_info->state = START_PENDING;
+
+ ISP_DBG("%s, Stream 0x%x src %d src_state %d on vfe %d\n",
+ __func__, stream_info->stream_id,
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]),
+ src_state, vfe_dev->pdev->id);
+
+ if (src_state) {
+ src_mask |= (1 << SRC_TO_INTF(stream_info->stream_src));
+ wait_for_complete = 1;
+ } else {
+ if (vfe_dev->dump_reg)
+ msm_camera_io_dump(vfe_dev->vfe_base,
+ 0x1000, 1);
+
+ /* Configure AXI start bits to start immediately */
+ msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info, 0);
+ stream_info->state = ACTIVE;
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
+ SRC_TO_INTF(stream_info->stream_src));
+
+ /*
+ * Active bit is set in enable_camif for PIX.
+ * For RDI, set it here
+ */
+ if (SRC_TO_INTF(stream_info->stream_src) >= VFE_RAW_0 &&
+ SRC_TO_INTF(stream_info->stream_src) <
+ VFE_SRC_MAX) {
+				/* In case PIX and RDI streams are part of the
+				 * same session, this ensures the RDI stream
+				 * has the same frame id as the PIX stream
+				 */
+ if (stream_cfg_cmd->sync_frame_id_src)
+ vfe_dev->axi_data.src_info[SRC_TO_INTF(
+ stream_info->stream_src)].frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0]
+ .frame_id;
+ else
+ vfe_dev->axi_data.src_info[SRC_TO_INTF(
+ stream_info->stream_src)].frame_id = 0;
+ vfe_dev->axi_data.src_info[SRC_TO_INTF(
+ stream_info->stream_src)].active = 1;
+ }
+ }
+ }
+
+ msm_isp_update_stream_bandwidth(vfe_dev, stream_cfg_cmd->hw_state);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base, wm_reload_mask);
+ msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ if ((vfe_dev->axi_data.src_info[i].pix_stream_count ||
+ vfe_dev->axi_data.src_info[i].raw_stream_count) &&
+ !vfe_dev->axi_data.src_info[i].flag) {
+ /* Configure UB */
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev, i);
+ /* when start reset overflow state */
+ atomic_set(&vfe_dev->error_info.overflow_state,
+ NO_OVERFLOW);
+ vfe_dev->axi_data.src_info[i].flag = 1;
+ }
+ }
+
+ if (camif_update == ENABLE_CAMIF) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, camif_update);
+ vfe_dev->axi_data.camif_state = CAMIF_ENABLE;
+ vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;
+ }
+
+ if (wait_for_complete) {
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update,
+ src_mask, 2);
+ if (rc < 0) {
+ pr_err("%s: wait for config done failed\n", __func__);
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(
+ stream_cfg_cmd->stream_handle[i])];
+ stream_info->state = STOPPING;
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, stream_info, 0);
+ stream_cfg_cmd->cmd = STOP_IMMEDIATELY;
+ msm_isp_update_camif_output_count(vfe_dev,
+ stream_cfg_cmd);
+ }
+ }
+ }
+
+ pr_debug("%s: exit\n", __func__);
+
+ return rc;
+}
+
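+/*
+ * Stop the streams named in the config command. In this (ais) variant
+ * every stream is stopped immediately, no AXI halt is issued and any
+ * CAMIF disable is applied immediately; buffers of the stopped streams
+ * are then flushed back to the buffer manager.
+ */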
+static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state camif_update,
+ int halt)
+{
+ int i, rc = 0;
+ uint8_t wait_for_complete_for_this_stream = 0;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int ext_read =
+ (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
+ uint32_t src_mask = 0, intf, bufq_id = 0, bufq_handle = 0;
+ unsigned long flags;
+ struct msm_isp_timestamp timestamp;
+
+ pr_debug("%s: entry\n", __func__);
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM ||
+ stream_cfg_cmd->num_streams == 0)
+ return -EINVAL;
+
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (stream_info->state == AVAILABLE)
+ continue;
+ /* set ping pong address to scratch before stream stop */
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PONG_FLAG);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ wait_for_complete_for_this_stream = 0;
+
+ if (stream_info->num_planes > 1)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_comp_mask(vfe_dev, stream_info);
+ else
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+
+ stream_info->state = STOP_PENDING;
+ if (!halt && !ext_read &&
+ !(stream_info->stream_type == BURST_STREAM &&
+ stream_info->runtime_num_burst_capture == 0))
+ wait_for_complete_for_this_stream = 1;
+
+ /* always stop immediate with ais */
+ wait_for_complete_for_this_stream = 0;
+
+ ISP_DBG("%s: stream 0x%x, vfe %d camif %d halt %d wait %d\n",
+ __func__,
+ stream_info->stream_id,
+ vfe_dev->pdev->id,
+ camif_update,
+ halt,
+ wait_for_complete_for_this_stream);
+
+ intf = SRC_TO_INTF(stream_info->stream_src);
+ if (!wait_for_complete_for_this_stream ||
+ stream_info->state == INACTIVE ||
+ !vfe_dev->axi_data.src_info[intf].active) {
+ msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info, 0);
+ stream_info->state = INACTIVE;
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
+ SRC_TO_INTF(stream_info->stream_src));
+
+			/*
+			 * Active bit is reset in disable_camif for PIX.
+			 * For RDI, reset it here when not waiting for the
+			 * reg update to complete. This assumes there is
+			 * only 1 stream mapped to each RDI.
+			 */
+ if (intf >= VFE_RAW_0 &&
+ intf < VFE_SRC_MAX) {
+ vfe_dev->axi_data.src_info[intf].active = 0;
+ }
+ } else
+ src_mask |= (1 << intf);
+
+ }
+
+ if (src_mask) {
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update,
+ src_mask, 2);
+ if (rc < 0) {
+ pr_err("%s: wait for config done failed, retry...\n",
+ __func__);
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(
+ stream_cfg_cmd->stream_handle[i])];
+ stream_info->state = STOPPING;
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, stream_info, 0);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev,
+ SRC_TO_INTF(stream_info->stream_src));
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev,
+ camif_update, src_mask, 1);
+ if (rc < 0) {
+ pr_err("%s: vfe%d cfg done failed\n",
+ __func__, vfe_dev->pdev->id);
+ stream_info->state = INACTIVE;
+ } else
+ pr_err("%s: vfe%d retry success! report err!\n",
+ __func__, vfe_dev->pdev->id);
+
+ rc = -EBUSY;
+ }
+ }
+
+		/*
+		 * Active bit is reset in disable_camif for PIX.
+		 * For RDI, reset it here after wait_for_complete.
+		 * This assumes there is only 1 stream mapped to each RDI.
+		 */
+ for (i = VFE_RAW_0; i < VFE_SRC_MAX; i++) {
+ if (src_mask & (1 << i))
+ vfe_dev->axi_data.src_info[i].active = 0;
+ }
+ }
+
+ /* never halt and always disable camif immediately with ais */
+ halt = 0;
+ if (camif_update == DISABLE_CAMIF)
+ camif_update = DISABLE_CAMIF_IMMEDIATELY;
+
+ if (camif_update == DISABLE_CAMIF) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF);
+ vfe_dev->axi_data.camif_state = CAMIF_DISABLE;
+ } else if ((camif_update == DISABLE_CAMIF_IMMEDIATELY) ||
+ (ext_read)) {
+ if (!ext_read)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev,
+ DISABLE_CAMIF_IMMEDIATELY);
+ vfe_dev->axi_data.camif_state = CAMIF_STOPPED;
+ }
+ if (halt) {
+ /*during stop immediately, stop output then stop input*/
+ vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+ }
+
+ msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
+ msm_isp_update_stream_bandwidth(vfe_dev, stream_cfg_cmd->hw_state);
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
+ bufq_handle = stream_info->bufq_handle[bufq_id];
+ if (!bufq_handle)
+ continue;
+
+ rc = vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr, vfe_dev->pdev->id,
+ bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
+ &timestamp.buf_time, 0);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+
+ rc = vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr, vfe_dev->pdev->id,
+ bufq_handle, MSM_ISP_BUFFER_FLUSH_DIVERTED,
+ &timestamp.buf_time, 0);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+
+ rc = msm_isp_flush_queue(vfe_dev->buf_mgr, bufq_handle);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+
+ }
+ vfe_dev->reg_update_requested &=
+ ~(BIT(SRC_TO_INTF(stream_info->stream_src)));
+ }
+
+ pr_debug("%s: exit\n", __func__);
+
+ return rc;
+}
+
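+/*
+ * IOCTL entry point for starting/stopping AXI streams: validate the
+ * stream states, derive the CAMIF update and halt requirements, then
+ * dispatch to msm_isp_start_axi_stream() or msm_isp_stop_axi_stream()
+ * and update the CGC override and dual-HW bookkeeping accordingly.
+ */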
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, ret;
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ enum msm_isp_camif_update_state camif_update;
+ int halt = 0;
+
+ pr_debug("%s: entry\n", __func__);
+
+ rc = msm_isp_axi_check_stream_state(vfe_dev, stream_cfg_cmd);
+ if (rc < 0) {
+ pr_err("%s: Invalid stream state\n", __func__);
+ return rc;
+ }
+ msm_isp_get_camif_update_state_and_halt(vfe_dev, stream_cfg_cmd,
+ &camif_update, &halt);
+ if (camif_update == DISABLE_CAMIF)
+ vfe_dev->axi_data.camif_state = CAMIF_STOPPING;
+ if (stream_cfg_cmd->cmd == START_STREAM) {
+ msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 1);
+
+ rc = msm_isp_start_axi_stream(
+ vfe_dev, stream_cfg_cmd, camif_update);
+ } else {
+ rc = msm_isp_stop_axi_stream(
+ vfe_dev, stream_cfg_cmd, camif_update, halt);
+
+ msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 0);
+ if (axi_data->num_active_stream == 0) {
+ /* Reset hvx state */
+ vfe_dev->hvx_cmd = HVX_DISABLE;
+ }
+
+ /*
+ * Use different ret value to not overwrite the error from
+ * msm_isp_stop_axi_stream
+ */
+ ret = msm_isp_update_dual_HW_ms_info_at_stop(
+ vfe_dev, stream_cfg_cmd, camif_update);
+ if (ret < 0)
+ pr_warn("%s: Warning! Update dual_cam failed\n",
+ __func__);
+ }
+
+ if (rc < 0)
+ pr_err("%s: start/stop %d stream failed\n", __func__,
+ stream_cfg_cmd->cmd);
+
+ pr_debug("%s: exit\n", __func__);
+
+ return rc;
+}
+
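+/*
+ * For a controllable-output stream whose frame request cannot be
+ * serviced, return a buffer from the requested queue as dropped right
+ * away and post an ISP_ERROR_RETURN_EMPTY_BUFFER event so userspace
+ * learns about the skipped frame.
+ */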
+static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t user_stream_id,
+ uint32_t frame_id, uint32_t buf_index,
+ enum msm_vfe_input_src frame_src)
+{
+ int rc = -1;
+ struct msm_isp_buffer *buf = NULL;
+ uint32_t bufq_handle = 0;
+ uint32_t stream_idx;
+ struct msm_isp_event_data error_event;
+ struct msm_isp_timestamp timestamp;
+
+ if (!vfe_dev || !stream_info) {
+ pr_err("%s %d failed: vfe_dev %pK stream_info %pK\n", __func__,
+ __LINE__, vfe_dev, stream_info);
+ return -EINVAL;
+ }
+
+ stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ if (!stream_info->controllable_output)
+ return -EINVAL;
+
+ if (frame_src >= VFE_SRC_MAX) {
+		pr_err("%s: Invalid frame_src %d\n", __func__, frame_src);
+ return -EINVAL;
+ }
+
+ if (stream_idx >= VFE_AXI_SRC_MAX) {
+		pr_err("%s: Invalid stream_idx\n", __func__);
+ return rc;
+ }
+
+ if (user_stream_id == stream_info->stream_id)
+ bufq_handle = stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT];
+ else
+ bufq_handle = stream_info->bufq_handle[VFE_BUF_QUEUE_SHARED];
+
+ rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+ bufq_handle, buf_index, &buf);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+
+ if (rc < 0 || buf == NULL) {
+ pr_err("Skip framedrop report due to no buffer\n");
+ return rc;
+ }
+
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
+ buf->buf_debug.put_state[buf->buf_debug.put_state_last] =
+ MSM_ISP_BUFFER_STATE_DROP_REG;
+ buf->buf_debug.put_state_last ^= 1;
+ rc = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx,
+ &timestamp.buf_time, frame_id,
+ stream_info->runtime_output_format);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+
+ memset(&error_event, 0, sizeof(error_event));
+ error_event.frame_id = frame_id;
+ error_event.u.error_info.err_type = ISP_ERROR_RETURN_EMPTY_BUFFER;
+ error_event.u.error_info.session_id = stream_info->session_id;
+ error_event.u.error_info.stream_id_mask =
+ 1 << (bufq_handle & 0xFF);
+ msm_isp_send_event(vfe_dev, ISP_EVENT_ERROR, &error_event);
+
+ return 0;
+}
+
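+/*
+ * Queue a user frame request on a controllable-output stream. The
+ * requested frame id must be exactly one SOF step ahead of the current
+ * frame id (and past the last EOF when PIX is active), otherwise an
+ * empty buffer is returned. The first undelivered request programs the
+ * ping address and reloads the write masters; the second programs the
+ * other half of the ping/pong pair.
+ */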
+static int msm_isp_request_frame(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t user_stream_id,
+ uint32_t frame_id, uint32_t buf_index)
+{
+ struct msm_vfe_axi_stream_request_cmd stream_cfg_cmd;
+ struct msm_vfe_frame_request_queue *queue_req;
+ uint32_t pingpong_status;
+ unsigned long flags;
+ int rc = 0;
+ enum msm_vfe_input_src frame_src = 0;
+ struct dual_vfe_resource *dual_vfe_res =
+ vfe_dev->common_data->dual_vfe_res;
+ uint32_t vfe_id = 0;
+ bool dual_vfe = false;
+
+ if (!vfe_dev || !stream_info) {
+ pr_err("%s %d failed: vfe_dev %pK stream_info %pK\n", __func__,
+ __LINE__, vfe_dev, stream_info);
+ return -EINVAL;
+ }
+
+ if (vfe_dev->is_split) {
+ if (stream_info->stream_src < RDI_INTF_0) {
+ if (vfe_dev->pdev->id == ISP_VFE1) {
+ dual_vfe = true;
+ } else {
+ /* return early for dual vfe0 */
+ return 0;
+ }
+ }
+ }
+
+ if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
+ pr_err("%s:%d invalid stream src %d\n", __func__, __LINE__,
+ stream_info->stream_src);
+ return -EINVAL;
+ }
+
+ frame_src = SRC_TO_INTF(stream_info->stream_src);
+
+ pingpong_status =
+ vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(
+ vfe_dev);
+
+ /*
+ * If PIX stream is active then RDI path uses SOF frame ID of PIX
+ * In case of standalone RDI streaming, SOF are used from
+ * individual intf.
+ */
+ /*
+ * If frame_id = 1 then no eof check is needed
+ */
+ if (((vfe_dev->axi_data.src_info[VFE_PIX_0].active) && ((frame_id !=
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id + vfe_dev->
+ axi_data.src_info[VFE_PIX_0].sof_counter_step) ||
+ (frame_id <= vfe_dev->
+ axi_data.src_info[VFE_PIX_0].eof_id + 1))) ||
+ ((!vfe_dev->axi_data.src_info[VFE_PIX_0].active) && (frame_id !=
+ vfe_dev->axi_data.src_info[frame_src].frame_id + vfe_dev->
+ axi_data.src_info[frame_src].sof_counter_step)) ||
+ stream_info->undelivered_request_cnt >= MAX_BUFFERS_IN_HW) {
+ pr_debug("%s:%d invalid request_frame %d cur frame id %d pix %d\n",
+ __func__, __LINE__, frame_id,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active);
+
+ rc = msm_isp_return_empty_buffer(vfe_dev, stream_info,
+ user_stream_id, frame_id, buf_index, frame_src);
+ if (rc < 0)
+ pr_err("%s:%d failed: return_empty_buffer src %d\n",
+ __func__, __LINE__, frame_src);
+ return 0;
+ }
+ if ((frame_src == VFE_PIX_0) && !stream_info->undelivered_request_cnt &&
+ MSM_VFE_STREAM_STOP_PERIOD !=
+ stream_info->activated_framedrop_period) {
+ pr_debug("%s:%d vfe %d frame_id %d prev_pattern %x stream_id %x\n",
+ __func__, __LINE__, vfe_dev->pdev->id, frame_id,
+ stream_info->activated_framedrop_period,
+ stream_info->stream_id);
+
+ rc = msm_isp_return_empty_buffer(vfe_dev, stream_info,
+ user_stream_id, frame_id, buf_index, frame_src);
+ if (rc < 0)
+ pr_err("%s:%d failed: return_empty_buffer src %d\n",
+ __func__, __LINE__, frame_src);
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ return 0;
+ }
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+ queue_req = &stream_info->request_queue_cmd[stream_info->request_q_idx];
+ if (queue_req->cmd_used) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err_ratelimited("%s: Request queue overflow.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (user_stream_id == stream_info->stream_id)
+ queue_req->buff_queue_id = VFE_BUF_QUEUE_DEFAULT;
+ else
+ queue_req->buff_queue_id = VFE_BUF_QUEUE_SHARED;
+
+ if (!stream_info->bufq_handle[queue_req->buff_queue_id]) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s:%d request frame failed on hw stream 0x%x, request stream %d due to no bufq idx: %d\n",
+ __func__, __LINE__, stream_info->stream_handle,
+ user_stream_id, queue_req->buff_queue_id);
+ return 0;
+ }
+ queue_req->buf_index = buf_index;
+ queue_req->cmd_used = 1;
+
+ stream_info->request_q_idx =
+ (stream_info->request_q_idx + 1) % MSM_VFE_REQUESTQ_SIZE;
+ list_add_tail(&queue_req->list, &stream_info->request_q);
+ stream_info->request_q_cnt++;
+
+ stream_info->undelivered_request_cnt++;
+ stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle;
+ stream_cfg_cmd.frame_skip_pattern = NO_SKIP;
+ stream_cfg_cmd.init_frame_drop = 0;
+ stream_cfg_cmd.burst_count = stream_info->request_q_cnt;
+ if (stream_info->undelivered_request_cnt == 1) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+ VFE_PING_FLAG, 0);
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ stream_info->undelivered_request_cnt--;
+ pr_err_ratelimited("%s:%d fail to cfg HAL buffer\n",
+ __func__, __LINE__);
+ return rc;
+ }
+
+ vfe_id = vfe_dev->pdev->id;
+ if (dual_vfe) {
+ struct msm_vfe_axi_stream *temp_stream_info;
+
+ temp_stream_info = msm_isp_vfe_get_stream(dual_vfe_res,
+ ISP_VFE0,
+ HANDLE_TO_IDX(
+ stream_info->stream_handle));
+ msm_isp_get_stream_wm_mask(temp_stream_info,
+ &dual_vfe_res->wm_reload_mask[ISP_VFE0]);
+ msm_isp_get_stream_wm_mask(stream_info,
+ &dual_vfe_res->wm_reload_mask[ISP_VFE1]);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ dual_vfe_res->vfe_base[ISP_VFE0],
+ dual_vfe_res->wm_reload_mask[ISP_VFE0]);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ dual_vfe_res->vfe_base[ISP_VFE1],
+ dual_vfe_res->wm_reload_mask[ISP_VFE1]);
+ dual_vfe_res->wm_reload_mask[ISP_VFE0] = 0;
+ dual_vfe_res->wm_reload_mask[ISP_VFE1] = 0;
+ } else {
+ msm_isp_get_stream_wm_mask(stream_info,
+ &dual_vfe_res->wm_reload_mask[vfe_id]);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base,
+ dual_vfe_res->wm_reload_mask[vfe_id]);
+ dual_vfe_res->wm_reload_mask[vfe_id] = 0;
+ }
+ stream_info->sw_ping_pong_bit = 0;
+ } else if (stream_info->undelivered_request_cnt == 2) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, pingpong_status, 0);
+ if (rc) {
+ stream_info->undelivered_request_cnt--;
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ pr_err_ratelimited("%s:%d fail to cfg HAL buffer\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ } else {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ stream_info->undelivered_request_cnt--;
+ pr_err_ratelimited("%s: Invalid undeliver frame count %d\n",
+ __func__, stream_info->undelivered_request_cnt);
+ return -EINVAL;
+ }
+
+ rc = msm_isp_calculate_framedrop(&vfe_dev->axi_data, &stream_cfg_cmd);
+ if (rc == 0)
+ msm_isp_reset_framedrop(vfe_dev, stream_info);
+
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+ return rc;
+}
+
+static int msm_isp_add_buf_queue(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t stream_id)
+{
+ int rc = 0;
+ uint32_t bufq_id = 0;
+
+ if (stream_id == stream_info->stream_id)
+ bufq_id = VFE_BUF_QUEUE_DEFAULT;
+ else
+ bufq_id = VFE_BUF_QUEUE_SHARED;
+
+ stream_info->bufq_handle[bufq_id] =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(vfe_dev->buf_mgr,
+ stream_info->session_id, stream_id);
+ if (stream_info->bufq_handle[bufq_id] == 0) {
+ pr_err("%s: failed: No valid buffer queue for stream: 0x%x\n",
+ __func__, stream_id);
+ rc = -EINVAL;
+ }
+
+ ISP_DBG("%d: Add bufq handle:0x%x, idx:%d, for stream %d on VFE %d\n",
+ __LINE__, stream_info->bufq_handle[bufq_id],
+ bufq_id, stream_info->stream_handle, vfe_dev->pdev->id);
+
+ return rc;
+}
+
+static void msm_isp_remove_buf_queue(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t stream_id)
+{
+ uint32_t bufq_id = 0;
+ unsigned long flags;
+
+ if (stream_id == stream_info->stream_id)
+ bufq_id = VFE_BUF_QUEUE_DEFAULT;
+ else
+ bufq_id = VFE_BUF_QUEUE_SHARED;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+ stream_info->bufq_handle[bufq_id] = 0;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+}
+
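+/*
+ * Configure every enabled AXI output path from a single command:
+ * initialise the stream_info entry, bind its buffer queue, program the
+ * write master and xbar registers for each plane, account the PIX/RDI
+ * stream counts and, for RDI paths, program the CID and pixel clock.
+ */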
+int msm_isp_axi_output_cfg(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ int axi_src_idx, plane_idx;
+ struct msm_vfe_axi_output_cfg *pCmd = arg;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *pstream_info;
+
+ pr_debug("%s: entry\n", __func__);
+
+ for (axi_src_idx = 0; axi_src_idx < VFE_AXI_SRC_MAX; axi_src_idx++) {
+ if (pCmd->output_path_cfg[axi_src_idx].enable == 0)
+ continue;
+
+ pstream_info = &axi_data->stream_info[axi_src_idx];
+
+ memset(pstream_info, 0,
+ sizeof(struct msm_vfe_axi_stream));
+
+ spin_lock_init(&pstream_info->lock);
+ pstream_info->session_id = vfe_dev->pdev->id;
+ pstream_info->stream_id = axi_src_idx;
+ pstream_info->state = INACTIVE;
+ pstream_info->stream_handle = axi_src_idx;
+ pstream_info->stream_src = axi_src_idx;
+
+ pstream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT] =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(vfe_dev->buf_mgr,
+ pstream_info->session_id,
+ pstream_info->stream_id);
+
+ INIT_LIST_HEAD(&pstream_info->request_q);
+
+ pstream_info->frame_based =
+ pCmd->output_path_cfg[axi_src_idx].frame_based;
+
+ /* send buffers to user through vfe dev node */
+ pstream_info->buf_divert = 1;
+ pstream_info->output_format =
+ pCmd->output_path_cfg[axi_src_idx].format;
+
+ msm_isp_axi_get_num_planes(
+ pCmd->output_path_cfg[axi_src_idx].format,
+ pstream_info);
+
+ for (plane_idx = 0;
+ plane_idx < pstream_info->num_planes;
+ plane_idx++) {
+ pstream_info->vfe_plane_cfg[plane_idx] =
+ pCmd->output_path_cfg[axi_src_idx].
+ plane_cfg[plane_idx];
+ pstream_info->wm[plane_idx] =
+ pCmd->output_path_cfg[axi_src_idx].
+ plane_cfg[plane_idx].wmIndex;
+ axi_data->free_wm[pstream_info->wm[plane_idx]] =
+ pstream_info->stream_handle;
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, pstream_info, plane_idx);
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_xbar_reg(vfe_dev,
+ pstream_info, plane_idx);
+ }
+
+		if (axi_src_idx >= PIX_ENCODER && axi_src_idx <= IDEAL_RAW) {
+ if (axi_src_idx == CAMIF_RAW) {
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count = 1;
+ } else {
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count += 1;
+ }
+
+ /* CamIf will be enabled in stream on */
+ } else if (axi_src_idx >= RDI_INTF_0) {
+ struct msm_vfe_rdi_cfg rdi_cfg;
+ long pixel_clock = 0;
+
+ rdi_cfg.cid =
+ pCmd->output_path_cfg[axi_src_idx].rdi_cid;
+ rdi_cfg.frame_based =
+ pCmd->output_path_cfg[axi_src_idx].frame_based;
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ cfg_rdi_reg(vfe_dev, &rdi_cfg,
+ SRC_TO_INTF(axi_src_idx));
+
+			pixel_clock = 320000000; /* USE MAX CLOCK FOR NOW */
+ vfe_dev->axi_data.
+ src_info[SRC_TO_INTF(axi_src_idx)].
+ pixel_clock = pixel_clock;
+
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate
+ (vfe_dev, &pixel_clock);
+ if (rc < 0) {
+ pr_err("%s: clock set rate failed\n", __func__);
+ return rc;
+ }
+ }
+ }
+
+ pr_debug("%s: exit\n", __func__);
+ return rc;
+}
+
+int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i, j;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
+ struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
+ struct msm_isp_sw_framskip *sw_skip_info = NULL;
+ unsigned long flags;
+ struct msm_isp_timestamp timestamp;
+ uint32_t frame_id;
+
+ /* num_stream is uint32 and update_info[] bound by MAX_NUM_STREAM */
+ if (update_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ /* check array reference bounds */
+ if (HANDLE_TO_IDX(update_info->stream_handle) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(update_info->stream_handle)];
+ if (SRC_TO_INTF(stream_info->stream_src) >= VFE_SRC_MAX)
+ continue;
+ if (stream_info->state != ACTIVE &&
+ stream_info->state != INACTIVE &&
+ update_cmd->update_type !=
+ UPDATE_STREAM_REQUEST_FRAMES &&
+ update_cmd->update_type !=
+ UPDATE_STREAM_REMOVE_BUFQ &&
+ update_cmd->update_type !=
+ UPDATE_STREAM_SW_FRAME_DROP) {
+ pr_err("%s: Invalid stream state %d, update cmd %d\n",
+ __func__, stream_info->state,
+ stream_info->stream_id);
+ return -EINVAL;
+ }
+ if (update_cmd->update_type == UPDATE_STREAM_AXI_CONFIG &&
+ atomic_read(&axi_data->axi_cfg_update[
+ SRC_TO_INTF(stream_info->stream_src)])) {
+ pr_err("%s: AXI stream config updating\n", __func__);
+ return -EBUSY;
+ }
+ }
+
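+ /* All stream handles and states were validated above; apply the update */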
+ switch (update_cmd->update_type) {
+ case ENABLE_STREAM_BUF_DIVERT:
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
+ update_info->stream_handle)];
+ stream_info->buf_divert = 1;
+ }
+ break;
+ case DISABLE_STREAM_BUF_DIVERT:
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
+ update_info->stream_handle)];
+ stream_info->buf_divert = 0;
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
+ frame_id = vfe_dev->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].frame_id;
+ /* set ping pong address to scratch before flush */
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
+ VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
+ VFE_PONG_FLAG);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+ vfe_dev->pdev->id,
+ stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT],
+ MSM_ISP_BUFFER_FLUSH_DIVERTED,
+ &timestamp.buf_time, frame_id);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+ }
+ break;
+ case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ uint32_t framedrop_period;
+
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ framedrop_period =
+ msm_isp_get_framedrop_period(
+ update_info->skip_pattern);
+ stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
+ update_info->stream_handle)];
+ spin_lock_irqsave(&stream_info->lock, flags);
+ /* no change then break early */
+ if (stream_info->current_framedrop_period ==
+ framedrop_period) {
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ break;
+ }
+ if (stream_info->controllable_output) {
+ pr_err("Controllable output streams does not support custom frame skip pattern\n");
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ return -EINVAL;
+ }
+ if (update_info->skip_pattern == SKIP_ALL)
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ else
+ stream_info->current_framedrop_period =
+ framedrop_period;
+ if (stream_info->stream_type != BURST_STREAM)
+ msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+ break;
+ }
+ case UPDATE_STREAM_SW_FRAME_DROP: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
+ update_info->stream_handle)];
+ sw_skip_info = &update_info->sw_skip_info;
+ if (sw_skip_info->stream_src_mask != 0) {
+ /* SW image buffer drop */
+ pr_debug("%s:%x sw skip type %x mode %d min %d max %d\n",
+ __func__, stream_info->stream_id,
+ sw_skip_info->stream_src_mask,
+ sw_skip_info->skip_mode,
+ sw_skip_info->min_frame_id,
+ sw_skip_info->max_frame_id);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ stream_info->sw_skip = *sw_skip_info;
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ }
+ }
+ break;
+ }
+ case UPDATE_STREAM_AXI_CONFIG: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
+ update_info->stream_handle)];
+ for (j = 0; j < stream_info->num_planes; j++) {
+ stream_info->plane_cfg[j] =
+ update_info->plane_cfg[j];
+ }
+ stream_info->output_format =
+ update_info->output_format;
+ if ((stream_info->state == ACTIVE) &&
+ ((vfe_dev->hw_info->runtime_axi_update == 0) ||
+ (vfe_dev->dual_vfe_enable == 1))) {
+ spin_lock_irqsave(&stream_info->lock, flags);
+ stream_info->state = PAUSE_PENDING;
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, stream_info, 1);
+ stream_info->state = PAUSING;
+ atomic_set(&axi_data->
+ axi_cfg_update[SRC_TO_INTF(
+ stream_info->stream_src)],
+ UPDATE_REQUESTED);
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ } else {
+ for (j = 0; j < stream_info->num_planes; j++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ }
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state != ACTIVE) {
+ stream_info->runtime_output_format =
+ stream_info->output_format;
+ } else {
+ stream_info->state = RESUMING;
+ atomic_set(&axi_data->
+ axi_cfg_update[SRC_TO_INTF(
+ stream_info->stream_src)],
+ APPLYING_UPDATE_RESUME);
+ }
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ }
+ }
+ break;
+ }
+ case UPDATE_STREAM_REQUEST_FRAMES: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
+ update_info->stream_handle)];
+ rc = msm_isp_request_frame(vfe_dev, stream_info,
+ update_info->user_stream_id,
+ update_info->frame_id,
+ MSM_ISP_INVALID_BUF_INDEX);
+ if (rc)
+ pr_err("%s failed to request frame!\n",
+ __func__);
+ }
+ break;
+ }
+ case UPDATE_STREAM_ADD_BUFQ: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
+ update_info->stream_handle)];
+ rc = msm_isp_add_buf_queue(vfe_dev, stream_info,
+ update_info->user_stream_id);
+ if (rc)
+ pr_err("%s failed to add bufq!\n", __func__);
+ }
+ break;
+ }
+ case UPDATE_STREAM_REMOVE_BUFQ: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
+ update_info->stream_handle)];
+ msm_isp_remove_buf_queue(vfe_dev, stream_info,
+ update_info->user_stream_id);
+ pr_debug("%s, Remove bufq for Stream 0x%x\n",
+ __func__, stream_info->stream_id);
+ if (stream_info->state == ACTIVE) {
+ stream_info->state = UPDATING;
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev,
+ NO_UPDATE, (1 << SRC_TO_INTF(
+ stream_info->stream_src)), 2);
+ if (rc < 0)
+ pr_err("%s: wait for update failed\n",
+ __func__);
+ }
+ }
+ break;
+ }
+ case UPDATE_STREAM_OFFLINE_AXI_CONFIG: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
+ update_info->stream_handle)];
+ for (j = 0; j < stream_info->num_planes; j++) {
+ stream_info->plane_cfg[j] =
+ update_info->plane_cfg[j];
+ }
+ for (j = 0; j < stream_info->num_planes; j++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ }
+ }
+ break;
+ }
+ case UPDATE_STREAM_REQUEST_FRAMES_VER2: {
+ struct msm_vfe_axi_stream_cfg_update_info_req_frm *req_frm =
+ &update_cmd->req_frm_ver2;
+ stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
+ req_frm->stream_handle)];
+ rc = msm_isp_request_frame(vfe_dev, stream_info,
+ req_frm->user_stream_id,
+ req_frm->frame_id,
+ req_frm->buf_index);
+ if (rc)
+ pr_err("%s failed to request frame!\n",
+ __func__);
+ break;
+ }
+ default:
+ pr_err("%s: Invalid update type\n", __func__);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t pingpong_status,
+ struct msm_isp_timestamp *ts)
+{
+ int rc = -1;
+ uint32_t pingpong_bit = 0, i;
+ struct msm_isp_buffer *done_buf = NULL;
+ unsigned long flags;
+ struct timeval *time_stamp;
+ uint32_t frame_id, buf_index = -1;
+ struct msm_vfe_axi_stream *temp_stream;
+
+ if (!ts) {
+ pr_err("%s: Error! Invalid argument\n", __func__);
+ return;
+ }
+
+ if (vfe_dev->vt_enable) {
+ msm_isp_get_avtimer_ts(ts);
+ time_stamp = &ts->vt_time;
+ } else {
+ time_stamp = &ts->buf_time;
+ }
+
+ frame_id = vfe_dev->axi_data.
+ src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+
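+ /*
+ * The inverted pingpong_status bit of wm[0] identifies the buffer that
+ * just completed; every plane of the stream must report the same bit or
+ * the frame is treated as a ping/pong mismatch.
+ */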
+ spin_lock_irqsave(&stream_info->lock, flags);
+ pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ for (i = 0; i < stream_info->num_planes; i++) {
+ if (pingpong_bit !=
+ (~(pingpong_status >> stream_info->wm[i]) & 0x1)) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: Write master ping pong mismatch. Status: 0x%x\n",
+ __func__, pingpong_status);
+ msm_isp_dump_ping_pong_mismatch();
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_PING_PONG_MISMATCH);
+ return;
+ }
+ }
+
+ if (stream_info->state == INACTIVE) {
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
+ pingpong_status);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err_ratelimited("%s: Warning! Stream already inactive. Drop irq handling\n",
+ __func__);
+ return;
+ }
+ done_buf = stream_info->buf[pingpong_bit];
+
+ if (vfe_dev->buf_mgr->frameId_mismatch_recovery == 1) {
+ pr_err_ratelimited("%s: Mismatch Recovery in progress, drop frame!\n",
+ __func__);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ return;
+ }
+
+ stream_info->frame_id++;
+ if (done_buf)
+ buf_index = done_buf->buf_idx;
+
+ ISP_DBG("%s: vfe %d: stream 0x%x, frame id %d, pingpong bit %d\n",
+ __func__,
+ vfe_dev->pdev->id,
+ stream_info->stream_id,
+ frame_id,
+ pingpong_bit);
+
+ rc = vfe_dev->buf_mgr->ops->update_put_buf_cnt(vfe_dev->buf_mgr,
+ vfe_dev->pdev->id,
+ done_buf ? done_buf->bufq_handle :
+ stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT], buf_index,
+ time_stamp, frame_id, pingpong_bit);
+
+ if (rc < 0) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ /* this usually means a serious scheduling error */
+ msm_isp_halt_send_error(vfe_dev, ISP_EVENT_PING_PONG_MISMATCH);
+ return;
+ }
+ /*
+ * The buf divert return value represents whether the buffer
+ * can be diverted. A positive return value means other ISP
+ * hardware is still processing the frame. A negative value
+ * is an error. Return in both cases.
+ */
+ if (rc != 0) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ return;
+ }
+
+ if (stream_info->stream_type == CONTINUOUS_STREAM ||
+ stream_info->runtime_num_burst_capture > 1) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, pingpong_status, 0);
+ if (rc < 0)
+ ISP_DBG("%s: Error configuring ping_pong\n",
+ __func__);
+ } else if (done_buf) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, pingpong_status, 1);
+ if (rc < 0)
+ ISP_DBG("%s: Error configuring ping_pong\n",
+ __func__);
+ }
+
+ if (!done_buf) {
+ if (stream_info->buf_divert) {
+ vfe_dev->error_info.stream_framedrop_count[
+ stream_info->bufq_handle[
+ VFE_BUF_QUEUE_DEFAULT] & 0xFF]++;
+ vfe_dev->error_info.framedrop_flag = 1;
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ return;
+ }
+
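+ /*
+ * Burst counts are decremented on the stream returned by
+ * msm_isp_get_controllable_stream(); for non-controllable PIX streams
+ * in dual-VFE mode the peer VFE's counter is decremented as well.
+ */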
+ temp_stream = msm_isp_get_controllable_stream(vfe_dev,
+ stream_info);
+ if (temp_stream->stream_type == BURST_STREAM &&
+ temp_stream->runtime_num_burst_capture) {
+ ISP_DBG("%s: burst_frame_count: %d\n",
+ __func__,
+ temp_stream->runtime_num_burst_capture);
+ temp_stream->runtime_num_burst_capture--;
+ /*
+ * For non controllable stream decrement the burst count for
+ * dual stream as well here
+ */
+ if (!stream_info->controllable_output && vfe_dev->is_split &&
+ stream_info->stream_src < RDI_INTF_0) {
+ temp_stream = msm_isp_vfe_get_stream(
+ vfe_dev->common_data->dual_vfe_res,
+ ((vfe_dev->pdev->id == ISP_VFE0) ?
+ ISP_VFE1 : ISP_VFE0),
+ HANDLE_TO_IDX(
+ stream_info->stream_handle));
+ temp_stream->runtime_num_burst_capture--;
+ }
+ }
+
+ rc = msm_isp_update_deliver_count(vfe_dev, stream_info,
+ pingpong_bit);
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err_ratelimited("%s:VFE%d get done buf fail\n",
+ __func__, vfe_dev->pdev->id);
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_PING_PONG_MISMATCH);
+ return;
+ }
+
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+ if ((done_buf->frame_id != frame_id) &&
+ vfe_dev->axi_data.enable_frameid_recovery) {
+ msm_isp_handle_done_buf_frame_id_mismatch(vfe_dev,
+ stream_info, done_buf, time_stamp, frame_id);
+ return;
+ }
+
+ msm_isp_process_done_buf(vfe_dev, stream_info,
+ done_buf, time_stamp, frame_id);
+}
+
+void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts)
+{
+ int i, rc = 0;
+ uint32_t comp_mask = 0, wm_mask = 0;
+ uint32_t stream_idx;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_composite_info *comp_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int wm;
+
+ comp_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_comp_mask(irq_status0, irq_status1);
+ wm_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_wm_mask(irq_status0, irq_status1);
+ if (!(comp_mask || wm_mask))
+ return;
+
+ ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
+
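+ /*
+ * Handle composite IRQs first; their write masters are cleared from
+ * wm_mask so the per-WM loop below only processes non-composite streams.
+ */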
+ for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
+ rc = 0;
+ comp_info = &axi_data->composite_info[i];
+ wm_mask &= ~(comp_info->stream_composite_mask);
+ if (comp_mask & (1 << i)) {
+ stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
+ if ((!comp_info->stream_handle) ||
+ (stream_idx >= VFE_AXI_SRC_MAX)) {
+ pr_err_ratelimited(
+ "%s: Invalid handle for composite irq\n",
+ __func__);
+ for (wm = 0; wm < axi_data->hw_info->num_wm;
+ wm++)
+ if (comp_info->stream_composite_mask &
+ (1 << wm))
+ msm_isp_cfg_wm_scratch(vfe_dev,
+ wm, (pingpong_status >>
+ wm) & 0x1);
+ continue;
+ }
+ stream_info = &axi_data->stream_info[stream_idx];
+ msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
+ pingpong_status, ts);
+
+ }
+ }
+
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (wm_mask & (1 << i)) {
+ stream_idx = HANDLE_TO_IDX(axi_data->free_wm[i]);
+ if ((!axi_data->free_wm[i]) ||
+ (stream_idx >= VFE_AXI_SRC_MAX)) {
+ pr_err("%s: Invalid handle for wm irq\n",
+ __func__);
+ msm_isp_cfg_wm_scratch(vfe_dev, i,
+ (pingpong_status >> i) & 0x1);
+ continue;
+ }
+ stream_info = &axi_data->stream_info[stream_idx];
+
+ msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
+ pingpong_status, ts);
+ }
+ }
+}
+
+void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int i, j;
+
+ if (!vfe_dev || !axi_data) {
+ pr_err("%s: error %pK %pK\n", __func__, vfe_dev, axi_data);
+ return;
+ }
+
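+ /* Disable the write masters of every stream that is still ACTIVE */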
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = &axi_data->stream_info[i];
+
+ if (stream_info->state != ACTIVE)
+ continue;
+
+ for (j = 0; j < stream_info->num_planes; j++)
+ vfe_dev->hw_info->vfe_ops.axi_ops.enable_wm(
+ vfe_dev->vfe_base,
+ stream_info->wm[j], 0);
+ }
+}
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h
new file mode 100644
index 000000000000..0396fc4680f1
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h
@@ -0,0 +1,131 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISP_AXI_UTIL_H__
+#define __MSM_ISP_AXI_UTIL_H__
+
+#include "msm_isp.h"
+
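+/*
+ * Stream handles carry the stream_info[] index in their low byte, e.g.
+ * HANDLE_TO_IDX(0x0203) == 3. SRC_TO_INTF maps an AXI source to its input
+ * interface: all camera (PIX) sources map to VFE_PIX_0, RDI_INTF_n maps
+ * to VFE_RAW_n.
+ */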
+#define HANDLE_TO_IDX(handle) (handle & 0xFF)
+#define SRC_TO_INTF(src) \
+ ((src < RDI_INTF_0 || src == VFE_AXI_SRC_MAX) ? VFE_PIX_0 : \
+ (VFE_RAW_0 + src - RDI_INTF_0))
+
+int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
+
+void msm_isp_axi_destroy_stream(
+ struct msm_vfe_axi_shared_data *axi_data, int stream_idx);
+
+int msm_isp_validate_axi_request(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
+
+void msm_isp_axi_reserve_wm(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info);
+
+void msm_isp_axi_reserve_comp_mask(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_axi_check_stream_state(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd);
+
+int msm_isp_calculate_framedrop(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
+void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_get_avtimer_ts(struct msm_isp_timestamp *time_stamp);
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev,
+ enum msm_vfe_hw_state hw_state);
+int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+int msm_isp_axi_halt(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_halt_cmd *halt_cmd);
+int msm_isp_axi_reset(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_reset_cmd *reset_cmd);
+int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_restart_cmd *restart_cmd);
+
+int msm_isp_axi_output_cfg(struct vfe_device *vfe_dev, void *arg);
+
+void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+
+void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+
+void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
+ enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts);
+
+void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts);
+
+void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev);
+
+int msm_isp_print_ping_pong_address(struct vfe_device *vfe_dev,
+ unsigned long fault_addr);
+
+void msm_isp_increment_frame_id(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts);
+
+int msm_isp_drop_frame(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, struct msm_isp_timestamp *ts,
+ struct msm_isp_sof_info *sof_info);
+
+void msm_isp_halt(struct vfe_device *vfe_dev);
+void msm_isp_halt_send_error(struct vfe_device *vfe_dev, uint32_t event);
+
+void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t pingpong_status,
+ struct msm_isp_timestamp *ts);
+
+static inline void msm_isp_cfg_wm_scratch(struct vfe_device *vfe_dev,
+ int wm,
+ uint32_t pingpong_bit)
+{
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+ vfe_dev->vfe_base, wm,
+ pingpong_bit, vfe_dev->buf_mgr->scratch_buf_addr, 0);
+}
+
+static inline void msm_isp_cfg_stream_scratch(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t pingpong_status)
+{
+ int i;
+ uint32_t pingpong_bit;
+
+ pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ for (i = 0; i < stream_info->num_planes; i++)
+ msm_isp_cfg_wm_scratch(vfe_dev, stream_info->wm[i],
+ ~pingpong_bit & 0x1);
+ stream_info->buf[pingpong_bit] = NULL;
+}
+
+int msm_isp_cfg_offline_ping_pong_address(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+ uint32_t buf_idx);
+#endif /* __MSM_ISP_AXI_UTIL_H__ */
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
new file mode 100644
index 000000000000..3b877b4cb994
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
@@ -0,0 +1,973 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais_isp.h>
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+
+static inline void msm_isp_stats_cfg_wm_scratch(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status)
+{
+ vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
+ vfe_dev->vfe_base, stream_info,
+ pingpong_status, vfe_dev->buf_mgr->scratch_buf_addr);
+}
+
+static inline void msm_isp_stats_cfg_stream_scratch(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status)
+{
+ uint32_t stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t pingpong_bit;
+ uint32_t stats_pingpong_offset =
+ vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
+ stats_idx];
+
+ pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
+
+ msm_isp_stats_cfg_wm_scratch(vfe_dev, stream_info,
+ pingpong_status);
+ stream_info->buf[pingpong_bit] = NULL;
+}
+
+static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status)
+{
+ int rc = -1, vfe_id = 0;
+ struct msm_isp_buffer *buf;
+ uint32_t pingpong_bit = 0;
+ uint32_t stats_pingpong_offset;
+ uint32_t bufq_handle = stream_info->bufq_handle;
+ uint32_t stats_idx = STATS_IDX(stream_info->stream_handle);
+ struct dual_vfe_resource *dual_vfe_res = NULL;
+ struct msm_vfe_stats_stream *dual_vfe_stream_info = NULL;
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type ||
+ stats_idx >= MSM_ISP_STATS_MAX) {
+ pr_err("%s Invalid stats index %d", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stats_pingpong_offset =
+ vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
+ stats_idx];
+
+ pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
+
+ rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+ bufq_handle,
+ MSM_ISP_INVALID_BUF_INDEX, &buf);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+ if (rc < 0 || NULL == buf)
+ vfe_dev->error_info.stats_framedrop_count[stats_idx]++;
+
+ if (buf && buf->num_planes != 1) {
+ pr_err("%s: Invalid buffer\n", __func__);
+ msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
+ rc = -EINVAL;
+ goto buf_error;
+ }
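+ /*
+ * In dual-VFE (split) mode both VFEs run the same stats stream, so the
+ * new buffer address (or the scratch address when no buffer is
+ * available) is programmed into both VFE register spaces.
+ */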
+ if (vfe_dev->is_split) {
+ dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
+ if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
+ !dual_vfe_res->stats_data[ISP_VFE0] ||
+ !dual_vfe_res->vfe_base[ISP_VFE1] ||
+ !dual_vfe_res->stats_data[ISP_VFE1]) {
+ pr_err("%s:%d error vfe0 %pK %pK vfe1 %pK %pK\n",
+ __func__, __LINE__,
+ dual_vfe_res->vfe_base[ISP_VFE0],
+ dual_vfe_res->stats_data[ISP_VFE0],
+ dual_vfe_res->vfe_base[ISP_VFE1],
+ dual_vfe_res->stats_data[ISP_VFE1]);
+ } else {
+ for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
+ dual_vfe_stream_info = &dual_vfe_res->
+ stats_data[vfe_id]->
+ stream_info[stats_idx];
+ if (buf)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ update_ping_pong_addr(
+ dual_vfe_res->vfe_base[vfe_id],
+ dual_vfe_stream_info,
+ pingpong_status,
+ buf->mapped_info[0].paddr +
+ dual_vfe_stream_info->
+ buffer_offset);
+ else
+ msm_isp_stats_cfg_stream_scratch(
+ vfe_dev,
+ dual_vfe_stream_info,
+ pingpong_status);
+
+ dual_vfe_stream_info->buf[pingpong_bit]
+ = buf;
+ }
+ }
+ } else {
+ if (buf)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ update_ping_pong_addr(
+ vfe_dev->vfe_base, stream_info,
+ pingpong_status, buf->mapped_info[0].paddr +
+ stream_info->buffer_offset);
+ else
+ msm_isp_stats_cfg_stream_scratch(vfe_dev,
+ stream_info, pingpong_status);
+
+ stream_info->buf[pingpong_bit] = buf;
+ }
+
+ if (buf)
+ buf->pingpong_bit = pingpong_bit;
+ return 0;
+buf_error:
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ return rc;
+}
+
+static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev,
+ struct msm_isp_timestamp *ts,
+ struct msm_isp_event_data *buf_event,
+ struct msm_vfe_stats_stream *stream_info,
+ uint32_t *comp_stats_type_mask, uint32_t pingpong_status)
+{
+ int32_t rc = 0, frame_id = 0, drop_buffer = 0;
+ struct msm_isp_stats_event *stats_event = NULL;
+ struct msm_isp_sw_framskip *sw_skip = NULL;
+ int32_t buf_index = -1;
+ uint32_t pingpong_bit;
+ struct msm_isp_buffer *done_buf;
+ uint32_t stats_pingpong_offset;
+ uint32_t stats_idx;
+
+ if (!vfe_dev || !ts || !buf_event || !stream_info) {
+ pr_err("%s:%d failed: invalid params %pK %pK %pK %pK\n",
+ __func__, __LINE__, vfe_dev, ts, buf_event,
+ stream_info);
+ return -EINVAL;
+ }
+ frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ sw_skip = &stream_info->sw_skip;
+ stats_event = &buf_event->u.stats;
+
+ if (sw_skip->stats_type_mask &
+ (1 << stream_info->stats_type)) {
+ /* Hw stream output of this src is requested
+ * for drop
+ */
+ if (sw_skip->skip_mode == SKIP_ALL) {
+ /* drop all buffers */
+ drop_buffer = 1;
+ } else if (sw_skip->skip_mode == SKIP_RANGE &&
+ (sw_skip->min_frame_id <= frame_id &&
+ sw_skip->max_frame_id >= frame_id)) {
+ drop_buffer = 1;
+ } else if (frame_id > sw_skip->max_frame_id) {
+ memset(sw_skip, 0, sizeof
+ (struct msm_isp_sw_framskip));
+ }
+ }
+ stats_idx = STATS_IDX(stream_info->stream_handle);
+
+ stats_pingpong_offset =
+ vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
+ stats_idx];
+ pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
+
+ done_buf = stream_info->buf[pingpong_bit];
+
+ if (done_buf)
+ buf_index = done_buf->buf_idx;
+
+ rc = vfe_dev->buf_mgr->ops->update_put_buf_cnt(
+ vfe_dev->buf_mgr, vfe_dev->pdev->id, stream_info->bufq_handle,
+ buf_index, &ts->buf_time,
+ frame_id, pingpong_bit);
+
+ if (rc < 0) {
+ if (rc == -EFAULT)
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_PING_PONG_MISMATCH);
+ pr_err("stats_buf_divert: update put buf cnt fail\n");
+ return rc;
+ }
+
+ if (rc > 0) {
+ ISP_DBG("%s: vfe_id %d buf_id %d bufq %x put_cnt 1\n", __func__,
+ vfe_dev->pdev->id, buf_index,
+ stream_info->bufq_handle);
+ return rc;
+ }
+
+ /* Program next buffer */
+ rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev, stream_info,
+ pingpong_status);
+ if (rc)
+ return rc;
+
+ if (drop_buffer && done_buf) {
+ rc = vfe_dev->buf_mgr->ops->buf_done(
+ vfe_dev->buf_mgr,
+ done_buf->bufq_handle,
+ done_buf->buf_idx, &ts->buf_time, frame_id, 0);
+ if (rc == -EFAULT)
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+
+ if (done_buf) {
+ stats_event->stats_buf_idxs
+ [stream_info->stats_type] =
+ done_buf->buf_idx;
+ if (comp_stats_type_mask == NULL) {
+ stats_event->stats_mask =
+ 1 << stream_info->stats_type;
+ ISP_DBG("%s: stats frameid: 0x%x %d bufq %x\n",
+ __func__, buf_event->frame_id,
+ stream_info->stats_type, done_buf->bufq_handle);
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_STATS_NOTIFY +
+ stream_info->stats_type,
+ buf_event);
+ } else {
+ *comp_stats_type_mask |=
+ 1 << stream_info->stats_type;
+ }
+ }
+
+ return rc;
+}
+
+static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev,
+ uint32_t stats_irq_mask, struct msm_isp_timestamp *ts,
+ uint32_t pingpong_status, bool is_composite)
+{
+ int i, rc = 0;
+ struct msm_isp_event_data buf_event;
+ struct msm_isp_stats_event *stats_event = &buf_event.u.stats;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ uint32_t comp_stats_type_mask = 0;
+ int result = 0;
+
+ memset(&buf_event, 0, sizeof(struct msm_isp_event_data));
+ buf_event.timestamp = ts->buf_time;
+ buf_event.frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ if (!(stats_irq_mask & (1 << i)))
+ continue;
+ stream_info = &vfe_dev->stats_data.stream_info[i];
+ if (stream_info->state == STATS_INACTIVE) {
+ pr_debug("%s: Warning! Stream already inactive. Drop irq handling\n",
+ __func__);
+ continue;
+ }
+
+ rc = msm_isp_stats_buf_divert(vfe_dev, ts,
+ &buf_event, stream_info,
+ is_composite ? &comp_stats_type_mask : NULL,
+ pingpong_status);
+ if (rc < 0) {
+ pr_err("%s:%d failed: stats buf divert rc %d\n",
+ __func__, __LINE__, rc);
+ result = rc;
+ }
+ }
+ if (is_composite && comp_stats_type_mask) {
+ ISP_DBG("%s:vfe_id %d comp_stats frameid %x,comp_mask %x\n",
+ __func__, vfe_dev->pdev->id, buf_event.frame_id,
+ comp_stats_type_mask);
+ stats_event->stats_mask = comp_stats_type_mask;
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_COMP_STATS_NOTIFY, &buf_event);
+ comp_stats_type_mask = 0;
+ }
+ return result;
+}
+
+void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts)
+{
+ int j, rc;
+ uint32_t atomic_stats_mask = 0;
+ uint32_t stats_comp_mask = 0, stats_irq_mask = 0;
+ bool comp_flag = false;
+ uint32_t num_stats_comp_mask =
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+
+ stats_comp_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_comp_mask(irq_status0, irq_status1);
+ stats_irq_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_wm_mask(irq_status0, irq_status1);
+ if (!(stats_comp_mask || stats_irq_mask))
+ return;
+
+ ISP_DBG("%s: vfe %d status: 0x%x\n", __func__, vfe_dev->pdev->id,
+ irq_status0);
+
+ /* Clear composite mask irq bits, they will be restored by comp mask */
+ for (j = 0; j < num_stats_comp_mask; j++) {
+ stats_irq_mask &= ~atomic_read(
+ &vfe_dev->stats_data.stats_comp_mask[j]);
+ }
+
+ /* Process non-composite irq */
+ if (stats_irq_mask) {
+ rc = msm_isp_stats_configure(vfe_dev, stats_irq_mask, ts,
+ pingpong_status, comp_flag);
+ }
+
+ /* Process composite irq */
+ if (stats_comp_mask) {
+ for (j = 0; j < num_stats_comp_mask; j++) {
+ if (!(stats_comp_mask & (1 << j)))
+ continue;
+
+ atomic_stats_mask = atomic_read(
+ &vfe_dev->stats_data.stats_comp_mask[j]);
+
+ rc = msm_isp_stats_configure(vfe_dev, atomic_stats_mask,
+ ts, pingpong_status, !comp_flag);
+ }
+ }
+}
+
+int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_request_cmd *stream_req_cmd)
+{
+ int rc = -1;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ uint32_t stats_idx;
+
+ if (!(vfe_dev->hw_info->stats_hw_info->stats_capability_mask &
+ (1 << stream_req_cmd->stats_type))) {
+ pr_err("%s: Stats type not supported\n", __func__);
+ return rc;
+ }
+
+ stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_stats_idx(stream_req_cmd->stats_type);
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s Invalid stats index %d", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
+ if (stream_info->state != STATS_AVAILABLE) {
+ pr_err("%s: Stats already requested\n", __func__);
+ return rc;
+ }
+
+ if (stream_req_cmd->framedrop_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid framedrop pattern\n", __func__);
+ return rc;
+ }
+
+ if (stream_req_cmd->irq_subsample_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid irq subsample pattern\n", __func__);
+ return rc;
+ }
+
+ stream_info->session_id = stream_req_cmd->session_id;
+ stream_info->stream_id = stream_req_cmd->stream_id;
+ stream_info->composite_flag = stream_req_cmd->composite_flag;
+ stream_info->stats_type = stream_req_cmd->stats_type;
+ stream_info->buffer_offset = stream_req_cmd->buffer_offset;
+ stream_info->framedrop_pattern = stream_req_cmd->framedrop_pattern;
+ stream_info->init_stats_frame_drop = stream_req_cmd->init_frame_drop;
+ stream_info->irq_subsample_pattern =
+ stream_req_cmd->irq_subsample_pattern;
+ stream_info->state = STATS_INACTIVE;
+
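+ /*
+ * Build a non-zero stream handle: rolling counter in the upper bits,
+ * stats index in the low byte (see STATS_IDX).
+ */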
+ if ((vfe_dev->stats_data.stream_handle_cnt << 8) == 0)
+ vfe_dev->stats_data.stream_handle_cnt++;
+
+ stream_req_cmd->stream_handle =
+ (++vfe_dev->stats_data.stream_handle_cnt) << 8 | stats_idx;
+
+ stream_info->stream_handle = stream_req_cmd->stream_handle;
+ return 0;
+}
+
+int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = -1;
+ struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ uint32_t framedrop_period;
+ uint32_t stats_idx;
+
+ rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd);
+ if (rc < 0) {
+ pr_err("%s: create stream failed\n", __func__);
+ return rc;
+ }
+
+ stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s Invalid stats index %d", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
+
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_req_cmd->framedrop_pattern);
+
+ if (stream_req_cmd->framedrop_pattern == SKIP_ALL)
+ stream_info->framedrop_pattern = 0x0;
+ else
+ stream_info->framedrop_pattern = 0x1;
+ stream_info->framedrop_period = framedrop_period - 1;
+
+ if (stream_info->init_stats_frame_drop == 0)
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
+ stream_info);
+
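+ /* Point ping and pong at the scratch buffer until real buffers are queued */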
+ msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
+ VFE_PING_FLAG);
+ msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
+ VFE_PONG_FLAG);
+ return rc;
+}
+
+int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = -1;
+ struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
+ struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ int stats_idx = STATS_IDX(stream_release_cmd->stream_handle);
+ struct msm_vfe_stats_stream *stream_info = NULL;
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s Invalid stats index %d", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
+ if (stream_info->state == STATS_AVAILABLE) {
+ pr_err("%s: stream already release\n", __func__);
+ return rc;
+ } else if (stream_info->state != STATS_INACTIVE) {
+ stream_cfg_cmd.enable = 0;
+ stream_cfg_cmd.num_streams = 1;
+ stream_cfg_cmd.stream_handle[0] =
+ stream_release_cmd->stream_handle;
+ rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+ }
+
+ vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
+ memset(stream_info, 0, sizeof(struct msm_vfe_stats_stream));
+ return 0;
+}
+
+static int msm_isp_init_stats_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int rc = 0;
+
+ stream_info->bufq_handle =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, stream_info->session_id,
+ stream_info->stream_id);
+ if (stream_info->bufq_handle == 0) {
+ pr_err("%s: no buf configured for stream: 0x%x\n",
+ __func__, stream_info->stream_handle);
+ return -EINVAL;
+ }
+
+ if ((vfe_dev->is_split && vfe_dev->pdev->id == 1) ||
+ !vfe_dev->is_split) {
+ rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PING_FLAG);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n", __func__);
+ return rc;
+ }
+ rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PONG_FLAG);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n", __func__);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev)
+{
+ int i;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ stream_info = &stats_data->stream_info[i];
+ if (stream_info->state != STATS_ACTIVE)
+ continue;
+
+ if (stream_info->init_stats_frame_drop) {
+ stream_info->init_stats_frame_drop--;
+ if (stream_info->init_stats_frame_drop == 0) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
+ vfe_dev, stream_info);
+ }
+ }
+ }
+}
+
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t enable = 0;
+ uint8_t comp_flag = 0;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_vfe_stats_ops *stats_ops =
+ &vfe_dev->hw_info->vfe_ops.stats_ops;
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ if (stats_data->stream_info[i].state == STATS_START_PENDING ||
+ stats_data->stream_info[i].state ==
+ STATS_STOP_PENDING) {
+ enable = stats_data->stream_info[i].state ==
+ STATS_START_PENDING ? 1 : 0;
+ stats_data->stream_info[i].state =
+ stats_data->stream_info[i].state ==
+ STATS_START_PENDING ?
+ STATS_STARTING : STATS_STOPPING;
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, BIT(i), enable);
+ comp_flag = stats_data->stream_info[i].composite_flag;
+ if (comp_flag)
+ stats_ops->cfg_comp_mask(vfe_dev, BIT(i),
+ (comp_flag - 1), enable);
+ } else if (stats_data->stream_info[i].state == STATS_STARTING ||
+ stats_data->stream_info[i].state == STATS_STOPPING) {
+ stats_data->stream_info[i].state =
+ stats_data->stream_info[i].state ==
+ STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE;
+ }
+ }
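+ /*
+ * stats_update is armed with 2 in msm_isp_stats_wait_for_cfg_done();
+ * signal completion once both pending updates have been consumed.
+ */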
+ atomic_sub(1, &stats_data->stats_update);
+ if (!atomic_read(&stats_data->stats_update))
+ complete(&vfe_dev->stats_config_complete);
+}
+
+static int msm_isp_stats_wait_for_cfg_done(struct vfe_device *vfe_dev)
+{
+ int rc;
+
+ init_completion(&vfe_dev->stats_config_complete);
+ atomic_set(&vfe_dev->stats_data.stats_update, 2);
+ rc = wait_for_completion_timeout(
+ &vfe_dev->stats_config_complete,
+ msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+ if (rc == 0) {
+ pr_err("%s: wait timeout\n", __func__);
+ rc = -1;
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
+static int msm_isp_stats_update_cgc_override(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i;
+ uint32_t stats_mask = 0, idx;
+
+ if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+ pr_err("%s invalid num_streams %d\n", __func__,
+ stream_cfg_cmd->num_streams);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+ if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s Invalid stats index %d", __func__, idx);
+ return -EINVAL;
+ }
+ stats_mask |= 1 << idx;
+ }
+
+ if (vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override(
+ vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ }
+ return 0;
+}
+
+int msm_isp_stats_reset(struct vfe_device *vfe_dev)
+{
+ int i = 0, rc = 0;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_isp_timestamp timestamp;
+
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
+
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = &stats_data->stream_info[i];
+ if (stream_info->state != STATS_ACTIVE)
+ continue;
+
+ rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+ vfe_dev->pdev->id, stream_info->bufq_handle,
+ MSM_ISP_BUFFER_FLUSH_ALL, &timestamp.buf_time,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+int msm_isp_stats_restart(struct vfe_device *vfe_dev)
+{
+ int i = 0;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = &stats_data->stream_info[i];
+ if (stream_info->state < STATS_ACTIVE)
+ continue;
+ msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+ }
+
+ return 0;
+}
+
+static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i, rc = 0;
+ uint32_t stats_mask = 0, idx;
+ uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
+ uint32_t num_stats_comp_mask = 0;
+ struct msm_vfe_stats_stream *stream_info;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+ pr_err("%s invalid num_streams %d\n", __func__,
+ stream_cfg_cmd->num_streams);
+ return -EINVAL;
+ }
+ num_stats_comp_mask =
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+ rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams(
+ stats_data->stream_info);
+ if (rc < 0)
+ return rc;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+ if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s Invalid stats index %d", __func__, idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[idx];
+ if (stream_info->stream_handle !=
+ stream_cfg_cmd->stream_handle[i]) {
+ pr_err("%s: Invalid stream handle: 0x%x received\n",
+ __func__, stream_cfg_cmd->stream_handle[i]);
+ continue;
+ }
+
+ if (stream_info->composite_flag > num_stats_comp_mask) {
+ pr_err("%s: comp grp %d exceed max %d\n",
+ __func__, stream_info->composite_flag,
+ num_stats_comp_mask);
+ return -EINVAL;
+ }
+ rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+ if (rc < 0) {
+ pr_err("%s: No buffer for stream%d\n", __func__, idx);
+ return rc;
+ }
+ if (!stream_info->composite_flag)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+ stream_info->state = STATS_START_PENDING;
+ else
+ stream_info->state = STATS_ACTIVE;
+
+ stats_data->num_active_stream++;
+ stats_mask |= 1 << idx;
+
+ if (stream_info->composite_flag > 0)
+ comp_stats_mask[stream_info->composite_flag-1] |=
+ 1 << idx;
+
+ ISP_DBG("%s: stats_mask %x %x active streams %d\n",
+ __func__, comp_stats_mask[0],
+ comp_stats_mask[1],
+ stats_data->num_active_stream);
+
+ }
+
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+ rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+ } else {
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ for (i = 0; i < num_stats_comp_mask; i++) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+ vfe_dev, comp_stats_mask[i], i, 1);
+ }
+ }
+ return rc;
+}
+
+static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i, rc = 0;
+ uint32_t stats_mask = 0, idx;
+ uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
+ uint32_t num_stats_comp_mask = 0;
+ struct msm_vfe_stats_stream *stream_info;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_isp_timestamp timestamp;
+
+ if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+ pr_err("%s invalid num_streams %d\n", __func__,
+ stream_cfg_cmd->num_streams);
+ return -EINVAL;
+ }
+
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
+
+ num_stats_comp_mask =
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+ if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s Invalid stats index %d", __func__, idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[idx];
+ if (stream_info->stream_handle !=
+ stream_cfg_cmd->stream_handle[i]) {
+ pr_err("%s: Invalid stream handle: 0x%x received\n",
+ __func__, stream_cfg_cmd->stream_handle[i]);
+ continue;
+ }
+
+ if (stream_info->composite_flag > num_stats_comp_mask) {
+ pr_err("%s: comp grp %d exceed max %d\n",
+ __func__, stream_info->composite_flag,
+ num_stats_comp_mask);
+ return -EINVAL;
+ }
+
+ if (!stream_info->composite_flag)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+ stream_info->state = STATS_STOP_PENDING;
+ else
+ stream_info->state = STATS_INACTIVE;
+
+ stats_data->num_active_stream--;
+ stats_mask |= 1 << idx;
+
+ if (stream_info->composite_flag > 0)
+ comp_stats_mask[stream_info->composite_flag-1] |=
+ 1 << idx;
+
+ msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
+ VFE_PING_FLAG);
+ msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
+ VFE_PONG_FLAG);
+
+ ISP_DBG("%s: stats_mask %x %x active streams %d\n",
+ __func__, comp_stats_mask[0],
+ comp_stats_mask[1],
+ stats_data->num_active_stream);
+ }
+
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+ rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+ } else {
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ for (i = 0; i < num_stats_comp_mask; i++) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+ vfe_dev, comp_stats_mask[i], i, 0);
+ }
+ }
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+ if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s Invalid stats index %d", __func__, idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[idx];
+ rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+ vfe_dev->pdev->id, stream_info->bufq_handle,
+ MSM_ISP_BUFFER_FLUSH_ALL, &timestamp.buf_time,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+
+ if (vfe_dev->stats_data.num_active_stream == 0)
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
+
+ if (stream_cfg_cmd->enable) {
+ msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
+
+ rc = msm_isp_start_stats_stream(vfe_dev, stream_cfg_cmd);
+ } else {
+ rc = msm_isp_stop_stats_stream(vfe_dev, stream_cfg_cmd);
+
+ msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
+ }
+
+ return rc;
+}
+
+int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i;
+ struct msm_vfe_stats_stream *stream_info;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
+ struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
+ struct msm_isp_sw_framskip *sw_skip_info = NULL;
+
+ /* num_streams is bounded by MAX_NUM_STREAM (see update_info[]) */
+ if (update_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ /* validate request */
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ /* check array reference bounds */
+ if (STATS_IDX(update_info->stream_handle)
+ >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s: stats idx %d out of bounds!\n", __func__,
+ STATS_IDX(update_info->stream_handle));
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = &stats_data->stream_info[
+ STATS_IDX(
+ update_info->stream_handle)];
+ if (stream_info->stream_handle !=
+ update_info->stream_handle) {
+ pr_err("%s: stats stream handle %x %x mismatch!\n",
+ __func__, stream_info->stream_handle,
+ update_info->stream_handle);
+ continue;
+ }
+
+ switch (update_cmd->update_type) {
+ case UPDATE_STREAM_STATS_FRAMEDROP_PATTERN: {
+ uint32_t framedrop_period =
+ msm_isp_get_framedrop_period(
+ update_info->skip_pattern);
+ if (update_info->skip_pattern ==
+ SKIP_ALL)
+ stream_info->framedrop_pattern = 0x0;
+ else
+ stream_info->framedrop_pattern = 0x1;
+ stream_info->framedrop_period = framedrop_period - 1;
+ if (stream_info->init_stats_frame_drop == 0)
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
+ vfe_dev, stream_info);
+ break;
+ }
+ case UPDATE_STREAM_SW_FRAME_DROP: {
+ sw_skip_info =
+ &update_info->sw_skip_info;
+ if (!stream_info->sw_skip.stream_src_mask)
+ stream_info->sw_skip = *sw_skip_info;
+
+ if (sw_skip_info->stats_type_mask != 0) {
+ /* No image buffer skip, only stats skip */
+ pr_debug("%s:%x skip type %x mode %d min %d max %d\n",
+ __func__, stream_info->stream_id,
+ sw_skip_info->stats_type_mask,
+ sw_skip_info->skip_mode,
+ sw_skip_info->min_frame_id,
+ sw_skip_info->max_frame_id);
+ stream_info->sw_skip.stats_type_mask =
+ sw_skip_info->stats_type_mask;
+ }
+ break;
+ }
+
+ default:
+ pr_err("%s: Invalid update type\n", __func__);
+ return -EINVAL;
+ }
+ }
+ return rc;
+}
+
+void msm_isp_stats_disable(struct vfe_device *vfe_dev)
+{
+ int i;
+ unsigned int mask = 0;
+
+ if (!vfe_dev) {
+ pr_err("%s: error NULL ptr\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++)
+ mask |= 1 << i;
+
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(vfe_dev, mask, 0);
+}
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h
new file mode 100644
index 000000000000..707901bc6271
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISP_STATS_UTIL_H__
+#define __MSM_ISP_STATS_UTIL_H__
+
+#include "msm_isp.h"
+#define STATS_IDX(idx) (idx & 0xFF)
+
+void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts);
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev);
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev);
+void msm_isp_stats_disable(struct vfe_device *vfe_dev);
+int msm_isp_stats_reset(struct vfe_device *vfe_dev);
+int msm_isp_stats_restart(struct vfe_device *vfe_dev);
+#endif /* __MSM_ISP_STATS_UTIL_H__ */
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_util.c
new file mode 100644
index 000000000000..0353ab27cf19
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_util.c
@@ -0,0 +1,2465 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <linux/ratelimit.h>
+
+#include "msm.h"
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+#include "msm_camera_io_util.h"
+#include "cam_smmu_api.h"
+#define CREATE_TRACE_POINTS
+#include "trace/events/msm_cam.h"
+
+#define MAX_ISP_V4l2_EVENTS 100
+#define MAX_ISP_REG_LIST 100
+#define MAX_ISP_CMD_NUM 10
+#define MAX_ISP_CMD_LEN 4096
+static DEFINE_MUTEX(bandwidth_mgr_mutex);
+static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
+
+static uint64_t msm_isp_cpp_clk_rate;
+static struct dump_ping_pong_state dump_data;
+static struct dump_ping_pong_state tasklet_data;
+static DEFINE_SPINLOCK(dump_irq_lock);
+static DEFINE_SPINLOCK(dump_tasklet_lock);
+
+#define VFE40_8974V2_VERSION 0x1001001A
+
+void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
+{
+ int i;
+ char text[5];
+
+ text[4] = '\0';
+ for (i = 0; i < 4; i++) {
+ text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
+ if ((text[i] < '0') || (text[i] > 'z')) {
+ pr_err("%s: Invalid output format %d (unprintable)\n",
+ origin, fourcc_format);
+ return;
+ }
+ }
+ pr_err("%s: Invalid output format %s\n",
+ origin, text);
+}
+
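+/*
+ * The ISP bandwidth manager is shared by the VFE0, VFE1 and CPP clients;
+ * it is refcounted via use_count and the bus client is registered through
+ * the platform ops on first use.
+ */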
+int msm_isp_init_bandwidth_mgr(struct vfe_device *vfe_dev,
+ enum msm_isp_hw_client client)
+{
+ int rc = 0;
+
+ mutex_lock(&bandwidth_mgr_mutex);
+ if (isp_bandwidth_mgr.client_info[client].active) {
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return rc;
+ }
+ isp_bandwidth_mgr.client_info[client].active = 1;
+ isp_bandwidth_mgr.use_count++;
+ if (vfe_dev && !isp_bandwidth_mgr.bus_client) {
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.init_bw_mgr(vfe_dev,
+ &isp_bandwidth_mgr);
+ if (!rc) {
+ isp_bandwidth_mgr.update_bw =
+ vfe_dev->hw_info->vfe_ops.platform_ops.update_bw;
+ isp_bandwidth_mgr.deinit_bw_mgr =
+ vfe_dev->hw_info->vfe_ops.platform_ops.deinit_bw_mgr;
+ }
+ }
+ if (rc) {
+ isp_bandwidth_mgr.use_count--;
+ isp_bandwidth_mgr.client_info[client].active = 0;
+ }
+
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return rc;
+}
+
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+ uint64_t ab, uint64_t ib)
+{
+ int rc;
+
+ mutex_lock(&bandwidth_mgr_mutex);
+ if (!isp_bandwidth_mgr.use_count ||
+ !isp_bandwidth_mgr.bus_client) {
+ pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
+ __func__, isp_bandwidth_mgr.use_count,
+ isp_bandwidth_mgr.bus_client);
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return -EINVAL;
+ }
+
+ isp_bandwidth_mgr.client_info[client].ab = ab;
+ isp_bandwidth_mgr.client_info[client].ib = ib;
+ rc = isp_bandwidth_mgr.update_bw(&isp_bandwidth_mgr);
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return rc;
+}
+
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+ if (client >= MAX_ISP_CLIENT) {
+ pr_err("invalid Client id %d", client);
+ return;
+ }
+ mutex_lock(&bandwidth_mgr_mutex);
+ memset(&isp_bandwidth_mgr.client_info[client], 0,
+ sizeof(struct msm_isp_bandwidth_info));
+ if (isp_bandwidth_mgr.use_count) {
+ isp_bandwidth_mgr.use_count--;
+ if (isp_bandwidth_mgr.use_count) {
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return;
+ }
+
+ if (!isp_bandwidth_mgr.bus_client) {
+ pr_err("%s:%d error: bus client invalid\n",
+ __func__, __LINE__);
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return;
+ }
+
+ isp_bandwidth_mgr.deinit_bw_mgr(
+ &isp_bandwidth_mgr);
+ }
+ mutex_unlock(&bandwidth_mgr_mutex);
+}
+
+void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
+ struct msm_isp_statistics *stats)
+{
+ stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
+ stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
+ stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;
+
+ stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
+ stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
+ stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;
+
+ stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
+ stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
+ stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
+ stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
+ stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
+ stats->vfe_clk_rate = vfe_dev->msm_isp_vfe_clk_rate;
+ stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
+}
+
+void msm_isp_util_update_clk_rate(long clock_rate)
+{
+ msm_isp_cpp_clk_rate = clock_rate;
+}
+
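+/*
+ * msm_isp_get_framedrop_period - translate a frame skip pattern into the
+ * corresponding period in frames; EVERY_16FRAME/EVERY_32FRAME map to
+ * 16/32, and SKIP_ALL or unknown patterns fall back to a period of 1.
+ */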
+uint32_t msm_isp_get_framedrop_period(
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern)
+{
+ switch (frame_skip_pattern) {
+ case NO_SKIP:
+ case EVERY_2FRAME:
+ case EVERY_3FRAME:
+ case EVERY_4FRAME:
+ case EVERY_5FRAME:
+ case EVERY_6FRAME:
+ case EVERY_7FRAME:
+ case EVERY_8FRAME:
+ return frame_skip_pattern + 1;
+ case EVERY_16FRAME:
+ return 16;
+ case EVERY_32FRAME:
+ return 32;
+ case SKIP_ALL:
+ return 1;
+ default:
+ return 1;
+ }
+ return 1;
+}
+
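+/*
+ * msm_isp_get_timestamp - fill the event time from the wall clock and the
+ * buffer time from the AV timer (when vt_enable is set) or from the
+ * monotonic boottime clock otherwise.
+ */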
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
+ struct vfe_device *vfe_dev)
+{
+ struct timespec ts;
+
+ do_gettimeofday(&(time_stamp->event_time));
+ if (vfe_dev->vt_enable) {
+ msm_isp_get_avtimer_ts(time_stamp);
+ time_stamp->buf_time.tv_sec = time_stamp->vt_time.tv_sec;
+ time_stamp->buf_time.tv_usec = time_stamp->vt_time.tv_usec;
+ } else {
+ get_monotonic_boottime(&ts);
+ time_stamp->buf_time.tv_sec = ts.tv_sec;
+ time_stamp->buf_time.tv_usec = ts.tv_nsec/1000;
+ }
+}
+
+static inline u32 msm_isp_evt_mask_to_isp_event(u32 evt_mask)
+{
+ u32 evt_id = ISP_EVENT_SUBS_MASK_NONE;
+
+ switch (evt_mask) {
+ case ISP_EVENT_MASK_INDEX_STATS_NOTIFY:
+ evt_id = ISP_EVENT_STATS_NOTIFY;
+ break;
+ case ISP_EVENT_MASK_INDEX_ERROR:
+ evt_id = ISP_EVENT_ERROR;
+ break;
+ case ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT:
+ evt_id = ISP_EVENT_IOMMU_P_FAULT;
+ break;
+ case ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE:
+ evt_id = ISP_EVENT_STREAM_UPDATE_DONE;
+ break;
+ case ISP_EVENT_MASK_INDEX_REG_UPDATE:
+ evt_id = ISP_EVENT_REG_UPDATE;
+ break;
+ case ISP_EVENT_MASK_INDEX_SOF:
+ evt_id = ISP_EVENT_SOF;
+ break;
+ case ISP_EVENT_MASK_INDEX_BUF_DIVERT:
+ evt_id = ISP_EVENT_BUF_DIVERT;
+ break;
+ case ISP_EVENT_MASK_INDEX_BUF_DONE:
+ evt_id = ISP_EVENT_BUF_DONE;
+ break;
+ case ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY:
+ evt_id = ISP_EVENT_COMP_STATS_NOTIFY;
+ break;
+ case ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE:
+ evt_id = ISP_EVENT_FE_READ_DONE;
+ break;
+ case ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH:
+ evt_id = ISP_EVENT_PING_PONG_MISMATCH;
+ break;
+ case ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING:
+ evt_id = ISP_EVENT_REG_UPDATE_MISSING;
+ break;
+ case ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR:
+ evt_id = ISP_EVENT_BUF_FATAL_ERROR;
+ break;
+ default:
+ evt_id = ISP_EVENT_SUBS_MASK_NONE;
+ break;
+ }
+
+ return evt_id;
+}
+
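+/*
+ * msm_isp_subscribe_event_mask - (un)subscribe the v4l2 events behind one
+ * event mask index: stats events are expanded per stats type, SOF,
+ * REG_UPDATE and STREAM_UPDATE_DONE are expanded per input interface,
+ * and all other events map to a single event type.
+ */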
+static inline int msm_isp_subscribe_event_mask(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub, int evt_mask_index,
+ u32 evt_id, bool subscribe_flag)
+{
+ int rc = 0, i, interface;
+
+ if (evt_mask_index == ISP_EVENT_MASK_INDEX_STATS_NOTIFY) {
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ sub->type = evt_id + i;
+ if (subscribe_flag)
+ rc = v4l2_event_subscribe(fh, sub,
+ MAX_ISP_V4l2_EVENTS, NULL);
+ else
+ rc = v4l2_event_unsubscribe(fh, sub);
+ if (rc != 0) {
+ pr_err("%s: Subs event_type =0x%x failed\n",
+ __func__, sub->type);
+ return rc;
+ }
+ }
+ } else if (evt_mask_index == ISP_EVENT_MASK_INDEX_SOF ||
+ evt_mask_index == ISP_EVENT_MASK_INDEX_REG_UPDATE ||
+ evt_mask_index == ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE) {
+ for (interface = 0; interface < VFE_SRC_MAX; interface++) {
+ sub->type = evt_id | interface;
+ if (subscribe_flag)
+ rc = v4l2_event_subscribe(fh, sub,
+ MAX_ISP_V4l2_EVENTS, NULL);
+ else
+ rc = v4l2_event_unsubscribe(fh, sub);
+ if (rc != 0) {
+ pr_err("%s: Subs event_type =0x%x failed\n",
+ __func__, sub->type);
+ return rc;
+ }
+ }
+ } else {
+ sub->type = evt_id;
+ if (subscribe_flag)
+ rc = v4l2_event_subscribe(fh, sub,
+ MAX_ISP_V4l2_EVENTS, NULL);
+ else
+ rc = v4l2_event_unsubscribe(fh, sub);
+ if (rc != 0) {
+ pr_err("%s: Subs event_type =0x%x failed\n",
+ __func__, sub->type);
+ return rc;
+ }
+ }
+ return rc;
+}
+
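+/*
+ * msm_isp_process_event_subscription - walk every bit set in sub->type,
+ * translate it to an ISP event id and (un)subscribe the matching events.
+ */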
+static inline int msm_isp_process_event_subscription(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub, bool subscribe_flag)
+{
+ int rc = 0, evt_mask_index = 0;
+ u32 evt_mask = sub->type;
+ u32 evt_id = 0;
+
+ if (evt_mask == ISP_EVENT_SUBS_MASK_NONE) {
+ pr_err("%s: Subs event_type is None=0x%x\n",
+ __func__, evt_mask);
+ return 0;
+ }
+
+ for (evt_mask_index = ISP_EVENT_MASK_INDEX_STATS_NOTIFY;
+ evt_mask_index <= ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR;
+ evt_mask_index++) {
+ if (evt_mask & (1<<evt_mask_index)) {
+ evt_id = msm_isp_evt_mask_to_isp_event(evt_mask_index);
+ rc = msm_isp_subscribe_event_mask(fh, sub,
+ evt_mask_index, evt_id, subscribe_flag);
+ if (rc != 0) {
+ pr_err("%s: Subs event index:%d failed\n",
+ __func__, evt_mask_index);
+ return rc;
+ }
+ }
+ }
+ return rc;
+}
+
+int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return msm_isp_process_event_subscription(fh, sub, true);
+}
+
+int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return msm_isp_process_event_subscription(fh, sub, false);
+}
+
+static int msm_isp_start_fetch_engine(struct vfe_device *vfe_dev,
+ void *arg)
+{
+ struct msm_vfe_fetch_eng_start *fe_cfg = arg;
+ /*
+ * For offline VFE, the HAL expects the offline output to carry the
+ * same frame id it requested in do_reprocess.
+ */
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
+ fe_cfg->frame_id;
+ return vfe_dev->hw_info->vfe_ops.core_ops.
+ start_fetch_eng(vfe_dev, arg);
+}
+
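+/*
+ * For the second offline pass this resets the hardware, reprograms the
+ * framedrop pattern and ping-pong addresses for the output stream,
+ * re-enables its write masters and reloads them before kicking off the
+ * fetch engine.
+ */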
+static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
+ void *arg)
+{
+ struct msm_vfe_fetch_eng_multi_pass_start *fe_cfg = arg;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ int i = 0, rc;
+ uint32_t wm_reload_mask = 0;
+ /*
+ * For offline VFE, the HAL expects the offline output to carry the
+ * same frame id it requested in do_reprocess.
+ */
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
+ fe_cfg->frame_id;
+
+ if (fe_cfg->offline_pass == OFFLINE_SECOND_PASS) {
+ stream_info = &vfe_dev->axi_data.stream_info[
+ HANDLE_TO_IDX(fe_cfg->output_stream_id)];
+ if (!stream_info) {
+ pr_err("%s: Couldn't find streamid 0x%X\n", __func__,
+ fe_cfg->output_stream_id);
+ return -EINVAL;
+ }
+ vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+ 0, 1);
+ msm_isp_reset_framedrop(vfe_dev, stream_info);
+
+ rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
+ VFE_PING_FLAG, fe_cfg->output_buf_idx);
+ if (rc < 0) {
+ pr_err("%s: Fetch engine config failed\n", __func__);
+ return -EINVAL;
+ }
+ for (i = 0; i < stream_info->num_planes; i++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ enable_wm(vfe_dev->vfe_base, stream_info->wm[i],
+ 1);
+ wm_reload_mask |= (1 << stream_info->wm[i]);
+ }
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
+ VFE_SRC_MAX);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base, wm_reload_mask);
+ }
+ return vfe_dev->hw_info->vfe_ops.core_ops.
+ start_fetch_eng_multi_pass(vfe_dev, arg);
+}
+
+void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
+ struct msm_vfe_fetch_engine_info *fetch_engine_info)
+{
+ struct msm_isp_event_data fe_rd_done_event;
+
+ memset(&fe_rd_done_event, 0, sizeof(struct msm_isp_event_data));
+ fe_rd_done_event.frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ fe_rd_done_event.u.fetch_done.session_id =
+ fetch_engine_info->session_id;
+ fe_rd_done_event.u.fetch_done.stream_id = fetch_engine_info->stream_id;
+ fe_rd_done_event.u.fetch_done.handle = fetch_engine_info->bufq_handle;
+ fe_rd_done_event.u.fetch_done.buf_idx = fetch_engine_info->buf_idx;
+ fe_rd_done_event.u.fetch_done.fd = fetch_engine_info->fd;
+ fe_rd_done_event.u.fetch_done.offline_mode =
+ fetch_engine_info->offline_mode;
+
+ ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
+ __func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
+ fetch_engine_info->is_busy = 0;
+ msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
+}
+
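+/*
+ * msm_isp_cfg_pix - configure the pixel interface: record the pixel clock,
+ * input mux and format, derive the line width from the CAMIF/testgen or
+ * fetch engine config, then program the input mux and issue a reg update.
+ */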
+static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
+ struct msm_vfe_input_cfg *input_cfg)
+{
+ int rc = 0;
+ struct msm_vfe_pix_cfg *pix_cfg = NULL;
+
+ pr_debug("%s: entry\n", __func__);
+
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+ pr_err("%s: pixel path is active\n", __func__);
+ return -EINVAL;
+ }
+
+ pix_cfg = &input_cfg->d.pix_cfg;
+
+ vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
+ input_cfg->input_pix_clk;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
+ input_cfg->d.pix_cfg.input_mux;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
+ input_cfg->d.pix_cfg.input_format;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].sof_counter_step = 1;
+
+ /*
+ * Copy pixel_clock back into input_pix_clk so that user space
+ * can use the rounded clock rate
+ */
+ input_cfg->input_pix_clk =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock;
+
+ ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
+ input_cfg->d.pix_cfg.input_mux, CAMIF,
+ input_cfg->d.pix_cfg.input_format);
+
+ if (input_cfg->d.pix_cfg.input_mux == CAMIF ||
+ input_cfg->d.pix_cfg.input_mux == TESTGEN) {
+ vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+ input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
+ if (input_cfg->d.pix_cfg.camif_cfg.subsample_cfg.
+ sof_counter_step > 0) {
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ sof_counter_step = input_cfg->d.pix_cfg.
+ camif_cfg.subsample_cfg.sof_counter_step;
+ }
+ } else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
+ vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+ input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
+ }
+ vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
+ vfe_dev, &input_cfg->d.pix_cfg);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_PIX_0);
+
+ pr_debug("%s: exit\n", __func__);
+
+ return rc;
+}
+
+static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
+ struct msm_vfe_input_cfg *input_cfg)
+{
+ int rc = 0;
+
+ if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
+ pr_err("%s: RAW%d path is active\n", __func__,
+ input_cfg->input_src - VFE_RAW_0);
+ return -EINVAL;
+ }
+
+ vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
+ input_cfg->input_pix_clk;
+ vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
+ vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
+ return rc;
+}
+
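+/*
+ * msm_isp_cfg_input - dispatch an input configuration to the pixel or RDI
+ * path and then program the VFE clock for the requested pixel clock.
+ */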
+int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ struct msm_vfe_input_cfg *input_cfg = arg;
+ long pixel_clock = 0;
+
+ switch (input_cfg->input_src) {
+ case VFE_PIX_0:
+ rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
+ break;
+ case VFE_RAW_0:
+ case VFE_RAW_1:
+ case VFE_RAW_2:
+ rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
+ break;
+ default:
+ pr_err("%s: Invalid input source\n", __func__);
+ rc = -EINVAL;
+ }
+
+ pixel_clock = input_cfg->input_pix_clk;
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
+ &pixel_clock);
+ if (rc < 0) {
+ pr_err("%s: clock set rate failed\n", __func__);
+ return rc;
+ }
+ return rc;
+}
+
+int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ struct msm_vfe_camif_cfg *camif_cfg = arg;
+ struct msm_vfe_input_cfg input_cfg;
+ long pixel_clock = 0;
+
+ pr_debug("%s: entry\n", __func__);
+
+ memset(&input_cfg, 0, sizeof(input_cfg));
+
+ input_cfg.input_src = VFE_PIX_0;
+ input_cfg.input_pix_clk = 320000000;
+ input_cfg.d.pix_cfg.camif_cfg = *camif_cfg;
+
+ /* populate values from operation cfg */
+ input_cfg.d.pix_cfg.input_mux =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux;
+ input_cfg.d.pix_cfg.camif_cfg.camif_input =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].camif_input;
+
+ rc = msm_isp_cfg_pix(vfe_dev, &input_cfg);
+
+ pixel_clock = input_cfg.input_pix_clk;
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
+ &pixel_clock);
+ if (rc < 0) {
+ pr_err("%s: clock set rate failed\n", __func__);
+ return rc;
+ }
+
+ pr_debug("%s: exit\n", __func__);
+
+ return rc;
+}
+
+int msm_isp_operation_cfg(struct vfe_device *vfe_dev, void *arg)
+{
+ struct msm_vfe_operation_cfg *op_cfg = arg;
+
+ pr_debug("%s: entry\n", __func__);
+
+ vfe_dev->hvx_cmd = op_cfg->hvx_cmd;
+ vfe_dev->is_split = 0; /* default to false */
+
+ /* yuv_cosited currently not used */
+ /* pixel input select not used */
+
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
+ op_cfg->input_mux;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_pattern =
+ op_cfg->pixel_pattern;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].camif_input = op_cfg->camif_input;
+
+ pr_debug("%s: exit\n", __func__);
+
+ return 0;
+}
+
+static int msm_isp_set_dual_HW_master_slave_mode(
+ struct vfe_device *vfe_dev, void *arg)
+{
+ /*
+ * This function assumes no two processes access it simultaneously;
+ * currently this is guaranteed by the mutex taken in the ioctl path.
+ * Revisit this if that changes.
+ */
+ int rc = 0, i, j;
+ struct msm_isp_set_dual_hw_ms_cmd *dual_hw_ms_cmd = NULL;
+ struct msm_vfe_src_info *src_info = NULL;
+ unsigned long flags;
+
+ if (!vfe_dev || !arg) {
+ pr_err("%s: Error! Invalid input vfe_dev %pK arg %pK\n",
+ __func__, vfe_dev, arg);
+ return -EINVAL;
+ }
+
+ dual_hw_ms_cmd = (struct msm_isp_set_dual_hw_ms_cmd *)arg;
+ vfe_dev->common_data->ms_resource.dual_hw_type = DUAL_HW_MASTER_SLAVE;
+ vfe_dev->vfe_ub_policy = MSM_WM_UB_EQUAL_SLICING;
+ if (dual_hw_ms_cmd->primary_intf < VFE_SRC_MAX) {
+ ISP_DBG("%s: vfe %d primary_intf %d\n", __func__,
+ vfe_dev->pdev->id, dual_hw_ms_cmd->primary_intf);
+ src_info = &vfe_dev->axi_data.
+ src_info[dual_hw_ms_cmd->primary_intf];
+ src_info->dual_hw_ms_info.dual_hw_ms_type =
+ dual_hw_ms_cmd->dual_hw_ms_type;
+ }
+
+ /* No lock needed here; the ioctl lock protects two sessions from racing */
+ if (src_info != NULL &&
+ dual_hw_ms_cmd->dual_hw_ms_type == MS_TYPE_MASTER) {
+ src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
+ ISP_DBG("%s: vfe %d Master\n", __func__, vfe_dev->pdev->id);
+
+ src_info->dual_hw_ms_info.sof_info =
+ &vfe_dev->common_data->ms_resource.master_sof_info;
+ vfe_dev->common_data->ms_resource.sof_delta_threshold =
+ dual_hw_ms_cmd->sof_delta_threshold;
+ } else if (src_info != NULL) {
+ spin_lock_irqsave(
+ &vfe_dev->common_data->common_dev_data_lock,
+ flags);
+ src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
+ ISP_DBG("%s: vfe %d Slave\n", __func__, vfe_dev->pdev->id);
+
+ for (j = 0; j < MS_NUM_SLAVE_MAX; j++) {
+ if (vfe_dev->common_data->ms_resource.
+ reserved_slave_mask & (1 << j))
+ continue;
+
+ vfe_dev->common_data->ms_resource.reserved_slave_mask |=
+ (1 << j);
+ vfe_dev->common_data->ms_resource.num_slave++;
+ src_info->dual_hw_ms_info.sof_info =
+ &vfe_dev->common_data->ms_resource.
+ slave_sof_info[j];
+ src_info->dual_hw_ms_info.slave_id = j;
+ ISP_DBG("%s: Slave id %d\n", __func__, j);
+ break;
+ }
+ spin_unlock_irqrestore(
+ &vfe_dev->common_data->common_dev_data_lock,
+ flags);
+
+ if (j == MS_NUM_SLAVE_MAX) {
+ pr_err("%s: Error! Cannot find free aux resource\n",
+ __func__);
+ return -EBUSY;
+ }
+ }
+ ISP_DBG("%s: vfe %d num_src %d\n", __func__, vfe_dev->pdev->id,
+ dual_hw_ms_cmd->num_src);
+ if (dual_hw_ms_cmd->num_src > VFE_SRC_MAX) {
+ pr_err("%s: Error! Invalid num_src %d\n", __func__,
+ dual_hw_ms_cmd->num_src);
+ return -EINVAL;
+ }
+ /* Mark the non-primary interfaces as Master/Slave so their frame ids
+ * stay in sync. Their timestamps are not saved, so no sof_info
+ * resource is allocated.
+ */
+ for (i = 0; i < dual_hw_ms_cmd->num_src; i++) {
+ if (dual_hw_ms_cmd->input_src[i] >= VFE_SRC_MAX) {
+ pr_err("%s: Error! Invalid SRC param %d\n", __func__,
+ dual_hw_ms_cmd->input_src[i]);
+ return -EINVAL;
+ }
+ ISP_DBG("%s: vfe %d src %d type %d\n", __func__,
+ vfe_dev->pdev->id, dual_hw_ms_cmd->input_src[i],
+ dual_hw_ms_cmd->dual_hw_ms_type);
+ src_info = &vfe_dev->axi_data.
+ src_info[dual_hw_ms_cmd->input_src[i]];
+ src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
+ src_info->dual_hw_ms_info.dual_hw_ms_type =
+ dual_hw_ms_cmd->dual_hw_ms_type;
+ }
+
+ return rc;
+}
+
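+/*
+ * msm_isp_proc_cmd_list_unlocked - process a user-space linked list of
+ * register config commands; each node is copied from user space and the
+ * walk is bounded by MAX_ISP_REG_LIST and the expected node size.
+ */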
+static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ uint32_t count = 0;
+ struct msm_vfe_cfg_cmd_list *proc_cmd =
+ (struct msm_vfe_cfg_cmd_list *)arg;
+ struct msm_vfe_cfg_cmd_list cmd, cmd_next;
+
+ if (!vfe_dev || !arg) {
+ pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
+ vfe_dev, arg);
+ return -EINVAL;
+ }
+
+ rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
+ if (rc < 0)
+ pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+ cmd = *proc_cmd;
+
+ while (cmd.next) {
+ if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
+ pr_err("%s:%d failed: next size %u != expected %zu\n",
+ __func__, __LINE__, cmd.next_size,
+ sizeof(struct msm_vfe_cfg_cmd_list));
+ break;
+ }
+ if (++count >= MAX_ISP_REG_LIST) {
+ pr_err("%s:%d Error exceeding the max register count:%u\n",
+ __func__, __LINE__, count);
+ rc = -EFAULT;
+ break;
+ }
+ if (copy_from_user(&cmd_next, (void __user *)cmd.next,
+ sizeof(struct msm_vfe_cfg_cmd_list))) {
+ rc = -EFAULT;
+ continue;
+ }
+
+ rc = msm_isp_proc_cmd(vfe_dev, &cmd_next.cfg_cmd);
+ if (rc < 0)
+ pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+ cmd = cmd_next;
+ }
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_vfe_cfg_cmd2_32 {
+ uint16_t num_cfg;
+ uint16_t cmd_len;
+ compat_caddr_t cfg_data;
+ compat_caddr_t cfg_cmd;
+};
+
+struct msm_vfe_cfg_cmd_list_32 {
+ struct msm_vfe_cfg_cmd2_32 cfg_cmd;
+ compat_caddr_t next;
+ uint32_t next_size;
+};
+
+#define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
+#define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
+
+static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
+ struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
+{
+ proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
+ proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
+ proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
+ proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
+}
+
+static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ uint32_t count = 0;
+ struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
+ (struct msm_vfe_cfg_cmd_list_32 *)arg;
+ struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
+ struct msm_vfe_cfg_cmd2 current_cmd;
+
+ if (!vfe_dev || !arg) {
+ pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
+ vfe_dev, arg);
+ return -EINVAL;
+ }
+ msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
+ rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
+ if (rc < 0)
+ pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+ cmd = *proc_cmd;
+
+ while (compat_ptr(cmd.next) != NULL) {
+ if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
+ pr_err("%s:%d failed: next size %u != expected %zu\n",
+ __func__, __LINE__, cmd.next_size,
+ sizeof(struct msm_vfe_cfg_cmd_list_32));
+ break;
+ }
+ if (++count >= MAX_ISP_REG_LIST) {
+ pr_err("%s:%d Error exceeding the max register count:%u\n",
+ __func__, __LINE__, count);
+ rc = -EFAULT;
+ break;
+ }
+ if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
+ sizeof(struct msm_vfe_cfg_cmd_list_32))) {
+ rc = -EFAULT;
+ continue;
+ }
+
+ msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
+ rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
+ if (rc < 0)
+ pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+ cmd = cmd_next;
+ }
+ return rc;
+}
+
+static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
+{
+ if (is_compat_task())
+ return msm_isp_proc_cmd_list_compat(vfe_dev, arg);
+ else
+ return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
+}
+#else /* CONFIG_COMPAT */
+static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
+{
+ return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ long rc = 0;
+ long rc2 = 0;
+ struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+
+ if (!vfe_dev || !vfe_dev->vfe_base) {
+ pr_err("%s:%d failed: invalid params %pK\n",
+ __func__, __LINE__, vfe_dev);
+ if (vfe_dev)
+ pr_err("%s:%d failed %pK\n", __func__,
+ __LINE__, vfe_dev->vfe_base);
+ return -EINVAL;
+ }
+
+ /*
+ * Use the realtime mutex for hard real-time ioctls such as
+ * buffer operations and register updates.
+ * Use the core mutex for other ioctls that can take longer to
+ * complete, such as starting/stopping ISP streams, which block
+ * until the hardware starts/stops streaming.
+ */
+ ISP_DBG("%s: cmd: %d\n", __func__, _IOC_TYPE(cmd));
+ switch (cmd) {
+ case VIDIOC_MSM_VFE_REG_CFG: {
+ mutex_lock(&vfe_dev->realtime_mutex);
+ rc = msm_isp_proc_cmd(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ break;
+ }
+ case VIDIOC_MSM_VFE_REG_LIST_CFG: {
+ mutex_lock(&vfe_dev->realtime_mutex);
+ rc = msm_isp_proc_cmd_list(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ break;
+ }
+ case VIDIOC_MSM_ISP_REQUEST_BUFQ:
+ /* fallthrough */
+ case VIDIOC_MSM_ISP_ENQUEUE_BUF:
+ /* fallthrough */
+ case VIDIOC_MSM_ISP_DEQUEUE_BUF:
+ /* fallthrough */
+ case VIDIOC_MSM_ISP_UNMAP_BUF: {
+ mutex_lock(&vfe_dev->buf_mgr->lock);
+ rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
+ break;
+ }
+ case VIDIOC_MSM_ISP_RELEASE_BUFQ: {
+ if (vfe_dev->buf_mgr == NULL) {
+ pr_err("%s: buf mgr NULL! rc = -1\n", __func__);
+ rc = -EINVAL;
+ return rc;
+ }
+ mutex_lock(&vfe_dev->buf_mgr->lock);
+ rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
+ break;
+ }
+ case VIDIOC_MSM_ISP_REQUEST_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_request_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_RELEASE_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_release_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_CFG_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_CFG_HW_STATE:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_update_stream_bandwidth(vfe_dev,
+ *(enum msm_vfe_hw_state *)arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AXI_HALT:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_axi_halt(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AXI_RESET:
+ mutex_lock(&vfe_dev->core_mutex);
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ != HALT_ENFORCED) {
+ rc = msm_isp_stats_reset(vfe_dev);
+ rc2 = msm_isp_axi_reset(vfe_dev, arg);
+ if (!rc && rc2)
+ rc = rc2;
+ } else {
+ pr_err_ratelimited("%s: no HW reset, halt enforced.\n",
+ __func__);
+ }
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AXI_RESTART:
+ mutex_lock(&vfe_dev->core_mutex);
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ != HALT_ENFORCED) {
+ rc = msm_isp_stats_restart(vfe_dev);
+ rc2 = msm_isp_axi_restart(vfe_dev, arg);
+ if (!rc && rc2)
+ rc = rc2;
+ } else {
+ pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
+ __func__);
+ }
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_INPUT_CFG:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_cfg_input(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AHB_CLK_CFG:
+ mutex_lock(&vfe_dev->core_mutex);
+ if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.
+ ahb_clk_cfg(vfe_dev, arg);
+ else
+ rc = -EOPNOTSUPP;
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_set_dual_HW_master_slave_mode(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_FETCH_ENG_START:
+ case VIDIOC_MSM_ISP_MAP_BUF_START_FE:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_start_fetch_engine(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+
+ case VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START:
+ case VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_start_fetch_engine_multi_pass(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
+ if (arg) {
+ enum msm_vfe_input_src frame_src =
+ *((enum msm_vfe_input_src *)arg);
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, frame_src);
+ }
+ break;
+ case VIDIOC_MSM_ISP_SET_SRC_STATE:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_set_src_state(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_request_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_release_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_update_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_UPDATE_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_update_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_SMMU_ATTACH:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_OPERATION_CFG:
+ mutex_lock(&vfe_dev->core_mutex);
+ msm_isp_operation_cfg(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AXI_OUTPUT_CFG:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_axi_output_cfg(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_CAMIF_CFG:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_camif_cfg(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ vfe_dev->isp_sof_debug = 0;
+ vfe_dev->isp_raw0_debug = 0;
+ vfe_dev->isp_raw1_debug = 0;
+ vfe_dev->isp_raw2_debug = 0;
+ break;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ break;
+ case MSM_SD_SHUTDOWN:
+ while (vfe_dev->vfe_open_cnt != 0)
+ msm_isp_close_node(sd, NULL);
+ break;
+
+ default:
+ pr_err_ratelimited("%s: Invalid ISP command %d\n", __func__,
+ cmd);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+ long rc = 0;
+
+ if (!vfe_dev || !vfe_dev->vfe_base) {
+ pr_err("%s:%d failed: invalid params %pK\n",
+ __func__, __LINE__, vfe_dev);
+ if (vfe_dev)
+ pr_err("%s:%d failed %pK\n", __func__,
+ __LINE__, vfe_dev->vfe_base);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
+ struct msm_vfe_cfg_cmd2 proc_cmd;
+
+ mutex_lock(&vfe_dev->realtime_mutex);
+ msm_isp_compat_to_proc_cmd(&proc_cmd,
+ (struct msm_vfe_cfg_cmd2_32 *) arg);
+ rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ break;
+ }
+ case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
+ mutex_lock(&vfe_dev->realtime_mutex);
+ rc = msm_isp_proc_cmd_list(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ break;
+ }
+ default:
+ return msm_isp_ioctl_unlocked(sd, cmd, arg);
+ }
+
+ return rc;
+}
+
+long msm_isp_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ return msm_isp_ioctl_compat(sd, cmd, arg);
+}
+#else /* CONFIG_COMPAT */
+long msm_isp_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ return msm_isp_ioctl_unlocked(sd, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
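+/*
+ * msm_isp_send_hw_cmd - validate and execute one register config command:
+ * offsets and lengths are range-checked against the VFE register space and
+ * the user buffer before any register, mask or DMI read/write is done.
+ */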
+static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
+ struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
+ uint32_t *cfg_data, uint32_t cmd_len)
+{
+ if (!vfe_dev || !reg_cfg_cmd) {
+ pr_err("%s:%d failed: vfe_dev %pK reg_cfg_cmd %pK\n", __func__,
+ __LINE__, vfe_dev, reg_cfg_cmd);
+ return -EINVAL;
+ }
+ if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
+ (!cfg_data || !cmd_len)) {
+ pr_err("%s:%d failed: cmd type %d cfg_data %pK cmd_len %d\n",
+ __func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
+ cmd_len);
+ return -EINVAL;
+ }
+
+ /* Validate input parameters */
+ switch (reg_cfg_cmd->cmd_type) {
+ case VFE_WRITE:
+ case VFE_READ:
+ case VFE_WRITE_MB: {
+ if ((reg_cfg_cmd->u.rw_info.reg_offset >
+ (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
+ ((reg_cfg_cmd->u.rw_info.reg_offset +
+ reg_cfg_cmd->u.rw_info.len) >
+ vfe_dev->vfe_base_size) ||
+ (reg_cfg_cmd->u.rw_info.reg_offset & 0x3)) {
+ pr_err_ratelimited("%s:%d regoffset %d len %d res %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.rw_info.reg_offset,
+ reg_cfg_cmd->u.rw_info.len,
+ (uint32_t)vfe_dev->vfe_base_size);
+ return -EINVAL;
+ }
+
+ if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
+ (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
+ ((reg_cfg_cmd->u.rw_info.cmd_data_offset +
+ reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
+ pr_err_ratelimited("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.rw_info.cmd_data_offset,
+ reg_cfg_cmd->u.rw_info.len, cmd_len);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ case VFE_WRITE_DMI_16BIT:
+ case VFE_WRITE_DMI_32BIT:
+ case VFE_WRITE_DMI_64BIT:
+ case VFE_READ_DMI_16BIT:
+ case VFE_READ_DMI_32BIT:
+ case VFE_READ_DMI_64BIT: {
+ if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT ||
+ reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+ if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
+ (reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
+ (sizeof(uint32_t)))) {
+ pr_err("%s:%d hi %d lo %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset);
+ return -EINVAL;
+ }
+ if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
+ pr_err("%s:%d len %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.dmi_info.len);
+ return -EINVAL;
+ }
+ if (((UINT_MAX -
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
+ (reg_cfg_cmd->u.dmi_info.len -
+ sizeof(uint32_t))) ||
+ ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
+ reg_cfg_cmd->u.dmi_info.len -
+ sizeof(uint32_t)) > cmd_len)) {
+ pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
+ reg_cfg_cmd->u.dmi_info.len, cmd_len);
+ return -EINVAL;
+ }
+ }
+ if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
+ (UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
+ ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
+ reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
+ pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
+ reg_cfg_cmd->u.dmi_info.len, cmd_len);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ switch (reg_cfg_cmd->cmd_type) {
+ case VFE_WRITE: {
+ msm_camera_io_memcpy(vfe_dev->vfe_base +
+ reg_cfg_cmd->u.rw_info.reg_offset,
+ cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+ reg_cfg_cmd->u.rw_info.len);
+ break;
+ }
+ case VFE_WRITE_MB: {
+ msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
+ reg_cfg_cmd->u.rw_info.reg_offset,
+ cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+ reg_cfg_cmd->u.rw_info.len);
+ break;
+ }
+ case VFE_CFG_MASK: {
+ uint32_t temp;
+ bool grab_lock;
+ unsigned long flags;
+
+ if ((UINT_MAX - sizeof(temp) <
+ reg_cfg_cmd->u.mask_info.reg_offset) ||
+ (vfe_dev->vfe_base_size <
+ reg_cfg_cmd->u.mask_info.reg_offset +
+ sizeof(temp)) ||
+ (reg_cfg_cmd->u.mask_info.reg_offset & 0x3)) {
+ pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
+ return -EINVAL;
+ }
+ grab_lock = vfe_dev->hw_info->vfe_ops.core_ops.
+ is_module_cfg_lock_needed(reg_cfg_cmd->
+ u.mask_info.reg_offset);
+ if (grab_lock)
+ spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+ temp = msm_camera_io_r(vfe_dev->vfe_base +
+ reg_cfg_cmd->u.mask_info.reg_offset);
+
+ temp &= ~reg_cfg_cmd->u.mask_info.mask;
+ temp |= reg_cfg_cmd->u.mask_info.val;
+ msm_camera_io_w(temp, vfe_dev->vfe_base +
+ reg_cfg_cmd->u.mask_info.reg_offset);
+ if (grab_lock)
+ spin_unlock_irqrestore(&vfe_dev->shared_data_lock,
+ flags);
+ break;
+ }
+ case VFE_WRITE_DMI_16BIT:
+ case VFE_WRITE_DMI_32BIT:
+ case VFE_WRITE_DMI_64BIT: {
+ int i;
+ uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
+ uint32_t hi_val, lo_val, lo_val1;
+
+ if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
+ hi_tbl_ptr = cfg_data +
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
+ }
+ lo_tbl_ptr = cfg_data +
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
+ if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
+ reg_cfg_cmd->u.dmi_info.len =
+ reg_cfg_cmd->u.dmi_info.len / 2;
+ for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
+ lo_val = *lo_tbl_ptr++;
+ if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
+ lo_val1 = lo_val & 0x0000FFFF;
+ lo_val = (lo_val & 0xFFFF0000)>>16;
+ msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset + 0x4);
+ } else if (reg_cfg_cmd->cmd_type ==
+ VFE_WRITE_DMI_64BIT) {
+ lo_tbl_ptr++;
+ hi_val = *hi_tbl_ptr;
+ hi_tbl_ptr = hi_tbl_ptr + 2;
+ msm_camera_io_w(hi_val, vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset);
+ }
+ msm_camera_io_w(lo_val, vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset + 0x4);
+ }
+ break;
+ }
+ case VFE_READ_DMI_16BIT:
+ case VFE_READ_DMI_32BIT:
+ case VFE_READ_DMI_64BIT: {
+ int i;
+ uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
+ uint32_t hi_val, lo_val, lo_val1;
+
+ if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+ hi_tbl_ptr = cfg_data +
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
+ }
+
+ lo_tbl_ptr = cfg_data +
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
+
+ if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
+ reg_cfg_cmd->u.dmi_info.len =
+ reg_cfg_cmd->u.dmi_info.len / 2;
+
+ for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
+ lo_val = msm_camera_io_r(vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset + 0x4);
+
+ if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
+ lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset + 0x4);
+ lo_val |= lo_val1 << 16;
+ }
+ *lo_tbl_ptr++ = lo_val;
+ if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+ hi_val = msm_camera_io_r(vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset);
+ *hi_tbl_ptr = hi_val;
+ hi_tbl_ptr += 2;
+ lo_tbl_ptr++;
+ }
+ }
+ break;
+ }
+ case VFE_HW_UPDATE_LOCK: {
+ uint32_t update_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id != *cfg_data
+ || update_id == *cfg_data) {
+ pr_err("%s hw update lock failed acq %d, cur id %u, last id %u\n",
+ __func__,
+ *cfg_data,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+ update_id);
+ return -EINVAL;
+ }
+ break;
+ }
+ case VFE_HW_UPDATE_UNLOCK: {
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
+ != *cfg_data) {
+ pr_err("hw update across frame boundary,begin id %u, end id %d\n",
+ *cfg_data,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+ }
+ vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ break;
+ }
+ case VFE_READ: {
+ int i;
+ uint32_t *data_ptr = cfg_data +
+ reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
+ for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
+ if ((data_ptr < cfg_data) ||
+ (UINT_MAX / sizeof(*data_ptr) <
+ (data_ptr - cfg_data)) ||
+ (sizeof(*data_ptr) * (data_ptr - cfg_data) >=
+ cmd_len))
+ return -EINVAL;
+ *data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
+ reg_cfg_cmd->u.rw_info.reg_offset);
+ reg_cfg_cmd->u.rw_info.reg_offset += 4;
+ }
+ break;
+ }
+ case GET_MAX_CLK_RATE: {
+ int rc = 0;
+ unsigned long rate;
+
+ if (cmd_len != sizeof(__u32)) {
+ pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+ __func__, __LINE__, cmd_len,
+ sizeof(__u32));
+ return -EINVAL;
+ }
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_max_clk_rate(
+ vfe_dev, &rate);
+ if (rc < 0) {
+ pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
+ return -EINVAL;
+ }
+
+ *(__u32 *)cfg_data = (__u32)rate;
+
+ break;
+ }
+ case GET_CLK_RATES: {
+ int rc = 0;
+ struct msm_isp_clk_rates rates;
+ struct msm_isp_clk_rates *user_data =
+ (struct msm_isp_clk_rates *)cfg_data;
+ if (cmd_len != sizeof(struct msm_isp_clk_rates)) {
+ pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+ __func__, __LINE__, cmd_len,
+ sizeof(struct msm_isp_clk_rates));
+ return -EINVAL;
+ }
+ rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(
+ vfe_dev, &rates);
+ if (rc < 0) {
+ pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
+ return -EINVAL;
+ }
+ user_data->svs_rate = rates.svs_rate;
+ user_data->nominal_rate = rates.nominal_rate;
+ user_data->high_rate = rates.high_rate;
+ break;
+ }
+ case GET_ISP_ID: {
+ uint32_t *isp_id = NULL;
+
+ if (cmd_len < sizeof(uint32_t)) {
+ pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+ __func__, __LINE__, cmd_len,
+ sizeof(uint32_t));
+ return -EINVAL;
+ }
+
+ isp_id = (uint32_t *)cfg_data;
+ *isp_id = vfe_dev->pdev->id;
+ break;
+ }
+ case SET_WM_UB_SIZE:
+ break;
+ case SET_UB_POLICY: {
+ if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
+ pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+ __func__, __LINE__, cmd_len,
+ sizeof(vfe_dev->vfe_ub_policy));
+ return -EINVAL;
+ }
+ vfe_dev->vfe_ub_policy = *cfg_data;
+ break;
+ }
+ }
+ return 0;
+}
+
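+/*
+ * msm_isp_proc_cmd - copy the command array and config data from user
+ * space, run each command through msm_isp_send_hw_cmd() and copy the
+ * (possibly updated) config data back to user space.
+ */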
+int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i;
+ struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
+ struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
+ uint32_t *cfg_data = NULL;
+
+ if (!proc_cmd->num_cfg
+ || proc_cmd->num_cfg > MAX_ISP_CMD_NUM) {
+ pr_err("%s: num_cfg outside allowed range\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
+ proc_cmd->num_cfg, GFP_KERNEL);
+ if (!reg_cfg_cmd) {
+ rc = -ENOMEM;
+ goto reg_cfg_failed;
+ }
+
+ if (copy_from_user(reg_cfg_cmd,
+ (void __user *)(proc_cmd->cfg_cmd),
+ sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
+ rc = -EFAULT;
+ goto copy_cmd_failed;
+ }
+
+ if (proc_cmd->cmd_len > 0) {
+ if (proc_cmd->cmd_len > MAX_ISP_CMD_LEN) {
+ pr_err("%s: cmd_len exceed max allowed\n", __func__);
+ rc = -EINVAL;
+ goto cfg_data_failed;
+ }
+
+ cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
+ if (!cfg_data) {
+ rc = -ENOMEM;
+ goto cfg_data_failed;
+ }
+
+ if (copy_from_user(cfg_data,
+ (void __user *)(proc_cmd->cfg_data),
+ proc_cmd->cmd_len)) {
+ rc = -EFAULT;
+ goto copy_cmd_failed;
+ }
+ }
+
+ for (i = 0; i < proc_cmd->num_cfg; i++)
+ rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
+ cfg_data, proc_cmd->cmd_len);
+
+ if (copy_to_user(proc_cmd->cfg_data,
+ cfg_data, proc_cmd->cmd_len)) {
+ rc = -EFAULT;
+ goto copy_cmd_failed;
+ }
+
+copy_cmd_failed:
+ kfree(cfg_data);
+cfg_data_failed:
+ kfree(reg_cfg_cmd);
+reg_cfg_failed:
+ return rc;
+}
+
+int msm_isp_send_event(struct vfe_device *vfe_dev,
+ uint32_t event_type,
+ struct msm_isp_event_data *event_data)
+{
+ struct v4l2_event isp_event;
+
+ memset(&isp_event, 0, sizeof(struct v4l2_event));
+ isp_event.id = 0;
+ isp_event.type = event_type;
+
+ memcpy(&isp_event.u.data[0], event_data,
+ sizeof(struct msm_isp_event_data));
+ v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
+ return 0;
+}
+
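+/* CAL_WORD: ceil(width * M / N), words needed to pack a line at M/N density */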
+#define CAL_WORD(width, M, N) ((width * M + N - 1) / N)
+
+int msm_isp_cal_word_per_line(uint32_t output_format,
+ uint32_t pixel_per_line)
+{
+ int val = -1;
+
+ switch (output_format) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_META:
+ val = CAL_WORD(pixel_per_line, 1, 8);
+ break;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_SBGGR10DPCM6:
+ case V4L2_PIX_FMT_SGBRG10DPCM6:
+ case V4L2_PIX_FMT_SGRBG10DPCM6:
+ case V4L2_PIX_FMT_SRGGB10DPCM6:
+ case V4L2_PIX_FMT_SBGGR10DPCM8:
+ case V4L2_PIX_FMT_SGBRG10DPCM8:
+ case V4L2_PIX_FMT_SGRBG10DPCM8:
+ case V4L2_PIX_FMT_SRGGB10DPCM8:
+ case V4L2_PIX_FMT_META10:
+ val = CAL_WORD(pixel_per_line, 5, 32);
+ break;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_Y12:
+ val = CAL_WORD(pixel_per_line, 3, 16);
+ break;
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ val = CAL_WORD(pixel_per_line, 7, 32);
+ break;
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ val = CAL_WORD(pixel_per_line, 1, 6);
+ break;
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ val = CAL_WORD(pixel_per_line, 1, 5);
+ break;
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ val = CAL_WORD(pixel_per_line, 1, 4);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV41:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_GREY:
+ val = CAL_WORD(pixel_per_line, 1, 8);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ val = CAL_WORD(pixel_per_line, 2, 8);
+ break;
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ val = CAL_WORD(pixel_per_line, 1, 4);
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ val = CAL_WORD(pixel_per_line, 1, 8);
+ break;
+ /* TODO: Add more image formats */
+ default:
+ msm_isp_print_fourcc_error(__func__, output_format);
+ break;
+ }
+ return val;
+}
+
+enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
+{
+ switch (output_format) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR10DPCM6:
+ case V4L2_PIX_FMT_SGBRG10DPCM6:
+ case V4L2_PIX_FMT_SGRBG10DPCM6:
+ case V4L2_PIX_FMT_SRGGB10DPCM6:
+ case V4L2_PIX_FMT_SBGGR10DPCM8:
+ case V4L2_PIX_FMT_SGBRG10DPCM8:
+ case V4L2_PIX_FMT_SGRBG10DPCM8:
+ case V4L2_PIX_FMT_SRGGB10DPCM8:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
+ return MIPI;
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ return QCOM;
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ return PLAIN16;
+ default:
+ msm_isp_print_fourcc_error(__func__, output_format);
+ break;
+ }
+ return -EINVAL;
+}
+
+int msm_isp_get_bit_per_pixel(uint32_t output_format)
+{
+ switch (output_format) {
+ case V4L2_PIX_FMT_Y4:
+ return 4;
+ case V4L2_PIX_FMT_Y6:
+ return 6;
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_META:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV41:
+ case V4L2_PIX_FMT_YVU410:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YYUV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUV411P:
+ case V4L2_PIX_FMT_Y41P:
+ case V4L2_PIX_FMT_YUV444:
+ case V4L2_PIX_FMT_YUV555:
+ case V4L2_PIX_FMT_YUV565:
+ case V4L2_PIX_FMT_YUV32:
+ case V4L2_PIX_FMT_YUV410:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_PAL8:
+ case V4L2_PIX_FMT_UV8:
+ case MSM_V4L2_PIX_FMT_META:
+ return 8;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR10DPCM6:
+ case V4L2_PIX_FMT_SGBRG10DPCM6:
+ case V4L2_PIX_FMT_SGRBG10DPCM6:
+ case V4L2_PIX_FMT_SRGGB10DPCM6:
+ case V4L2_PIX_FMT_SBGGR10DPCM8:
+ case V4L2_PIX_FMT_SGBRG10DPCM8:
+ case V4L2_PIX_FMT_SGRBG10DPCM8:
+ case V4L2_PIX_FMT_SRGGB10DPCM8:
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y10BPACK:
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ case V4L2_PIX_FMT_META10:
+ case MSM_V4L2_PIX_FMT_META10:
+ return 10;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ case V4L2_PIX_FMT_Y12:
+ return 12;
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ return 14;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_Y16:
+ return 16;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ return 24;
+ /* TODO: Add more image formats */
+ default:
+ msm_isp_print_fourcc_error(__func__, output_format);
+ pr_err("%s: Invalid output format %x\n",
+ __func__, output_format);
+ return -EINVAL;
+ }
+}
+
+void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+
+ error_info->info_dump_frame_count++;
+}
+
+static int msm_isp_process_iommu_page_fault(struct vfe_device *vfe_dev)
+{
+ int rc = vfe_dev->buf_mgr->pagefault_debug_disable;
+
+ pr_err("%s:%d] VFE%d Handle Page fault! vfe_dev %pK\n", __func__,
+ __LINE__, vfe_dev->pdev->id, vfe_dev);
+
+ msm_isp_halt_send_error(vfe_dev, ISP_EVENT_IOMMU_P_FAULT);
+
+ if (vfe_dev->buf_mgr->pagefault_debug_disable == 0) {
+ vfe_dev->buf_mgr->pagefault_debug_disable = 1;
+ vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
+ vfe_dev->page_fault_addr);
+ msm_isp_print_ping_pong_address(vfe_dev,
+ vfe_dev->page_fault_addr);
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ read_wm_ping_pong_addr(vfe_dev);
+ }
+ return rc;
+}
+
+void msm_isp_process_error_info(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+
+ if (error_info->error_count == 1 ||
+ !(error_info->info_dump_frame_count % 100)) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ process_error_status(vfe_dev);
+ error_info->error_mask0 = 0;
+ error_info->error_mask1 = 0;
+ error_info->camif_status = 0;
+ error_info->violation_status = 0;
+ }
+}
+
+static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
+ uint32_t error_mask0, uint32_t error_mask1)
+{
+ vfe_dev->error_info.error_mask0 |= error_mask0;
+ vfe_dev->error_info.error_mask1 |= error_mask1;
+ vfe_dev->error_info.error_count++;
+}
+
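+/*
+ * msm_isp_process_overflow_irq - detect a bus overflow and start recovery:
+ * mask further irqs, save the current irq masks (for both VFEs in dual-VFE
+ * mode), clear the pending irq status and report ISP_ERROR_BUS_OVERFLOW
+ * to user space unless a halt is already enforced.
+ */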
+void msm_isp_process_overflow_irq(
+ struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1,
+ uint32_t force_overflow)
+{
+ uint32_t overflow_mask;
+
+ /* if there are no active streams - do not start recovery */
+ if (!vfe_dev->axi_data.num_active_stream)
+ return;
+
+ /* Mask out all other irqs if recovery is started */
+ if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
+ uint32_t halt_restart_mask0, halt_restart_mask1;
+
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ get_halt_restart_mask(&halt_restart_mask0,
+ &halt_restart_mask1);
+ *irq_status0 &= halt_restart_mask0;
+ *irq_status1 &= halt_restart_mask1;
+
+ return;
+ }
+
+ /* Check if any overflow bit is set */
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ get_overflow_mask(&overflow_mask);
+ if (!force_overflow)
+ overflow_mask &= *irq_status1;
+
+ if (overflow_mask) {
+ struct msm_isp_event_data error_event;
+
+ if (vfe_dev->reset_pending == 1) {
+ pr_err("%s:%d failed: overflow %x during reset\n",
+ __func__, __LINE__, overflow_mask);
+ /* Clear overflow bits since reset is pending */
+ *irq_status1 &= ~overflow_mask;
+ return;
+ }
+
+ ISP_DBG("%s: VFE%d Bus overflow detected: start recovery!\n",
+ __func__, vfe_dev->pdev->id);
+
+ /* mask off irqs for the current vfe */
+ atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
+ NO_OVERFLOW, OVERFLOW_DETECTED);
+ vfe_dev->recovery_irq0_mask = vfe_dev->irq0_mask;
+ vfe_dev->recovery_irq1_mask = vfe_dev->irq1_mask;
+
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ set_halt_restart_mask(vfe_dev);
+
+ /* mask off other vfe if dual vfe is used */
+ if (vfe_dev->is_split) {
+ uint32_t other_vfe_id;
+ struct vfe_device *other_vfe_dev;
+
+ other_vfe_id = (vfe_dev->pdev->id == ISP_VFE0) ?
+ ISP_VFE1 : ISP_VFE0;
+ other_vfe_dev = vfe_dev->common_data->
+ dual_vfe_res->vfe_dev[other_vfe_id];
+ if (other_vfe_dev) {
+ other_vfe_dev->recovery_irq0_mask =
+ other_vfe_dev->irq0_mask;
+ other_vfe_dev->recovery_irq1_mask =
+ other_vfe_dev->irq1_mask;
+ }
+
+ atomic_cmpxchg(&(vfe_dev->common_data->dual_vfe_res->
+ vfe_dev[other_vfe_id]->
+ error_info.overflow_state),
+ NO_OVERFLOW, OVERFLOW_DETECTED);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ set_halt_restart_mask(vfe_dev->common_data->
+ dual_vfe_res->vfe_dev[other_vfe_id]);
+ }
+
+ /* reset irq status to skip further processing */
+ *irq_status0 = 0;
+ *irq_status1 = 0;
+
+ /* send overflow event as needed */
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ != HALT_ENFORCED) {
+ memset(&error_event, 0, sizeof(error_event));
+ error_event.frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ error_event.u.error_info.err_type =
+ ISP_ERROR_BUS_OVERFLOW;
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_ERROR, &error_event);
+ }
+ }
+}
+
+void msm_isp_reset_burst_count_and_frame_drop(
+ struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
+{
+ if ((stream_info->state != ACTIVE) ||
+ (stream_info->stream_type != BURST_STREAM)) {
+ return;
+ }
+ if (stream_info->num_burst_capture != 0)
+ msm_isp_reset_framedrop(vfe_dev, stream_info);
+}
+
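+/*
+ * msm_isp_enqueue_tasklet_cmd - snapshot the irq and ping-pong status with
+ * a timestamp into the per-VFE tasklet queue and schedule the tasklet;
+ * on queue overflow the oldest entry is overwritten.
+ */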
+static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t ping_pong_status)
+{
+ unsigned long flags;
+ struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL;
+
+ spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+ queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
+ if (queue_cmd->cmd_used) {
+ ISP_DBG("%s: Tasklet queue overflow: %d\n",
+ __func__, vfe_dev->pdev->id);
+ list_del(&queue_cmd->list);
+ } else {
+ atomic_add(1, &vfe_dev->irq_cnt);
+ }
+ queue_cmd->vfeInterruptStatus0 = irq_status0;
+ queue_cmd->vfeInterruptStatus1 = irq_status1;
+ queue_cmd->vfePingPongStatus = ping_pong_status;
+ msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
+ queue_cmd->cmd_used = 1;
+ vfe_dev->taskletq_idx = (vfe_dev->taskletq_idx + 1) %
+ MSM_VFE_TASKLETQ_SIZE;
+ list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q);
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+ tasklet_schedule(&vfe_dev->vfe_tasklet);
+}
+
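+/*
+ * msm_isp_process_irq - top-half irq handler: read and clear the irq
+ * status, handle overflow and error masks, record dual-VFE debug state
+ * and defer the remaining work to the tasklet.
+ */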
+irqreturn_t msm_isp_process_irq(int irq_num, void *data)
+{
+ struct vfe_device *vfe_dev = (struct vfe_device *) data;
+ uint32_t irq_status0, irq_status1, ping_pong_status;
+ uint32_t error_mask0, error_mask1;
+
+ vfe_dev->hw_info->vfe_ops.irq_ops.
+ read_irq_status_and_clear(vfe_dev, &irq_status0, &irq_status1);
+
+ if ((irq_status0 == 0) && (irq_status1 == 0)) {
+ pr_err("%s:VFE%d irq_status0 & 1 are both 0\n",
+ __func__, vfe_dev->pdev->id);
+ return IRQ_HANDLED;
+ }
+
+ ping_pong_status = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_pingpong_status(vfe_dev);
+ if (vfe_dev->hw_info->vfe_ops.irq_ops.process_eof_irq) {
+ vfe_dev->hw_info->vfe_ops.irq_ops.process_eof_irq(vfe_dev,
+ irq_status0);
+ }
+ msm_isp_process_overflow_irq(vfe_dev,
+ &irq_status0, &irq_status1, 0);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ get_error_mask(&error_mask0, &error_mask1);
+ error_mask0 &= irq_status0;
+ error_mask1 &= irq_status1;
+ irq_status0 &= ~error_mask0;
+ irq_status1 &= ~error_mask1;
+ if ((error_mask0 != 0) || (error_mask1 != 0))
+ msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);
+
+ if ((irq_status0 == 0) && (irq_status1 == 0) &&
+ (!(((error_mask0 != 0) || (error_mask1 != 0)) &&
+ vfe_dev->error_info.error_count == 1))) {
+ ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
+ return IRQ_HANDLED;
+ }
+ dump_data.vfe_dev = (struct vfe_device *) data;
+ if (vfe_dev->is_split &&
+ (vfe_dev->common_data->dual_vfe_res->vfe_dev[
+ !vfe_dev->pdev->id]) &&
+ (vfe_dev->common_data->dual_vfe_res->vfe_dev[
+ !vfe_dev->pdev->id]->vfe_open_cnt)) {
+ spin_lock(&dump_irq_lock);
+ dump_data.arr[dump_data.first].current_vfe_irq.
+ vfe_id = vfe_dev->pdev->id;
+ dump_data.arr[dump_data.first].current_vfe_irq.
+ irq_status0 = irq_status0;
+ dump_data.arr[dump_data.first].current_vfe_irq.
+ irq_status1 = irq_status1;
+ dump_data.arr[dump_data.first].current_vfe_irq.
+ ping_pong_status = ping_pong_status;
+
+ dump_data.arr[dump_data.first].other_vfe.
+ vfe_id = (!vfe_dev->pdev->id);
+ vfe_dev->hw_info->vfe_ops.irq_ops.
+ read_irq_status(
+ vfe_dev->common_data->dual_vfe_res->vfe_dev[
+ !vfe_dev->pdev->id],
+ &dump_data.arr[dump_data.first].other_vfe.irq_status0,
+ &dump_data.arr[dump_data.first].other_vfe.irq_status1);
+ dump_data.arr[dump_data.first].other_vfe.
+ ping_pong_status =
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_pingpong_status(
+ vfe_dev->common_data->dual_vfe_res->vfe_dev[
+ !vfe_dev->pdev->id]);
+ msm_isp_get_timestamp(&dump_data.arr[dump_data.first].
+ other_vfe.ts, vfe_dev);
+ dump_data.first =
+ (dump_data.first + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
+ dump_data.fill_count++;
+ spin_unlock(&dump_irq_lock);
+ }
+ msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1,
+ ping_pong_status);
+
+ return IRQ_HANDLED;
+}
+
+void msm_isp_do_tasklet(unsigned long data)
+{
+ unsigned long flags;
+ struct vfe_device *vfe_dev = (struct vfe_device *) data;
+ struct msm_vfe_irq_ops *irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
+ struct msm_vfe_tasklet_queue_cmd *queue_cmd;
+ struct msm_isp_timestamp ts;
+ uint32_t irq_status0, irq_status1, pingpong_status;
+
+ if (vfe_dev->vfe_base == NULL || vfe_dev->vfe_open_cnt == 0) {
+ ISP_DBG("%s: VFE%d open cnt = %d, device closed(base = %pK)\n",
+ __func__, vfe_dev->pdev->id, vfe_dev->vfe_open_cnt,
+ vfe_dev->vfe_base);
+ return;
+ }
+
+ while (atomic_read(&vfe_dev->irq_cnt)) {
+ spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+ queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
+ struct msm_vfe_tasklet_queue_cmd, list);
+
+ if (!queue_cmd) {
+ atomic_set(&vfe_dev->irq_cnt, 0);
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+ return;
+ }
+ atomic_sub(1, &vfe_dev->irq_cnt);
+ list_del(&queue_cmd->list);
+ queue_cmd->cmd_used = 0;
+ irq_status0 = queue_cmd->vfeInterruptStatus0;
+ irq_status1 = queue_cmd->vfeInterruptStatus1;
+ pingpong_status = queue_cmd->vfePingPongStatus;
+ ts = queue_cmd->ts;
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+ ISP_DBG("%s: vfe_id %d status0: 0x%x status1: 0x%x\n",
+ __func__, vfe_dev->pdev->id, irq_status0, irq_status1);
+ if (vfe_dev->is_split) {
+ spin_lock(&dump_tasklet_lock);
+ tasklet_data.arr[tasklet_data.first].
+ current_vfe_irq.vfe_id = vfe_dev->pdev->id;
+ tasklet_data.arr[tasklet_data.first].
+ current_vfe_irq.core = smp_processor_id();
+ tasklet_data.arr[tasklet_data.first].
+ current_vfe_irq.irq_status0 = irq_status0;
+ tasklet_data.arr[tasklet_data.first].
+ current_vfe_irq.irq_status1 = irq_status1;
+ tasklet_data.arr[tasklet_data.first].
+ current_vfe_irq.ping_pong_status = pingpong_status;
+ tasklet_data.arr[tasklet_data.first].
+ current_vfe_irq.ts = ts;
+ tasklet_data.first =
+ (tasklet_data.first + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
+ spin_unlock(&dump_tasklet_lock);
+ }
+ irq_ops->process_reset_irq(vfe_dev,
+ irq_status0, irq_status1);
+ irq_ops->process_halt_irq(vfe_dev,
+ irq_status0, irq_status1);
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ != NO_OVERFLOW) {
+ ISP_DBG("%s: Recovery in processing, Ignore IRQs!!!\n",
+ __func__);
+ continue;
+ }
+ msm_isp_process_error_info(vfe_dev);
+ irq_ops->process_stats_irq(vfe_dev,
+ irq_status0, irq_status1,
+ pingpong_status, &ts);
+ irq_ops->process_axi_irq(vfe_dev,
+ irq_status0, irq_status1,
+ pingpong_status, &ts);
+ irq_ops->process_camif_irq(vfe_dev,
+ irq_status0, irq_status1, &ts);
+ irq_ops->process_reg_update(vfe_dev,
+ irq_status0, irq_status1, &ts);
+ irq_ops->process_epoch_irq(vfe_dev,
+ irq_status0, irq_status1, &ts);
+ }
+}
+
+int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
+{
+ struct msm_vfe_axi_src_state *src_state = arg;
+
+ if (src_state->input_src >= VFE_SRC_MAX)
+ return -EINVAL;
+ vfe_dev->axi_data.src_info[src_state->input_src].active =
+ src_state->src_active;
+ vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
+ src_state->src_frame_id;
+ return 0;
+}
+
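+/*
+ * msm_vfe_iommu_fault_handler - SMMU page fault callback for the VFE.
+ *
+ * Records the faulting IOVA and, if the VFE is open with active streams,
+ * forces the overflow state to HALT_ENFORCED and lets
+ * msm_isp_process_iommu_page_fault() deal with the fault.
+ */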
+static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+{
+ struct vfe_device *vfe_dev = NULL;
+
+ if (token) {
+ vfe_dev = (struct vfe_device *)token;
+ vfe_dev->page_fault_addr = iova;
+ if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops ||
+ !vfe_dev->axi_data.num_active_stream) {
+ pr_err("%s:%d buf_mgr %pK active strms %d\n", __func__,
+ __LINE__, vfe_dev->buf_mgr,
+ vfe_dev->axi_data.num_active_stream);
+ goto end;
+ }
+
+ mutex_lock(&vfe_dev->core_mutex);
+ if (vfe_dev->vfe_open_cnt > 0) {
+ atomic_set(&vfe_dev->error_info.overflow_state,
+ HALT_ENFORCED);
+ msm_isp_process_iommu_page_fault(vfe_dev);
+ } else {
+ pr_err("%s: no handling, vfe open cnt = %d\n",
+ __func__, vfe_dev->vfe_open_cnt);
+ }
+ mutex_unlock(&vfe_dev->core_mutex);
+ } else {
+ ISP_DBG("%s:%d] no token received: %pK\n",
+ __func__, __LINE__, token);
+ goto end;
+ }
+end:
+ return;
+}
+
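+/*
+ * msm_isp_open_node - subdev open handler for a VFE node.
+ *
+ * Only the first open powers up and resets the hardware, clears the
+ * shared AXI/stats/error state and registers the SMMU page fault
+ * handler; later opens just increment vfe_open_cnt.
+ */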
+int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+ long rc = 0;
+
+ ISP_DBG("%s open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
+
+ if (vfe_dev->common_data == NULL ||
+ vfe_dev->common_data->dual_vfe_res == NULL) {
+ pr_err("%s: Error in probe. No common_data or dual vfe res\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (vfe_dev->pdev->id == ISP_VFE0)
+ vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;
+
+ mutex_lock(&vfe_dev->realtime_mutex);
+ mutex_lock(&vfe_dev->core_mutex);
+
+ if (vfe_dev->vfe_open_cnt++) {
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return 0;
+ }
+
+ vfe_dev->reset_pending = 0;
+ vfe_dev->isp_sof_debug = 0;
+ vfe_dev->isp_raw0_debug = 0;
+ vfe_dev->isp_raw1_debug = 0;
+ vfe_dev->isp_raw2_debug = 0;
+
+ if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
+ pr_err("%s: init hardware failed\n", __func__);
+ vfe_dev->vfe_open_cnt--;
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return -EBUSY;
+ }
+
+ memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+ atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);
+
+ vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
+ ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
+ if (rc <= 0) {
+ pr_err("%s: reset timeout\n", __func__);
+ vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
+ vfe_dev->vfe_open_cnt--;
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return -EINVAL;
+ }
+
+ vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+
+ vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr,
+ "msm_isp");
+
+ memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
+ memset(&vfe_dev->stats_data, 0,
+ sizeof(struct msm_vfe_stats_shared_data));
+ memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+ memset(&vfe_dev->fetch_engine_info, 0,
+ sizeof(vfe_dev->fetch_engine_info));
+ vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
+ vfe_dev->axi_data.enable_frameid_recovery = 0;
+ vfe_dev->taskletq_idx = 0;
+ vfe_dev->vt_enable = 0;
+ vfe_dev->reg_update_requested = 0;
+ /* Register page fault handler */
+ vfe_dev->buf_mgr->pagefault_debug_disable = 0;
+ cam_smmu_reg_client_page_fault_handler(
+ vfe_dev->buf_mgr->iommu_hdl,
+ msm_vfe_iommu_fault_handler, vfe_dev);
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return 0;
+}
+
+#ifdef CONFIG_MSM_AVTIMER
+void msm_isp_end_avtimer(void)
+{
+ avcs_core_disable_power_collapse(0);
+}
+#else
+void msm_isp_end_avtimer(void)
+{
+ pr_err("AV Timer is not supported\n");
+}
+#endif
+
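+/*
+ * msm_isp_close_node - subdev close handler for a VFE node.
+ *
+ * The last close unregisters the page fault handler, halts the AXI
+ * bridge, disables CAMIF, resets the hardware, parks every write master
+ * on its scratch buffer and finally releases the hardware and the
+ * buffer manager.
+ */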
+int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ long rc = 0;
+ int wm;
+ struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+
+ ISP_DBG("%s E open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
+ mutex_lock(&vfe_dev->realtime_mutex);
+ mutex_lock(&vfe_dev->core_mutex);
+
+ if (!vfe_dev->vfe_open_cnt) {
+ pr_err("%s invalid state open cnt %d\n", __func__,
+ vfe_dev->vfe_open_cnt);
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return -EINVAL;
+ }
+
+ if (vfe_dev->vfe_open_cnt > 1) {
+ vfe_dev->vfe_open_cnt--;
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return 0;
+ }
+ /* Unregister page fault handler */
+ cam_smmu_reg_client_page_fault_handler(
+ vfe_dev->buf_mgr->iommu_hdl,
+ NULL, vfe_dev);
+
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+ if (rc <= 0)
+ pr_err("%s: halt timeout rc=%ld\n", __func__, rc);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+ vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 0);
+
+ /* after regular hw stop, reduce open cnt */
+ vfe_dev->vfe_open_cnt--;
+
+ /* put scratch buf in all the wm */
+ for (wm = 0; wm < vfe_dev->axi_data.hw_info->num_wm; wm++) {
+ msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PING_FLAG);
+ msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PONG_FLAG);
+ }
+ vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
+ vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
+ if (vfe_dev->vt_enable) {
+ msm_isp_end_avtimer();
+ vfe_dev->vt_enable = 0;
+ }
+ vfe_dev->is_split = 0;
+
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return 0;
+}
+
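+/*
+ * msm_isp_flush_tasklet - drop all queued but not yet processed IRQ
+ * commands for this VFE and reset the pending IRQ count.
+ */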
+void msm_isp_flush_tasklet(struct vfe_device *vfe_dev)
+{
+ unsigned long flags;
+ struct msm_vfe_tasklet_queue_cmd *queue_cmd;
+
+ spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+ while (atomic_read(&vfe_dev->irq_cnt)) {
+ queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
+ struct msm_vfe_tasklet_queue_cmd, list);
+
+ if (!queue_cmd) {
+ atomic_set(&vfe_dev->irq_cnt, 0);
+ break;
+ }
+ atomic_sub(1, &vfe_dev->irq_cnt);
+ list_del(&queue_cmd->list);
+ queue_cmd->cmd_used = 0;
+ }
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+}
+
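+/*
+ * msm_isp_save_framedrop_values - for every active stream on the given
+ * input source, latch the requested framedrop period into the applied
+ * (activated) framedrop period under the stream lock.
+ */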
+void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ uint32_t j = 0;
+ unsigned long flags;
+
+ for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
+ stream_info = &vfe_dev->axi_data.stream_info[j];
+ if (stream_info->state != ACTIVE)
+ continue;
+ if (frame_src != SRC_TO_INTF(stream_info->stream_src))
+ continue;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+ stream_info->activated_framedrop_period =
+ stream_info->requested_framedrop_period;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+}
+
+void msm_isp_dump_irq_debug(void)
+{
+ uint32_t index, count, i;
+
+ if (dump_data.fill_count > MAX_ISP_PING_PONG_DUMP_SIZE) {
+ index = dump_data.first;
+ count = MAX_ISP_PING_PONG_DUMP_SIZE;
+ } else {
+ index = 0;
+ count = dump_data.first;
+ }
+ for (i = 0; i < count; i++) {
+ pr_err("%s: trace_msm_cam_ping_pong_debug_dump need re-impl\n",
+ __func__);
+ index = (index + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
+ }
+}
+
+void msm_isp_dump_taskelet_debug(void)
+{
+ uint32_t index, count, i;
+
+ if (tasklet_data.fill_count > MAX_ISP_PING_PONG_DUMP_SIZE) {
+ index = tasklet_data.first;
+ count = MAX_ISP_PING_PONG_DUMP_SIZE;
+ } else {
+ index = 0;
+ count = tasklet_data.first;
+ }
+ for (i = 0; i < count; i++) {
+ pr_err("%s: trace_msm_cam_tasklet_debug_dump need implement\n",
+ __func__);
+ index = (index + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
+ }
+}
+
+void msm_isp_dump_ping_pong_mismatch(void)
+{
+ int i;
+
+ spin_lock(&dump_tasklet_lock);
+ for (i = 0; i < MAX_VFE; i++) {
+ dump_data.vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_irq_mask(
+ dump_data.vfe_dev->common_data->dual_vfe_res->vfe_dev[i]);
+ synchronize_irq(
+ (uint32_t)dump_data.vfe_dev->common_data->dual_vfe_res->vfe_dev[
+ i]->vfe_irq->start);
+ }
+ trace_msm_cam_string(" ***** msm_isp_dump_irq_debug ****");
+ msm_isp_dump_irq_debug();
+ trace_msm_cam_string(" ***** msm_isp_dump_taskelet_debug ****");
+ msm_isp_dump_taskelet_debug();
+ spin_unlock(&dump_tasklet_lock);
+}
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_util.h b/drivers/media/platform/msm/ais/isp/msm_isp_util.h
new file mode 100644
index 000000000000..b5b0140aa513
--- /dev/null
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_util.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISP_UTIL_H__
+#define __MSM_ISP_UTIL_H__
+
+#include "msm_isp.h"
+#include <soc/qcom/ais.h>
+#include "msm_camera_io_util.h"
+
+/* #define CONFIG_MSM_ISP_DBG 1 */
+
+#ifdef CONFIG_MSM_ISP_DBG
+#define ISP_DBG(fmt, args...) printk(fmt, ##args)
+#else
+#define ISP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define ALT_VECTOR_IDX(x) {x = 3 - x; }
+#define MAX_ISP_PING_PONG_DUMP_SIZE 20
+struct ping_pong_state {
+ uint32_t vfe_id;
+ uint32_t irq_status0;
+ uint32_t irq_status1;
+ uint32_t ping_pong_status;
+ uint32_t core;
+ struct msm_isp_timestamp ts;
+};
+struct dual_vfe_state {
+ struct ping_pong_state current_vfe_irq;
+ struct ping_pong_state other_vfe;
+};
+struct dump_ping_pong_state {
+ struct dual_vfe_state arr[MAX_ISP_PING_PONG_DUMP_SIZE];
+ uint32_t first;
+ uint32_t fill_count;
+ struct vfe_device *vfe_dev;
+};
+
+void msm_isp_dump_ping_pong_mismatch(void);
+void msm_isp_get_status(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1);
+void msm_isp_dump_taskelet_debug(void);
+uint32_t msm_isp_get_framedrop_period(
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern);
+void msm_isp_reset_burst_count_and_frame_drop(
+ struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_init_bandwidth_mgr(struct vfe_device *vfe_dev,
+ enum msm_isp_hw_client client);
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+ uint64_t ab, uint64_t ib);
+void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
+ struct msm_isp_statistics *stats);
+void msm_isp_util_update_last_overflow_ab_ib(struct vfe_device *vfe_dev);
+void msm_isp_util_update_clk_rate(long clock_rate);
+void msm_isp_update_req_history(uint32_t client, uint64_t ab,
+ uint64_t ib,
+ struct msm_isp_bandwidth_info *client_info,
+ unsigned long long ts);
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client);
+
+int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+
+int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+
+int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_send_event(struct vfe_device *vfe_dev,
+ uint32_t type, struct msm_isp_event_data *event_data);
+int msm_isp_cal_word_per_line(uint32_t output_format,
+ uint32_t pixel_per_line);
+int msm_isp_get_bit_per_pixel(uint32_t output_format);
+enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format);
+irqreturn_t msm_isp_process_irq(int irq_num, void *data);
+int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_do_tasklet(unsigned long data);
+void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev);
+void msm_isp_process_error_info(struct vfe_device *vfe_dev);
+int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+long msm_isp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
+void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
+ struct msm_vfe_fetch_engine_info *fetch_engine_info);
+void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format);
+void msm_isp_flush_tasklet(struct vfe_device *vfe_dev);
+void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
+ struct vfe_device *vfe_dev);
+void msm_isp_process_overflow_irq(
+ struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1,
+ uint32_t force_overflow);
+#endif /* __MSM_ISP_UTIL_H__ */
diff --git a/drivers/media/platform/msm/ais/ispif/Makefile b/drivers/media/platform/msm/ais/ispif/Makefile
new file mode 100644
index 000000000000..4e22b41a0fe1
--- /dev/null
+++ b/drivers/media/platform/msm/ais/ispif/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_ispif.o
diff --git a/drivers/media/platform/msm/ais/ispif/msm_ispif.c b/drivers/media/platform/msm/ais/ispif/msm_ispif.c
new file mode 100644
index 000000000000..8eb88364a2cb
--- /dev/null
+++ b/drivers/media/platform/msm/ais/ispif/msm_ispif.c
@@ -0,0 +1,1802 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/videodev2.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/compat.h>
+#include <media/ais/msm_ais_isp.h>
+#include <linux/ratelimit.h>
+
+#include "msm_ispif.h"
+#include "msm.h"
+#include "msm_sd.h"
+#include "msm_camera_io_util.h"
+#include "cam_hw_ops.h"
+#include "cam_soc_api.h"
+
+#ifdef CONFIG_AIS_MSM_ISPIF_V1
+#include "msm_ispif_hwreg_v1.h"
+#elif defined CONFIG_AIS_MSM_ISPIF_V2
+#include "msm_ispif_hwreg_v2.h"
+#else
+#include "msm_ispif_hwreg_v3.h"
+#endif
+
+#define V4L2_IDENT_ISPIF 50001
+#define MSM_ISPIF_DRV_NAME "msm_ispif"
+
+#define ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY 0x00
+#define ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY 0x01
+#define ISPIF_INTF_CMD_DISABLE_IMMEDIATELY 0x02
+
+#define ISPIF_TIMEOUT_SLEEP_US 1000
+#define ISPIF_TIMEOUT_ALL_US 1000000
+#define ISPIF_SOF_DEBUG_COUNT 0
+
+#undef CDBG
+#ifdef CONFIG_MSM_AIS_DEBUG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define CDBG(fmt, args...)
+#endif
+
+static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable);
+static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+static long msm_ispif_subdev_ioctl_unlocked(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg);
+
+static int msm_ispif_get_clk_info(struct ispif_device *ispif_dev,
+ struct platform_device *pdev);
+
+
+static void msm_ispif_io_dump_reg(struct ispif_device *ispif)
+{
+ if (!ispif->enb_dump_reg)
+ return;
+ msm_camera_io_dump(ispif->base, 0x250, 0);
+}
+
+
+static inline int msm_ispif_is_intf_valid(uint32_t csid_version,
+ enum msm_ispif_vfe_intf intf_type)
+{
+ return ((csid_version <= CSID_VERSION_V22 && intf_type != VFE0) ||
+ (intf_type >= VFE_MAX)) ? false : true;
+}
+
+static struct msm_cam_clk_info ispif_8626_reset_clk_info[] = {
+ {"ispif_ahb_clk", NO_SET_RATE},
+ {"camss_top_ahb_clk", NO_SET_RATE},
+ {"csi0_ahb_clk", NO_SET_RATE},
+ {"csi0_src_clk", NO_SET_RATE},
+ {"csi0_phy_clk", NO_SET_RATE},
+ {"csi0_clk", NO_SET_RATE},
+ {"csi0_pix_clk", NO_SET_RATE},
+ {"csi0_rdi_clk", NO_SET_RATE},
+ {"csi1_ahb_clk", NO_SET_RATE},
+ {"csi1_src_clk", NO_SET_RATE},
+ {"csi1_phy_clk", NO_SET_RATE},
+ {"csi1_clk", NO_SET_RATE},
+ {"csi1_pix_clk", NO_SET_RATE},
+ {"csi1_rdi_clk", NO_SET_RATE},
+ {"camss_vfe_vfe_clk", NO_SET_RATE},
+ {"camss_csi_vfe_clk", NO_SET_RATE},
+};
+
+#ifdef CONFIG_COMPAT
+struct ispif_cfg_data_ext_32 {
+ enum ispif_cfg_type_t cfg_type;
+ compat_caddr_t data;
+ uint32_t size;
+};
+
+#define VIDIOC_MSM_ISPIF_CFG_EXT_COMPAT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE+1, struct ispif_cfg_data_ext_32)
+#endif
+
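+/*
+ * Build the two 32-bit pack-mode masks for an ISPIF params entry: each
+ * CID gets a 4-bit field (pack_mode[1:0], even/odd select, pixel swap),
+ * with CID0-CID7 packed into pack_mask[0] and CID8 and above into
+ * pack_mask[1].
+ */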
+static void msm_ispif_get_pack_mask_from_cfg(
+ struct msm_ispif_pack_cfg *pack_cfg,
+ struct msm_ispif_params_entry *entry,
+ uint32_t *pack_mask)
+{
+ int i;
+ uint32_t temp;
+
+ if (WARN_ON(!entry))
+ return;
+
+ memset(pack_mask, 0, sizeof(uint32_t) * 2);
+ for (i = 0; i < entry->num_cids; i++) {
+ temp = (pack_cfg[entry->cids[i]].pack_mode & 0x3)|
+ (pack_cfg[entry->cids[i]].even_odd_sel & 0x1) << 2 |
+ (pack_cfg[entry->cids[i]].pixel_swap_en & 0x1) << 3;
+ temp = (temp & 0xF) << ((entry->cids[i] % CID8) * 4);
+
+ if (entry->cids[i] > CID7)
+ pack_mask[1] |= temp;
+ else
+ pack_mask[0] |= temp;
+ CDBG("%s:num %d cid %d mode %d pack_mask %x %x\n",
+ __func__, entry->num_cids, entry->cids[i],
+ pack_cfg[entry->cids[i]].pack_mode,
+ pack_mask[0], pack_mask[1]);
+
+ }
+}
+
+static int msm_ispif_config2(struct ispif_device *ispif,
+ void *data)
+{
+ int rc = 0, i = 0;
+ enum msm_ispif_intftype intftype;
+ enum msm_ispif_vfe_intf vfe_intf;
+ uint32_t pack_cfg_mask[2];
+ struct msm_ispif_param_data_ext *params =
+ (struct msm_ispif_param_data_ext *)data;
+
+ if (WARN_ON(!ispif) || WARN_ON(!params)) {
+ rc = -EINVAL;
+ return rc;
+ }
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ for (i = 0; i < params->num; i++) {
+ int j;
+
+ if (params->entries[i].num_cids > MAX_CID_CH_v2)
+ return -EINVAL;
+ for (j = 0; j < params->entries[i].num_cids; j++)
+ if (params->entries[i].cids[j] >= CID_MAX)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < params->num; i++) {
+ intftype = params->entries[i].intftype;
+ vfe_intf = params->entries[i].vfe_intf;
+
+ CDBG("%s, num %d intftype %x, vfe_intf %d, csid %d\n", __func__,
+ params->num, intftype, vfe_intf,
+ params->entries[i].csid);
+
+ if ((intftype >= INTF_MAX) ||
+ (vfe_intf >= ispif->vfe_info.num_vfe) ||
+ (ispif->csid_version <= CSID_VERSION_V22 &&
+ (vfe_intf > VFE0))) {
+ pr_err("%s: VFEID %d and CSID version %d mismatch\n",
+ __func__, vfe_intf, ispif->csid_version);
+ return -EINVAL;
+ }
+
+ msm_ispif_get_pack_mask_from_cfg(params->pack_cfg,
+ &params->entries[i], pack_cfg_mask);
+ msm_ispif_cfg_pack_mode(ispif, intftype, vfe_intf,
+ pack_cfg_mask);
+ }
+ return rc;
+}
+
+static long msm_ispif_cmd_ext(struct v4l2_subdev *sd,
+ void *arg)
+{
+ long rc = 0;
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
+ struct ispif_cfg_data_ext pcdata;
+ struct msm_ispif_param_data_ext *params = NULL;
+#ifdef CONFIG_COMPAT
+ struct ispif_cfg_data_ext_32 *pcdata32 =
+ (struct ispif_cfg_data_ext_32 *)arg;
+
+ if (pcdata32 == NULL) {
+ pr_err("Invalid params passed from user\n");
+ return -EINVAL;
+ }
+ pcdata.cfg_type = pcdata32->cfg_type;
+ pcdata.size = pcdata32->size;
+ pcdata.data = compat_ptr(pcdata32->data);
+
+#else
+ struct ispif_cfg_data_ext *pcdata64 =
+ (struct ispif_cfg_data_ext *)arg;
+
+ if (pcdata64 == NULL) {
+ pr_err("Invalid params passed from user\n");
+ return -EINVAL;
+ }
+ pcdata.cfg_type = pcdata64->cfg_type;
+ pcdata.size = pcdata64->size;
+ pcdata.data = pcdata64->data;
+#endif
+ if (pcdata.size != sizeof(struct msm_ispif_param_data_ext)) {
+ pr_err("%s: payload size mismatch\n", __func__);
+ return -EINVAL;
+ }
+
+ params = kzalloc(sizeof(struct msm_ispif_param_data_ext), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ if (copy_from_user(params, (void __user *)(pcdata.data),
+ pcdata.size)) {
+ kfree(params);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ispif->mutex);
+ switch (pcdata.cfg_type) {
+ case ISPIF_CFG2:
+ rc = msm_ispif_config2(ispif, params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ default:
+ pr_err("%s: invalid cfg_type\n", __func__);
+ rc = -EINVAL;
+ break;
+ }
+ mutex_unlock(&ispif->mutex);
+ kfree(params);
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_ispif_subdev_ioctl_compat(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ if (WARN_ON(!sd))
+ return -EINVAL;
+
+ switch (cmd) {
+ case VIDIOC_MSM_ISPIF_CFG_EXT_COMPAT:
+ return msm_ispif_cmd_ext(sd, arg);
+
+ default:
+ return msm_ispif_subdev_ioctl_unlocked(sd, cmd, arg);
+ }
+}
+static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ if (is_compat_task())
+ return msm_ispif_subdev_ioctl_compat(sd, cmd, arg);
+ else
+ return msm_ispif_subdev_ioctl_unlocked(sd, cmd, arg);
+}
+#else
+static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ return msm_ispif_subdev_ioctl_unlocked(sd, cmd, arg);
+}
+#endif
+static void msm_ispif_put_regulator(struct ispif_device *ispif_dev)
+{
+ int i;
+
+ for (i = 0; i < ispif_dev->ispif_vdd_count; i++) {
+ regulator_put(ispif_dev->ispif_vdd[i]);
+ ispif_dev->ispif_vdd[i] = NULL;
+ }
+ for (i = 0; i < ispif_dev->vfe_vdd_count; i++) {
+ regulator_put(ispif_dev->vfe_vdd[i]);
+ ispif_dev->vfe_vdd[i] = NULL;
+ }
+}
+
+static inline int __get_vdd(struct platform_device *pdev,
+ struct regulator **reg, const char *vdd)
+{
+ int rc = 0;
+ *reg = regulator_get(&pdev->dev, vdd);
+ if (IS_ERR_OR_NULL(*reg)) {
+ rc = PTR_ERR(*reg);
+ rc = rc ? rc : -EINVAL;
+ pr_err("%s: Regulator %s get failed %d\n", __func__, vdd, rc);
+ *reg = NULL;
+ }
+ return rc;
+}
+
+static int msm_ispif_get_regulator_info(struct ispif_device *ispif_dev,
+ struct platform_device *pdev)
+{
+ int rc;
+ const char *vdd_name;
+ struct device_node *of_node;
+ int i;
+ int count;
+
+ of_node = pdev->dev.of_node;
+
+ count = of_property_count_strings(of_node,
+ "qcom,vdd-names");
+ if (count <= 0) {
+ pr_err("%s: no regulators found\n", __func__);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(count > (ISPIF_VDD_INFO_MAX + ISPIF_VFE_VDD_INFO_MAX)))
+ return -EINVAL;
+ ispif_dev->vfe_vdd_count = 0;
+ ispif_dev->ispif_vdd_count = 0;
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(
+ of_node, "qcom,vdd-names",
+ i, &vdd_name);
+ if (rc < 0) {
+ pr_err("%s: read property qcom,ispif-vdd-names at index %d failed\n",
+ __func__, i);
+ goto err;
+ }
+ if (strnstr(vdd_name, "vfe", strlen(vdd_name))) {
+ if (WARN_ON(ispif_dev->vfe_vdd_count >=
+ ISPIF_VFE_VDD_INFO_MAX))
+ return -EINVAL;
+ rc = __get_vdd(pdev,
+ &ispif_dev->vfe_vdd[ispif_dev->vfe_vdd_count],
+ vdd_name);
+ if (rc == 0)
+ ispif_dev->vfe_vdd_count++;
+ } else {
+ if (WARN_ON(ispif_dev->vfe_vdd_count >=
+ ISPIF_VDD_INFO_MAX))
+ return -EINVAL;
+ rc = __get_vdd(pdev,
+ &ispif_dev->ispif_vdd
+ [ispif_dev->ispif_vdd_count],
+ vdd_name);
+ if (rc == 0)
+ ispif_dev->ispif_vdd_count++;
+ }
+ if (rc)
+ goto err;
+ }
+ return 0;
+err:
+ for (i = 0; i < ispif_dev->vfe_vdd_count; i++) {
+ regulator_put(ispif_dev->vfe_vdd[i]);
+ ispif_dev->vfe_vdd[i] = NULL;
+ }
+ for (i = 0; i < ispif_dev->ispif_vdd_count; i++) {
+ regulator_put(ispif_dev->ispif_vdd[i]);
+ ispif_dev->ispif_vdd[i] = NULL;
+ }
+ ispif_dev->ispif_vdd_count = 0;
+ ispif_dev->vfe_vdd_count = 0;
+ return rc;
+}
+
+static int msm_ispif_set_regulators(struct regulator **regs, int count,
+ uint8_t enable)
+{
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (enable) {
+ rc = regulator_enable(regs[i]);
+ if (rc)
+ goto err;
+ } else {
+ rc |= regulator_disable(regs[i]);
+ }
+ }
+ if (rc)
+ pr_err("%s: Regulator disable failed\n", __func__);
+ return rc;
+err:
+ pr_err("%s: Regulator enable failed\n", __func__);
+ for (i--; i >= 0; i--)
+ regulator_disable(regs[i]);
+ return rc;
+}
+
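+/*
+ * msm_ispif_reset_hw - hard reset of the ISPIF block.
+ *
+ * Turns on the VFE rails and clocks (falling back to the 8x26 reset
+ * clock list if the primary set fails), issues the reset command for
+ * VFE0 (and VFE1 on dual-ISP targets) and waits up to 500 ms for the
+ * reset-done interrupt before turning the clocks and regulators back
+ * off.
+ */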
+static int msm_ispif_reset_hw(struct ispif_device *ispif)
+{
+ int rc = 0;
+ long timeout = 0;
+ struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
+
+ ispif->clk_idx = 0;
+
+ /* Turn ON VFE regulators before enabling the vfe clocks */
+ rc = msm_ispif_set_regulators(ispif->vfe_vdd, ispif->vfe_vdd_count, 1);
+ if (rc < 0)
+ return rc;
+
+ rc = msm_camera_clk_enable(&ispif->pdev->dev,
+ ispif->clk_info, ispif->clks,
+ ispif->num_clk, 1);
+ if (rc < 0) {
+ pr_err("%s: cannot enable clock, error = %d\n",
+ __func__, rc);
+ rc = msm_camera_clk_enable(&ispif->pdev->dev,
+ ispif_8626_reset_clk_info, reset_clk1,
+ ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
+ if (rc < 0) {
+ pr_err("%s: cannot enable clock, error = %d",
+ __func__, rc);
+ goto reg_disable;
+ } else {
+ /* This is set when device is 8x26 */
+ ispif->clk_idx = 2;
+ }
+ } else {
+ /* This is set when device is 8974 */
+ ispif->clk_idx = 1;
+ }
+
+ atomic_set(&ispif->reset_trig[VFE0], 1);
+ /* initiate reset of ISPIF */
+ msm_camera_io_w(ISPIF_RST_CMD_MASK,
+ ispif->base + ISPIF_RST_CMD_ADDR);
+
+ timeout = wait_for_completion_timeout(
+ &ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+ CDBG("%s: VFE0 done\n", __func__);
+
+ if (timeout <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("%s: VFE0 reset wait timeout\n", __func__);
+ goto clk_disable;
+ }
+
+ if (ispif->hw_num_isps > 1) {
+ atomic_set(&ispif->reset_trig[VFE1], 1);
+ msm_camera_io_w(ISPIF_RST_CMD_1_MASK,
+ ispif->base + ISPIF_RST_CMD_1_ADDR);
+ timeout = wait_for_completion_timeout(
+ &ispif->reset_complete[VFE1],
+ msecs_to_jiffies(500));
+ CDBG("%s: VFE1 done\n", __func__);
+ if (timeout <= 0) {
+ pr_err("%s: VFE1 reset wait timeout\n", __func__);
+ rc = -ETIMEDOUT;
+ }
+ }
+
+clk_disable:
+ if (ispif->clk_idx == 1) {
+ rc = rc ? rc : msm_camera_clk_enable(&ispif->pdev->dev,
+ ispif->clk_info, ispif->clks,
+ ispif->num_clk, 0);
+ }
+
+ if (ispif->clk_idx == 2) {
+ rc = rc ? rc : msm_camera_clk_enable(&ispif->pdev->dev,
+ ispif_8626_reset_clk_info, reset_clk1,
+ ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+ }
+reg_disable:
+ rc = rc ? rc : msm_ispif_set_regulators(ispif->vfe_vdd,
+ ispif->vfe_vdd_count, 0);
+
+ return rc;
+}
+
+static int msm_ispif_get_clk_info(struct ispif_device *ispif_dev,
+ struct platform_device *pdev)
+{
+ uint32_t num_ahb_clk = 0, non_ahb_clk = 0;
+ size_t num_clks;
+ int i, rc;
+ int j;
+ struct clk **clks, **temp_clks;
+ struct msm_cam_clk_info *clk_info, *temp_clk_info;
+
+ struct device_node *of_node;
+
+ of_node = pdev->dev.of_node;
+
+ rc = msm_camera_get_clk_info(pdev, &clk_info,
+ &clks, &num_clks);
+
+ if (rc)
+ return rc;
+
+ /*
+ * reshuffle the clock arrays so that the ahb clocks are
+ * at the beginning of array
+ */
+ temp_clks = kcalloc(num_clks, sizeof(struct clk *),
+ GFP_KERNEL);
+ temp_clk_info = kcalloc(num_clks, sizeof(struct msm_cam_clk_info),
+ GFP_KERNEL);
+ if (!temp_clks || !temp_clk_info) {
+ rc = -ENOMEM;
+ kfree(temp_clk_info);
+ kfree(temp_clks);
+ goto alloc_fail;
+ }
+ j = 0;
+ for (i = 0; i < num_clks; i++) {
+ if (strnstr(clk_info[i].clk_name,
+ "ahb", strlen(clk_info[i].clk_name))) {
+ temp_clk_info[j] = clk_info[i];
+ temp_clks[j] = clks[i];
+ j++;
+ num_ahb_clk++;
+ }
+ }
+ for (i = 0; i < num_clks; i++) {
+ if (!strnstr(clk_info[i].clk_name,
+ "ahb", strlen(clk_info[i].clk_name))) {
+ temp_clk_info[j] = clk_info[i];
+ temp_clks[j] = clks[i];
+ j++;
+ non_ahb_clk++;
+ }
+ }
+
+ for (i = 0; i < num_clks; i++) {
+ clk_info[i] = temp_clk_info[i];
+ clks[i] = temp_clks[i];
+ }
+ kfree(temp_clk_info);
+ kfree(temp_clks);
+
+ ispif_dev->ahb_clk = clks;
+ ispif_dev->ahb_clk_info = clk_info;
+ ispif_dev->num_ahb_clk = num_ahb_clk;
+ ispif_dev->clk_info = clk_info + num_ahb_clk;
+ ispif_dev->clks = clks + num_ahb_clk;
+ ispif_dev->num_clk = non_ahb_clk;
+
+ return 0;
+alloc_fail:
+ msm_camera_put_clk_info(pdev, &clk_info, &clks, num_clks);
+ return rc;
+}
+
+static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable)
+{
+ int rc = 0;
+
+ rc = msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif->ahb_clk_info, ispif->ahb_clk,
+ ispif->num_ahb_clk, enable);
+ if (rc < 0) {
+ pr_err("%s: cannot enable clock, error = %d",
+ __func__, rc);
+ }
+
+ return rc;
+}
+
+static int msm_ispif_reset(struct ispif_device *ispif)
+{
+ int rc = 0;
+ int i, vfe_intf;
+
+ if (WARN_ON(!ispif))
+ return -EINVAL;
+
+ memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
+ memset(ispif->vc_enable, 0, sizeof(ispif->vc_enable));
+ for (i = 0; i < ispif->vfe_info.num_vfe; i++) {
+
+ msm_camera_io_w(1 << PIX0_LINE_BUF_EN_BIT,
+ ispif->base + ISPIF_VFE_m_CTRL_0(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(i));
+ msm_camera_io_w(0xFFFFFFFF, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_0(i));
+ msm_camera_io_w(0xFFFFFFFF, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_1(i));
+ msm_camera_io_w(0xFFFFFFFF, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_2(i));
+
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_INPUT_SEL(i));
+
+ msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_0(i));
+ msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_1(i));
+
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 0));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 1));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 0));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 1));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 2));
+
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CROP(i, 0));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CROP(i, 1));
+ }
+
+ msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+ ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ for (vfe_intf = 0; vfe_intf < 2; vfe_intf++) {
+ msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+
+ msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_0(vfe_intf));
+
+ msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+
+ msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_1(vfe_intf));
+
+ msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
+
+ msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_2(vfe_intf));
+ }
+
+ msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+ ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ return rc;
+}
+
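+/*
+ * msm_ispif_sel_csid_core - route a CSID core to one VFE interface by
+ * programming its 2-bit field in ISPIF_VFE_m_INPUT_SEL: PIX0 at bits
+ * [1:0], RDI0 at [5:4], PIX1 at [9:8], RDI1 at [13:12] and RDI2 at
+ * [21:20].
+ */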
+static void msm_ispif_sel_csid_core(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+ uint32_t data;
+
+ if (WARN_ON(!ispif))
+ return;
+
+ if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return;
+ }
+
+ data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+ switch (intftype) {
+ case PIX0:
+ data &= ~(BIT(1) | BIT(0));
+ data |= (uint32_t) csid;
+ break;
+ case RDI0:
+ data &= ~(BIT(5) | BIT(4));
+ data |= ((uint32_t) csid) << 4;
+ break;
+ case PIX1:
+ data &= ~(BIT(9) | BIT(8));
+ data |= ((uint32_t) csid) << 8;
+ break;
+ case RDI1:
+ data &= ~(BIT(13) | BIT(12));
+ data |= ((uint32_t) csid) << 12;
+ break;
+ case RDI2:
+ data &= ~(BIT(21) | BIT(20));
+ data |= ((uint32_t) csid) << 20;
+ break;
+ }
+
+ msm_camera_io_w_mb(data, ispif->base +
+ ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+}
+
+static void msm_ispif_enable_crop(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf, uint16_t start_pixel,
+ uint16_t end_pixel)
+{
+ uint32_t data;
+
+ if (WARN_ON(!ispif))
+ return;
+
+ if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return;
+ }
+
+ data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
+ data |= (1 << (intftype + 7));
+ if (intftype == PIX0)
+ data |= 1 << PIX0_LINE_BUF_EN_BIT;
+ msm_camera_io_w(data,
+ ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
+
+ if (intftype == PIX0)
+ msm_camera_io_w_mb(start_pixel | (end_pixel << 16),
+ ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 0));
+ else if (intftype == PIX1)
+ msm_camera_io_w_mb(start_pixel | (end_pixel << 16),
+ ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 1));
+ else {
+ pr_err("%s: invalid intftype=%d\n", __func__, intftype);
+ return;
+ }
+}
+
+static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
+ uint8_t intftype, uint16_t cid_mask, uint8_t vfe_intf, uint8_t enable)
+{
+ uint32_t intf_addr, data, i;
+
+ if (WARN_ON(!ispif))
+ return;
+
+ if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return;
+ }
+
+ switch (intftype) {
+ case PIX0:
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 0);
+ break;
+ case RDI0:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 0);
+ break;
+ case PIX1:
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 1);
+ break;
+ case RDI1:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 1);
+ break;
+ case RDI2:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 2);
+ break;
+ default:
+ pr_err("%s: invalid intftype=%d\n", __func__, intftype);
+ return;
+ }
+
+ if (enable)
+ data = (uint32_t) cid_mask;
+ else
+ data = (~((uint32_t) cid_mask)) & 0xFFFF;
+ msm_camera_io_w_mb(data, ispif->base + intf_addr);
+
+ for (i = 0; i < VC_MAX; i++)
+ ispif->vc_enable[vfe_intf][intftype][i] =
+ (data & (0xF << (i*4))) ? 1 : 0;
+
+}
+
+static int msm_ispif_validate_intf_status(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf)
+{
+ int rc = 0;
+ uint32_t data = 0;
+
+ if (WARN_ON(!ispif))
+ return -EINVAL;
+
+ if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (intftype) {
+ case PIX0:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0));
+ break;
+ case RDI0:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0));
+ break;
+ case PIX1:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1));
+ break;
+ case RDI1:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1));
+ break;
+ case RDI2:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2));
+ break;
+ }
+ if ((data & 0xf) != 0xf)
+ rc = -EBUSY;
+ return rc;
+}
+
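+/*
+ * msm_ispif_select_clk_mux - program the CSID source for an interface
+ * in the clock mux registers: PIX0/PIX1 use a 4-bit field per interface
+ * inside an 8-bit slot per VFE, while RDI0/RDI1/RDI2 use 4-bit fields
+ * inside a 12-bit slot per VFE in the RDI clk mux register.
+ */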
+static void msm_ispif_select_clk_mux(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+ uint32_t data = 0;
+
+ switch (intftype) {
+ case PIX0:
+ data = msm_camera_io_r(ispif->clk_mux_base);
+ data &= ~(0xf << (vfe_intf * 8));
+ data |= (csid << (vfe_intf * 8));
+ msm_camera_io_w(data, ispif->clk_mux_base);
+ break;
+
+ case RDI0:
+ data = msm_camera_io_r(ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ data &= ~(0xf << (vfe_intf * 12));
+ data |= (csid << (vfe_intf * 12));
+ msm_camera_io_w(data, ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ break;
+
+ case PIX1:
+ data = msm_camera_io_r(ispif->clk_mux_base);
+ data &= ~(0xf0 << (vfe_intf * 8));
+ data |= (csid << (4 + (vfe_intf * 8)));
+ msm_camera_io_w(data, ispif->clk_mux_base);
+ break;
+
+ case RDI1:
+ data = msm_camera_io_r(ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ data &= ~(0xf << (4 + (vfe_intf * 12)));
+ data |= (csid << (4 + (vfe_intf * 12)));
+ msm_camera_io_w(data, ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ break;
+
+ case RDI2:
+ data = msm_camera_io_r(ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ data &= ~(0xf << (8 + (vfe_intf * 12)));
+ data |= (csid << (8 + (vfe_intf * 12)));
+ msm_camera_io_w(data, ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ break;
+ }
+ CDBG("%s intftype %d data %x\n", __func__, intftype, data);
+ /* ensure clk mux is enabled */
+ mb();
+}
+
+static uint16_t msm_ispif_get_cids_mask_from_cfg(
+ struct msm_ispif_params_entry *entry)
+{
+ int i;
+ uint16_t cids_mask = 0;
+
+ if (WARN_ON(!entry))
+ return 0;
+
+ for (i = 0; i < entry->num_cids && i < MAX_CID_CH_v2; i++)
+ cids_mask |= (1 << entry->cids[i]);
+
+ return cids_mask;
+}
+
+static int msm_ispif_config(struct ispif_device *ispif,
+ struct msm_ispif_param_data *params)
+{
+ int rc = 0, i = 0;
+ uint16_t cid_mask;
+ enum msm_ispif_intftype intftype;
+ enum msm_ispif_vfe_intf vfe_intf;
+
+ if (WARN_ON(!ispif) || WARN_ON(!params)) {
+ rc = -EINVAL;
+ return rc;
+ }
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ for (i = 0; i < params->num; i++) {
+ vfe_intf = params->entries[i].vfe_intf;
+ if (!msm_ispif_is_intf_valid(ispif->csid_version,
+ vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < params->num; i++) {
+ intftype = params->entries[i].intftype;
+ vfe_intf = params->entries[i].vfe_intf;
+
+ CDBG("%s %d - intftype %x, vfe_intf %d, csid %d\n", __func__,
+ i, intftype, vfe_intf, params->entries[i].csid);
+
+ if ((intftype >= INTF_MAX) ||
+ (vfe_intf >= ispif->vfe_info.num_vfe) ||
+ (ispif->csid_version <= CSID_VERSION_V22 &&
+ (vfe_intf > VFE0))) {
+ pr_err("%s: VFEID %d and CSID version %d mismatch\n",
+ __func__, vfe_intf, ispif->csid_version);
+ return -EINVAL;
+ }
+
+ if (ispif->csid_version >= CSID_VERSION_V30)
+ msm_ispif_select_clk_mux(ispif, intftype,
+ params->entries[i].csid, vfe_intf);
+
+ rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf);
+ if (rc) {
+ pr_err("%s:validate_intf_status failed, rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+
+ msm_ispif_sel_csid_core(ispif, intftype,
+ params->entries[i].csid, vfe_intf);
+
+ cid_mask = msm_ispif_get_cids_mask_from_cfg(
+ &params->entries[i]);
+
+ msm_ispif_enable_intf_cids(ispif, intftype,
+ cid_mask, vfe_intf, 1);
+
+ if (params->entries[i].crop_enable)
+ msm_ispif_enable_crop(ispif, intftype, vfe_intf,
+ params->entries[i].crop_start_pixel,
+ params->entries[i].crop_end_pixel);
+
+ CDBG("ISPIF: crop_enable:%d, cid_mask = %d\n",
+ params->entries[i].crop_enable, cid_mask);
+ }
+
+ return rc;
+}
+
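+/*
+ * msm_ispif_intf_cmd - apply a start/stop command to the selected
+ * interfaces. Each enabled VC owns a 2-bit command field: PIX0, RDI0,
+ * PIX1 and RDI1 fields live in INTF_CMD_0 (8 bits per interface) and
+ * RDI2 fields live in INTF_CMD_1; the cached applied_intf_cmd values
+ * are updated and only written back when they hold a valid command.
+ */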
+static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits,
+ struct msm_ispif_param_data *params)
+{
+ uint8_t vc;
+ int i;
+ enum msm_ispif_intftype intf_type;
+ enum msm_ispif_vfe_intf vfe_intf;
+
+ if (WARN_ON(!ispif) || WARN_ON(!params))
+ return;
+
+ for (i = 0; i < params->num; i++) {
+ intf_type = params->entries[i].intftype;
+ vfe_intf = params->entries[i].vfe_intf;
+ CDBG("%s: cmd_bits=%d, intf_type=%d : vfe_intf =%d\n",
+ __func__, cmd_bits, intf_type, vfe_intf);
+ for (vc = 0; vc < VC_MAX; vc++) {
+ CDBG("vc_enable[%d]=%d",
+ vc, ispif->vc_enable[vfe_intf][intf_type][vc]);
+ if (!ispif->vc_enable[vfe_intf][intf_type][vc])
+ continue;
+ if (intf_type == RDI2) {
+ /* zero out two bits */
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd1 &=
+ ~(0x3 << (vc * 2 + 8));
+ /* set cmd bits */
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd1 |=
+ (cmd_bits << (vc * 2 + 8));
+ } else {
+ /* zero 2 bits */
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd &=
+ ~(0x3 << (vc * 2 + intf_type * 8));
+ /* set cmd bits */
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd |=
+ (cmd_bits << (vc * 2 + intf_type * 8));
+ }
+ }
+ /* cmd for PIX0, PIX1, RDI0, RDI1 */
+ if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF)
+ msm_camera_io_w_mb(
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe_intf));
+
+ /* cmd for RDI2 */
+ if (ispif->applied_intf_cmd[vfe_intf].intf_cmd1 != 0xFFFFFFFF)
+ msm_camera_io_w_mb(
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd1,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe_intf));
+ }
+}
+
+static int msm_ispif_stop_immediately(struct ispif_device *ispif,
+ struct msm_ispif_param_data *params)
+{
+ int rc = 0, i = 0;
+
+ if (WARN_ON(!ispif) || WARN_ON(!params)) {
+ rc = -EINVAL;
+ return rc;
+ }
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+ CDBG("%s: params->num= %d\n",
+ __func__, params->num);
+
+ for (i = 0; i < params->num; i++) {
+ if (!msm_ispif_is_intf_valid(ispif->csid_version,
+ params->entries[i].vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ rc = -EINVAL;
+ return rc;
+ }
+ }
+
+ msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_DISABLE_IMMEDIATELY, params);
+
+ return rc;
+}
+
+static int msm_ispif_start_frame_boundary(struct ispif_device *ispif,
+ struct msm_ispif_param_data *params)
+{
+ int rc = 0, i = 0;
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+ for (i = 0; i < params->num; i++) {
+ if (!msm_ispif_is_intf_valid(ispif->csid_version,
+ params->entries[i].vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ rc = -EINVAL;
+ return rc;
+ }
+ }
+
+ msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+
+ return rc;
+}
+
+static int msm_ispif_restart_frame_boundary(struct ispif_device *ispif,
+ struct msm_ispif_param_data *params)
+{
+ int rc = 0;
+
+ rc = msm_ispif_reset_hw(ispif);
+ if (!rc)
+ rc = msm_ispif_reset(ispif);
+ if (!rc)
+ rc = msm_ispif_config(ispif, params);
+ if (!rc)
+ rc = msm_ispif_start_frame_boundary(ispif, params);
+
+ if (!rc)
+ pr_info("ISPIF restart successful\n");
+ else
+ pr_err("ISPIF restart failed\n");
+
+ return rc;
+}
+
+static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif,
+ struct msm_ispif_param_data *params)
+{
+ int i, rc = 0;
+ uint16_t cid_mask = 0;
+ uint32_t intf_addr;
+ enum msm_ispif_vfe_intf vfe_intf;
+ uint32_t stop_flag = 0;
+
+ if (WARN_ON(!ispif) || WARN_ON(!params)) {
+ rc = -EINVAL;
+ return rc;
+ }
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ for (i = 0; i < params->num; i++) {
+ if (!msm_ispif_is_intf_valid(ispif->csid_version,
+ params->entries[i].vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+ msm_ispif_intf_cmd(ispif,
+ ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY, params);
+
+ for (i = 0; i < params->num; i++) {
+ cid_mask =
+ msm_ispif_get_cids_mask_from_cfg(&params->entries[i]);
+ vfe_intf = params->entries[i].vfe_intf;
+
+ switch (params->entries[i].intftype) {
+ case PIX0:
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
+ break;
+ case RDI0:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
+ break;
+ case PIX1:
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
+ break;
+ case RDI1:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
+ break;
+ case RDI2:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
+ break;
+ default:
+ pr_err("%s: invalid intftype=%d\n", __func__,
+ params->entries[i].intftype);
+ rc = -EPERM;
+ goto end;
+ }
+
+ rc = readl_poll_timeout(ispif->base + intf_addr, stop_flag,
+ (stop_flag & 0xF) == 0xF,
+ ISPIF_TIMEOUT_SLEEP_US,
+ ISPIF_TIMEOUT_ALL_US);
+ if (rc < 0)
+ pr_err("ISPIF stop frame boundary timeout\n");
+ }
+
+end:
+ return rc;
+}
+
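+/*
+ * ispif_process_irq - count start-of-frame interrupts per interface for
+ * the given VFE and print the first few occurrences of each for debug.
+ */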
+static void ispif_process_irq(struct ispif_device *ispif,
+ struct ispif_irq_status *out, enum msm_ispif_vfe_intf vfe_id)
+{
+ if (WARN_ON(!ispif) || WARN_ON(!out))
+ return;
+
+ if (out[vfe_id].ispifIrqStatus0 &
+ ISPIF_IRQ_STATUS_PIX_SOF_MASK) {
+ if (ispif->ispif_sof_debug < ISPIF_SOF_DEBUG_COUNT)
+ pr_err("%s: PIX0 frame id: %u irqstatus0 0x%x, irqstatus1 0x%x\n",
+ __func__,
+ ispif->sof_count[vfe_id].sof_cnt[PIX0],
+ out[vfe_id].ispifIrqStatus0,
+ out[vfe_id].ispifIrqStatus1);
+ ispif->sof_count[vfe_id].sof_cnt[PIX0]++;
+ ispif->ispif_sof_debug++;
+ }
+ if (out[vfe_id].ispifIrqStatus0 &
+ ISPIF_IRQ_STATUS_RDI0_SOF_MASK) {
+ if (ispif->ispif_rdi0_debug < ISPIF_SOF_DEBUG_COUNT)
+ pr_err("%s: RDI0 frame id: %u\n", __func__,
+ ispif->sof_count[vfe_id].sof_cnt[RDI0]);
+ ispif->sof_count[vfe_id].sof_cnt[RDI0]++;
+ ispif->ispif_rdi0_debug++;
+ }
+ if (out[vfe_id].ispifIrqStatus1 &
+ ISPIF_IRQ_STATUS_RDI1_SOF_MASK) {
+ if (ispif->ispif_rdi1_debug < ISPIF_SOF_DEBUG_COUNT)
+ pr_err("%s: RDI1 frame id: %u\n", __func__,
+ ispif->sof_count[vfe_id].sof_cnt[RDI1]);
+ ispif->sof_count[vfe_id].sof_cnt[RDI1]++;
+ ispif->ispif_rdi1_debug++;
+ }
+ if (out[vfe_id].ispifIrqStatus2 &
+ ISPIF_IRQ_STATUS_RDI2_SOF_MASK) {
+ if (ispif->ispif_rdi2_debug < ISPIF_SOF_DEBUG_COUNT)
+ pr_err("%s: RDI2 frame id: %u\n", __func__,
+ ispif->sof_count[vfe_id].sof_cnt[RDI2]);
+ ispif->sof_count[vfe_id].sof_cnt[RDI2]++;
+ ispif->ispif_rdi2_debug++;
+ }
+}
+
+static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out,
+ void *data)
+{
+ struct ispif_device *ispif = (struct ispif_device *)data;
+ bool fatal_err = false;
+ int i = 0;
+
+ if (WARN_ON(!ispif) || WARN_ON(!out))
+ return;
+
+ out[VFE0].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_0(VFE0));
+ msm_camera_io_w(out[VFE0].ispifIrqStatus0,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE0));
+
+ out[VFE0].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_1(VFE0));
+ msm_camera_io_w(out[VFE0].ispifIrqStatus1,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE0));
+
+ out[VFE0].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_2(VFE0));
+ msm_camera_io_w_mb(out[VFE0].ispifIrqStatus2,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE0));
+
+ if (ispif->vfe_info.num_vfe > 1) {
+ out[VFE1].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_0(VFE1));
+ msm_camera_io_w(out[VFE1].ispifIrqStatus0,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE1));
+
+ out[VFE1].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_1(VFE1));
+ msm_camera_io_w(out[VFE1].ispifIrqStatus1,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE1));
+
+ out[VFE1].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_2(VFE1));
+ msm_camera_io_w_mb(out[VFE1].ispifIrqStatus2,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE1));
+ }
+ msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+ ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
+ if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ) {
+ if (atomic_dec_and_test(&ispif->reset_trig[VFE0]))
+ complete(&ispif->reset_complete[VFE0]);
+ }
+
+ if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE0 pix0 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE0].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE0 rdi0 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE0].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE0 rdi1 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE0].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE0 rdi2 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ ispif_process_irq(ispif, out, VFE0);
+ }
+ if (ispif->hw_num_isps > 1) {
+ if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ) {
+ if (atomic_dec_and_test(&ispif->reset_trig[VFE1]))
+ complete(&ispif->reset_complete[VFE1]);
+ }
+
+ if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE1 pix0 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE1].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE1 rdi0 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE1].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE1 rdi1 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE1].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE1 rdi2 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ ispif_process_irq(ispif, out, VFE1);
+ }
+
+ if (fatal_err) {
+ pr_err_ratelimited("%s: fatal error, stop ispif immediately\n",
+ __func__);
+ for (i = 0; i < ispif->vfe_info.num_vfe; i++) {
+ msm_camera_io_w(0x0,
+ ispif->base + ISPIF_VFE_m_IRQ_MASK_0(i));
+ msm_camera_io_w(0x0,
+ ispif->base + ISPIF_VFE_m_IRQ_MASK_1(i));
+ msm_camera_io_w(0x0,
+ ispif->base + ISPIF_VFE_m_IRQ_MASK_2(i));
+ msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_0(i));
+ msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_1(i));
+ }
+ }
+}
+
+static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
+{
+ struct ispif_irq_status irq[VFE_MAX];
+
+ msm_ispif_read_irq_status(irq, data);
+ return IRQ_HANDLED;
+}
+
+
+static int msm_ispif_set_vfe_info(struct ispif_device *ispif,
+ struct msm_ispif_vfe_info *vfe_info)
+{
+ if (!vfe_info || (vfe_info->num_vfe == 0) ||
+ (vfe_info->num_vfe > ispif->hw_num_isps)) {
+ pr_err("Invalid VFE info: %pK %d\n", vfe_info,
+ (vfe_info ? vfe_info->num_vfe : 0));
+ return -EINVAL;
+ }
+
+ memcpy(&ispif->vfe_info, vfe_info, sizeof(struct msm_ispif_vfe_info));
+
+ return 0;
+}
+
+
+static int msm_ispif_init(struct ispif_device *ispif,
+ uint32_t csid_version)
+{
+ int rc = 0;
+
+ if (WARN_ON(!ispif)) {
+ rc = -EINVAL;
+ return rc;
+ }
+
+ if (ispif->ispif_state == ISPIF_POWER_UP) {
+ pr_err("%s: ispif already initted state = %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+
+ /* can we set to zero? */
+ ispif->applied_intf_cmd[VFE0].intf_cmd = 0xFFFFFFFF;
+ ispif->applied_intf_cmd[VFE0].intf_cmd1 = 0xFFFFFFFF;
+ ispif->applied_intf_cmd[VFE1].intf_cmd = 0xFFFFFFFF;
+ ispif->applied_intf_cmd[VFE1].intf_cmd1 = 0xFFFFFFFF;
+ memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
+ memset(ispif->vc_enable, 0, sizeof(ispif->vc_enable));
+
+ ispif->csid_version = csid_version;
+ CDBG("%s: CSID_VERSION= %d\n ", __func__, csid_version);
+ if (ispif->csid_version >= CSID_VERSION_V30 && !ispif->clk_mux_base) {
+ ispif->clk_mux_base = msm_camera_get_reg_base(ispif->pdev,
+ "csi_clk_mux", 1);
+ if (!ispif->clk_mux_base)
+ return -ENOMEM;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0,
+ CAM_AHB_CLIENT_ISPIF, CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ rc = msm_ispif_reset_hw(ispif);
+ if (rc)
+ goto error_ahb;
+
+ rc = msm_ispif_reset(ispif);
+ if (rc)
+ goto error_ahb;
+ ispif->ispif_state = ISPIF_POWER_UP;
+ return 0;
+
+error_ahb:
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_ISPIF,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return rc;
+}
+
+static void msm_ispif_release(struct ispif_device *ispif)
+{
+ if (WARN_ON(!ispif))
+ return;
+
+ msm_ispif_reset(ispif);
+ msm_ispif_reset_hw(ispif);
+
+ msm_camera_enable_irq(ispif->irq, 0);
+
+ ispif->ispif_state = ISPIF_POWER_DOWN;
+
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_ISPIF,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+}
+
+static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
+{
+ long rc = 0;
+ struct ispif_cfg_data *pcdata = (struct ispif_cfg_data *)arg;
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
+
+ if (WARN_ON(!sd) || WARN_ON(!pcdata)) {
+ rc = -EINVAL;
+ return rc;
+ }
+
+ mutex_lock(&ispif->mutex);
+ CDBG("%s cfg_type = %d\n", __func__, pcdata->cfg_type);
+ switch (pcdata->cfg_type) {
+ case ISPIF_ENABLE_REG_DUMP:
+ ispif->enb_dump_reg = pcdata->reg_dump; /* save dump config */
+ break;
+ case ISPIF_INIT:
+ rc = msm_ispif_init(ispif, ispif->csid_version);
+ if (rc) {
+ pr_err("%s: %d Init failed\n", __func__, __LINE__);
+ break;
+ }
+ msm_ispif_io_dump_reg(ispif);
+ break;
+
+ case ISPIF_RESET:
+ case ISPIF_RELEASE:
+ msm_ispif_reset(ispif);
+ msm_ispif_reset_hw(ispif);
+ break;
+ case ISPIF_CFG:
+ rc = msm_ispif_config(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_START_FRAME_BOUNDARY:
+ rc = msm_ispif_start_frame_boundary(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_RESTART_FRAME_BOUNDARY:
+ rc = msm_ispif_restart_frame_boundary(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_STOP_FRAME_BOUNDARY:
+ rc = msm_ispif_stop_frame_boundary(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_STOP:
+ rc = msm_ispif_stop_immediately(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_SET_VFE_INFO:
+ rc = msm_ispif_set_vfe_info(ispif, &pcdata->vfe_info);
+ break;
+ default:
+ pr_err("%s: invalid cfg_type\n", __func__);
+ rc = -EINVAL;
+ break;
+ }
+ mutex_unlock(&ispif->mutex);
+ return rc;
+}
+static struct v4l2_file_operations msm_ispif_v4l2_subdev_fops;
+
+static long msm_ispif_subdev_ioctl_unlocked(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_MSM_ISPIF_CFG:
+ return msm_ispif_cmd(sd, arg);
+ case VIDIOC_MSM_ISPIF_CFG_EXT:
+ return msm_ispif_cmd_ext(sd, arg);
+ case MSM_SD_NOTIFY_FREEZE: {
+ ispif->ispif_sof_debug = 0;
+ ispif->ispif_rdi0_debug = 0;
+ ispif->ispif_rdi1_debug = 0;
+ ispif->ispif_rdi2_debug = 0;
+ return 0;
+ }
+ case MSM_SD_UNNOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_SHUTDOWN:
+ return 0;
+ default:
+ pr_err_ratelimited("%s: invalid cmd 0x%x received\n",
+ __func__, cmd);
+ return -ENOIOCTLCMD;
+ }
+}
+
+static long msm_ispif_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return msm_ispif_subdev_ioctl(sd, cmd, arg);
+}
+
+static long msm_ispif_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_ispif_subdev_do_ioctl);
+}
+
+static int ispif_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+ int rc = 0;
+
+ CDBG("ISPIF: ispif_open_node");
+ mutex_lock(&ispif->mutex);
+ if (ispif->open_cnt == 0) {
+ /* enable regulator and clocks on first open */
+ rc = msm_ispif_set_regulators(ispif->ispif_vdd,
+ ispif->ispif_vdd_count, 1);
+ if (rc)
+ goto unlock;
+
+ rc = msm_ispif_clk_ahb_enable(ispif, 1);
+ if (rc)
+ goto ahb_clk_enable_fail;
+
+ rc = msm_camera_enable_irq(ispif->irq, 1);
+ if (rc)
+ goto irq_enable_fail;
+ }
+ /* mem remap is done in init when the clock is on */
+ ispif->open_cnt++;
+ mutex_unlock(&ispif->mutex);
+ return rc;
+irq_enable_fail:
+ msm_ispif_clk_ahb_enable(ispif, 0);
+ahb_clk_enable_fail:
+ msm_ispif_set_regulators(ispif->ispif_vdd, ispif->ispif_vdd_count, 0);
+unlock:
+ mutex_unlock(&ispif->mutex);
+ return rc;
+}
+
+static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+
+ CDBG("ISPIF: ispif_close_node");
+ if (!ispif) {
+ pr_err("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+ mutex_lock(&ispif->mutex);
+ if (ispif->open_cnt == 0) {
+ pr_err("%s: Invalid close\n", __func__);
+ rc = -ENODEV;
+ goto end;
+ }
+ ispif->open_cnt--;
+ if (ispif->open_cnt == 0) {
+ msm_ispif_release(ispif);
+ /* disable clocks and regulator on last close */
+ msm_ispif_clk_ahb_enable(ispif, 0);
+ msm_ispif_set_regulators(ispif->ispif_vdd,
+ ispif->ispif_vdd_count, 0);
+ }
+end:
+ mutex_unlock(&ispif->mutex);
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_ispif_subdev_core_ops = {
+ .ioctl = &msm_ispif_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_ispif_subdev_ops = {
+ .core = &msm_ispif_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_ispif_internal_ops = {
+ .open = ispif_open_node,
+ .close = ispif_close_node,
+};
+
+static int ispif_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct ispif_device *ispif;
+
+ ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
+ if (!ispif)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node) {
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,num-isps", &ispif->hw_num_isps);
+ if (rc)
+ /* backward compatibility */
+ ispif->hw_num_isps = 1;
+ /* not an error condition */
+ rc = 0;
+ }
+
+ rc = msm_ispif_get_regulator_info(ispif, pdev);
+ if (rc < 0)
+ goto regulator_fail;
+
+ rc = msm_ispif_get_clk_info(ispif, pdev);
+ if (rc < 0) {
+ pr_err("%s: msm_isp_get_clk_info() failed", __func__);
+ rc = -EFAULT;
+ goto get_clk_fail;
+ }
+ mutex_init(&ispif->mutex);
+ ispif->base = msm_camera_get_reg_base(pdev, "ispif", 1);
+ if (!ispif->base) {
+ rc = -ENOMEM;
+ goto reg_base_fail;
+ }
+
+ ispif->irq = msm_camera_get_irq(pdev, "ispif");
+ if (!ispif->irq) {
+ rc = -ENODEV;
+ goto get_irq_fail;
+ }
+ rc = msm_camera_register_irq(pdev, ispif->irq, msm_io_ispif_irq,
+ IRQF_TRIGGER_RISING, "ispif", ispif);
+ if (rc) {
+ rc = -ENODEV;
+ goto get_irq_fail;
+ }
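+ /* leave the IRQ disabled until the subdev is first opened */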
+ rc = msm_camera_enable_irq(ispif->irq, 0);
+ if (rc)
+ goto sd_reg_fail;
+
+ ispif->pdev = pdev;
+
+ v4l2_subdev_init(&ispif->msm_sd.sd, &msm_ispif_subdev_ops);
+ ispif->msm_sd.sd.internal_ops = &msm_ispif_internal_ops;
+ ispif->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ snprintf(ispif->msm_sd.sd.name,
+ ARRAY_SIZE(ispif->msm_sd.sd.name), MSM_ISPIF_DRV_NAME);
+ v4l2_set_subdevdata(&ispif->msm_sd.sd, ispif);
+
+ platform_set_drvdata(pdev, &ispif->msm_sd.sd);
+
+ media_entity_init(&ispif->msm_sd.sd.entity, 0, NULL, 0);
+ ispif->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ispif->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_ISPIF;
+ ispif->msm_sd.sd.entity.name = pdev->name;
+ ispif->msm_sd.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x1;
+ rc = msm_sd_register(&ispif->msm_sd);
+ if (rc) {
+ pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+ goto sd_reg_fail;
+ }
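+ /* clone the default subdev fops, then route ioctls through video_usercopy() via the local handler */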
+ msm_cam_copy_v4l2_subdev_fops(&msm_ispif_v4l2_subdev_fops);
+ msm_ispif_v4l2_subdev_fops.unlocked_ioctl =
+ msm_ispif_subdev_fops_ioctl;
+#ifdef CONFIG_COMPAT
+ msm_ispif_v4l2_subdev_fops.compat_ioctl32 = msm_ispif_subdev_fops_ioctl;
+#endif
+ ispif->msm_sd.sd.devnode->fops = &msm_ispif_v4l2_subdev_fops;
+ ispif->ispif_state = ISPIF_POWER_DOWN;
+ ispif->open_cnt = 0;
+ init_completion(&ispif->reset_complete[VFE0]);
+ init_completion(&ispif->reset_complete[VFE1]);
+ atomic_set(&ispif->reset_trig[VFE0], 0);
+ atomic_set(&ispif->reset_trig[VFE1], 0);
+
+ ispif->csid_version = CSID_VERSION_V35;
+ memset(&ispif->vfe_info, 0, sizeof(struct msm_ispif_vfe_info));
+ ispif->vfe_info.num_vfe = 2;
+
+ return 0;
+
+sd_reg_fail:
+ msm_camera_unregister_irq(pdev, ispif->irq, ispif);
+get_irq_fail:
+ msm_camera_put_reg_base(pdev, ispif->base, "ispif", 1);
+reg_base_fail:
+ msm_camera_put_clk_info(pdev, &ispif->ahb_clk_info,
+ &ispif->ahb_clk,
+ ispif->num_ahb_clk + ispif->num_clk);
+get_clk_fail:
+ msm_ispif_put_regulator(ispif);
+regulator_fail:
+ mutex_destroy(&ispif->mutex);
+ kfree(ispif);
+ return rc;
+}
+
+static const struct of_device_id msm_ispif_dt_match[] = {
+ {.compatible = "qcom,ispif"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_ispif_dt_match);
+
+static struct platform_driver ispif_driver = {
+ .probe = ispif_probe,
+ .driver = {
+ .name = MSM_ISPIF_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ispif_dt_match,
+ },
+};
+
+static int __init msm_ispif_init_module(void)
+{
+ return platform_driver_register(&ispif_driver);
+}
+
+static void __exit msm_ispif_exit_module(void)
+{
+ platform_driver_unregister(&ispif_driver);
+}
+
+module_init(msm_ispif_init_module);
+module_exit(msm_ispif_exit_module);
+MODULE_DESCRIPTION("MSM ISP Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/ispif/msm_ispif.h b/drivers/media/platform/msm/ais/ispif/msm_ispif.h
new file mode 100644
index 000000000000..00ec48254ddb
--- /dev/null
+++ b/drivers/media/platform/msm/ais/ispif/msm_ispif.h
@@ -0,0 +1,82 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_H
+#define MSM_ISPIF_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais_ispif.h>
+#include "msm_sd.h"
+
+/* Maximum number of voltage supply for ispif and vfe */
+#define ISPIF_VDD_INFO_MAX 2
+#define ISPIF_VFE_VDD_INFO_MAX 2
+
+#define ISPIF_CLK_INFO_MAX 27
+
+struct ispif_irq_status {
+ uint32_t ispifIrqStatus0;
+ uint32_t ispifIrqStatus1;
+ uint32_t ispifIrqStatus2;
+};
+
+enum msm_ispif_state_t {
+ ISPIF_POWER_UP,
+ ISPIF_POWER_DOWN,
+};
+struct ispif_sof_count {
+ uint32_t sof_cnt[INTF_MAX];
+};
+
+struct ispif_intf_cmd {
+ uint32_t intf_cmd;
+ uint32_t intf_cmd1;
+};
+
+struct ispif_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct resource *irq;
+ void __iomem *base;
+ void __iomem *clk_mux_base;
+ struct mutex mutex;
+ uint8_t start_ack_pending;
+ uint32_t csid_version;
+ int enb_dump_reg;
+ uint32_t open_cnt;
+ struct ispif_sof_count sof_count[VFE_MAX];
+ struct ispif_intf_cmd applied_intf_cmd[VFE_MAX];
+ uint8_t vc_enable[VFE_MAX][INTF_MAX][VC_MAX];
+ enum msm_ispif_state_t ispif_state;
+ struct msm_ispif_vfe_info vfe_info;
+ struct clk **ahb_clk;
+ struct msm_cam_clk_info *ahb_clk_info;
+ struct clk **clks;
+ struct msm_cam_clk_info *clk_info;
+ struct completion reset_complete[VFE_MAX];
+ atomic_t reset_trig[VFE_MAX];
+ uint32_t hw_num_isps;
+ uint32_t num_ahb_clk;
+ uint32_t num_clk;
+ uint32_t clk_idx;
+ uint32_t ispif_sof_debug;
+ uint32_t ispif_rdi0_debug;
+ uint32_t ispif_rdi1_debug;
+ uint32_t ispif_rdi2_debug;
+ struct regulator *ispif_vdd[ISPIF_VDD_INFO_MAX];
+ int ispif_vdd_count;
+ struct regulator *vfe_vdd[ISPIF_VFE_VDD_INFO_MAX];
+ int vfe_vdd_count;
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v1.h b/drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v1.h
new file mode 100644
index 000000000000..f791e283a979
--- /dev/null
+++ b/drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v1.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISPIF_HWREG_V1_H__
+#define __MSM_ISPIF_HWREG_V1_H__
+
+/* common registers */
+#define ISPIF_RST_CMD_ADDR 0x0000
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x0124
+#define PIX0_LINE_BUF_EN_BIT 0
+
+#define ISPIF_VFE(m) (0x0)
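+/* the per-VFE register offset is 0 on this HW version */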
+
+#define ISPIF_VFE_m_CTRL_0(m) (0x0008 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x0100 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x010C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x0118 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x0108 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x0114 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x0120 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x0104 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x0110 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x011C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INPUT_SEL(m) (0x000C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_0(m) (0x0004 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_1(m) (0x0030 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) (0x0010 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x0014 + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x20) : 0) \
+ + 8*(n))
+#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n) (0x0290 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n) (0x001C + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x24) : 0) \
+ + 0xc*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n) (0x0020 + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x24) : 0) \
+ + 0xc*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) (0x0024 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x0028 + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x34) : 0) \
+ + 8*(n))
+
+/* Defines for compatibility with newer ISPIF versions */
+#define ISPIF_RST_CMD_1_ADDR (0x0000)
+#define ISPIF_VFE_m_PIX_INTF_n_CROP(m, n) (0x0000 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_THRESHOLD(m) (0x0000 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_OUTPUT_SEL(m) (0x0000 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_3D_DESKEW_SIZE(m) (0x0000 + ISPIF_VFE(m))
+
+/* CSID CLK MUX SEL REGISTERS */
+#define ISPIF_RDI_CLK_MUX_SEL_ADDR 0x8
+
+/* ISPIF RESET BITS */
+#define VFE_CLK_DOMAIN_RST BIT(31)
+#define RDI_CLK_DOMAIN_RST BIT(30)
+#define PIX_CLK_DOMAIN_RST BIT(29)
+#define AHB_CLK_DOMAIN_RST BIT(28)
+#define RDI_1_CLK_DOMAIN_RST BIT(27)
+#define PIX_1_CLK_DOMAIN_RST BIT(26)
+#define RDI_2_CLK_DOMAIN_RST BIT(25)
+#define RDI_2_MISR_RST_STB BIT(20)
+#define RDI_2_VFE_RST_STB BIT(19)
+#define RDI_2_CSID_RST_STB BIT(18)
+#define RDI_1_MISR_RST_STB BIT(14)
+#define RDI_1_VFE_RST_STB BIT(13)
+#define RDI_1_CSID_RST_STB BIT(12)
+#define PIX_1_VFE_RST_STB BIT(10)
+#define PIX_1_CSID_RST_STB BIT(9)
+#define RDI_0_MISR_RST_STB BIT(8)
+#define RDI_0_VFE_RST_STB BIT(7)
+#define RDI_0_CSID_RST_STB BIT(6)
+#define PIX_0_MISR_RST_STB BIT(5)
+#define PIX_0_VFE_RST_STB BIT(4)
+#define PIX_0_CSID_RST_STB BIT(3)
+#define SW_REG_RST_STB BIT(2)
+#define MISC_LOGIC_RST_STB BIT(1)
+#define STROBED_RST_EN BIT(0)
+
+#define ISPIF_RST_CMD_MASK 0xFE1C77FF
+#define ISPIF_RST_CMD_1_MASK 0xFFFFFFFF /* undefined */
+
+#define ISPIF_RST_CMD_MASK_RESTART 0x00001FF9
+#define ISPIF_RST_CMD_1_MASK_RESTART 0x00001FF9 /* undefined */
+
+/* irq_mask_0 */
+#define PIX_INTF_0_OVERFLOW_IRQ BIT(12)
+#define RAW_INTF_0_OVERFLOW_IRQ BIT(25)
+#define RESET_DONE_IRQ BIT(27)
+/* irq_mask_1 */
+#define PIX_INTF_1_OVERFLOW_IRQ BIT(12)
+#define RAW_INTF_1_OVERFLOW_IRQ BIT(25)
+/* irq_mask_2 */
+#define RAW_INTF_2_OVERFLOW_IRQ BIT(12)
+
+#define ISPIF_IRQ_STATUS_MASK 0x0A493249
+#define ISPIF_IRQ_STATUS_1_MASK 0x02493249
+#define ISPIF_IRQ_STATUS_2_MASK 0x00001249
+
+#define ISPIF_IRQ_STATUS_PIX_SOF_MASK 0x000249
+#define ISPIF_IRQ_STATUS_RDI0_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI1_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI2_SOF_MASK 0x000249
+
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x000001
+
+#define ISPIF_STOP_INTF_IMMEDIATELY 0xAAAAAAAA
+
+/* ISPIF RDI pack mode not supported */
+static inline void msm_ispif_cfg_pack_mode(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf, uint32_t *pack_cfg_mask)
+{
+}
+#endif /* __MSM_ISPIF_HWREG_V1_H__ */
diff --git a/drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v2.h b/drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v2.h
new file mode 100644
index 000000000000..c91feb341e1a
--- /dev/null
+++ b/drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v2.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISPIF_HWREG_V2_H__
+#define __MSM_ISPIF_HWREG_V2_H__
+
+/* common registers */
+#define ISPIF_RST_CMD_ADDR 0x008
+#define ISPIF_RST_CMD_1_ADDR 0x00C
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x01C
+#define PIX0_LINE_BUF_EN_BIT 6
+
+#define ISPIF_VFE(m) ((m) * 0x200)
+
+#define ISPIF_VFE_m_CTRL_0(m) (0x200 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INPUT_SEL(m) (0x244 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) (0x254 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x264 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_CROP(m, n) (0x278 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_THRESHOLD(m) (0x288 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_OUTPUT_SEL(m) (0x28C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n) (0x290 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n) (0x298 + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n) (0x29C + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) (0x2C0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x2D0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_DESKEW_SIZE(m) (0x2E4 + ISPIF_VFE(m))
+
+/* CSID CLK MUX SEL REGISTERS */
+#define ISPIF_RDI_CLK_MUX_SEL_ADDR 0x8
+
+/* ISPIF RESET BITS */
+#define VFE_CLK_DOMAIN_RST BIT(31)
+#define PIX_1_CLK_DOMAIN_RST BIT(30)
+#define PIX_CLK_DOMAIN_RST BIT(29)
+#define RDI_2_CLK_DOMAIN_RST BIT(28)
+#define RDI_1_CLK_DOMAIN_RST BIT(27)
+#define RDI_CLK_DOMAIN_RST BIT(26)
+#define AHB_CLK_DOMAIN_RST BIT(25)
+#define RDI_2_VFE_RST_STB BIT(12)
+#define RDI_2_CSID_RST_STB BIT(11)
+#define RDI_1_VFE_RST_STB BIT(10)
+#define RDI_1_CSID_RST_STB BIT(9)
+#define RDI_0_VFE_RST_STB BIT(8)
+#define RDI_0_CSID_RST_STB BIT(7)
+#define PIX_1_VFE_RST_STB BIT(6)
+#define PIX_1_CSID_RST_STB BIT(5)
+#define PIX_0_VFE_RST_STB BIT(4)
+#define PIX_0_CSID_RST_STB BIT(3)
+#define SW_REG_RST_STB BIT(2)
+#define MISC_LOGIC_RST_STB BIT(1)
+#define STROBED_RST_EN BIT(0)
+
+#define ISPIF_RST_CMD_MASK 0xFE0F1FFF
+#define ISPIF_RST_CMD_1_MASK 0xFC0F1FF9
+
+#define ISPIF_RST_CMD_MASK_RESTART 0x00001FF9
+#define ISPIF_RST_CMD_1_MASK_RESTART 0x00001FF9
+
+#define PIX_INTF_0_OVERFLOW_IRQ BIT(12)
+#define RAW_INTF_0_OVERFLOW_IRQ BIT(25)
+#define RAW_INTF_1_OVERFLOW_IRQ BIT(25)
+#define RAW_INTF_2_OVERFLOW_IRQ BIT(12)
+#define RESET_DONE_IRQ BIT(27)
+
+#define ISPIF_IRQ_STATUS_MASK 0x0A493249
+#define ISPIF_IRQ_STATUS_1_MASK 0x02493249
+#define ISPIF_IRQ_STATUS_2_MASK 0x00001249
+
+#define ISPIF_IRQ_STATUS_PIX_SOF_MASK 0x249
+#define ISPIF_IRQ_STATUS_RDI0_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI1_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI2_SOF_MASK 0x249
+
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x1
+
+#define ISPIF_STOP_INTF_IMMEDIATELY 0xAAAAAAAA
+
+/* ISPIF RDI pack mode not supported */
+static inline void msm_ispif_cfg_pack_mode(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf, uint32_t *pack_cfg_mask)
+{
+}
+#endif /* __MSM_ISPIF_HWREG_V2_H__ */
diff --git a/drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v3.h b/drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v3.h
new file mode 100644
index 000000000000..c1eba029545e
--- /dev/null
+++ b/drivers/media/platform/msm/ais/ispif/msm_ispif_hwreg_v3.h
@@ -0,0 +1,135 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISPIF_HWREG_V3_H__
+#define __MSM_ISPIF_HWREG_V3_H__
+
+/* common registers */
+#define ISPIF_RST_CMD_ADDR 0x008
+#define ISPIF_RST_CMD_1_ADDR 0x00C
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x01C
+#define PIX0_LINE_BUF_EN_BIT 6
+
+#define ISPIF_VFE(m) ((m) * 0x200)
+
+#define ISPIF_VFE_m_CTRL_0(m) (0x200 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_CTRL_1(m) (0x204 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INPUT_SEL(m) (0x244 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) (0x254 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x264 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_0(m, n) (0x270 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_1(m, n) (0x27C + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_CROP(m, n) (0x288 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_THRESHOLD(m) (0x290 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_OUTPUT_SEL(m) (0x294 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n) (0x298 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n) (0x29C + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n) (0x2A0 + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) (0x2C0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x2D0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_DESKEW_SIZE(m) (0x2E4 + ISPIF_VFE(m))
+
+/* CSID CLK MUX SEL REGISTERS */
+#define ISPIF_RDI_CLK_MUX_SEL_ADDR 0x8
+
+/* ISPIF RESET BITS */
+#define VFE_CLK_DOMAIN_RST BIT(31)
+#define PIX_1_CLK_DOMAIN_RST BIT(30)
+#define PIX_CLK_DOMAIN_RST BIT(29)
+#define RDI_2_CLK_DOMAIN_RST BIT(28)
+#define RDI_1_CLK_DOMAIN_RST BIT(27)
+#define RDI_CLK_DOMAIN_RST BIT(26)
+#define AHB_CLK_DOMAIN_RST BIT(25)
+#define RDI_2_VFE_RST_STB BIT(12)
+#define RDI_2_CSID_RST_STB BIT(11)
+#define RDI_1_VFE_RST_STB BIT(10)
+#define RDI_1_CSID_RST_STB BIT(9)
+#define RDI_0_VFE_RST_STB BIT(8)
+#define RDI_0_CSID_RST_STB BIT(7)
+#define PIX_1_VFE_RST_STB BIT(6)
+#define PIX_1_CSID_RST_STB BIT(5)
+#define PIX_0_VFE_RST_STB BIT(4)
+#define PIX_0_CSID_RST_STB BIT(3)
+#define SW_REG_RST_STB BIT(2)
+#define MISC_LOGIC_RST_STB BIT(1)
+#define STROBED_RST_EN BIT(0)
+
+#define ISPIF_RST_CMD_MASK 0xFE7F1FFF
+#define ISPIF_RST_CMD_1_MASK 0xFC7F1FF9
+
+#define ISPIF_RST_CMD_MASK_RESTART 0x7F1FF9
+#define ISPIF_RST_CMD_1_MASK_RESTART 0x7F1FF9
+
+#define PIX_INTF_0_OVERFLOW_IRQ BIT(12)
+#define RAW_INTF_0_OVERFLOW_IRQ BIT(25)
+#define RAW_INTF_1_OVERFLOW_IRQ BIT(25)
+#define RAW_INTF_2_OVERFLOW_IRQ BIT(12)
+#define RESET_DONE_IRQ BIT(27)
+
+#define ISPIF_IRQ_STATUS_MASK 0x0A493249
+#define ISPIF_IRQ_STATUS_1_MASK 0x02493249
+#define ISPIF_IRQ_STATUS_2_MASK 0x00001249
+
+#define ISPIF_IRQ_STATUS_PIX_SOF_MASK 0x249
+#define ISPIF_IRQ_STATUS_RDI0_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI1_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI2_SOF_MASK 0x249
+
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x1
+
+#define ISPIF_STOP_INTF_IMMEDIATELY 0xAAAAAAAA
+
+/* ISPIF RDI pack mode support */
+static inline void msm_ispif_cfg_pack_mode(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf, uint32_t *pack_cfg_mask)
+{
+ uint32_t pack_addr[2];
+
+ WARN_ON(!ispif);
+
+ switch (intftype) {
+ case RDI0:
+ pack_addr[0] = ISPIF_VFE_m_RDI_INTF_n_PACK_0(vfe_intf, 0);
+ pack_addr[1] = ISPIF_VFE_m_RDI_INTF_n_PACK_1(vfe_intf, 0);
+ break;
+ case RDI1:
+ pack_addr[0] = ISPIF_VFE_m_RDI_INTF_n_PACK_0(vfe_intf, 1);
+ pack_addr[1] = ISPIF_VFE_m_RDI_INTF_n_PACK_1(vfe_intf, 1);
+ break;
+ case RDI2:
+ pack_addr[0] = ISPIF_VFE_m_RDI_INTF_n_PACK_0(vfe_intf, 2);
+ pack_addr[1] = ISPIF_VFE_m_RDI_INTF_n_PACK_1(vfe_intf, 2);
+ break;
+ default:
+ pr_debug("%s: pack_mode not supported on intftype=%d\n",
+ __func__, intftype);
+ return;
+ }
+ pr_debug("%s: intftype %d pack_mask %x: 0x%x, %x:0x%x\n",
+ __func__, intftype, pack_addr[0],
+ pack_cfg_mask[0], pack_addr[1],
+ pack_cfg_mask[1]);
+ msm_camera_io_w_mb(pack_cfg_mask[0], ispif->base + pack_addr[0]);
+ msm_camera_io_w_mb(pack_cfg_mask[1], ispif->base + pack_addr[1]);
+}
+#endif /* __MSM_ISPIF_HWREG_V3_H__ */
diff --git a/drivers/media/platform/msm/ais/jpeg_10/Makefile b/drivers/media/platform/msm/ais/jpeg_10/Makefile
new file mode 100644
index 000000000000..fe2e00a8012b
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/Makefile
@@ -0,0 +1,7 @@
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+
+ccflags-y += -Idrivers/media/platform/msm/ais/jpeg_10
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+
+obj-$(CONFIG_MSM_AIS_JPEG) += msm_jpeg_dev.o msm_jpeg_sync.o msm_jpeg_core.o msm_jpeg_hw.o msm_jpeg_platform.o
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_common.h b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_common.h
new file mode 100644
index 000000000000..9db73005e13c
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_common.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_COMMON_H
+#define MSM_JPEG_COMMON_H
+
+#define JPEG_DBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define JPEG_PR_ERR pr_err
+#define JPEG_DBG_HIGH pr_debug
+
+#define JPEG_BUS_VOTED(pgmn_dev) (pgmn_dev->jpeg_bus_vote = 1)
+#define JPEG_BUS_UNVOTED(pgmn_dev) (pgmn_dev->jpeg_bus_vote = 0)
+
+enum JPEG_MODE {
+ JPEG_MODE_DISABLE,
+ JPEG_MODE_OFFLINE,
+ JPEG_MODE_REALTIME,
+ JPEG_MODE_REALTIME_ROTATION
+};
+
+enum JPEG_ROTATION {
+ JPEG_ROTATION_0,
+ JPEG_ROTATION_90,
+ JPEG_ROTATION_180,
+ JPEG_ROTATION_270
+};
+
+#endif /* MSM_JPEG_COMMON_H */
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_core.c b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_core.c
new file mode 100644
index 000000000000..bb433bdad735
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_core.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include "msm_jpeg_hw.h"
+#include "msm_jpeg_core.h"
+#include "msm_jpeg_platform.h"
+#include "msm_jpeg_common.h"
+
+int msm_jpeg_core_reset(struct msm_jpeg_device *pgmn_dev, uint8_t op_mode,
+ void *base, int size)
+{
+ unsigned long flags;
+ int rc = 0;
+ int tm = 500; /*500ms*/
+
+ JPEG_DBG("%s:%d] reset", __func__, __LINE__);
+ memset(&pgmn_dev->fe_pingpong_buf, 0,
+ sizeof(pgmn_dev->fe_pingpong_buf));
+ pgmn_dev->fe_pingpong_buf.is_fe = 1;
+ memset(&pgmn_dev->we_pingpong_buf, 0,
+ sizeof(pgmn_dev->we_pingpong_buf));
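+ /* clear the ack flag under the lock, kick the HW reset, then wait for the reset-done IRQ to set it */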
+ spin_lock_irqsave(&pgmn_dev->reset_lock, flags);
+ pgmn_dev->reset_done_ack = 0;
+ if (pgmn_dev->core_type == MSM_JPEG_CORE_CODEC)
+ msm_jpeg_hw_reset(base, size);
+ else
+ msm_jpeg_hw_reset_dma(base, size);
+
+ spin_unlock_irqrestore(&pgmn_dev->reset_lock, flags);
+ rc = wait_event_timeout(
+ pgmn_dev->reset_wait,
+ pgmn_dev->reset_done_ack,
+ msecs_to_jiffies(tm));
+
+ if (!pgmn_dev->reset_done_ack) {
+ JPEG_DBG("%s: reset ACK failed %d", __func__, rc);
+ return -EBUSY;
+ }
+
+ JPEG_DBG("%s: reset_done_ack rc %d", __func__, rc);
+ spin_lock_irqsave(&pgmn_dev->reset_lock, flags);
+ pgmn_dev->reset_done_ack = 0;
+ pgmn_dev->state = MSM_JPEG_RESET;
+ spin_unlock_irqrestore(&pgmn_dev->reset_lock, flags);
+
+ return 0;
+}
+
+void msm_jpeg_core_release(struct msm_jpeg_device *pgmn_dev)
+{
+ int i = 0;
+
+ for (i = 0; i < 2; i++) {
+ if (pgmn_dev->we_pingpong_buf.buf_status[i] &&
+ pgmn_dev->release_buf)
+ msm_jpeg_platform_p2v(pgmn_dev->iommu_hdl,
+ pgmn_dev->we_pingpong_buf.buf[i].ion_fd);
+ pgmn_dev->we_pingpong_buf.buf_status[i] = 0;
+ }
+}
+
+void msm_jpeg_core_init(struct msm_jpeg_device *pgmn_dev)
+{
+ init_waitqueue_head(&pgmn_dev->reset_wait);
+ spin_lock_init(&pgmn_dev->reset_lock);
+}
+
+int msm_jpeg_core_fe_start(struct msm_jpeg_device *pgmn_dev)
+{
+ msm_jpeg_hw_fe_start(pgmn_dev->base);
+ return 0;
+}
+
+/* fetch engine */
+int msm_jpeg_core_fe_buf_update(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf)
+{
+ int rc = 0;
+
+ if (buf->cbcr_len == 0)
+ buf->cbcr_buffer_addr = 0x0;
+
+ JPEG_DBG("%s:%d] 0x%08x %d 0x%08x %d\n", __func__, __LINE__,
+ (int) buf->y_buffer_addr, buf->y_len,
+ (int) buf->cbcr_buffer_addr, buf->cbcr_len);
+
+ if (pgmn_dev->core_type == MSM_JPEG_CORE_CODEC) {
+ rc = msm_jpeg_hw_pingpong_update(&pgmn_dev->fe_pingpong_buf,
+ buf, pgmn_dev->base);
+ if (rc < 0)
+ return rc;
+ msm_jpeg_hw_fe_mmu_prefetch(buf, pgmn_dev->base,
+ pgmn_dev->decode_flag);
+ } else {
+ rc = msm_jpegdma_hw_pingpong_update(
+ &pgmn_dev->fe_pingpong_buf, buf, pgmn_dev->base);
+ if (rc < 0)
+ return rc;
+ msm_jpegdma_hw_fe_mmu_prefetch(buf, pgmn_dev->base);
+ }
+
+ return rc;
+}
+
+void *msm_jpeg_core_fe_pingpong_irq(int jpeg_irq_status,
+ struct msm_jpeg_device *pgmn_dev)
+{
+ return msm_jpeg_hw_pingpong_irq(&pgmn_dev->fe_pingpong_buf);
+}
+
+/* write engine */
+int msm_jpeg_core_we_buf_update(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf) {
+
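+ /* the write engine update always uses ping-pong slot 0 */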
+ JPEG_DBG("%s:%d] 0x%08x 0x%08x %d\n", __func__, __LINE__,
+ (int) buf->y_buffer_addr, (int) buf->cbcr_buffer_addr,
+ buf->y_len);
+
+ pgmn_dev->we_pingpong_buf.buf[0] = *buf;
+ pgmn_dev->we_pingpong_buf.buf_status[0] = 1;
+
+ if (pgmn_dev->core_type == MSM_JPEG_CORE_CODEC) {
+ msm_jpeg_hw_we_buffer_update(
+ &pgmn_dev->we_pingpong_buf.buf[0], 0, pgmn_dev->base);
+ msm_jpeg_hw_we_mmu_prefetch(buf, pgmn_dev->base,
+ pgmn_dev->decode_flag);
+ } else {
+ msm_jpegdma_hw_we_buffer_update(
+ &pgmn_dev->we_pingpong_buf.buf[0], 0, pgmn_dev->base);
+ msm_jpegdma_hw_we_mmu_prefetch(buf, pgmn_dev->base);
+ }
+
+ return 0;
+}
+
+int msm_jpeg_core_we_buf_reset(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_hw_buf *buf)
+{
+ int i = 0;
+
+ for (i = 0; i < 2; i++) {
+ if (pgmn_dev->we_pingpong_buf.buf[i].y_buffer_addr
+ == buf->y_buffer_addr)
+ pgmn_dev->we_pingpong_buf.buf_status[i] = 0;
+ }
+ return 0;
+}
+
+void *msm_jpeg_core_we_pingpong_irq(int jpeg_irq_status,
+ struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+
+ return msm_jpeg_hw_pingpong_irq(&pgmn_dev->we_pingpong_buf);
+}
+
+void *msm_jpeg_core_framedone_irq(int jpeg_irq_status,
+ struct msm_jpeg_device *pgmn_dev)
+{
+ struct msm_jpeg_hw_buf *buf_p;
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+
+ buf_p = msm_jpeg_hw_pingpong_active_buffer(
+ &pgmn_dev->we_pingpong_buf);
+ if (buf_p && !pgmn_dev->decode_flag) {
+ buf_p->framedone_len =
+ msm_jpeg_hw_encode_output_size(pgmn_dev->base);
+ JPEG_DBG("%s:%d] framedone_len %d\n", __func__, __LINE__,
+ buf_p->framedone_len);
+ }
+
+ return buf_p;
+}
+
+void *msm_jpeg_core_reset_ack_irq(int jpeg_irq_status,
+ struct msm_jpeg_device *pgmn_dev)
+{
+ /* @todo return the status back to msm_jpeg_core_reset */
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+ return NULL;
+}
+
+void *msm_jpeg_core_err_irq(int jpeg_irq_status,
+ struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_PR_ERR("%s: Error %x\n", __func__, jpeg_irq_status);
+ return NULL;
+}
+
+static int (*msm_jpeg_irq_handler)(int, void *, void *);
+
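+/* on an error IRQ, return any pending FE/WE ping-pong buffers to the client via the installed handler */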
+void msm_jpeg_core_return_buffers(struct msm_jpeg_device *pgmn_dev,
+ int jpeg_irq_status)
+{
+ void *data = NULL;
+
+ data = msm_jpeg_core_fe_pingpong_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(MSM_JPEG_HW_MASK_COMP_FE,
+ pgmn_dev, data);
+ data = msm_jpeg_core_we_pingpong_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(MSM_JPEG_HW_MASK_COMP_WE,
+ pgmn_dev, data);
+}
+
+irqreturn_t msm_jpeg_core_irq(int irq_num, void *context)
+{
+ void *data = NULL;
+ unsigned long flags;
+ int jpeg_irq_status;
+ struct msm_jpeg_device *pgmn_dev = (struct msm_jpeg_device *)context;
+
+ JPEG_DBG("%s:%d] irq_num = %d\n", __func__, __LINE__, irq_num);
+
+ jpeg_irq_status = msm_jpeg_hw_irq_get_status(pgmn_dev->base);
+
+ JPEG_DBG("%s:%d] jpeg_irq_status = %0x\n", __func__, __LINE__,
+ jpeg_irq_status);
+
+ /* For reset and framedone IRQs, clear all bits */
+ if (pgmn_dev->state == MSM_JPEG_IDLE) {
+ JPEG_DBG_HIGH("%s %d ] Error IRQ received state %d",
+ __func__, __LINE__, pgmn_dev->state);
+ JPEG_DBG_HIGH("%s %d ] Ignoring the Error", __func__,
+ __LINE__);
+ msm_jpeg_hw_irq_clear(JPEG_IRQ_CLEAR_BMSK,
+ JPEG_IRQ_CLEAR_ALL, pgmn_dev->base);
+ return IRQ_HANDLED;
+ } else if (jpeg_irq_status & 0x10000000) {
+ msm_jpeg_hw_irq_clear(JPEG_IRQ_CLEAR_BMSK,
+ JPEG_IRQ_CLEAR_ALL, pgmn_dev->base);
+ } else if (jpeg_irq_status & 0x1) {
+ msm_jpeg_hw_irq_clear(JPEG_IRQ_CLEAR_BMSK,
+ JPEG_IRQ_CLEAR_ALL, pgmn_dev->base);
+ if (pgmn_dev->decode_flag)
+ msm_jpeg_decode_status(pgmn_dev->base);
+ } else {
+ msm_jpeg_hw_irq_clear(JPEG_IRQ_CLEAR_BMSK,
+ jpeg_irq_status, pgmn_dev->base);
+ }
+
+ if (msm_jpeg_hw_irq_is_frame_done(jpeg_irq_status)) {
+ /* send fe ping pong irq */
+ JPEG_DBG_HIGH("%s:%d] Session done\n", __func__, __LINE__);
+ data = msm_jpeg_core_fe_pingpong_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(MSM_JPEG_HW_MASK_COMP_FE,
+ context, data);
+ data = msm_jpeg_core_framedone_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(
+ MSM_JPEG_HW_MASK_COMP_FRAMEDONE,
+ context, data);
+ pgmn_dev->state = MSM_JPEG_INIT;
+ }
+ if (msm_jpeg_hw_irq_is_reset_ack(jpeg_irq_status)) {
+ data = msm_jpeg_core_reset_ack_irq(jpeg_irq_status,
+ pgmn_dev);
+ spin_lock_irqsave(&pgmn_dev->reset_lock, flags);
+ pgmn_dev->reset_done_ack = 1;
+ spin_unlock_irqrestore(&pgmn_dev->reset_lock, flags);
+ wake_up(&pgmn_dev->reset_wait);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(
+ MSM_JPEG_HW_MASK_COMP_RESET_ACK,
+ context, data);
+ }
+
+ /* Unexpected/unintended HW interrupt */
+ if (msm_jpeg_hw_irq_is_err(jpeg_irq_status)) {
+ if (pgmn_dev->state != MSM_JPEG_EXECUTING) {
+ /* Clear all the bits and ignore the IRQ */
+ JPEG_DBG_HIGH("%s %d ] Error IRQ received state %d",
+ __func__, __LINE__, pgmn_dev->state);
+ JPEG_DBG_HIGH("%s %d ] Ignoring the Error", __func__,
+ __LINE__);
+ msm_jpeg_hw_irq_clear(JPEG_IRQ_CLEAR_BMSK,
+ JPEG_IRQ_CLEAR_ALL, pgmn_dev->base);
+ return IRQ_HANDLED;
+ }
+ if (pgmn_dev->decode_flag)
+ msm_jpeg_decode_status(pgmn_dev->base);
+ msm_jpeg_core_return_buffers(pgmn_dev, jpeg_irq_status);
+ data = msm_jpeg_core_err_irq(jpeg_irq_status, pgmn_dev);
+ if (msm_jpeg_irq_handler) {
+ msm_jpeg_irq_handler(MSM_JPEG_HW_MASK_COMP_ERR,
+ context, data);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t msm_jpegdma_core_irq(int irq_num, void *context)
+{
+ void *data = NULL;
+ unsigned long flags;
+ int jpeg_irq_status;
+ struct msm_jpeg_device *pgmn_dev = context;
+
+ JPEG_DBG("%s:%d] irq_num = %d\n", __func__, __LINE__, irq_num);
+
+ jpeg_irq_status = msm_jpegdma_hw_irq_get_status(pgmn_dev->base);
+
+ JPEG_DBG("%s:%d] jpeg_irq_status = %0x\n", __func__, __LINE__,
+ jpeg_irq_status);
+
+ /* For reset and framedone IRQs, clear all bits */
+ if (pgmn_dev->state == MSM_JPEG_IDLE) {
+ JPEG_DBG_HIGH("%s %d ] Error IRQ received state %d",
+ __func__, __LINE__, pgmn_dev->state);
+ JPEG_DBG_HIGH("%s %d ] Ignoring the Error", __func__,
+ __LINE__);
+ msm_jpegdma_hw_irq_clear(JPEGDMA_IRQ_CLEAR_BMSK,
+ JPEGDMA_IRQ_CLEAR_ALL, pgmn_dev->base);
+ return IRQ_HANDLED;
+ } else if (jpeg_irq_status & 0x00000400) {
+ msm_jpegdma_hw_irq_clear(JPEGDMA_IRQ_CLEAR_BMSK,
+ JPEGDMA_IRQ_CLEAR_ALL, pgmn_dev->base);
+ } else if (jpeg_irq_status & 0x1) {
+ msm_jpegdma_hw_irq_clear(JPEGDMA_IRQ_CLEAR_BMSK,
+ JPEGDMA_IRQ_CLEAR_ALL, pgmn_dev->base);
+ } else {
+ msm_jpegdma_hw_irq_clear(JPEGDMA_IRQ_CLEAR_BMSK,
+ jpeg_irq_status, pgmn_dev->base);
+ }
+
+ if (msm_jpegdma_hw_irq_is_frame_done(jpeg_irq_status)) {
+ /* send fe ping pong irq */
+ JPEG_DBG_HIGH("%s:%d] Session done\n", __func__, __LINE__);
+ data = msm_jpeg_core_fe_pingpong_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(MSM_JPEG_HW_MASK_COMP_FE,
+ context, data);
+ data = msm_jpeg_core_framedone_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(
+ MSM_JPEG_HW_MASK_COMP_FRAMEDONE,
+ context, data);
+ pgmn_dev->state = MSM_JPEG_INIT;
+ }
+ if (msm_jpegdma_hw_irq_is_reset_ack(jpeg_irq_status)) {
+ data = msm_jpeg_core_reset_ack_irq(jpeg_irq_status,
+ pgmn_dev);
+ spin_lock_irqsave(&pgmn_dev->reset_lock, flags);
+ pgmn_dev->reset_done_ack = 1;
+ spin_unlock_irqrestore(&pgmn_dev->reset_lock, flags);
+ wake_up(&pgmn_dev->reset_wait);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(
+ MSM_JPEG_HW_MASK_COMP_RESET_ACK,
+ context, data);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void msm_jpeg_core_irq_install(int (*irq_handler) (int, void *, void *))
+{
+ msm_jpeg_irq_handler = irq_handler;
+}
+
+void msm_jpeg_core_irq_remove(void)
+{
+ msm_jpeg_irq_handler = NULL;
+}
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_core.h b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_core.h
new file mode 100644
index 000000000000..994c110e56b6
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_core.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_CORE_H
+#define MSM_JPEG_CORE_H
+
+#include <linux/interrupt.h>
+#include "msm_jpeg_hw.h"
+#include "msm_jpeg_sync.h"
+
+#define msm_jpeg_core_buf msm_jpeg_hw_buf
+
+irqreturn_t msm_jpeg_core_irq(int irq_num, void *context);
+irqreturn_t msm_jpegdma_core_irq(int irq_num, void *context);
+void msm_jpeg_core_irq_install(int (*irq_handler) (int, void *, void *));
+void msm_jpeg_core_irq_remove(void);
+
+int msm_jpeg_core_fe_buf_update(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf);
+int msm_jpeg_core_we_buf_update(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf);
+int msm_jpeg_core_we_buf_reset(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_hw_buf *buf);
+
+int msm_jpeg_core_reset(struct msm_jpeg_device *pgmn_dev, uint8_t op_mode,
+ void *base, int size);
+int msm_jpeg_core_fe_start(struct msm_jpeg_device *);
+
+void msm_jpeg_core_release(struct msm_jpeg_device *);
+void msm_jpeg_core_init(struct msm_jpeg_device *);
+#endif /* MSM_JPEG_CORE_H */
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_dev.c b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_dev.c
new file mode 100644
index 000000000000..88c822c50491
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_dev.c
@@ -0,0 +1,345 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <media/msm_jpeg.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "msm_jpeg_sync.h"
+#include "msm_jpeg_common.h"
+
+#define MSM_JPEG_NAME "jpeg"
+#define DEV_NAME_LEN 10
+
+static int msm_jpeg_open(struct inode *inode, struct file *filp)
+{
+ int rc = 0;
+
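+ /* recover the per-device context from the char device for later ioctls */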
+ struct msm_jpeg_device *pgmn_dev = container_of(inode->i_cdev,
+ struct msm_jpeg_device, cdev);
+ filp->private_data = pgmn_dev;
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+
+ rc = __msm_jpeg_open(pgmn_dev);
+
+ JPEG_DBG(KERN_INFO "%s:%d] %s open_count = %d\n", __func__, __LINE__,
+ filp->f_path.dentry->d_name.name, pgmn_dev->open_count);
+
+ return rc;
+}
+
+static int msm_jpeg_release(struct inode *inode, struct file *filp)
+{
+ int rc;
+
+ struct msm_jpeg_device *pgmn_dev = filp->private_data;
+
+ JPEG_DBG(KERN_INFO "%s:%d]\n", __func__, __LINE__);
+
+ rc = __msm_jpeg_release(pgmn_dev);
+
+ JPEG_DBG(KERN_INFO "%s:%d] %s open_count = %d\n", __func__, __LINE__,
+ filp->f_path.dentry->d_name.name, pgmn_dev->open_count);
+ return rc;
+}
+#ifdef CONFIG_COMPAT
+static long msm_jpeg_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+ struct msm_jpeg_device *pgmn_dev = filp->private_data;
+
+ JPEG_DBG("%s:%d] cmd=%d pgmn_dev=0x%pK arg=0x%lx\n", __func__,
+ __LINE__, _IOC_NR(cmd), pgmn_dev,
+ (unsigned long)arg);
+
+ rc = __msm_jpeg_compat_ioctl(pgmn_dev, cmd, arg);
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+ return rc;
+}
+#endif
+static long msm_jpeg_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+ struct msm_jpeg_device *pgmn_dev = filp->private_data;
+
+ JPEG_DBG("%s:%d] cmd=%d pgmn_dev=0x%pK arg=0x%lx\n", __func__,
+ __LINE__, _IOC_NR(cmd), pgmn_dev,
+ (unsigned long)arg);
+
+ rc = __msm_jpeg_ioctl(pgmn_dev, cmd, arg);
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+ return rc;
+}
+
+static const struct file_operations msm_jpeg_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_jpeg_open,
+ .release = msm_jpeg_release,
+ .unlocked_ioctl = msm_jpeg_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = msm_jpeg_compat_ioctl,
+#endif
+};
+
+
+int msm_jpeg_subdev_init(struct v4l2_subdev *jpeg_sd)
+{
+ int rc;
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *)jpeg_sd->host_priv;
+
+ JPEG_DBG("%s:%d: jpeg_sd=0x%lx pgmn_dev=0x%pK\n",
+ __func__, __LINE__, (unsigned long)jpeg_sd,
+ pgmn_dev);
+ rc = __msm_jpeg_open(pgmn_dev);
+ JPEG_DBG("%s:%d: rc=%d\n",
+ __func__, __LINE__, rc);
+ return rc;
+}
+
+static long msm_jpeg_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ long rc;
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *)sd->host_priv;
+
+ JPEG_DBG("%s: cmd=%d\n", __func__, cmd);
+
+ JPEG_DBG("%s: pgmn_dev 0x%pK", __func__, pgmn_dev);
+
+ JPEG_DBG("%s: Calling __msm_jpeg_ioctl\n", __func__);
+
+ rc = __msm_jpeg_ioctl(pgmn_dev, cmd, (unsigned long)arg);
+ pr_debug("%s: X\n", __func__);
+ return rc;
+}
+
+void msm_jpeg_subdev_release(struct v4l2_subdev *jpeg_sd)
+{
+ int rc;
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *)jpeg_sd->host_priv;
+ JPEG_DBG("%s:pgmn_dev=0x%pK", __func__, pgmn_dev);
+ rc = __msm_jpeg_release(pgmn_dev);
+ JPEG_DBG("%s:rc=%d", __func__, rc);
+}
+
+static const struct v4l2_subdev_core_ops msm_jpeg_subdev_core_ops = {
+ .ioctl = msm_jpeg_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_jpeg_subdev_ops = {
+ .core = &msm_jpeg_subdev_core_ops,
+};
+
+struct msm_jpeg_priv_data {
+ enum msm_jpeg_core_type core_type;
+};
+
+static const struct msm_jpeg_priv_data msm_jpeg_priv_data_jpg = {
+ .core_type = MSM_JPEG_CORE_CODEC
+};
+static const struct msm_jpeg_priv_data msm_jpeg_priv_data_dma = {
+ .core_type = MSM_JPEG_CORE_DMA
+};
+
+static const struct of_device_id msm_jpeg_dt_match[] = {
+ {.compatible = "qcom,jpeg", .data = &msm_jpeg_priv_data_jpg},
+ {.compatible = "qcom,jpeg_dma", .data = &msm_jpeg_priv_data_dma},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_jpeg_dt_match);
+
+static int msm_jpeg_init_dev(struct platform_device *pdev)
+{
+ int rc = -1;
+ struct device *dev;
+ struct msm_jpeg_device *msm_jpeg_device_p;
+ const struct of_device_id *device_id;
+ const struct msm_jpeg_priv_data *priv_data;
+ char devname[DEV_NAME_LEN];
+
+ msm_jpeg_device_p = kzalloc(sizeof(struct msm_jpeg_device), GFP_KERNEL);
+ if (!msm_jpeg_device_p) {
+ JPEG_PR_ERR("%s: no mem\n", __func__);
+ return -ENOMEM;
+ }
+
+ msm_jpeg_device_p->pdev = pdev;
+
+ device_id = of_match_device(msm_jpeg_dt_match, &pdev->dev);
+ if (!device_id) {
+ JPEG_PR_ERR("%s: device_id is NULL\n", __func__);
+ goto fail;
+ }
+
+ priv_data = device_id->data;
+ msm_jpeg_device_p->core_type = priv_data->core_type;
+
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node, "cell-index",
+ &pdev->id);
+
+ snprintf(devname, sizeof(devname), "%s%d", MSM_JPEG_NAME, pdev->id);
+
+ rc = __msm_jpeg_init(msm_jpeg_device_p);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: initialization failed\n", __func__);
+ goto fail;
+ }
+
+ v4l2_subdev_init(&msm_jpeg_device_p->subdev, &msm_jpeg_subdev_ops);
+ v4l2_set_subdev_hostdata(&msm_jpeg_device_p->subdev, msm_jpeg_device_p);
+ JPEG_DBG("%s: msm_jpeg_device_p 0x%lx", __func__,
+ (unsigned long)msm_jpeg_device_p);
+
+ rc = alloc_chrdev_region(&msm_jpeg_device_p->msm_jpeg_devno, 0, 1,
+ devname);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: failed to allocate chrdev\n", __func__);
+ goto fail_1;
+ }
+
+ if (!msm_jpeg_device_p->msm_jpeg_class) {
+ msm_jpeg_device_p->msm_jpeg_class =
+ class_create(THIS_MODULE, devname);
+ if (IS_ERR(msm_jpeg_device_p->msm_jpeg_class)) {
+ rc = PTR_ERR(msm_jpeg_device_p->msm_jpeg_class);
+ JPEG_PR_ERR("%s: create device class failed\n",
+ __func__);
+ goto fail_2;
+ }
+ }
+
+ dev = device_create(msm_jpeg_device_p->msm_jpeg_class, NULL,
+ MKDEV(MAJOR(msm_jpeg_device_p->msm_jpeg_devno),
+ MINOR(msm_jpeg_device_p->msm_jpeg_devno)), NULL,
+ "%s%d", MSM_JPEG_NAME, pdev->id);
+ if (IS_ERR(dev)) {
+ JPEG_PR_ERR("%s: error creating device\n", __func__);
+ rc = -ENODEV;
+ goto fail_3;
+ }
+
+ cdev_init(&msm_jpeg_device_p->cdev, &msm_jpeg_fops);
+ msm_jpeg_device_p->cdev.owner = THIS_MODULE;
+ msm_jpeg_device_p->cdev.ops =
+ (const struct file_operations *) &msm_jpeg_fops;
+ rc = cdev_add(&msm_jpeg_device_p->cdev,
+ msm_jpeg_device_p->msm_jpeg_devno, 1);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: error adding cdev\n", __func__);
+ rc = -ENODEV;
+ goto fail_4;
+ }
+
+ platform_set_drvdata(pdev, &msm_jpeg_device_p);
+
+ JPEG_DBG("%s %s%d: success\n", __func__, MSM_JPEG_NAME, pdev->id);
+
+ return rc;
+
+fail_4:
+ device_destroy(msm_jpeg_device_p->msm_jpeg_class,
+ msm_jpeg_device_p->msm_jpeg_devno);
+
+fail_3:
+ class_destroy(msm_jpeg_device_p->msm_jpeg_class);
+
+fail_2:
+ unregister_chrdev_region(msm_jpeg_device_p->msm_jpeg_devno, 1);
+
+fail_1:
+ __msm_jpeg_exit(msm_jpeg_device_p);
+ return rc;
+
+fail:
+ kfree(msm_jpeg_device_p);
+ return rc;
+
+}
+
+static void msm_jpeg_exit(struct msm_jpeg_device *msm_jpeg_device_p)
+{
+ cdev_del(&msm_jpeg_device_p->cdev);
+ device_destroy(msm_jpeg_device_p->msm_jpeg_class,
+ msm_jpeg_device_p->msm_jpeg_devno);
+ class_destroy(msm_jpeg_device_p->msm_jpeg_class);
+ unregister_chrdev_region(msm_jpeg_device_p->msm_jpeg_devno, 1);
+ cam_smmu_destroy_handle(msm_jpeg_device_p->iommu_hdl);
+
+ __msm_jpeg_exit(msm_jpeg_device_p);
+}
+
+static int __msm_jpeg_probe(struct platform_device *pdev)
+{
+ return msm_jpeg_init_dev(pdev);
+}
+
+static int __msm_jpeg_remove(struct platform_device *pdev)
+{
+ struct msm_jpeg_device *msm_jpegd_device_p;
+
+ msm_jpegd_device_p = platform_get_drvdata(pdev);
+ if (msm_jpegd_device_p)
+ msm_jpeg_exit(msm_jpegd_device_p);
+
+ return 0;
+}
+
+static struct platform_driver msm_jpeg_driver = {
+ .probe = __msm_jpeg_probe,
+ .remove = __msm_jpeg_remove,
+ .driver = {
+ .name = "msm_jpeg",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_jpeg_dt_match,
+ },
+};
+
+static int __init msm_jpeg_driver_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&msm_jpeg_driver);
+ return rc;
+}
+
+static void __exit msm_jpeg_driver_exit(void)
+{
+ platform_driver_unregister(&msm_jpeg_driver);
+}
+
+MODULE_DESCRIPTION("msm jpeg jpeg driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(msm_jpeg_driver_init);
+module_exit(msm_jpeg_driver_exit);
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw.c b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw.c
new file mode 100644
index 000000000000..b0316cb3874d
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw.c
@@ -0,0 +1,928 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include "msm_jpeg_hw.h"
+#include "msm_jpeg_common.h"
+#include "msm_camera_io_util.h"
+
+#include <linux/io.h>
+
+int msm_jpeg_hw_pingpong_update(struct msm_jpeg_hw_pingpong *pingpong_hw,
+ struct msm_jpeg_hw_buf *buf, void *base)
+{
+ int buf_free_index = -1;
+
+ if (!pingpong_hw->buf_status[0]) {
+ buf_free_index = 0;
+ } else if (!pingpong_hw->buf_status[1]) {
+ buf_free_index = 1;
+ } else {
+ JPEG_PR_ERR("%s:%d: pingpong buffer busy\n",
+ __func__, __LINE__);
+ return -EBUSY;
+ }
+
+ pingpong_hw->buf[buf_free_index] = *buf;
+ pingpong_hw->buf_status[buf_free_index] = 1;
+
+ if (pingpong_hw->is_fe) {
+ /* it is fe */
+ msm_jpeg_hw_fe_buffer_update(
+ &pingpong_hw->buf[buf_free_index], buf_free_index,
+ base);
+ } else {
+ /* it is we */
+ msm_jpeg_hw_we_buffer_update(
+ &pingpong_hw->buf[buf_free_index], buf_free_index,
+ base);
+ }
+ return 0;
+}
+
+int msm_jpegdma_hw_pingpong_update(struct msm_jpeg_hw_pingpong *pingpong_hw,
+ struct msm_jpeg_hw_buf *buf, void *base)
+{
+ int buf_free_index = -1;
+
+ if (!pingpong_hw->buf_status[0]) {
+ buf_free_index = 0;
+ } else if (!pingpong_hw->buf_status[1]) {
+ buf_free_index = 1;
+ } else {
+ JPEG_PR_ERR("%s:%d: pingpong buffer busy\n",
+ __func__, __LINE__);
+ return -EBUSY;
+ }
+
+ pingpong_hw->buf[buf_free_index] = *buf;
+ pingpong_hw->buf_status[buf_free_index] = 1;
+
+ if (pingpong_hw->is_fe) {
+ /* it is fe */
+ msm_jpegdma_hw_fe_buffer_update(
+ &pingpong_hw->buf[buf_free_index], buf_free_index,
+ base);
+ } else {
+ /* it is we */
+ msm_jpegdma_hw_we_buffer_update(
+ &pingpong_hw->buf[buf_free_index], buf_free_index,
+ base);
+ }
+ return 0;
+}
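+
+/* return the buffer in the active ping-pong slot (if any) and flip the active index */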
+void *msm_jpeg_hw_pingpong_irq(struct msm_jpeg_hw_pingpong *pingpong_hw)
+{
+ struct msm_jpeg_hw_buf *buf_p = NULL;
+
+ if (pingpong_hw->buf_status[pingpong_hw->buf_active_index]) {
+ buf_p = &pingpong_hw->buf[pingpong_hw->buf_active_index];
+ pingpong_hw->buf_status[pingpong_hw->buf_active_index] = 0;
+ }
+
+ pingpong_hw->buf_active_index = !pingpong_hw->buf_active_index;
+
+ return (void *) buf_p;
+}
+
+void *msm_jpeg_hw_pingpong_active_buffer(
+ struct msm_jpeg_hw_pingpong *pingpong_hw)
+{
+ struct msm_jpeg_hw_buf *buf_p = NULL;
+
+ if (pingpong_hw->buf_status[pingpong_hw->buf_active_index])
+ buf_p = &pingpong_hw->buf[pingpong_hw->buf_active_index];
+
+ return (void *) buf_p;
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_irq_get_status[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_READ, 1, JPEG_IRQ_STATUS_ADDR,
+ JPEG_IRQ_STATUS_BMSK, {0} },
+};
+
+int msm_jpeg_hw_irq_get_status(void *base)
+{
+ uint32_t n_irq_status = 0;
+
+ n_irq_status = msm_jpeg_hw_read(&hw_cmd_irq_get_status[0], base);
+ return n_irq_status;
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_irq_get_dmastatus[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_READ, 1, JPEGDMA_IRQ_STATUS_ADDR,
+ JPEGDMA_IRQ_STATUS_BMSK, {0} },
+};
+
+int msm_jpegdma_hw_irq_get_status(void *base)
+{
+ uint32_t n_irq_status = 0;
+
+ n_irq_status = msm_jpeg_hw_read(&hw_cmd_irq_get_dmastatus[0], base);
+ return n_irq_status;
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_encode_output_size[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_READ, 1,
+ JPEG_ENCODE_OUTPUT_SIZE_STATUS_ADDR,
+ JPEG_ENCODE_OUTPUT_SIZE_STATUS_BMSK, {0} },
+};
+
+long msm_jpeg_hw_encode_output_size(void *base)
+{
+ uint32_t encode_output_size = 0;
+
+ encode_output_size = msm_jpeg_hw_read(&hw_cmd_encode_output_size[0],
+ base);
+
+ return encode_output_size;
+}
+
+void msm_jpeg_hw_irq_clear(uint32_t mask, uint32_t data, void *base)
+{
+ struct msm_jpeg_hw_cmd cmd_irq_clear;
+
+ cmd_irq_clear.type = MSM_JPEG_HW_CMD_TYPE_WRITE;
+ cmd_irq_clear.n = 1;
+ cmd_irq_clear.offset = JPEG_IRQ_CLEAR_ADDR;
+ cmd_irq_clear.mask = mask;
+ cmd_irq_clear.data = data;
+ JPEG_DBG("%s:%d] mask %0x data %0x", __func__, __LINE__, mask, data);
+ msm_jpeg_hw_write(&cmd_irq_clear, base);
+}
+
+void msm_jpegdma_hw_irq_clear(uint32_t mask, uint32_t data, void *base)
+{
+ struct msm_jpeg_hw_cmd cmd_irq_clear;
+
+ cmd_irq_clear.type = MSM_JPEG_HW_CMD_TYPE_WRITE;
+ cmd_irq_clear.n = 1;
+ cmd_irq_clear.offset = JPEGDMA_IRQ_CLEAR_ADDR;
+ cmd_irq_clear.mask = mask;
+ cmd_irq_clear.data = data;
+ JPEG_DBG("%s:%d] mask %0x data %0x", __func__, __LINE__, mask, data);
+ msm_jpeg_hw_write(&cmd_irq_clear, base);
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_fe_ping_update[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_IRQ_MASK_ADDR,
+ JPEG_IRQ_MASK_BMSK, {JPEG_IRQ_ALLSOURCES_ENABLE} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_CMD_ADDR,
+ JPEG_CMD_BMSK, {JPEG_CMD_CLEAR_WRITE_PLN_QUEUES} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN0_RD_OFFSET_ADDR,
+ JPEG_PLN0_RD_OFFSET_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN0_RD_PNTR_ADDR,
+ JPEG_PLN0_RD_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN1_RD_OFFSET_ADDR,
+ JPEG_PLN1_RD_OFFSET_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN1_RD_PNTR_ADDR,
+ JPEG_PLN1_RD_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN2_RD_OFFSET_ADDR,
+ JPEG_PLN1_RD_OFFSET_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN2_RD_PNTR_ADDR,
+ JPEG_PLN2_RD_PNTR_BMSK, {0} },
+};
+
+void msm_jpeg_hw_fe_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ if (pingpong_index == 0) {
+ hw_cmd_p = &hw_cmd_fe_ping_update[0];
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->y_buffer_addr;
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->cbcr_buffer_addr;
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->pln2_addr;
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+ }
+}
+
+struct msm_jpeg_hw_cmd hw_dma_cmd_fe_ping_update[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_IRQ_MASK_ADDR,
+ JPEGDMA_IRQ_MASK_BMSK, {JPEG_IRQ_ALLSOURCES_ENABLE} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_CMD_ADDR,
+ JPEGDMA_CMD_BMSK, {JPEGDMA_CMD_CLEAR_READ_PLN_QUEUES} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_FE_0_RD_PNTR,
+ JPEG_PLN0_RD_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_FE_1_RD_PNTR,
+ JPEG_PLN1_RD_PNTR_BMSK, {0} },
+};
+
+void msm_jpegdma_hw_fe_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ if (pingpong_index != 0)
+ return;
+
+ hw_cmd_p = &hw_dma_cmd_fe_ping_update[0];
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->y_buffer_addr;
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->cbcr_buffer_addr;
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_fe_start[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_CMD_ADDR,
+ JPEG_CMD_BMSK, {JPEG_OFFLINE_CMD_START} },
+};
+
+void msm_jpeg_hw_fe_start(void *base)
+{
+ msm_jpeg_hw_write(&hw_cmd_fe_start[0], base);
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_we_ping_update[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN0_WR_PNTR_ADDR,
+ JPEG_PLN0_WR_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN1_WR_PNTR_ADDR,
+ JPEG_PLN0_WR_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN2_WR_PNTR_ADDR,
+ JPEG_PLN0_WR_PNTR_BMSK, {0} },
+};
+
+void msm_jpeg_decode_status(void *base)
+{
+ uint32_t data;
+
+ data = msm_camera_io_r(base + JPEG_DECODE_MCUS_DECODED_STATUS);
+ JPEG_DBG_HIGH("Decode MCUs decode status %u", data);
+ data = msm_camera_io_r(base + JPEG_DECODE_BITS_CONSUMED_STATUS);
+ JPEG_DBG_HIGH("Decode bits consumed status %u", data);
+ data = msm_camera_io_r(base + JPEG_DECODE_PRED_Y_STATE);
+ JPEG_DBG_HIGH("Decode prediction Y state %u", data);
+ data = msm_camera_io_r(base + JPEG_DECODE_PRED_C_STATE);
+ JPEG_DBG_HIGH("Decode prediction C state %u", data);
+ data = msm_camera_io_r(base + JPEG_DECODE_RSM_STATE);
+ JPEG_DBG_HIGH("Decode prediction RSM state %u", data);
+}
+
+
+void msm_jpeg_hw_we_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ if (pingpong_index == 0) {
+ hw_cmd_p = &hw_cmd_we_ping_update[0];
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->y_buffer_addr;
+ JPEG_DBG_HIGH("%s Output pln0 buffer address is %x\n", __func__,
+ p_input->y_buffer_addr);
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->cbcr_buffer_addr;
+ JPEG_DBG_HIGH("%s Output pln1 buffer address is %x\n", __func__,
+ p_input->cbcr_buffer_addr);
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->pln2_addr;
+ JPEG_DBG_HIGH("%s Output pln2 buffer address is %x\n", __func__,
+ p_input->pln2_addr);
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ }
+}
+
+struct msm_jpeg_hw_cmd hw_dma_cmd_we_ping_update[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_CMD_ADDR,
+ JPEGDMA_CMD_BMSK, {JPEGDMA_CMD_CLEAR_WRITE_PLN_QUEUES} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_WE_0_WR_PNTR,
+ JPEG_PLN0_WR_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_WE_1_WR_PNTR,
+ JPEG_PLN0_WR_PNTR_BMSK, {0} },
+};
+void msm_jpegdma_hw_we_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ if (pingpong_index != 0)
+ return;
+
+ hw_cmd_p = &hw_dma_cmd_we_ping_update[0];
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->y_buffer_addr;
+ JPEG_DBG_HIGH("%s Output we 0 buffer address is %x\n", __func__,
+ p_input->y_buffer_addr);
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->cbcr_buffer_addr;
+ JPEG_DBG_HIGH("%s Output we 1 buffer address is %x\n", __func__,
+ p_input->cbcr_buffer_addr);
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_fe_mmu_prefetch[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S0_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S0_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S0_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S0_MMU_PF_ADDR_MAX_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S1_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S1_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S1_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S1_MMU_PF_ADDR_MAX_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S2_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S2_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S2_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S2_MMU_PF_ADDR_MAX_BMSK, {0} },
+};
+
+/*
+ * msm_jpeg_hw_fe_mmu_prefetch() - writes fe min/max addrs for each plane to
+ * MMU prefetch registers.
+ * @buf: Pointer to jpeg hw buffer.
+ * @base: Pointer to base address.
+ * @decode_flag: Jpeg decode flag.
+ *
+ * This function writes fe min/max address for each plane to MMU prefetch
+ * registers, MMU prefetch hardware will only prefetch address translations
+ * within this min/max boundary.
+ *
+ * Return: None.
+ */
+void msm_jpeg_hw_fe_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *base,
+ uint8_t decode_flag)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ hw_cmd_p = &hw_cmd_fe_mmu_prefetch[0];
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN y_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+ if (buf->y_len)
+ tmp_hw_cmd.data += buf->y_len - 1;
+
+ JPEG_DBG("%s:%d: MAX y_buf_addr %08x, y_len %d\n",
+ __func__, __LINE__, tmp_hw_cmd.data, buf->y_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ if (!decode_flag) {
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->cbcr_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN cbcr_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->cbcr_buffer_addr;
+ if (buf->cbcr_len)
+ tmp_hw_cmd.data += buf->cbcr_len - 1;
+
+ JPEG_DBG("%s:%d: MAX cbcr_buf_addr %08x, cbcr_len %d\n"
+ , __func__, __LINE__, tmp_hw_cmd.data, buf->cbcr_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->pln2_addr;
+
+ JPEG_DBG("%s:%d: MIN pln2_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->pln2_addr;
+ if (buf->pln2_len)
+ tmp_hw_cmd.data += buf->pln2_len - 1;
+
+ JPEG_DBG("%s:%d: MAX pln2_buf_addr %08x, pln2_len %d\n"
+ , __func__, __LINE__, tmp_hw_cmd.data, buf->pln2_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ }
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_we_mmu_prefetch[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S1_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S1_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S1_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S1_MMU_PF_ADDR_MAX_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S2_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S2_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S2_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S2_MMU_PF_ADDR_MAX_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S3_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S3_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S3_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S3_MMU_PF_ADDR_MAX_BMSK, {0} },
+};
+
+/*
+ * msm_jpeg_hw_we_mmu_prefetch() - write we min/max addrs for each plane to
+ * MMU prefetch registers.
+ * @buf: Pointer to jpeg hw buffer.
+ * @base: Pointer to base address.
+ * @decode_flag: Jpeg decode flag.
+ *
+ * This function writes we min/max address for each plane to MMU prefetch
+ * registers, MMU prefetch hardware will only prefetch address translations
+ * within this min/max boundary.
+ *
+ * Return: None.
+ */
+void msm_jpeg_hw_we_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *base,
+ uint8_t decode_flag)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ hw_cmd_p = &hw_cmd_we_mmu_prefetch[0];
+
+ /* ensure write is done */
+ wmb();
+ if (decode_flag) {
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN y_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+ if (buf->y_len)
+ tmp_hw_cmd.data += buf->y_len - 1;
+
+ JPEG_DBG("%s:%d: MAX y_buf_addr %08x, y_len %d\n",
+ __func__, __LINE__, tmp_hw_cmd.data, buf->y_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->cbcr_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN cbcr_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->cbcr_buffer_addr;
+ if (buf->cbcr_len)
+ tmp_hw_cmd.data += buf->cbcr_len - 1;
+
+ JPEG_DBG("%s:%d: MAX cbcr_buf_addr %08x, cbcr_len %d\n"
+ , __func__, __LINE__, tmp_hw_cmd.data, buf->cbcr_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->pln2_addr;
+
+ JPEG_DBG("%s:%d: MIN pln2_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->pln2_addr;
+ if (buf->pln2_len)
+ tmp_hw_cmd.data += buf->pln2_len - 1;
+
+		JPEG_DBG("%s:%d: MAX pln2_buf_addr %08x, pln2_len %d\n"
+ , __func__, __LINE__, tmp_hw_cmd.data, buf->pln2_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ } else {
+ hw_cmd_p += 4;
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN y_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+ if (buf->y_len)
+ tmp_hw_cmd.data += buf->y_len - 1;
+
+ JPEG_DBG("%s:%d: MAX y_buf_addr %08x, y_len %d\n",
+ __func__, __LINE__, tmp_hw_cmd.data, buf->y_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ }
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_dma_cmd_fe_mmu_prefetch[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN,
+ MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX,
+ MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX_BMSK, {0} },
+};
+
+/*
+ * msm_jpegdma_hw_fe_mmu_prefetch() - write DMA fe min/max addrs to
+ * MMU prefetch registers.
+ * @buf: Pointer to jpeg hw buffer.
+ * @base: Pointer to base address.
+ *
+ * This function writes DMA fe min/max address for each plane to MMU prefetch
+ * registers, MMU prefetch hardware will only prefetch address translations
+ * within this min/max boundary.
+ *
+ * Return: None.
+ */
+void msm_jpegdma_hw_fe_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ hw_cmd_p = &hw_dma_cmd_fe_mmu_prefetch[0];
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN DMA addr %08x , reg offset %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data, tmp_hw_cmd.offset);
+
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+ if (buf->y_len)
+ tmp_hw_cmd.data += buf->y_len - 1;
+
+ JPEG_DBG("%s:%d: MAX DMA addr %08x , reg offset %08x , length %d\n",
+ __func__, __LINE__, tmp_hw_cmd.data, tmp_hw_cmd.offset,
+ buf->y_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_dma_cmd_we_mmu_prefetch[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN,
+ MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX,
+ MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX_BMSK, {0} },
+};
+
+/*
+ * msm_jpegdma_hw_we_mmu_prefetch() - write DMA we min/max addrs to
+ * MMU prefetch registers.
+ * @buf: Pointer to jpeg hw buffer.
+ * @base: Pointer to base address.
+ *
+ * This function writes DMA we min/max address for each plane to MMU prefetch
+ * registers, MMU prefetch hardware will only prefetch address translations
+ * within this min/max boundary.
+ *
+ * Return: None.
+ */
+void msm_jpegdma_hw_we_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ hw_cmd_p = &hw_dma_cmd_we_mmu_prefetch[0];
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN DMA addr %08x , reg offset %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data, tmp_hw_cmd.offset);
+
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+ if (buf->y_len)
+ tmp_hw_cmd.data += buf->y_len - 1;
+
+ JPEG_DBG("%s:%d: MAX DMA addr %08x , reg offset %08x , length %d\n",
+ __func__, __LINE__, tmp_hw_cmd.data, tmp_hw_cmd.offset,
+ buf->y_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_reset[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_IRQ_MASK_ADDR,
+ JPEG_IRQ_MASK_BMSK, {JPEG_IRQ_DISABLE_ALL} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_IRQ_CLEAR_ADDR,
+ JPEG_IRQ_MASK_BMSK, {JPEG_IRQ_CLEAR_ALL} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_IRQ_MASK_ADDR,
+ JPEG_IRQ_MASK_BMSK, {JPEG_IRQ_ALLSOURCES_ENABLE} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_RESET_CMD_ADDR,
+ JPEG_RESET_CMD_RMSK, {JPEG_RESET_DEFAULT} },
+};
+
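+/*
+ * msm_jpeg_hw_reset() - reset the JPEG core.
+ * @base: Pointer to base address.
+ * @size: Size of the register region (unused).
+ *
+ * Walks hw_cmd_reset[]: disable all interrupts, clear any pending ones,
+ * re-enable all interrupt sources and issue the default reset command.
+ * Each register write is followed by a wmb() to preserve ordering.
+ */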
+void msm_jpeg_hw_reset(void *base, int size)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+
+ hw_cmd_p = &hw_cmd_reset[0];
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p, base);
+ /* ensure write is done */
+ wmb();
+}
+struct msm_jpeg_hw_cmd hw_cmd_reset_dma[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_IRQ_MASK_ADDR,
+ JPEGDMA_IRQ_MASK_BMSK, {JPEGDMA_IRQ_DISABLE_ALL} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_IRQ_CLEAR_ADDR,
+ JPEGDMA_IRQ_MASK_BMSK, {JPEGDMA_IRQ_CLEAR_ALL} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_IRQ_MASK_ADDR,
+ JPEGDMA_IRQ_MASK_BMSK, {JPEGDMA_IRQ_ALLSOURCES_ENABLE} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_RESET_CMD_ADDR,
+ JPEGDMA_RESET_CMD_BMSK, {JPEGDMA_RESET_DEFAULT} },
+};
+
+void msm_jpeg_hw_reset_dma(void *base, int size)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+
+ hw_cmd_p = &hw_cmd_reset_dma[0];
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p, base);
+ /* ensure write is done */
+ wmb();
+}
+
+uint32_t msm_jpeg_hw_read(struct msm_jpeg_hw_cmd *hw_cmd_p,
+ void *jpeg_region_base)
+{
+ uint32_t *paddr;
+ uint32_t data;
+
+ paddr = jpeg_region_base + hw_cmd_p->offset;
+
+ data = msm_camera_io_r(paddr);
+ data &= hw_cmd_p->mask;
+
+ return data;
+}
+
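+/*
+ * msm_jpeg_hw_write() - masked register write.
+ * @hw_cmd_p: Command holding the register offset, mask and data.
+ * @jpeg_region_base: Mapped register base.
+ *
+ * For a full mask (0xffffffff) the value is written directly; otherwise the
+ * register is read back first and only the bits covered by the mask are
+ * updated (read-modify-write).
+ */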
+void msm_jpeg_hw_write(struct msm_jpeg_hw_cmd *hw_cmd_p,
+ void *jpeg_region_base)
+{
+ uint32_t *paddr;
+ uint32_t old_data, new_data;
+
+ paddr = jpeg_region_base + hw_cmd_p->offset;
+
+ if (hw_cmd_p->mask == 0xffffffff) {
+ old_data = 0;
+ } else {
+ old_data = msm_camera_io_r(paddr);
+ old_data &= ~hw_cmd_p->mask;
+ }
+
+ new_data = hw_cmd_p->data & hw_cmd_p->mask;
+ new_data |= old_data;
+ JPEG_DBG("%s:%d] %pK %08x\n", __func__, __LINE__,
+ paddr, new_data);
+ msm_camera_io_w(new_data, paddr);
+}
+
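+/*
+ * msm_jpeg_hw_wait() - poll a register until it matches the expected value.
+ * @hw_cmd_p: Command holding offset, mask, expected data and retry count (n).
+ * @m_us: Delay between polls in microseconds.
+ * @base: Mapped register base.
+ *
+ * Re-reads the masked register up to hw_cmd_p->n times, delaying m_us
+ * between attempts. The last value read is stored back in hw_cmd_p->data.
+ * Returns the remaining retry count, i.e. 0 on timeout.
+ */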
+int msm_jpeg_hw_wait(struct msm_jpeg_hw_cmd *hw_cmd_p, int m_us,
+ void *base)
+{
+ int tm = hw_cmd_p->n;
+ uint32_t data;
+ uint32_t wait_data = hw_cmd_p->data & hw_cmd_p->mask;
+
+ data = msm_jpeg_hw_read(hw_cmd_p, base);
+ if (data != wait_data) {
+ while (tm) {
+ udelay(m_us);
+ data = msm_jpeg_hw_read(hw_cmd_p, base);
+ if (data == wait_data)
+ break;
+ tm--;
+ }
+ }
+ hw_cmd_p->data = data;
+ return tm;
+}
+
+void msm_jpeg_hw_delay(struct msm_jpeg_hw_cmd *hw_cmd_p, int m_us)
+{
+ int tm = hw_cmd_p->n;
+
+ while (tm) {
+ udelay(m_us);
+ tm--;
+ }
+}
+
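+/*
+ * msm_jpeg_hw_exec_cmds() - execute a list of hardware commands.
+ * @hw_cmd_p: Array of commands.
+ * @m_cmds: Number of commands in the array.
+ * @max_size: Size of the mapped register region, used for bounds checking.
+ * @base: Mapped register base.
+ *
+ * Validates that every command offset is word aligned and inside the
+ * register region, then dispatches READ/WRITE/WRITE_OR/WAIT/DELAY commands.
+ * Returns 1 if at least one READ was executed (the caller is expected to
+ * copy the results back to user space), 0 otherwise and -EFAULT on an
+ * invalid offset.
+ */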
+int msm_jpeg_hw_exec_cmds(struct msm_jpeg_hw_cmd *hw_cmd_p, uint32_t m_cmds,
+ uint32_t max_size, void *base)
+{
+ int is_copy_to_user = 0;
+ uint32_t data;
+
+ while (m_cmds--) {
+ if (hw_cmd_p->offset >= max_size) {
+ JPEG_PR_ERR("%s:%d] %d exceed hw region %d\n", __func__,
+ __LINE__, hw_cmd_p->offset, max_size);
+ return -EFAULT;
+ }
+ if (hw_cmd_p->offset & 0x3) {
+ JPEG_PR_ERR("%s:%d] %d Invalid alignment\n", __func__,
+ __LINE__, hw_cmd_p->offset);
+ return -EFAULT;
+ }
+
+ switch (hw_cmd_p->type) {
+ case MSM_JPEG_HW_CMD_TYPE_READ:
+ hw_cmd_p->data = msm_jpeg_hw_read(hw_cmd_p, base);
+ is_copy_to_user = 1;
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_WRITE:
+ msm_jpeg_hw_write(hw_cmd_p, base);
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_WRITE_OR:
+ data = msm_jpeg_hw_read(hw_cmd_p, base);
+ hw_cmd_p->data = (hw_cmd_p->data & hw_cmd_p->mask) |
+ data;
+ msm_jpeg_hw_write(hw_cmd_p, base);
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_UWAIT:
+ msm_jpeg_hw_wait(hw_cmd_p, 1, base);
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_MWAIT:
+ msm_jpeg_hw_wait(hw_cmd_p, 1000, base);
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_UDELAY:
+ msm_jpeg_hw_delay(hw_cmd_p, 1);
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_MDELAY:
+ msm_jpeg_hw_delay(hw_cmd_p, 1000);
+ break;
+
+ default:
+ JPEG_PR_ERR("wrong hw command type\n");
+ break;
+ }
+
+ hw_cmd_p++;
+ }
+ return is_copy_to_user;
+}
+
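+/*
+ * msm_jpeg_io_dump() - dump a register region to the log.
+ * @base: Mapped register base.
+ * @size: Number of bytes to dump.
+ *
+ * Prints the region as rows of four 32-bit words, each row prefixed with
+ * its address.
+ */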
+void msm_jpeg_io_dump(void *base, int size)
+{
+ char line_str[128], *p_str;
+ void __iomem *addr = (void __iomem *)base;
+ int i;
+ u32 *p = (u32 *) addr;
+ u32 data;
+
+ JPEG_DBG_HIGH("%s:%d] %pK %d", __func__, __LINE__, addr, size);
+ line_str[0] = '\0';
+ p_str = line_str;
+ for (i = 0; i < size/4; i++) {
+ if (i % 4 == 0) {
+ snprintf(p_str, 12, "%08lx: ", (unsigned long)p);
+ p_str += 10;
+ }
+ data = msm_camera_io_r(p++);
+ snprintf(p_str, 12, "%08x ", data);
+ p_str += 9;
+ if ((i + 1) % 4 == 0) {
+ JPEG_DBG_HIGH("%s\n", line_str);
+ line_str[0] = '\0';
+ p_str = line_str;
+ }
+ }
+ if (line_str[0] != '\0')
+ JPEG_DBG_HIGH("%s\n", line_str);
+}
+
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw.h b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw.h
new file mode 100644
index 000000000000..b745d333b3ab
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw.h
@@ -0,0 +1,142 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_HW_H
+#define MSM_JPEG_HW_H
+
+#include <media/msm_jpeg.h>
+#include "msm_jpeg_hw_reg.h"
+#include <linux/ion.h>
+
+struct msm_jpeg_hw_buf {
+ struct msm_jpeg_buf vbuf;
+ struct file *file;
+ uint32_t framedone_len;
+ uint32_t y_buffer_addr;
+ uint32_t y_len;
+ uint32_t cbcr_buffer_addr;
+ uint32_t cbcr_len;
+ uint32_t num_of_mcu_rows;
+ int ion_fd;
+ uint32_t pln2_addr;
+ uint32_t pln2_len;
+};
+
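+/*
+ * Ping/pong buffer state for either the fetch engine (is_fe = 1) or the
+ * write engine (is_fe = 0): the two buffers, the status of each slot and
+ * the index of the buffer currently in use.
+ */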
+struct msm_jpeg_hw_pingpong {
+ uint8_t is_fe; /* 1: fe; 0: we */
+ struct msm_jpeg_hw_buf buf[2];
+ int buf_status[2];
+ int buf_active_index;
+};
+
+int msm_jpeg_hw_pingpong_update(struct msm_jpeg_hw_pingpong *pingpong_hw,
+ struct msm_jpeg_hw_buf *buf, void *);
+int msm_jpegdma_hw_pingpong_update(struct msm_jpeg_hw_pingpong *pingpong_hw,
+ struct msm_jpeg_hw_buf *buf, void *);
+void *msm_jpeg_hw_pingpong_irq(struct msm_jpeg_hw_pingpong *pingpong_hw);
+void *msm_jpeg_hw_pingpong_active_buffer(struct msm_jpeg_hw_pingpong
+ *pingpong_hw);
+
+void msm_jpeg_hw_irq_clear(uint32_t, uint32_t, void *);
+void msm_jpegdma_hw_irq_clear(uint32_t, uint32_t, void *);
+int msm_jpeg_hw_irq_get_status(void *);
+int msm_jpegdma_hw_irq_get_status(void *);
+long msm_jpeg_hw_encode_output_size(void *);
+#define MSM_JPEG_HW_MASK_COMP_FRAMEDONE \
+ MSM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK
+#define MSM_JPEG_HW_MASK_COMP_FE \
+ MSM_JPEG_HW_IRQ_STATUS_FE_RD_DONE_MASK
+#define MSM_JPEG_HW_MASK_COMP_WE \
+ (MSM_JPEG_HW_IRQ_STATUS_WE_Y_PINGPONG_MASK | \
+ MSM_JPEG_HW_IRQ_STATUS_WE_CBCR_PINGPONG_MASK)
+#define MSM_JPEG_HW_MASK_COMP_RESET_ACK \
+ MSM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK
+#define MSM_JPEG_HW_MASK_COMP_ERR \
+ (MSM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \
+ MSM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK)
+
+#define msm_jpeg_hw_irq_is_frame_done(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEG_HW_MASK_COMP_FRAMEDONE)
+#define msm_jpeg_hw_irq_is_fe_pingpong(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEG_HW_MASK_COMP_FE)
+#define msm_jpeg_hw_irq_is_we_pingpong(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEG_HW_MASK_COMP_WE)
+#define msm_jpeg_hw_irq_is_reset_ack(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEG_HW_MASK_COMP_RESET_ACK)
+#define msm_jpeg_hw_irq_is_err(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEG_HW_MASK_COMP_ERR)
+
+
+#define MSM_JPEGDMA_HW_MASK_COMP_FRAMEDONE \
+ MSM_JPEGDMA_HW_IRQ_STATUS_FRAMEDONE_MASK
+#define MSM_JPEGDMA_HW_MASK_COMP_FE \
+ MSM_JPEGDMA_HW_IRQ_STATUS_FE_RD_DONE_MASK
+#define MSM_JPEGDMA_HW_MASK_COMP_WE \
+ (MSM_JPEGDMA_HW_IRQ_STATUS_WE_WR_DONE_MASK)
+#define MSM_JPEGDMA_HW_MASK_COMP_RESET_ACK \
+ MSM_JPEGDMA_HW_IRQ_STATUS_RESET_ACK_MASK
+
+
+#define msm_jpegdma_hw_irq_is_frame_done(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEGDMA_HW_MASK_COMP_FRAMEDONE)
+#define msm_jpegdma_hw_irq_is_fe_pingpong(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEGDMA_HW_MASK_COMP_FE)
+#define msm_jpegdma_hw_irq_is_we_pingpong(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEGDMA_HW_MASK_COMP_WE)
+#define msm_jpegdma_hw_irq_is_reset_ack(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEGDMA_HW_MASK_COMP_RESET_ACK)
+
+
+void msm_jpeg_hw_fe_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *);
+void msm_jpeg_hw_we_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *);
+void msm_jpegdma_hw_fe_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *);
+void msm_jpegdma_hw_we_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *);
+
+
+void msm_jpeg_hw_we_buffer_cfg(uint8_t is_realtime);
+
+void msm_jpeg_hw_fe_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *,
+ uint8_t decode_flag);
+void msm_jpeg_hw_we_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *,
+ uint8_t decode_flag);
+void msm_jpegdma_hw_fe_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *);
+void msm_jpegdma_hw_we_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *);
+
+void msm_jpeg_hw_fe_start(void *);
+void msm_jpeg_hw_clk_cfg(void);
+
+void msm_jpeg_hw_reset(void *base, int size);
+void msm_jpeg_hw_irq_cfg(void);
+
+uint32_t msm_jpeg_hw_read(struct msm_jpeg_hw_cmd *, void *);
+void msm_jpeg_hw_write(struct msm_jpeg_hw_cmd *, void *);
+int msm_jpeg_hw_wait(struct msm_jpeg_hw_cmd *, int, void *);
+void msm_jpeg_hw_delay(struct msm_jpeg_hw_cmd *, int);
+int msm_jpeg_hw_exec_cmds(struct msm_jpeg_hw_cmd *, uint32_t,
+ uint32_t, void *);
+void msm_jpeg_hw_region_dump(int size);
+void msm_jpeg_io_dump(void *base, int size);
+void msm_jpeg_decode_status(void *base);
+void msm_jpeg_hw_reset_dma(void *base, int size);
+
+#endif /* MSM_JPEG_HW_H */
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw_reg.h b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw_reg.h
new file mode 100644
index 000000000000..68510b52c2fe
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_hw_reg.h
@@ -0,0 +1,210 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_HW_REG_H
+#define MSM_JPEG_HW_REG_H
+
+#define JPEG_REG_BASE 0
+
+#define MSM_JPEG_HW_IRQ_MASK_ADDR 0x00000018
+#define MSM_JPEG_HW_IRQ_MASK_RMSK 0xFFFFFFFF
+#define MSM_JPEG_HW_IRQ_ENABLE 0xFFFFFFFF
+
+#define MSM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001
+#define MSM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000
+
+#define MSM_JPEG_HW_IRQ_STATUS_FE_RD_DONE_MASK 0x00000010
+#define MSM_JPEG_HW_IRQ_STATUS_FE_RD_DONE_SHIFT 0x00000001
+
+#define MSM_JPEG_HW_IRQ_STATUS_FE_RTOVF_MASK 0x00000004
+#define MSM_JPEG_HW_IRQ_STATUS_FE_RTOVF_SHIFT 0x00000002
+
+#define MSM_JPEG_HW_IRQ_STATUS_FE_VFE_OVERFLOW_MASK 0x00000008
+#define MSM_JPEG_HW_IRQ_STATUS_FE_VFE_OVERFLOW_SHIFT 0x00000003
+
+#define MSM_JPEG_HW_IRQ_STATUS_WE_Y_PINGPONG_MASK 0x00000010
+#define MSM_JPEG_HW_IRQ_STATUS_WE_Y_PINGPONG_SHIFT 0x00000004
+
+#define MSM_JPEG_HW_IRQ_STATUS_WE_CBCR_PINGPONG_MASK 0x00000020
+#define MSM_JPEG_HW_IRQ_STATUS_WE_CBCR_PINGPONG_SHIFT 0x00000005
+
+#define MSM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000
+#define MSM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a
+
+#define MSM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800
+#define MSM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b
+
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF (0x1<<19)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR (0x1<<20)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR (0x1<<21)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW (0x1<<23)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM (0x1<<24)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ (0x1<<25)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM (0x1<<26)
+#define MSM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK (0x1<<29)
+
+#define JPEG_OFFLINE_CMD_START 0x00000001
+
+#define JPEG_RESET_DEFAULT 0x00032093
+
+#define JPEG_IRQ_DISABLE_ALL 0x00000000
+#define JPEG_IRQ_CLEAR_ALL 0xFFFFFFFF
+
+#define JPEG_PLN0_RD_PNTR_ADDR (JPEG_REG_BASE + 0x00000038)
+#define JPEG_PLN0_RD_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN0_RD_OFFSET_ADDR 0x0000003C
+#define JPEG_PLN0_RD_OFFSET_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN1_RD_PNTR_ADDR (JPEG_REG_BASE + 0x00000044)
+#define JPEG_PLN1_RD_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN1_RD_OFFSET_ADDR 0x00000048
+#define JPEG_PLN1_RD_OFFSET_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN2_RD_PNTR_ADDR (JPEG_REG_BASE + 0x00000050)
+#define JPEG_PLN2_RD_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN2_RD_OFFSET_ADDR 0x00000054
+#define JPEG_PLN2_RD_OFFSET_BMSK 0xFFFFFFFF
+
+#define JPEG_CMD_ADDR (JPEG_REG_BASE + 0x00000010)
+#define JPEG_CMD_BMSK 0xFFFFFFFF
+#define JPEG_CMD_CLEAR_WRITE_PLN_QUEUES 0x700
+
+#define JPEG_PLN0_WR_PNTR_ADDR (JPEG_REG_BASE + 0x000000cc)
+#define JPEG_PLN0_WR_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN1_WR_PNTR_ADDR (JPEG_REG_BASE + 0x000000D0)
+#define JPEG_PLN1_WR_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN2_WR_PNTR_ADDR (JPEG_REG_BASE + 0x000000D4)
+#define JPEG_PLN2_WR_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_IRQ_MASK_ADDR (JPEG_REG_BASE + 0x00000018)
+#define JPEG_IRQ_MASK_BMSK 0xFFFFFFFF
+#define JPEG_IRQ_ALLSOURCES_ENABLE 0xFFFFFFFF
+
+#define JPEG_IRQ_CLEAR_ADDR (JPEG_REG_BASE + 0x0000001c)
+#define JPEG_IRQ_CLEAR_BMSK 0xFFFFFFFF
+
+#define JPEG_RESET_CMD_ADDR (JPEG_REG_BASE + 0x00000008)
+#define JPEG_RESET_CMD_RMSK 0xFFFFFFFF
+
+#define JPEG_IRQ_STATUS_ADDR (JPEG_REG_BASE + 0x00000020)
+#define JPEG_IRQ_STATUS_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S0_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x00000310)
+#define MSM_JPEG_S0_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S0_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x00000314)
+#define MSM_JPEG_S0_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S1_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x0000031C)
+#define MSM_JPEG_S1_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S1_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x00000320)
+#define MSM_JPEG_S1_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S2_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x00000328)
+#define MSM_JPEG_S2_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S2_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x0000032C)
+#define MSM_JPEG_S2_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S3_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x00000334)
+#define MSM_JPEG_S3_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S3_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x00000338)
+#define MSM_JPEG_S3_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#define JPEG_ENCODE_OUTPUT_SIZE_STATUS_ADDR (JPEG_REG_BASE + 0x00000180)
+#define JPEG_ENCODE_OUTPUT_SIZE_STATUS_BMSK 0x1FFFFFFF
+
+#define JPEG_DECODE_MCUS_DECODED_STATUS (JPEG_REG_BASE + 0x00000258)
+#define JPEG_DECODE_BITS_CONSUMED_STATUS (JPEG_REG_BASE + 0x0000025C)
+#define JPEG_DECODE_PRED_Y_STATE (JPEG_REG_BASE + 0x00000260)
+#define JPEG_DECODE_PRED_C_STATE (JPEG_REG_BASE + 0x00000264)
+#define JPEG_DECODE_RSM_STATE (JPEG_REG_BASE + 0x00000268)
+
+#define JPEG_HW_VERSION (JPEG_REG_BASE + 0x00000000)
+
+#define VBIF_BASE_ADDRESS 0xFDA60000
+#define VBIF_REGION_SIZE 0xC30
+#define JPEG_VBIF_CLKON 0x4
+#define JPEG_VBIF_IN_RD_LIM_CONF0 0xB0
+#define JPEG_VBIF_IN_RD_LIM_CONF1 0xB4
+#define JPEG_VBIF_IN_RD_LIM_CONF2 0xB8
+#define JPEG_VBIF_IN_WR_LIM_CONF0 0xC0
+#define JPEG_VBIF_IN_WR_LIM_CONF1 0xC4
+#define JPEG_VBIF_IN_WR_LIM_CONF2 0xC8
+#define JPEG_VBIF_OUT_RD_LIM_CONF0 0xD0
+#define JPEG_VBIF_OUT_WR_LIM_CONF0 0xD4
+#define JPEG_VBIF_DDR_OUT_MAX_BURST 0xD8
+#define JPEG_VBIF_OCMEM_OUT_MAX_BURST 0xDC
+#define JPEG_VBIF_ARB_CTL 0xF0
+#define JPEG_VBIF_OUT_AXI_AOOO_EN 0x178
+#define JPEG_VBIF_OUT_AXI_AOOO 0x17c
+#define JPEG_VBIF_ROUND_ROBIN_QOS_ARB 0x124
+#define JPEG_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x160
+#define JPEG_VBIF_OUT_AXI_AMEMTYPE_CONF1 0x164
+
+#define JPEGDMA_IRQ_MASK_ADDR (JPEG_REG_BASE + 0x0000000C)
+#define JPEGDMA_IRQ_MASK_BMSK 0xFFFFFFFF
+#define JPEGDMA_IRQ_ALLSOURCES_ENABLE 0xFFFFFFFF
+
+#define JPEGDMA_IRQ_CLEAR_ADDR (JPEG_REG_BASE + 0x00000014)
+#define JPEGDMA_IRQ_CLEAR_BMSK 0xFFFFFFFF
+
+#define JPEGDMA_RESET_CMD_ADDR (JPEG_REG_BASE + 0x00000008)
+#define JPEGDMA_RESET_CMD_BMSK 0xFFFFFFFF
+
+#define JPEGDMA_IRQ_STATUS_ADDR (JPEG_REG_BASE + 0x00000010)
+#define JPEGDMA_IRQ_STATUS_BMSK 0xFFFFFFFF
+#define JPEGDMA_RESET_DEFAULT 0x00032083
+
+
+#define JPEGDMA_CMD_ADDR (JPEG_REG_BASE + 0x0000001C)
+#define JPEGDMA_CMD_BMSK (0xFFFFFFFF)
+#define JPEGDMA_CMD_CLEAR_READ_PLN_QUEUES 0x030
+#define JPEGDMA_CMD_CLEAR_WRITE_PLN_QUEUES 0x300
+
+#define JPEGDMA_IRQ_DISABLE_ALL 0x00000000
+#define JPEGDMA_IRQ_CLEAR_ALL 0x00001FFF
+#define MSM_JPEGDMA_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001
+#define MSM_JPEGDMA_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000
+#define MSM_JPEGDMA_HW_IRQ_STATUS_FE_RD_DONE_MASK 0x00000006
+#define MSM_JPEGDMA_HW_IRQ_STATUS_FE_RD_DONE_SHIFT 0x00000001
+#define MSM_JPEGDMA_HW_IRQ_STATUS_WE_WR_DONE_MASK 0x00000060
+#define MSM_JPEGDMA_HW_IRQ_STATUS_WE_WR_DONE_SHIFT 0x00000005
+#define MSM_JPEGDMA_HW_IRQ_STATUS_RESET_ACK_MASK 0x00000400
+#define MSM_JPEGDMA_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a
+
+#define MSM_JPEGDMA_FE_0_RD_PNTR (JPEG_REG_BASE + 0x00000034)
+#define MSM_JPEGDMA_FE_1_RD_PNTR (JPEG_REG_BASE + 0x00000078)
+#define MSM_JPEGDMA_WE_0_WR_PNTR (JPEG_REG_BASE + 0x000000BC)
+#define MSM_JPEGDMA_WE_1_WR_PNTR (JPEG_REG_BASE + 0x000000EC)
+
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x00000190)
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x00000198)
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x000001A4)
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x000001AC)
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#endif /* MSM_JPEG_HW_REG_H */
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_platform.c b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_platform.c
new file mode 100644
index 000000000000..1969b7097393
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_platform.c
@@ -0,0 +1,514 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/pm_qos.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/iommu.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_camera_io_util.h"
+#include "msm_jpeg_platform.h"
+#include "msm_jpeg_sync.h"
+#include "msm_jpeg_common.h"
+#include "msm_jpeg_hw.h"
+
+#define JPEG_DT_PROP_CNT 2
+
+
+int msm_jpeg_get_clock_index(struct msm_jpeg_device *pgmn_dev,
+ const char *clk_name)
+{
+ uint32_t i = 0;
+
+ for (i = 0; i < pgmn_dev->num_clk; i++) {
+ if (!strcmp(clk_name, pgmn_dev->jpeg_clk_info[i].clk_name))
+ return i;
+ }
+ return -EINVAL;
+}
+
+int msm_jpeg_platform_set_clk_rate(struct msm_jpeg_device *pgmn_dev,
+ long clk_rate)
+{
+ int rc = 0;
+	int32_t msm_jpeg_idx;
+
+ /* retrieve clock index from list of clocks */
+ msm_jpeg_idx = msm_jpeg_get_clock_index(pgmn_dev,
+ "core_clk");
+ if (msm_jpeg_idx < 0) {
+ JPEG_PR_ERR("%s:Fail to get clock index\n", __func__);
+ return -EINVAL;
+ }
+
+ /* set the rate */
+ msm_camera_clk_set_rate(&pgmn_dev->pdev->dev,
+ pgmn_dev->jpeg_clk[msm_jpeg_idx], clk_rate);
+
+ return rc;
+}
+
+void msm_jpeg_platform_p2v(int iommu_hdl, int fd)
+{
+ cam_smmu_put_phy_addr(iommu_hdl, fd);
+}
+
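+/*
+ * msm_jpeg_platform_v2p() - map an ion fd and return its device address.
+ *
+ * Maps the buffer through the camera SMMU and checks that the requested
+ * length fits inside the mapped size. Returns the mapped address on
+ * success; on failure it returns 0, releasing the mapping first if the
+ * length check failed.
+ */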
+uint32_t msm_jpeg_platform_v2p(struct msm_jpeg_device *pgmn_dev, int fd,
+ uint32_t len, int iommu_hdl)
+{
+ dma_addr_t paddr;
+ size_t size;
+ int rc;
+
+	rc = cam_smmu_get_phy_addr(pgmn_dev->iommu_hdl, fd, CAM_SMMU_MAP_RW,
+			&paddr, &size);
+	if (rc < 0) {
+		JPEG_PR_ERR("%s: fd %d got phy addr error %d\n", __func__, fd,
+			rc);
+		goto err_get_phy;
+	}
+
+	JPEG_DBG("%s:%d] addr 0x%x size %zu", __func__, __LINE__,
+		(uint32_t)paddr, size);
+
+ /* validate user input */
+ if (len > size) {
+ JPEG_PR_ERR("%s: invalid offset + len\n", __func__);
+ goto err_size;
+ }
+
+ return paddr;
+err_size:
+ cam_smmu_put_phy_addr(pgmn_dev->iommu_hdl, fd);
+err_get_phy:
+ return 0;
+}
+
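+/*
+ * set_vbif_params() - program default VBIF QoS and burst settings.
+ *
+ * Used as a fallback when the device tree does not provide a
+ * qcom,vbif-reg-settings property (see msm_jpeg_platform_init()).
+ */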
+static void set_vbif_params(struct msm_jpeg_device *pgmn_dev,
+ void *jpeg_vbif_base)
+{
+ msm_camera_io_w(0x1,
+ jpeg_vbif_base + JPEG_VBIF_CLKON);
+
+ if (pgmn_dev->hw_version != JPEG_8994) {
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_RD_LIM_CONF0);
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_RD_LIM_CONF1);
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_RD_LIM_CONF2);
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_WR_LIM_CONF0);
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_WR_LIM_CONF1);
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_WR_LIM_CONF2);
+ msm_camera_io_w(0x00001010,
+ jpeg_vbif_base + JPEG_VBIF_OUT_RD_LIM_CONF0);
+ msm_camera_io_w(0x00000110,
+ jpeg_vbif_base + JPEG_VBIF_OUT_WR_LIM_CONF0);
+ msm_camera_io_w(0x00000707,
+ jpeg_vbif_base + JPEG_VBIF_DDR_OUT_MAX_BURST);
+ msm_camera_io_w(0x00000FFF,
+ jpeg_vbif_base + JPEG_VBIF_OUT_AXI_AOOO_EN);
+ msm_camera_io_w(0x0FFF0FFF,
+ jpeg_vbif_base + JPEG_VBIF_OUT_AXI_AOOO);
+ msm_camera_io_w(0x2222,
+ jpeg_vbif_base + JPEG_VBIF_OUT_AXI_AMEMTYPE_CONF1);
+ }
+
+ msm_camera_io_w(0x7,
+ jpeg_vbif_base + JPEG_VBIF_OCMEM_OUT_MAX_BURST);
+ msm_camera_io_w(0x00000030,
+ jpeg_vbif_base + JPEG_VBIF_ARB_CTL);
+
+ /* FE and WE QOS configuration need to be set when
+ * QOS RR arbitration is enabled
+ */
+ if (pgmn_dev->hw_version != JPEG_8974_V1)
+ msm_camera_io_w(0x00000003,
+ jpeg_vbif_base + JPEG_VBIF_ROUND_ROBIN_QOS_ARB);
+ else
+ msm_camera_io_w(0x00000001,
+ jpeg_vbif_base + JPEG_VBIF_ROUND_ROBIN_QOS_ARB);
+
+ msm_camera_io_w(0x22222222,
+ jpeg_vbif_base + JPEG_VBIF_OUT_AXI_AMEMTYPE_CONF0);
+
+}
+
+/*
+ * msm_jpeg_set_init_dt_parms() - get device tree config and write to registers.
+ * @pgmn_dev: Pointer to jpeg device.
+ * @dt_prop_name: Device tree property name.
+ * @base: Base address.
+ *
+ * This function reads register offsets and values from dtsi based on
+ * device tree property name and writes to jpeg registers.
+ *
+ * Return: 0 on success and negative error on failure.
+ */
+static int32_t msm_jpeg_set_init_dt_parms(struct msm_jpeg_device *pgmn_dev,
+ const char *dt_prop_name,
+ void *base)
+{
+ struct device_node *of_node;
+ int32_t i = 0, rc = 0;
+ uint32_t *dt_reg_settings = NULL;
+ uint32_t dt_count = 0;
+
+ of_node = pgmn_dev->pdev->dev.of_node;
+ JPEG_DBG("%s:%d E\n", __func__, __LINE__);
+
+ if (!of_get_property(of_node, dt_prop_name,
+ &dt_count)) {
+ JPEG_DBG("%s: Error property does not exist\n",
+ __func__);
+ return -ENOENT;
+ }
+ if (dt_count % 8) {
+ JPEG_PR_ERR("%s: Error invalid entries\n",
+ __func__);
+ return -EINVAL;
+ }
+ dt_count /= 4;
+ if (dt_count != 0) {
+ dt_reg_settings = kcalloc(dt_count, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dt_reg_settings) {
+ JPEG_PR_ERR("%s:%d No memory\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node,
+ dt_prop_name,
+ dt_reg_settings,
+ dt_count);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: No reg info\n",
+ __func__);
+ kfree(dt_reg_settings);
+ return -EINVAL;
+ }
+ for (i = 0; i < dt_count; i = i + 2) {
+ JPEG_DBG("%s:%d] %pK %08x\n",
+ __func__, __LINE__,
+ base + dt_reg_settings[i],
+ dt_reg_settings[i + 1]);
+ msm_camera_io_w(dt_reg_settings[i + 1],
+ base + dt_reg_settings[i]);
+ }
+ kfree(dt_reg_settings);
+ }
+ return 0;
+}
+
+static int msm_jpeg_attach_iommu(struct msm_jpeg_device *pgmn_dev)
+{
+ int rc;
+
+ rc = cam_smmu_ops(pgmn_dev->iommu_hdl, CAM_SMMU_ATTACH);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: Device attach failed\n", __func__);
+ return -ENODEV;
+ }
+ JPEG_DBG("%s:%d] handle %d attach\n",
+ __func__, __LINE__, pgmn_dev->iommu_hdl);
+ return 0;
+}
+
+static int msm_jpeg_detach_iommu(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d] handle %d detach\n",
+ __func__, __LINE__, pgmn_dev->iommu_hdl);
+ cam_smmu_ops(pgmn_dev->iommu_hdl, CAM_SMMU_DETACH);
+ return 0;
+}
+
+
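+/*
+ * msm_jpeg_platform_init() - power up the JPEG core and hook up its IRQ.
+ *
+ * Enables the regulators and clocks, attaches the SMMU context bank,
+ * programs the VBIF (from device tree settings or the built-in defaults),
+ * registers the interrupt handler and reads back the hardware version.
+ * On any failure the steps already taken are unwound in reverse order.
+ */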
+int msm_jpeg_platform_init(irqreturn_t (*handler)(int, void *),
+ void *context)
+{
+ int rc = -1;
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *) context;
+ struct platform_device *pdev = pgmn_dev->pdev;
+
+ pgmn_dev->state = MSM_JPEG_IDLE;
+
+ /* enable all regulators */
+ rc = msm_camera_regulator_enable(pgmn_dev->jpeg_vdd,
+ pgmn_dev->num_reg, true);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: failed to enable regulators\n", __func__);
+ goto err_reg_enable;
+ }
+
+ /* enable all clocks */
+ rc = msm_camera_clk_enable(&pgmn_dev->pdev->dev,
+ pgmn_dev->jpeg_clk_info, pgmn_dev->jpeg_clk,
+ pgmn_dev->num_clk, true);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: clk enable failed\n", __func__);
+ goto err_clk_enable;
+ }
+
+ /* attach the smmu context banks */
+ rc = msm_jpeg_attach_iommu(pgmn_dev);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: iommu attach failed\n", __func__);
+ goto err_fail_iommu;
+ }
+ rc = msm_jpeg_set_init_dt_parms(pgmn_dev, "qcom,vbif-reg-settings",
+ pgmn_dev->vbif_base);
+ if (rc == -ENOENT) {
+ JPEG_DBG("%s: No qcom,vbif-reg-settings property\n", __func__);
+ set_vbif_params(pgmn_dev, pgmn_dev->vbif_base);
+ } else if (rc < 0) {
+ JPEG_PR_ERR("%s: vbif params set fail\n", __func__);
+ goto err_fail_set_vbif;
+ }
+
+ /* register the interrupt handler */
+ rc = msm_camera_register_irq(pgmn_dev->pdev,
+ pgmn_dev->jpeg_irq_res, handler, IRQF_TRIGGER_RISING,
+ "jpeg", context);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: irq request fail\n", __func__);
+ goto err_reg_irq_fail;
+ }
+
+ pgmn_dev->hw_version = msm_camera_io_r(pgmn_dev->base +
+ JPEG_HW_VERSION);
+ JPEG_DBG_HIGH("%s:%d] jpeg HW version 0x%x", __func__, __LINE__,
+ pgmn_dev->hw_version);
+ pgmn_dev->state = MSM_JPEG_INIT;
+
+ return 0;
+err_reg_irq_fail:
+err_fail_set_vbif:
+ msm_jpeg_detach_iommu(pgmn_dev);
+err_fail_iommu:
+ msm_camera_clk_enable(&pdev->dev, pgmn_dev->jpeg_clk_info,
+ pgmn_dev->jpeg_clk, pgmn_dev->num_clk, false);
+err_clk_enable:
+ msm_camera_regulator_enable(pgmn_dev->jpeg_vdd,
+ pgmn_dev->num_reg, false);
+err_reg_enable:
+ return rc;
+}
+
+int msm_jpeg_platform_setup(struct msm_jpeg_device *pgmn_dev)
+{
+ int rc = -1;
+ struct resource *jpeg_irq_res;
+ void *jpeg_base, *vbif_base;
+ struct platform_device *pdev = pgmn_dev->pdev;
+
+ /* get the jpeg hardware device address */
+ jpeg_base = msm_camera_get_reg_base(pdev, "jpeg_hw", true);
+ if (!jpeg_base) {
+ JPEG_PR_ERR("%s: jpeg no mem resource?\n", __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* get the jpeg vbif device address */
+ vbif_base = msm_camera_get_reg_base(pdev, "jpeg_vbif", false);
+ if (!vbif_base) {
+ JPEG_PR_ERR("%s: vbif no mem resource?\n", __func__);
+ rc = -ENXIO;
+ goto err_vbif_base;
+ }
+
+ /* get the irq resource for the jpeg hardware */
+ jpeg_irq_res = msm_camera_get_irq(pdev, "jpeg");
+ if (!jpeg_irq_res) {
+ JPEG_PR_ERR("%s: no irq resource?\n", __func__);
+ rc = -ENXIO;
+ goto err_jpeg_irq_res;
+ }
+
+ /* get all the clocks information */
+ rc = msm_camera_get_clk_info(pdev, &pgmn_dev->jpeg_clk_info,
+ &pgmn_dev->jpeg_clk, &pgmn_dev->num_clk);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: failed to get the clocks\n", __func__);
+ rc = -ENXIO;
+ goto err_jpeg_clk;
+ }
+
+ /* get all the regulators information */
+ rc = msm_camera_get_regulator_info(pdev, &pgmn_dev->jpeg_vdd,
+ &pgmn_dev->num_reg);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: failed to get the regulators\n", __func__);
+ rc = -ENXIO;
+ goto err_jpeg_get_reg;
+ }
+
+ /* map the dtsi cell id to bus client id */
+ switch (pgmn_dev->pdev->id) {
+ case 0:
+ pgmn_dev->bus_client = CAM_BUS_CLIENT_JPEG_ENC0;
+ break;
+ case 1:
+ pgmn_dev->bus_client = CAM_BUS_CLIENT_JPEG_ENC1;
+ break;
+ case 2:
+ pgmn_dev->bus_client = CAM_BUS_CLIENT_JPEG_DEC;
+ break;
+ case 3:
+ pgmn_dev->bus_client = CAM_BUS_CLIENT_JPEG_DMA;
+ break;
+ default:
+		JPEG_PR_ERR("%s: invalid cell id :%d\n",
+			__func__, pgmn_dev->pdev->id);
+		rc = -EINVAL;
+		goto err_reg_bus;
+ }
+
+ /* register the bus client */
+ rc = msm_camera_register_bus_client(pgmn_dev->pdev,
+ pgmn_dev->bus_client);
+ if (rc < 0) {
+ JPEG_PR_ERR("Fail to register bus client\n");
+ rc = -EINVAL;
+ goto err_reg_bus;
+ }
+
+ /* get the resource size of jpeg hardware */
+ pgmn_dev->res_size = msm_camera_get_res_size(pdev, "jpeg_hw");
+ if (!pgmn_dev->res_size) {
+		JPEG_PR_ERR("Fail to get resource size\n");
+ rc = -EINVAL;
+ goto err_res_size;
+ }
+
+ pgmn_dev->base = jpeg_base;
+ pgmn_dev->vbif_base = vbif_base;
+ pgmn_dev->jpeg_irq_res = jpeg_irq_res;
+
+ return 0;
+
+err_res_size:
+ msm_camera_unregister_bus_client(pgmn_dev->bus_client);
+err_reg_bus:
+ msm_camera_put_regulators(pdev, &pgmn_dev->jpeg_vdd,
+ pgmn_dev->num_reg);
+err_jpeg_get_reg:
+ msm_camera_put_clk_info(pdev, &pgmn_dev->jpeg_clk_info,
+ &pgmn_dev->jpeg_clk, pgmn_dev->num_clk);
+err_jpeg_clk:
+err_jpeg_irq_res:
+ msm_camera_put_reg_base(pdev, vbif_base, "jpeg_vbif", false);
+err_vbif_base:
+ msm_camera_put_reg_base(pdev, jpeg_base, "jpeg_hw", true);
+out:
+ return rc;
+}
+
+void msm_jpeg_platform_cleanup(struct msm_jpeg_device *pgmn_dev)
+{
+ /* unregister the bus client */
+ msm_camera_unregister_bus_client(pgmn_dev->bus_client);
+ /* release the regulators */
+ msm_camera_put_regulators(pgmn_dev->pdev, &pgmn_dev->jpeg_vdd,
+ pgmn_dev->num_reg);
+ /* release all the clocks */
+ msm_camera_put_clk_info(pgmn_dev->pdev, &pgmn_dev->jpeg_clk_info,
+ &pgmn_dev->jpeg_clk, pgmn_dev->num_clk);
+ /* release the jpeg device memory */
+ msm_camera_put_reg_base(pgmn_dev->pdev, pgmn_dev->vbif_base,
+ "jpeg_vbif", false);
+ /* release the jpeg vbif device memory */
+ msm_camera_put_reg_base(pgmn_dev->pdev, pgmn_dev->base,
+ "jpeg_hw", true);
+}
+
+int msm_jpeg_platform_release(void *context)
+{
+ int result = 0;
+
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *) context;
+
+ /* release the irq */
+ msm_camera_unregister_irq(pgmn_dev->pdev,
+ pgmn_dev->jpeg_irq_res, context);
+
+ msm_jpeg_detach_iommu(pgmn_dev);
+
+ if (pgmn_dev->bus_client) {
+ if (pgmn_dev->jpeg_bus_vote) {
+ /* update the bw with zeroth vector */
+ msm_camera_update_bus_vector(pgmn_dev->bus_client, 0);
+ JPEG_BUS_UNVOTED(pgmn_dev);
+ JPEG_DBG("%s:%d] Bus unvoted\n", __func__, __LINE__);
+ }
+ }
+
+ /* disable all the clocks */
+ msm_camera_clk_enable(&pgmn_dev->pdev->dev, pgmn_dev->jpeg_clk_info,
+ pgmn_dev->jpeg_clk, pgmn_dev->num_clk, false);
+	JPEG_DBG("%s:%d] clock disable done", __func__, __LINE__);
+
+ /* disable all the regulators */
+ msm_camera_regulator_enable(pgmn_dev->jpeg_vdd,
+ pgmn_dev->num_reg, false);
+ JPEG_DBG("%s:%d] regulator disable done", __func__, __LINE__);
+
+ pgmn_dev->state = MSM_JPEG_IDLE;
+ JPEG_DBG("%s:%d] success\n", __func__, __LINE__);
+ return result;
+}
+
+/*
+ * msm_jpeg_platform_set_dt_config() - set jpeg device tree configuration.
+ * @pgmn_dev: Pointer to jpeg device.
+ *
+ * This function holds an array of device tree property names and calls
+ * msm_jpeg_set_init_dt_parms() for each property.
+ *
+ * Return: 0 on success and negative error on failure.
+ */
+int msm_jpeg_platform_set_dt_config(struct msm_jpeg_device *pgmn_dev)
+{
+ int rc = 0;
+ uint8_t dt_prop_cnt = JPEG_DT_PROP_CNT;
+ char *dt_prop_name[JPEG_DT_PROP_CNT] = {"qcom,qos-reg-settings",
+ "qcom,prefetch-reg-settings"};
+
+ while (dt_prop_cnt) {
+ dt_prop_cnt--;
+ rc = msm_jpeg_set_init_dt_parms(pgmn_dev,
+ dt_prop_name[dt_prop_cnt],
+ pgmn_dev->base);
+ if (rc == -ENOENT) {
+ JPEG_DBG("%s: No %s property\n", __func__,
+ dt_prop_name[dt_prop_cnt]);
+ } else if (rc < 0) {
+ JPEG_PR_ERR("%s: %s params set fail\n", __func__,
+ dt_prop_name[dt_prop_cnt]);
+ return rc;
+ }
+ }
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_platform.h b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_platform.h
new file mode 100644
index 000000000000..cbd9d3e22f09
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_platform.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_PLATFORM_H
+#define MSM_JPEG_PLATFORM_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/ion.h>
+#include "msm_jpeg_sync.h"
+#define JPEG_CLK_RATE 266670000
+
+int msm_jpeg_platform_set_clk_rate(struct msm_jpeg_device *pgmn_dev,
+ long clk_rate);
+void msm_jpeg_platform_p2v(int iommu_hdl, int fd);
+uint32_t msm_jpeg_platform_v2p(struct msm_jpeg_device *pgmn_dev, int fd,
+ uint32_t len, int iommu_hdl);
+
+int msm_jpeg_platform_clk_enable(void);
+int msm_jpeg_platform_clk_disable(void);
+
+int msm_jpeg_platform_init(irqreturn_t (*handler)(int, void *),
+ void *context);
+int msm_jpeg_platform_release(void *context);
+int msm_jpeg_platform_set_dt_config(struct msm_jpeg_device *pgmn_dev);
+int msm_jpeg_platform_setup(struct msm_jpeg_device *pgmn_dev);
+void msm_jpeg_platform_cleanup(struct msm_jpeg_device *pgmn_dev);
+
+#endif /* MSM_JPEG_PLATFORM_H */
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_sync.c b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_sync.c
new file mode 100644
index 000000000000..e085f0afaa77
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_sync.c
@@ -0,0 +1,1584 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/ratelimit.h>
+#include <media/msm_jpeg.h>
+#include <linux/msm-bus.h>
+#include "msm_jpeg_sync.h"
+#include "msm_jpeg_core.h"
+#include "msm_jpeg_platform.h"
+#include "msm_jpeg_common.h"
+#include "cam_hw_ops.h"
+
+#define JPEG_REG_SIZE 0x308
+#define JPEG_DEV_CNT 4
+#define JPEG_DEC_ID 2
+#define UINT32_MAX (0xFFFFFFFFU)
+
+#ifdef CONFIG_COMPAT
+
+#define MSM_JPEG_IOCTL_GET_HW_VERSION32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 1, struct msm_jpeg_hw_cmd32)
+
+#define MSM_JPEG_IOCTL_RESET32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 2, struct msm_jpeg_ctrl_cmd32)
+
+#define MSM_JPEG_IOCTL_STOP32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 3, struct msm_jpeg_hw_cmds32)
+
+#define MSM_JPEG_IOCTL_START32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 4, struct msm_jpeg_hw_cmds32)
+
+#define MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 5, struct msm_jpeg_buf32)
+
+#define MSM_JPEG_IOCTL_INPUT_GET32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 6, struct msm_jpeg_buf32)
+
+#define MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 8, struct msm_jpeg_buf32)
+
+#define MSM_JPEG_IOCTL_OUTPUT_GET32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 9, struct msm_jpeg_buf32)
+
+#define MSM_JPEG_IOCTL_EVT_GET32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 11, struct msm_jpeg_ctrl_cmd32)
+
+#define MSM_JPEG_IOCTL_HW_CMD32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 13, struct msm_jpeg_hw_cmd32)
+
+#define MSM_JPEG_IOCTL_HW_CMDS32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 14, struct msm_jpeg_hw_cmds32)
+
+#define MSM_JPEG_IOCTL_TEST_DUMP_REGION32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 15, compat_ulong_t)
+
+struct msm_jpeg_ctrl_cmd32 {
+ uint32_t type;
+ uint32_t len;
+ compat_uptr_t value;
+};
+struct msm_jpeg_buf32 {
+ uint32_t type;
+ int fd;
+
+ compat_uptr_t vaddr;
+
+ uint32_t y_off;
+ uint32_t y_len;
+ uint32_t framedone_len;
+
+ uint32_t cbcr_off;
+ uint32_t cbcr_len;
+
+ uint32_t num_of_mcu_rows;
+ uint32_t offset;
+ uint32_t pln2_off;
+ uint32_t pln2_len;
+};
+
+struct msm_jpeg_hw_cmd32 {
+
+ uint32_t type:4;
+
+ /* n microseconds of timeout for WAIT */
+ /* n microseconds of time for DELAY */
+ /* repeat n times for READ/WRITE */
+ /* max is 0xFFF, 4095 */
+ uint32_t n:12;
+ uint32_t offset:16;
+ uint32_t mask;
+ union {
+ uint32_t data; /* for single READ/WRITE/WAIT, n = 1 */
+ compat_uptr_t pdata; /* for multiple READ/WRITE/WAIT, n > 1 */
+ };
+};
+
+struct msm_jpeg_hw_cmds32 {
+ uint32_t m; /* number of elements in the hw_cmd array */
+ struct msm_jpeg_hw_cmd32 hw_cmd[1];
+};
+#endif
+
+
+inline void msm_jpeg_q_init(char const *name, struct msm_jpeg_q *q_p)
+{
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, name);
+ q_p->name = name;
+ spin_lock_init(&q_p->lck);
+ INIT_LIST_HEAD(&q_p->q);
+ init_waitqueue_head(&q_p->wait);
+ q_p->unblck = 0;
+}
+
+inline void *msm_jpeg_q_out(struct msm_jpeg_q *q_p)
+{
+ unsigned long flags;
+ struct msm_jpeg_q_entry *q_entry_p = NULL;
+ void *data = NULL;
+
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ spin_lock_irqsave(&q_p->lck, flags);
+ if (!list_empty(&q_p->q)) {
+ q_entry_p = list_first_entry(&q_p->q, struct msm_jpeg_q_entry,
+ list);
+ list_del_init(&q_entry_p->list);
+ }
+ spin_unlock_irqrestore(&q_p->lck, flags);
+
+ if (q_entry_p) {
+ data = q_entry_p->data;
+ kfree(q_entry_p);
+ } else {
+ JPEG_DBG("%s:%d] %s no entry\n", __func__, __LINE__,
+ q_p->name);
+ }
+
+ return data;
+}
+
+inline int msm_jpeg_q_in(struct msm_jpeg_q *q_p, void *data)
+{
+ unsigned long flags;
+
+ struct msm_jpeg_q_entry *q_entry_p;
+
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+
+ q_entry_p = kmalloc(sizeof(struct msm_jpeg_q_entry), GFP_ATOMIC);
+ if (!q_entry_p) {
+ JPEG_PR_ERR("%s: no mem\n", __func__);
+ return -EFAULT;
+ }
+ q_entry_p->data = data;
+
+ spin_lock_irqsave(&q_p->lck, flags);
+ list_add_tail(&q_entry_p->list, &q_p->q);
+ spin_unlock_irqrestore(&q_p->lck, flags);
+
+ return 0;
+}
+
+inline int msm_jpeg_q_in_buf(struct msm_jpeg_q *q_p,
+ struct msm_jpeg_core_buf *buf)
+{
+ struct msm_jpeg_core_buf *buf_p;
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+ buf_p = kmalloc(sizeof(struct msm_jpeg_core_buf), GFP_ATOMIC);
+ if (!buf_p) {
+ JPEG_PR_ERR("%s: no mem\n", __func__);
+ return -EFAULT;
+ }
+
+ memcpy(buf_p, buf, sizeof(struct msm_jpeg_core_buf));
+
+ msm_jpeg_q_in(q_p, buf_p);
+ return 0;
+}
+
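+/*
+ * msm_jpeg_q_wait() - block until the queue has an entry or is unblocked.
+ *
+ * Returns the wait_event_timeout() result when an entry shows up,
+ * -ETIMEDOUT if the wait times out with the queue still empty and
+ * -ECANCELED if the queue was explicitly unblocked.
+ */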
+inline int msm_jpeg_q_wait(struct msm_jpeg_q *q_p)
+{
+	long tm = MAX_SCHEDULE_TIMEOUT;
+ int rc;
+
+ JPEG_DBG("%s:%d] %s wait\n", __func__, __LINE__, q_p->name);
+ rc = wait_event_timeout(q_p->wait,
+ (!list_empty_careful(&q_p->q) || q_p->unblck),
+ msecs_to_jiffies(tm));
+ JPEG_DBG("%s:%d] %s wait done\n", __func__, __LINE__, q_p->name);
+ if (list_empty_careful(&q_p->q)) {
+ if (rc == 0) {
+ rc = -ETIMEDOUT;
+ JPEG_PR_ERR("%s:%d] %s timeout\n", __func__, __LINE__,
+ q_p->name);
+ } else if (q_p->unblck) {
+ JPEG_DBG("%s:%d] %s unblock is true\n", __func__,
+ __LINE__, q_p->name);
+ q_p->unblck = 0;
+ rc = -ECANCELED;
+ }
+ }
+ return rc;
+}
+
+inline int msm_jpeg_q_wakeup(struct msm_jpeg_q *q_p)
+{
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ wake_up(&q_p->wait);
+ return 0;
+}
+
+inline int msm_jpeg_q_unblock(struct msm_jpeg_q *q_p)
+{
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ q_p->unblck = 1;
+ wake_up(&q_p->wait);
+ return 0;
+}
+
+inline void msm_jpeg_outbuf_q_cleanup(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_q *q_p)
+{
+ struct msm_jpeg_core_buf *buf_p;
+
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ do {
+ buf_p = msm_jpeg_q_out(q_p);
+ if (buf_p) {
+ msm_jpeg_platform_p2v(pgmn_dev->iommu_hdl,
+ buf_p->ion_fd);
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ kfree(buf_p);
+ }
+ } while (buf_p);
+ q_p->unblck = 0;
+}
+
+inline void msm_jpeg_q_cleanup(struct msm_jpeg_q *q_p)
+{
+ void *data;
+
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ do {
+ data = msm_jpeg_q_out(q_p);
+ if (data) {
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ kfree(data);
+ }
+ } while (data);
+ q_p->unblck = 0;
+}
+
+/*************** event queue ****************/
+
+int msm_jpeg_framedone_irq(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf_in)
+{
+ int rc = 0;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+ if (buf_in) {
+ buf_in->vbuf.framedone_len = buf_in->framedone_len;
+ buf_in->vbuf.type = MSM_JPEG_EVT_SESSION_DONE;
+ JPEG_DBG("%s:%d] 0x%08x %d framedone_len %d\n",
+ __func__, __LINE__,
+ (int) buf_in->y_buffer_addr, buf_in->y_len,
+ buf_in->vbuf.framedone_len);
+ rc = msm_jpeg_q_in_buf(&pgmn_dev->evt_q, buf_in);
+ } else {
+ JPEG_PR_ERR("%s:%d] no output return buffer\n",
+ __func__, __LINE__);
+ rc = -1;
+ }
+
+ if (buf_in)
+ rc = msm_jpeg_q_wakeup(&pgmn_dev->evt_q);
+
+ return rc;
+}
+
+int msm_jpeg_evt_get(struct msm_jpeg_device *pgmn_dev,
+ void __user *to)
+{
+ struct msm_jpeg_core_buf *buf_p;
+ struct msm_jpeg_ctrl_cmd ctrl_cmd;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+ msm_jpeg_q_wait(&pgmn_dev->evt_q);
+ buf_p = msm_jpeg_q_out(&pgmn_dev->evt_q);
+
+ if (!buf_p) {
+ JPEG_DBG("%s:%d] no buffer\n", __func__, __LINE__);
+ return -EAGAIN;
+ }
+
+ memset(&ctrl_cmd, 0, sizeof(ctrl_cmd));
+ ctrl_cmd.type = buf_p->vbuf.type;
+ kfree(buf_p);
+
+ if (ctrl_cmd.type == MSM_JPEG_EVT_SESSION_DONE) {
+ /* update the bw with zeroth vector */
+ msm_camera_update_bus_vector(pgmn_dev->bus_client, 0);
+ JPEG_BUS_UNVOTED(pgmn_dev);
+ JPEG_DBG("%s:%d] Bus unvoted\n", __func__, __LINE__);
+ }
+
+ JPEG_DBG("%s:%d] 0x%08lx %d\n", __func__, __LINE__,
+ (unsigned long) ctrl_cmd.value, ctrl_cmd.len);
+
+ if (copy_to_user(to, &ctrl_cmd, sizeof(ctrl_cmd))) {
+ JPEG_PR_ERR("%s:%d]\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int msm_jpeg_evt_get_unblock(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ msm_jpeg_q_unblock(&pgmn_dev->evt_q);
+ return 0;
+}
+
+void msm_jpeg_reset_ack_irq(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+}
+
+void msm_jpeg_err_irq(struct msm_jpeg_device *pgmn_dev,
+ int event)
+{
+ int rc = 0;
+ struct msm_jpeg_core_buf buf;
+
+ JPEG_PR_ERR("%s:%d] error: %d\n", __func__, __LINE__, event);
+
+ buf.vbuf.type = MSM_JPEG_EVT_ERR;
+ rc = msm_jpeg_q_in_buf(&pgmn_dev->evt_q, &buf);
+ if (!rc)
+ rc = msm_jpeg_q_wakeup(&pgmn_dev->evt_q);
+
+	if (rc)
+		JPEG_PR_ERR("%s:%d] failed to queue error event\n",
+			__func__, __LINE__);
+}
+
+/*************** output queue ****************/
+
+int msm_jpeg_we_pingpong_irq(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf_in)
+{
+ int rc = 0;
+ struct msm_jpeg_core_buf *buf_out;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ if (buf_in) {
+ JPEG_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+ (int) buf_in->y_buffer_addr, buf_in->y_len);
+ rc = msm_jpeg_q_in_buf(&pgmn_dev->output_rtn_q, buf_in);
+ } else {
+ JPEG_DBG("%s:%d] no output return buffer\n", __func__,
+ __LINE__);
+ rc = -1;
+ return rc;
+ }
+
+ buf_out = msm_jpeg_q_out(&pgmn_dev->output_buf_q);
+
+ if (buf_out) {
+ JPEG_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+ (int) buf_out->y_buffer_addr, buf_out->y_len);
+ rc = msm_jpeg_core_we_buf_update(pgmn_dev, buf_out);
+ kfree(buf_out);
+ } else {
+ msm_jpeg_core_we_buf_reset(pgmn_dev, buf_in);
+ JPEG_DBG("%s:%d] no output buffer\n", __func__, __LINE__);
+ rc = -2;
+ }
+
+ if (buf_in)
+ rc = msm_jpeg_q_wakeup(&pgmn_dev->output_rtn_q);
+
+ return rc;
+}
+
+int msm_jpeg_output_get(struct msm_jpeg_device *pgmn_dev, void __user *to)
+{
+ struct msm_jpeg_core_buf *buf_p;
+ struct msm_jpeg_buf buf_cmd;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+ msm_jpeg_q_wait(&pgmn_dev->output_rtn_q);
+ buf_p = msm_jpeg_q_out(&pgmn_dev->output_rtn_q);
+
+ if (!buf_p) {
+ JPEG_DBG("%s:%d] no output buffer return\n",
+ __func__, __LINE__);
+ return -EAGAIN;
+ }
+
+ buf_cmd = buf_p->vbuf;
+ msm_jpeg_platform_p2v(pgmn_dev->iommu_hdl, buf_p->ion_fd);
+ kfree(buf_p);
+
+ JPEG_DBG("%s:%d] 0x%08lx %d\n", __func__, __LINE__,
+ (unsigned long) buf_cmd.vaddr, buf_cmd.y_len);
+
+ if (copy_to_user(to, &buf_cmd, sizeof(buf_cmd))) {
+  JPEG_PR_ERR("%s:%d]\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int msm_jpeg_output_get_unblock(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ msm_jpeg_q_unblock(&pgmn_dev->output_rtn_q);
+ return 0;
+}
+
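+/*
+ * msm_jpeg_add_u32_check - Sum an array of u32 values into *res and
+ * return -EFAULT if the addition would overflow.
+ */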
+static inline int msm_jpeg_add_u32_check(uint32_t *p, uint32_t n, uint32_t *res)
+{
+ *res = 0;
+
+ while (n--) {
+ if ((*res + *p) < *res)
+ return -EFAULT;
+ *res += *p++;
+ }
+ return 0;
+}
+
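+/*
+ * msm_jpeg_output_buf_enqueue - Validate the user supplied plane lengths,
+ * map the ion buffer through the SMMU, compute the per-plane addresses and
+ * queue the buffer on the output buffer queue.
+ */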
+int msm_jpeg_output_buf_enqueue(struct msm_jpeg_device *pgmn_dev,
+ void __user *arg)
+{
+ struct msm_jpeg_buf buf_cmd;
+ struct msm_jpeg_core_buf *buf_p;
+ uint32_t buf_len_params[10];
+ uint32_t total_len = 0;
+ int n = 0;
+
+ memset(&buf_cmd, 0x0, sizeof(struct msm_jpeg_buf));
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ if (copy_from_user(&buf_cmd, arg, sizeof(struct msm_jpeg_buf))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ buf_len_params[n++] = buf_cmd.y_len;
+ buf_len_params[n++] = buf_cmd.cbcr_len;
+ buf_len_params[n++] = buf_cmd.pln2_len;
+ buf_len_params[n++] = buf_cmd.offset;
+ buf_len_params[n++] = buf_cmd.y_off;
+ if (msm_jpeg_add_u32_check(buf_len_params, n, &total_len) < 0) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ buf_p = kmalloc(sizeof(struct msm_jpeg_core_buf), GFP_ATOMIC);
+ if (!buf_p) {
+ JPEG_PR_ERR("%s:%d] no mem\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ JPEG_DBG("%s:%d] vaddr = 0x%08lx y_len = %d, fd = %d\n",
+  __func__, __LINE__, (unsigned long) buf_cmd.vaddr,
+  buf_cmd.y_len, buf_cmd.fd);
+
+ buf_p->ion_fd = buf_cmd.fd;
+ buf_p->y_buffer_addr = msm_jpeg_platform_v2p(pgmn_dev, buf_cmd.fd,
+ total_len, pgmn_dev->iommu_hdl);
+
+ if (!buf_p->y_buffer_addr) {
+ JPEG_PR_ERR("%s:%d] v2p wrong\n", __func__, __LINE__);
+ kfree(buf_p);
+ return -EFAULT;
+ }
+
+ buf_p->y_buffer_addr += buf_cmd.offset + buf_cmd.y_off;
+
+ if (buf_cmd.cbcr_len)
+ buf_p->cbcr_buffer_addr = buf_p->y_buffer_addr +
+ buf_cmd.y_len;
+ else
+ buf_p->cbcr_buffer_addr = 0x0;
+
+ if (buf_cmd.pln2_len)
+ buf_p->pln2_addr = buf_p->cbcr_buffer_addr +
+ buf_cmd.cbcr_len;
+ else
+ buf_p->pln2_addr = 0x0;
+
+ JPEG_DBG("%s:%d] After v2p pln0_addr %x pln0_len %d\n",
+  __func__, __LINE__, buf_p->y_buffer_addr,
+  buf_cmd.y_len);
+
+ JPEG_DBG("pln1_len %d, pln1_addr %x, pln2_addr %x, pln2_len %d\n",
+  buf_cmd.cbcr_len, buf_p->cbcr_buffer_addr,
+  buf_p->pln2_addr, buf_cmd.pln2_len);
+
+ buf_p->y_len = buf_cmd.y_len;
+ buf_p->cbcr_len = buf_cmd.cbcr_len;
+ buf_p->pln2_len = buf_cmd.pln2_len;
+ buf_p->vbuf = buf_cmd;
+
+ msm_jpeg_q_in(&pgmn_dev->output_buf_q, buf_p);
+ return 0;
+}
+
+/*************** input queue ****************/
+
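+/*
+ * msm_jpeg_fe_pingpong_irq - Fetch-engine ping-pong interrupt: return the
+ * consumed buffer to the input return queue and start the fetch engine on
+ * the next queued input buffer.
+ */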
+int msm_jpeg_fe_pingpong_irq(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf_in)
+{
+ struct msm_jpeg_core_buf *buf_out;
+ int rc = 0;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ if (buf_in) {
+ JPEG_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+ (int) buf_in->y_buffer_addr, buf_in->y_len);
+ rc = msm_jpeg_q_in_buf(&pgmn_dev->input_rtn_q, buf_in);
+ } else {
+ JPEG_DBG("%s:%d] no input return buffer\n", __func__,
+ __LINE__);
+ rc = -EFAULT;
+ }
+
+ buf_out = msm_jpeg_q_out(&pgmn_dev->input_buf_q);
+
+ if (buf_out) {
+ rc = msm_jpeg_core_fe_buf_update(pgmn_dev, buf_out);
+ kfree(buf_out);
+ msm_jpeg_core_fe_start(pgmn_dev);
+ } else {
+ JPEG_DBG("%s:%d] no input buffer\n", __func__, __LINE__);
+ rc = -EFAULT;
+ }
+
+ if (buf_in)
+ rc = msm_jpeg_q_wakeup(&pgmn_dev->input_rtn_q);
+
+ return rc;
+}
+
+int msm_jpeg_input_get(struct msm_jpeg_device *pgmn_dev, void __user *to)
+{
+ struct msm_jpeg_core_buf *buf_p;
+ struct msm_jpeg_buf buf_cmd;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ msm_jpeg_q_wait(&pgmn_dev->input_rtn_q);
+ buf_p = msm_jpeg_q_out(&pgmn_dev->input_rtn_q);
+
+ if (!buf_p) {
+ JPEG_DBG("%s:%d] no input buffer return\n",
+ __func__, __LINE__);
+ return -EAGAIN;
+ }
+
+ buf_cmd = buf_p->vbuf;
+
+ msm_jpeg_platform_p2v(pgmn_dev->iommu_hdl, buf_p->ion_fd);
+ kfree(buf_p);
+
+ JPEG_DBG("%s:%d] 0x%08lx %d\n", __func__, __LINE__,
+ (unsigned long) buf_cmd.vaddr, buf_cmd.y_len);
+
+ if (copy_to_user(to, &buf_cmd, sizeof(buf_cmd))) {
+ JPEG_PR_ERR("%s:%d]\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int msm_jpeg_input_get_unblock(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ msm_jpeg_q_unblock(&pgmn_dev->input_rtn_q);
+ return 0;
+}
+
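+/*
+ * msm_jpeg_input_buf_enqueue - Validate the user supplied plane lengths,
+ * map the ion buffer through the SMMU, compute the per-plane addresses and
+ * queue the buffer on the input buffer queue.
+ */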
+int msm_jpeg_input_buf_enqueue(struct msm_jpeg_device *pgmn_dev,
+ void __user *arg)
+{
+ struct msm_jpeg_core_buf *buf_p;
+ struct msm_jpeg_buf buf_cmd;
+ uint32_t buf_len_params[10];
+ uint32_t total_len = 0;
+ int n = 0;
+
+ memset(&buf_cmd, 0x0, sizeof(struct msm_jpeg_buf));
+
+ if (copy_from_user(&buf_cmd, arg, sizeof(struct msm_jpeg_buf))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ buf_len_params[n++] = buf_cmd.y_len;
+ buf_len_params[n++] = buf_cmd.cbcr_len;
+ buf_len_params[n++] = buf_cmd.pln2_len;
+ buf_len_params[n++] = buf_cmd.offset;
+ buf_len_params[n++] = buf_cmd.y_off;
+ if (buf_cmd.cbcr_len)
+ buf_len_params[n++] = buf_cmd.cbcr_off;
+ if (buf_cmd.pln2_len)
+ buf_len_params[n++] = buf_cmd.pln2_off;
+
+ if (msm_jpeg_add_u32_check(buf_len_params, n, &total_len) < 0) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ buf_p = kmalloc(sizeof(struct msm_jpeg_core_buf), GFP_ATOMIC);
+ if (!buf_p) {
+ JPEG_PR_ERR("%s:%d] no mem\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ JPEG_DBG("%s:%d] 0x%08lx %d\n", __func__, __LINE__,
+ (unsigned long) buf_cmd.vaddr, buf_cmd.y_len);
+
+ buf_p->ion_fd = buf_cmd.fd;
+ buf_p->y_buffer_addr = msm_jpeg_platform_v2p(pgmn_dev, buf_cmd.fd,
+ total_len, pgmn_dev->iommu_hdl);
+
+ if (!buf_p->y_buffer_addr) {
+ JPEG_PR_ERR("%s:%d] v2p wrong\n", __func__, __LINE__);
+ kfree(buf_p);
+ return -EFAULT;
+ }
+
+ buf_p->y_buffer_addr += buf_cmd.offset + buf_cmd.y_off;
+
+ buf_p->y_len = buf_cmd.y_len;
+ buf_p->cbcr_len = buf_cmd.cbcr_len;
+ buf_p->pln2_len = buf_cmd.pln2_len;
+ buf_p->num_of_mcu_rows = buf_cmd.num_of_mcu_rows;
+
+ if (buf_cmd.cbcr_len)
+ buf_p->cbcr_buffer_addr = buf_p->y_buffer_addr +
+ buf_cmd.y_len + buf_cmd.cbcr_off;
+ else
+ buf_p->cbcr_buffer_addr = 0x0;
+
+ if (buf_cmd.pln2_len)
+ buf_p->pln2_addr = buf_p->cbcr_buffer_addr +
+ buf_cmd.cbcr_len + buf_cmd.pln2_off;
+ else
+ buf_p->pln2_addr = 0x0;
+
+ JPEG_DBG("%s: y_addr=%x, y_len=%d, cbcr_addr=%x, cbcr_len=%d\n",
+  __func__, buf_p->y_buffer_addr, buf_p->y_len,
+  buf_p->cbcr_buffer_addr, buf_p->cbcr_len);
+ JPEG_DBG("pln2_addr = %x, pln2_len = %d, fd = %d\n",
+  buf_p->pln2_addr, buf_p->pln2_len, buf_cmd.fd);
+
+ buf_p->vbuf = buf_cmd;
+
+ msm_jpeg_q_in(&pgmn_dev->input_buf_q, buf_p);
+
+ return 0;
+}
+
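+/*
+ * msm_jpeg_irq - Top level IRQ dispatcher: fan the hardware event out to
+ * the frame-done, fetch-engine, write-engine, reset-ack or error handlers.
+ */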
+int msm_jpeg_irq(int event, void *context, void *data)
+{
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *) context;
+
+ switch (event) {
+ case MSM_JPEG_EVT_SESSION_DONE:
+ msm_jpeg_framedone_irq(pgmn_dev, data);
+ msm_jpeg_we_pingpong_irq(pgmn_dev, data);
+ break;
+
+ case MSM_JPEG_HW_MASK_COMP_FE:
+ msm_jpeg_fe_pingpong_irq(pgmn_dev, data);
+ break;
+
+ case MSM_JPEG_HW_MASK_COMP_WE:
+ msm_jpeg_we_pingpong_irq(pgmn_dev, data);
+ break;
+
+ case MSM_JPEG_HW_MASK_COMP_RESET_ACK:
+ msm_jpeg_reset_ack_irq(pgmn_dev);
+ break;
+
+ case MSM_JPEG_HW_MASK_COMP_ERR:
+ default:
+ msm_jpeg_err_irq(pgmn_dev, event);
+ break;
+ }
+
+ return 0;
+}
+
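+/*
+ * __msm_jpeg_open - Single-open entry point: mark the device MSM_JPEG_INIT,
+ * vote for the AHB clock, install the core IRQ handler, initialize the
+ * platform resources and flush all buffer and event queues.
+ */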
+int __msm_jpeg_open(struct msm_jpeg_device *pgmn_dev)
+{
+ int rc;
+ irqreturn_t (*core_irq)(int, void *);
+
+ mutex_lock(&pgmn_dev->lock);
+ if (pgmn_dev->open_count) {
+ /* only open once */
+ JPEG_PR_ERR("%s:%d] busy\n", __func__, __LINE__);
+ mutex_unlock(&pgmn_dev->lock);
+ return -EBUSY;
+ }
+ pgmn_dev->open_count++;
+ if (pgmn_dev->open_count == 1)
+ pgmn_dev->state = MSM_JPEG_INIT;
+
+ mutex_unlock(&pgmn_dev->lock);
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_JPEG,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ msm_jpeg_core_irq_install(msm_jpeg_irq);
+ if (pgmn_dev->core_type == MSM_JPEG_CORE_CODEC)
+ core_irq = msm_jpeg_core_irq;
+ else
+ core_irq = msm_jpegdma_core_irq;
+
+ /* initialize the platform resources */
+ rc = msm_jpeg_platform_init(core_irq, pgmn_dev);
+ if (rc) {
+ JPEG_PR_ERR("%s:%d] platform_init fail %d\n", __func__,
+ __LINE__, rc);
+ goto platform_init_fail;
+ }
+ JPEG_DBG("%s:%d] platform resources - base %pK, irq %d\n",
+ __func__, __LINE__,
+ pgmn_dev->base, (int)pgmn_dev->jpeg_irq_res->start);
+ msm_jpeg_q_cleanup(&pgmn_dev->evt_q);
+ msm_jpeg_q_cleanup(&pgmn_dev->output_rtn_q);
+ msm_jpeg_outbuf_q_cleanup(pgmn_dev, &pgmn_dev->output_buf_q);
+ msm_jpeg_q_cleanup(&pgmn_dev->input_rtn_q);
+ msm_jpeg_q_cleanup(&pgmn_dev->input_buf_q);
+ msm_jpeg_core_init(pgmn_dev);
+
+ JPEG_DBG("%s:%d] success\n", __func__, __LINE__);
+ return rc;
+
+platform_init_fail:
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_JPEG,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return rc;
+}
+
+int __msm_jpeg_release(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ mutex_lock(&pgmn_dev->lock);
+ if (!pgmn_dev->open_count) {
+  JPEG_PR_ERR("%s: not opened\n", __func__);
+ mutex_unlock(&pgmn_dev->lock);
+ return -EINVAL;
+ }
+ pgmn_dev->open_count--;
+ mutex_unlock(&pgmn_dev->lock);
+
+ msm_jpeg_core_release(pgmn_dev);
+ msm_jpeg_q_cleanup(&pgmn_dev->evt_q);
+ msm_jpeg_q_cleanup(&pgmn_dev->output_rtn_q);
+ msm_jpeg_outbuf_q_cleanup(pgmn_dev, &pgmn_dev->output_buf_q);
+ msm_jpeg_q_cleanup(&pgmn_dev->input_rtn_q);
+ msm_jpeg_outbuf_q_cleanup(pgmn_dev, &pgmn_dev->input_buf_q);
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+ if (pgmn_dev->open_count)
+  JPEG_PR_ERR("%s: multiple opens\n", __func__);
+
+ /* release the platform resources */
+ msm_jpeg_platform_release(pgmn_dev);
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_JPEG,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+
+ return 0;
+}
+
+int msm_jpeg_ioctl_hw_cmd(struct msm_jpeg_device *pgmn_dev,
+ void * __user arg)
+{
+ struct msm_jpeg_hw_cmd hw_cmd;
+ int is_copy_to_user;
+
+ if (copy_from_user(&hw_cmd, arg, sizeof(struct msm_jpeg_hw_cmd))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ is_copy_to_user = msm_jpeg_hw_exec_cmds(&hw_cmd, 1,
+ pgmn_dev->res_size, pgmn_dev->base);
+ JPEG_DBG(
+ "%s:%d] type %d, n %d, offset %d, mask %x, data %x, pdata %lx\n",
+ __func__, __LINE__, hw_cmd.type, hw_cmd.n, hw_cmd.offset,
+ hw_cmd.mask, hw_cmd.data, (unsigned long) hw_cmd.pdata);
+
+ if (is_copy_to_user >= 0) {
+ if (copy_to_user(arg, &hw_cmd, sizeof(hw_cmd))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ } else {
+ return is_copy_to_user;
+ }
+
+ return 0;
+}
+
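+/*
+ * msm_jpeg_ioctl_hw_cmds - Copy a variable length array of hardware
+ * commands from userspace, execute them against the register space and
+ * copy the results back when requested.
+ */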
+int msm_jpeg_ioctl_hw_cmds(struct msm_jpeg_device *pgmn_dev,
+ void * __user arg)
+{
+ int is_copy_to_user;
+ uint32_t len;
+ uint32_t m;
+ struct msm_jpeg_hw_cmds *hw_cmds_p;
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+
+ if (copy_from_user(&m, arg, sizeof(m))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if ((m == 0) || (m > ((UINT32_MAX - sizeof(struct msm_jpeg_hw_cmds)) /
+ sizeof(struct msm_jpeg_hw_cmd)))) {
+ JPEG_PR_ERR("%s:%d] m_cmds out of range\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ len = sizeof(struct msm_jpeg_hw_cmds) +
+ sizeof(struct msm_jpeg_hw_cmd) * (m - 1);
+ hw_cmds_p = kmalloc(len, GFP_KERNEL);
+ if (!hw_cmds_p) {
+ JPEG_PR_ERR("%s:%d] no mem %d\n", __func__, __LINE__, len);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(hw_cmds_p, arg, len)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ kfree(hw_cmds_p);
+ return -EFAULT;
+ }
+
+ hw_cmd_p = (struct msm_jpeg_hw_cmd *) &(hw_cmds_p->hw_cmd);
+
+ is_copy_to_user = msm_jpeg_hw_exec_cmds(hw_cmd_p, m,
+ pgmn_dev->res_size, pgmn_dev->base);
+
+ if (is_copy_to_user >= 0) {
+ if (copy_to_user(arg, hw_cmds_p, len)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ kfree(hw_cmds_p);
+ return -EFAULT;
+ }
+ } else {
+ kfree(hw_cmds_p);
+ return is_copy_to_user;
+ }
+ kfree(hw_cmds_p);
+ return 0;
+}
+
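+/*
+ * msm_jpeg_start - Vote for bus bandwidth, prime the fetch and write
+ * engines with up to two queued input and output buffers, mark the device
+ * MSM_JPEG_EXECUTING and issue the start sequence via the hw_ioctl callback.
+ */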
+int msm_jpeg_start(struct msm_jpeg_device *pgmn_dev, void * __user arg,
+ int (*hw_ioctl)(struct msm_jpeg_device *, void * __user))
+{
+ struct msm_jpeg_core_buf *buf_out;
+ struct msm_jpeg_core_buf *buf_out_free[2] = {NULL, NULL};
+ int i, rc;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+ msm_jpeg_platform_set_dt_config(pgmn_dev);
+
+ /* update the bw with vector index "1" */
+ msm_camera_update_bus_vector(pgmn_dev->bus_client, 1);
+ JPEG_BUS_VOTED(pgmn_dev);
+ JPEG_DBG("%s:%d] Bus Voted\n", __func__, __LINE__);
+
+ pgmn_dev->release_buf = 1;
+ for (i = 0; i < 2; i++) {
+ buf_out = msm_jpeg_q_out(&pgmn_dev->input_buf_q);
+
+ if (buf_out) {
+ msm_jpeg_core_fe_buf_update(pgmn_dev, buf_out);
+ kfree(buf_out);
+ } else {
+ JPEG_DBG("%s:%d] no input buffer\n", __func__,
+ __LINE__);
+ break;
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ buf_out_free[i] = msm_jpeg_q_out(&pgmn_dev->output_buf_q);
+
+ if (buf_out_free[i]) {
+ msm_jpeg_core_we_buf_update(pgmn_dev, buf_out_free[i]);
+ pgmn_dev->release_buf = 0;
+ } else {
+ JPEG_DBG("%s:%d] no output buffer\n",
+ __func__, __LINE__);
+ break;
+ }
+ }
+
+ for (i = 0; i < 2; i++)
+ kfree(buf_out_free[i]);
+
+ JPEG_DBG_HIGH("%s:%d] START\n", __func__, __LINE__);
+ pgmn_dev->state = MSM_JPEG_EXECUTING;
+ /* ensure write is done */
+ wmb();
+ rc = hw_ioctl(pgmn_dev, arg);
+ /* ensure write is done */
+ wmb();
+ JPEG_DBG("%s:%d]", __func__, __LINE__);
+ return rc;
+}
+
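+/*
+ * msm_jpeg_ioctl_reset - Reset the core with the operation mode supplied
+ * by userspace; valid only while the device is in the MSM_JPEG_INIT state.
+ */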
+int msm_jpeg_ioctl_reset(struct msm_jpeg_device *pgmn_dev, void * __user arg)
+{
+ int rc;
+ struct msm_jpeg_ctrl_cmd ctrl_cmd, *p_ctrl_cmd;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ p_ctrl_cmd = &ctrl_cmd;
+
+ if (pgmn_dev->state == MSM_JPEG_INIT) {
+ if (copy_from_user(&ctrl_cmd, arg, sizeof(ctrl_cmd))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ pgmn_dev->op_mode = p_ctrl_cmd->type;
+
+ rc = msm_jpeg_core_reset(pgmn_dev, pgmn_dev->op_mode,
+ pgmn_dev->base, pgmn_dev->res_size);
+ } else {
+  JPEG_PR_ERR("%s:%d] JPEG not initialized, wrong state\n",
+   __func__, __LINE__);
+ rc = -1;
+ }
+ return rc;
+}
+
+int msm_jpeg_ioctl_test_dump_region(struct msm_jpeg_device *pgmn_dev,
+ unsigned long arg)
+{
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ msm_jpeg_io_dump(pgmn_dev->base, JPEG_REG_SIZE);
+ return 0;
+}
+
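+/*
+ * msm_jpeg_ioctl_set_clk_rate - Set the JPEG core clock to the rate
+ * requested by userspace; allowed only in the INIT or RESET states.
+ */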
+int msm_jpeg_ioctl_set_clk_rate(struct msm_jpeg_device *pgmn_dev,
+ void * __user arg)
+{
+ long clk_rate;
+ int rc;
+
+ if ((pgmn_dev->state != MSM_JPEG_INIT) &&
+ (pgmn_dev->state != MSM_JPEG_RESET)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (get_user(clk_rate, (unsigned int __user *)arg)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ JPEG_DBG("%s:%d] Requested clk rate %ld\n", __func__, __LINE__,
+ clk_rate);
+ if (clk_rate < 0) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ rc = msm_jpeg_platform_set_clk_rate(pgmn_dev, clk_rate);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+#ifdef CONFIG_COMPAT
+int msm_jpeg_get_ctrl_cmd32(struct msm_jpeg_ctrl_cmd *ctrl_cmd,
+ void __user *arg)
+{
+ struct msm_jpeg_ctrl_cmd32 ctrl_cmd32;
+ unsigned long temp;
+
+ if (copy_from_user(&ctrl_cmd32, arg,
+ sizeof(struct msm_jpeg_ctrl_cmd32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ ctrl_cmd->type = ctrl_cmd32.type;
+ ctrl_cmd->len = ctrl_cmd32.len;
+ temp = (unsigned long) ctrl_cmd32.value;
+ ctrl_cmd->value = (void *) temp;
+
+ return 0;
+}
+int msm_jpeg_put_ctrl_cmd32(struct msm_jpeg_ctrl_cmd *ctrl_cmd,
+ void __user *arg)
+{
+ struct msm_jpeg_ctrl_cmd32 ctrl_cmd32;
+ unsigned long temp;
+
+ ctrl_cmd32.type = ctrl_cmd->type;
+ ctrl_cmd32.len = ctrl_cmd->len;
+ temp = (unsigned long) ctrl_cmd->value;
+ ctrl_cmd32.value = (compat_uptr_t) temp;
+
+ if (copy_to_user(arg, &ctrl_cmd32,
+ sizeof(struct msm_jpeg_ctrl_cmd32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int msm_jpeg_get_jpeg_buf32(struct msm_jpeg_buf *jpeg_buf,
+ void __user *arg)
+{
+ struct msm_jpeg_buf32 jpeg_buf32;
+ unsigned long temp;
+
+ if (copy_from_user(&jpeg_buf32, arg, sizeof(struct msm_jpeg_buf32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ jpeg_buf->type = jpeg_buf32.type;
+ jpeg_buf->fd = jpeg_buf32.fd;
+ temp = (unsigned long) jpeg_buf32.vaddr;
+ jpeg_buf->vaddr = (void *) temp;
+ jpeg_buf->y_off = jpeg_buf32.y_off;
+ jpeg_buf->y_len = jpeg_buf32.y_len;
+ jpeg_buf->framedone_len = jpeg_buf32.framedone_len;
+ jpeg_buf->cbcr_off = jpeg_buf32.cbcr_off;
+ jpeg_buf->cbcr_len = jpeg_buf32.cbcr_len;
+ jpeg_buf->num_of_mcu_rows = jpeg_buf32.num_of_mcu_rows;
+ jpeg_buf->offset = jpeg_buf32.offset;
+ jpeg_buf->pln2_off = jpeg_buf32.pln2_off;
+ jpeg_buf->pln2_len = jpeg_buf32.pln2_len;
+
+ return 0;
+}
+int msm_jpeg_put_jpeg_buf32(struct msm_jpeg_buf *jpeg_buf,
+ void __user *arg)
+{
+ struct msm_jpeg_buf32 jpeg_buf32;
+ unsigned long temp;
+
+ jpeg_buf32.type = jpeg_buf->type;
+ jpeg_buf32.fd = jpeg_buf->fd;
+ temp = (unsigned long) jpeg_buf->vaddr;
+ jpeg_buf32.vaddr = (compat_uptr_t) temp;
+ jpeg_buf32.y_off = jpeg_buf->y_off;
+ jpeg_buf32.y_len = jpeg_buf->y_len;
+ jpeg_buf32.framedone_len = jpeg_buf->framedone_len;
+ jpeg_buf32.cbcr_off = jpeg_buf->cbcr_off;
+ jpeg_buf32.cbcr_len = jpeg_buf->cbcr_len;
+ jpeg_buf32.num_of_mcu_rows = jpeg_buf->num_of_mcu_rows;
+ jpeg_buf32.offset = jpeg_buf->offset;
+ jpeg_buf32.pln2_off = jpeg_buf->pln2_off;
+ jpeg_buf32.pln2_len = jpeg_buf->pln2_len;
+
+ if (copy_to_user(arg, &jpeg_buf32, sizeof(struct msm_jpeg_buf32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int msm_jpeg_put_hw_cmd32(void __user *arg,
+ struct msm_jpeg_hw_cmd *phw_cmd, int copy)
+{
+ struct msm_jpeg_hw_cmd32 hw_cmd32;
+ struct msm_jpeg_hw_cmd32 *phw_cmd32;
+
+ phw_cmd32 = (struct msm_jpeg_hw_cmd32 *) arg;
+ if (copy)
+ phw_cmd32 = &hw_cmd32;
+
+
+ phw_cmd32->type = phw_cmd->type;
+ phw_cmd32->n = phw_cmd->n;
+ phw_cmd32->offset = phw_cmd->offset;
+ phw_cmd32->mask = phw_cmd->mask;
+ phw_cmd32->data = phw_cmd->data;
+
+ if (copy && copy_to_user(arg, &hw_cmd32, sizeof(hw_cmd32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+int msm_jpeg_get_hw_cmd32(struct msm_jpeg_hw_cmd *phw_cmd,
+ void __user *arg, int copy)
+{
+ struct msm_jpeg_hw_cmd32 hw_cmd32;
+ struct msm_jpeg_hw_cmd32 *phw_cmd32;
+
+ if (copy) {
+ phw_cmd32 = &hw_cmd32;
+ if (copy_from_user(&hw_cmd32, arg, sizeof(hw_cmd32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ } else {
+ phw_cmd32 = (struct msm_jpeg_hw_cmd32 *) arg;
+ }
+ phw_cmd->type = phw_cmd32->type;
+ phw_cmd->n = phw_cmd32->n;
+ phw_cmd->offset = phw_cmd32->offset;
+ phw_cmd->mask = phw_cmd32->mask;
+ phw_cmd->data = phw_cmd32->data;
+
+ return 0;
+}
+int msm_jpeg_ioctl_hw_cmds32(struct msm_jpeg_device *pgmn_dev,
+ void __user *arg)
+{
+ int is_copy_to_user;
+ uint32_t len, len32;
+ uint32_t m;
+ struct msm_jpeg_hw_cmds32 *phw_cmds32;
+ struct msm_jpeg_hw_cmds *phw_cmds;
+
+ if (copy_from_user(&m, arg, sizeof(m))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if ((m == 0) || (m > ((UINT32_MAX - sizeof(struct msm_jpeg_hw_cmds32)) /
+ sizeof(struct msm_jpeg_hw_cmd32)))) {
+ JPEG_PR_ERR("%s:%d] m_cmds out of range\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ len32 = sizeof(struct msm_jpeg_hw_cmds32) +
+ sizeof(struct msm_jpeg_hw_cmd32) * (m - 1);
+ phw_cmds32 = kmalloc(len32, GFP_KERNEL);
+ if (!phw_cmds32) {
+ JPEG_PR_ERR("%s:%d] no mem %d\n", __func__, __LINE__, len32);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(phw_cmds32, arg, len32)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ kfree(phw_cmds32);
+ return -EFAULT;
+ }
+ len = sizeof(struct msm_jpeg_hw_cmds) +
+ sizeof(struct msm_jpeg_hw_cmd) * (m - 1);
+ phw_cmds = kmalloc(len, GFP_KERNEL);
+ if (!phw_cmds) {
+ JPEG_PR_ERR("%s:%d] no mem %d\n", __func__, __LINE__, len);
+ kfree(phw_cmds32);
+ return -EFAULT;
+ }
+ (phw_cmds)->m = m;
+ while (m--) {
+ struct msm_jpeg_hw_cmd32 *src;
+ struct msm_jpeg_hw_cmd *dst;
+
+ src = &phw_cmds32->hw_cmd[m];
+ dst = &(phw_cmds)->hw_cmd[m];
+ msm_jpeg_get_hw_cmd32(dst, src, 0);
+ }
+
+ is_copy_to_user = msm_jpeg_hw_exec_cmds(phw_cmds->hw_cmd, phw_cmds->m,
+ pgmn_dev->res_size, pgmn_dev->base);
+
+ if (is_copy_to_user >= 0) {
+ m = phw_cmds->m;
+ while (m--) {
+ struct msm_jpeg_hw_cmd *src;
+ struct msm_jpeg_hw_cmd32 *dst;
+
+ dst = &phw_cmds32->hw_cmd[m];
+ src = &phw_cmds->hw_cmd[m];
+
+ msm_jpeg_put_hw_cmd32(dst, src, 0);
+ }
+ if (copy_to_user(arg, phw_cmds32, len32)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ kfree(phw_cmds);
+ kfree(phw_cmds32);
+ return -EFAULT;
+ }
+
+ } else {
+ kfree(phw_cmds);
+ kfree(phw_cmds32);
+ return is_copy_to_user;
+ }
+ kfree(phw_cmds);
+ kfree(phw_cmds32);
+
+ return 0;
+}
+int msm_jpeg_ioctl_hw_cmd32(struct msm_jpeg_device *pgmn_dev,
+ void * __user arg)
+{
+ struct msm_jpeg_hw_cmd hw_cmd;
+ int is_copy_to_user;
+
+ if (msm_jpeg_get_hw_cmd32(&hw_cmd, arg, 1)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ is_copy_to_user = msm_jpeg_hw_exec_cmds(&hw_cmd, 1,
+ pgmn_dev->res_size, pgmn_dev->base);
+ JPEG_DBG("%s:%d] type %d, n %d, offst %d, mask %x, data %x pdata %lx\n",
+ __func__, __LINE__, hw_cmd.type, hw_cmd.n, hw_cmd.offset,
+ hw_cmd.mask, hw_cmd.data, (unsigned long) hw_cmd.pdata);
+
+ if (is_copy_to_user >= 0) {
+ if (msm_jpeg_put_hw_cmd32(arg, &hw_cmd, 1)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ } else
+ return is_copy_to_user;
+
+
+ return 0;
+}
+
+long __msm_jpeg_compat_ioctl(struct msm_jpeg_device *pgmn_dev,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ struct msm_jpeg_ctrl_cmd ctrl_cmd;
+ struct msm_jpeg_buf jpeg_buf;
+ mm_segment_t old_fs;
+
+ old_fs = get_fs();
+
+ switch (cmd) {
+ case MSM_JPEG_IOCTL_GET_HW_VERSION:
+ JPEG_DBG("%s:%d] VERSION 1\n", __func__, __LINE__);
+ rc = msm_jpeg_ioctl_hw_cmd(pgmn_dev, (void __user *) arg);
+ break;
+ case MSM_JPEG_IOCTL_GET_HW_VERSION32:
+ JPEG_DBG("%s:%d] VERSION 1 32bit\n", __func__, __LINE__);
+ rc = msm_jpeg_ioctl_hw_cmd32(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_RESET:
+ rc = msm_jpeg_ioctl_reset(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_RESET32:
+ rc = msm_jpeg_get_ctrl_cmd32(&ctrl_cmd,
+ (void __user *) arg);
+ if (rc < 0)
+ break;
+
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_ioctl_reset(pgmn_dev, (void __user *) &ctrl_cmd);
+ set_fs(old_fs);
+ break;
+
+ case MSM_JPEG_IOCTL_STOP:
+ rc = msm_jpeg_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
+ pgmn_dev->state = MSM_JPEG_STOPPED;
+ break;
+
+ case MSM_JPEG_IOCTL_STOP32:
+ rc = msm_jpeg_ioctl_hw_cmds32(pgmn_dev, (void __user *) arg);
+ pgmn_dev->state = MSM_JPEG_STOPPED;
+ break;
+
+ case MSM_JPEG_IOCTL_START:
+ rc = msm_jpeg_start(pgmn_dev, (void __user *) arg,
+ msm_jpeg_ioctl_hw_cmds);
+ break;
+
+ case MSM_JPEG_IOCTL_START32:
+ rc = msm_jpeg_start(pgmn_dev, (void __user *) arg,
+ msm_jpeg_ioctl_hw_cmds32);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE:
+ rc = msm_jpeg_input_buf_enqueue(pgmn_dev,
+ (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE32:
+ rc = msm_jpeg_get_jpeg_buf32(&jpeg_buf, (void __user *) arg);
+ if (rc < 0)
+ break;
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_input_buf_enqueue(pgmn_dev,
+ (void __user *) &jpeg_buf);
+ set_fs(old_fs);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_GET:
+ rc = msm_jpeg_input_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_GET32:
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_input_get(pgmn_dev, (void __user *) &jpeg_buf);
+ set_fs(old_fs);
+ if (rc < 0)
+ break;
+ rc = msm_jpeg_put_jpeg_buf32(&jpeg_buf, (void __user *) arg);
+
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_GET_UNBLOCK:
+ rc = msm_jpeg_input_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE:
+ rc = msm_jpeg_output_buf_enqueue(pgmn_dev,
+ (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE32:
+ rc = msm_jpeg_get_jpeg_buf32(&jpeg_buf, (void __user *) arg);
+ if (rc < 0)
+ break;
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_output_buf_enqueue(pgmn_dev,
+ (void __user *) &jpeg_buf);
+ set_fs(old_fs);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_GET:
+ rc = msm_jpeg_output_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_GET32:
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_output_get(pgmn_dev, (void __user *) &jpeg_buf);
+ set_fs(old_fs);
+ if (rc < 0)
+ break;
+ rc = msm_jpeg_put_jpeg_buf32(&jpeg_buf, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_GET_UNBLOCK:
+ rc = msm_jpeg_output_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_EVT_GET:
+ rc = msm_jpeg_evt_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_EVT_GET32:
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_evt_get(pgmn_dev, (void __user *) &ctrl_cmd);
+ set_fs(old_fs);
+ if (rc < 0)
+ break;
+ msm_jpeg_put_ctrl_cmd32(&ctrl_cmd, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_EVT_GET_UNBLOCK:
+ rc = msm_jpeg_evt_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMD32:
+ rc = msm_jpeg_ioctl_hw_cmd32(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMD:
+ rc = msm_jpeg_ioctl_hw_cmd(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMDS32:
+ rc = msm_jpeg_ioctl_hw_cmds32(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMDS:
+ rc = msm_jpeg_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_TEST_DUMP_REGION:
+ rc = msm_jpeg_ioctl_test_dump_region(pgmn_dev, arg);
+ break;
+
+ case MSM_JPEG_IOCTL_TEST_DUMP_REGION32:
+ rc = msm_jpeg_ioctl_test_dump_region(pgmn_dev, arg);
+ break;
+
+ case MSM_JPEG_IOCTL_SET_CLK_RATE:
+ rc = msm_jpeg_ioctl_set_clk_rate(pgmn_dev,
+ (void __user *) arg);
+ break;
+
+ default:
+  JPEG_PR_ERR("%s:%d] cmd = %d not supported\n",
+ __func__, __LINE__, _IOC_NR(cmd));
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+#else
+long __msm_jpeg_compat_ioctl(struct msm_jpeg_device *pgmn_dev,
+ unsigned int cmd, unsigned long arg)
+{
+ return 0;
+}
+#endif
+
+long __msm_jpeg_ioctl(struct msm_jpeg_device *pgmn_dev,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+
+ switch (cmd) {
+ case MSM_JPEG_IOCTL_GET_HW_VERSION:
+ JPEG_DBG("%s:%d] VERSION 1\n", __func__, __LINE__);
+ rc = msm_jpeg_ioctl_hw_cmd(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_RESET:
+ rc = msm_jpeg_ioctl_reset(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_STOP:
+ rc = msm_jpeg_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
+ pgmn_dev->state = MSM_JPEG_STOPPED;
+ break;
+
+ case MSM_JPEG_IOCTL_START:
+ rc = msm_jpeg_start(pgmn_dev, (void __user *) arg,
+ msm_jpeg_ioctl_hw_cmds);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE:
+ rc = msm_jpeg_input_buf_enqueue(pgmn_dev,
+ (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_GET:
+ rc = msm_jpeg_input_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_GET_UNBLOCK:
+ rc = msm_jpeg_input_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE:
+ rc = msm_jpeg_output_buf_enqueue(pgmn_dev,
+ (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_GET:
+ rc = msm_jpeg_output_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_GET_UNBLOCK:
+ rc = msm_jpeg_output_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_EVT_GET:
+ rc = msm_jpeg_evt_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_EVT_GET_UNBLOCK:
+ rc = msm_jpeg_evt_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMD:
+ rc = msm_jpeg_ioctl_hw_cmd(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMDS:
+ rc = msm_jpeg_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_TEST_DUMP_REGION:
+ rc = msm_jpeg_ioctl_test_dump_region(pgmn_dev, arg);
+ break;
+
+ case MSM_JPEG_IOCTL_SET_CLK_RATE:
+ rc = msm_jpeg_ioctl_set_clk_rate(pgmn_dev, (void __user *) arg);
+ break;
+ default:
+ pr_err_ratelimited("%s:%d] cmd = %d not supported\n",
+ __func__, __LINE__, _IOC_NR(cmd));
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
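+/*
+ * __msm_jpeg_init - Probe-time setup: initialize the buffer and event
+ * queues, acquire the SMMU handle for this core and set up the platform
+ * resources.
+ */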
+int __msm_jpeg_init(struct msm_jpeg_device *pgmn_dev)
+{
+ int rc = 0;
+ int idx = 0;
+
+ char *iommu_name[JPEG_DEV_CNT] = {"jpeg_enc0", "jpeg_enc1",
+ "jpeg_dec", "jpeg_dma"};
+
+
+ mutex_init(&pgmn_dev->lock);
+
+ pr_err("%s:%d] Jpeg Device id %d\n", __func__, __LINE__,
+ pgmn_dev->pdev->id);
+ idx = pgmn_dev->pdev->id;
+ pgmn_dev->idx = idx;
+ pgmn_dev->decode_flag = (idx == JPEG_DEC_ID);
+
+ msm_jpeg_q_init("evt_q", &pgmn_dev->evt_q);
+ msm_jpeg_q_init("output_rtn_q", &pgmn_dev->output_rtn_q);
+ msm_jpeg_q_init("output_buf_q", &pgmn_dev->output_buf_q);
+ msm_jpeg_q_init("input_rtn_q", &pgmn_dev->input_rtn_q);
+ msm_jpeg_q_init("input_buf_q", &pgmn_dev->input_buf_q);
+
+ /* get device context for IOMMU */
+ rc = cam_smmu_get_handle(iommu_name[idx], &pgmn_dev->iommu_hdl);
+ JPEG_DBG("%s:%d] hdl %d", __func__, __LINE__,
+ pgmn_dev->iommu_hdl);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: No iommu fw context found\n",
+ __func__);
+ goto err_smmu;
+ }
+
+ /* setup all the resources for the jpeg driver */
+ rc = msm_jpeg_platform_setup(pgmn_dev);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: setup failed\n",
+ __func__);
+ goto err_setup;
+ }
+
+ return rc;
+err_setup:
+err_smmu:
+ mutex_destroy(&pgmn_dev->lock);
+ return -EFAULT;
+}
+
+int __msm_jpeg_exit(struct msm_jpeg_device *pgmn_dev)
+{
+ msm_jpeg_platform_cleanup(pgmn_dev);
+ mutex_destroy(&pgmn_dev->lock);
+ kfree(pgmn_dev);
+ return 0;
+}
diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_sync.h b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_sync.h
new file mode 100644
index 000000000000..1fe62a7b3555
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_sync.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_SYNC_H
+#define MSM_JPEG_SYNC_H
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include "msm_camera_io_util.h"
+#include "msm_jpeg_hw.h"
+#include "cam_smmu_api.h"
+#include "cam_soc_api.h"
+
+#define JPEG_8974_V1 0x10000000
+#define JPEG_8974_V2 0x10010000
+#define JPEG_8994 0x10020000
+#define JPEG_CLK_MAX 16
+#define JPEG_REGULATOR_MAX 3
+
+enum msm_jpeg_state {
+ MSM_JPEG_INIT,
+ MSM_JPEG_RESET,
+ MSM_JPEG_EXECUTING,
+ MSM_JPEG_STOPPED,
+ MSM_JPEG_IDLE
+};
+
+enum msm_jpeg_core_type {
+ MSM_JPEG_CORE_CODEC,
+ MSM_JPEG_CORE_DMA
+};
+
+struct msm_jpeg_q {
+ char const *name;
+ struct list_head q;
+ spinlock_t lck;
+ wait_queue_head_t wait;
+ int unblck;
+};
+
+struct msm_jpeg_q_entry {
+ struct list_head list;
+ void *data;
+};
+
+struct msm_jpeg_device {
+ struct platform_device *pdev;
+ struct resource *jpeg_irq_res;
+ void *base;
+ void *vbif_base;
+ struct clk **jpeg_clk;
+ struct msm_cam_clk_info *jpeg_clk_info;
+ size_t num_clk;
+ int num_reg;
+ struct msm_cam_regulator *jpeg_vdd;
+ uint32_t hw_version;
+
+ struct device *device;
+ struct cdev cdev;
+ struct mutex lock;
+ char open_count;
+ uint8_t op_mode;
+
+ /* Flag to store the jpeg bus vote state
+ */
+ int jpeg_bus_vote;
+
+ /* event queue including frame done & err indications
+ */
+ struct msm_jpeg_q evt_q;
+
+ /* output return queue
+ */
+ struct msm_jpeg_q output_rtn_q;
+
+ /* output buf queue
+ */
+ struct msm_jpeg_q output_buf_q;
+
+ /* input return queue
+ */
+ struct msm_jpeg_q input_rtn_q;
+
+ /* input buf queue
+ */
+ struct msm_jpeg_q input_buf_q;
+
+ struct v4l2_subdev subdev;
+
+ struct class *msm_jpeg_class;
+
+ dev_t msm_jpeg_devno;
+
+ /* iommu domain and context */
+ int idx;
+ int iommu_hdl;
+ int decode_flag;
+ void *jpeg_vbif;
+ int release_buf;
+ struct msm_jpeg_hw_pingpong fe_pingpong_buf;
+ struct msm_jpeg_hw_pingpong we_pingpong_buf;
+ int we_pingpong_index;
+ int reset_done_ack;
+ spinlock_t reset_lock;
+ wait_queue_head_t reset_wait;
+ uint32_t res_size;
+ enum msm_jpeg_state state;
+ enum msm_jpeg_core_type core_type;
+ enum cam_bus_client bus_client;
+};
+
+int __msm_jpeg_open(struct msm_jpeg_device *pgmn_dev);
+int __msm_jpeg_release(struct msm_jpeg_device *pgmn_dev);
+
+long __msm_jpeg_ioctl(struct msm_jpeg_device *pgmn_dev,
+ unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_COMPAT
+long __msm_jpeg_compat_ioctl(struct msm_jpeg_device *pgmn_dev,
+ unsigned int cmd, unsigned long arg);
+#endif
+
+int __msm_jpeg_init(struct msm_jpeg_device *pgmn_dev);
+int __msm_jpeg_exit(struct msm_jpeg_device *pgmn_dev);
+
+#endif /* MSM_JPEG_SYNC_H */
diff --git a/drivers/media/platform/msm/ais/jpeg_dma/Makefile b/drivers/media/platform/msm/ais/jpeg_dma/Makefile
new file mode 100644
index 000000000000..a3311efa6ab1
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_dma/Makefile
@@ -0,0 +1,4 @@
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+obj-$(CONFIG_MSM_AIS_JPEGDMA) += msm_jpeg_dma_dev.o msm_jpeg_dma_hw.o
diff --git a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c
new file mode 100644
index 000000000000..76fe7dfa68cb
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c
@@ -0,0 +1,1380 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <linux/delay.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/msm_jpeg_dma.h>
+
+#include "msm_jpeg_dma_dev.h"
+#include "msm_jpeg_dma_hw.h"
+#include "cam_hw_ops.h"
+
+#define MSM_JPEGDMA_DRV_NAME "msm_jpegdma"
+
+/* Jpeg dma stream off timeout */
+#define MSM_JPEGDMA_STREAM_OFF_TIMEOUT_MS 500
+
+/* Jpeg dma formats lookup table */
+static struct msm_jpegdma_format formats[] = {
+ {
+ .name = "Greyscale",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .depth = 8,
+ .num_planes = 1,
+ .colplane_h = 1,
+ .colplane_v = 1,
+ .h_align = 1,
+ .v_align = 1,
+ .planes[0] = JPEGDMA_PLANE_TYPE_Y,
+ },
+ {
+ .name = "Y/CbCr 4:2:0",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .num_planes = 2,
+ .colplane_h = 1,
+ .colplane_v = 2,
+ .h_align = 2,
+ .v_align = 2,
+ .planes[0] = JPEGDMA_PLANE_TYPE_Y,
+ .planes[1] = JPEGDMA_PLANE_TYPE_CBCR,
+ },
+ {
+ .name = "Y/CrCb 4:2:0",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .depth = 12,
+ .num_planes = 2,
+ .colplane_h = 1,
+ .colplane_v = 2,
+ .h_align = 2,
+ .v_align = 2,
+ .planes[0] = JPEGDMA_PLANE_TYPE_Y,
+ .planes[1] = JPEGDMA_PLANE_TYPE_CBCR,
+ },
+ {
+ .name = "YUV 4:2:0 planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .num_planes = 3,
+ .colplane_h = 2,
+ .colplane_v = 2,
+ .h_align = 2,
+ .v_align = 2,
+ .planes[0] = JPEGDMA_PLANE_TYPE_Y,
+ .planes[1] = JPEGDMA_PLANE_TYPE_CR,
+ .planes[2] = JPEGDMA_PLANE_TYPE_CB,
+ },
+};
+
+/*
+ * msm_jpegdma_ctx_from_fh - Get dma context from v4l2 fh.
+ * @fh: Pointer to v4l2 fh.
+ */
+static inline struct jpegdma_ctx *msm_jpegdma_ctx_from_fh(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct jpegdma_ctx, fh);
+}
+
+/*
+ * msm_jpegdma_get_next_config_idx - get next configuration index.
+ * @ctx: Pointer to jpegdma context.
+ */
+static inline int msm_jpegdma_get_next_config_idx(struct jpegdma_ctx *ctx)
+{
+ return (ctx->config_idx + 1) % MSM_JPEGDMA_MAX_CONFIGS;
+}
+
+/*
+ * msm_jpegdma_schedule_next_config - Schedule next configuration.
+ * @ctx: Pointer to jpegdma context.
+ */
+static inline void msm_jpegdma_schedule_next_config(struct jpegdma_ctx *ctx)
+{
+ ctx->config_idx = (ctx->config_idx + 1) % MSM_JPEGDMA_MAX_CONFIGS;
+}
+
+/*
+ * msm_jpegdma_get_format_idx - Get jpeg dma format lookup index.
+ * @ctx: Pointer to dma ctx.
+ * @f: v4l2 format.
+ */
+static int msm_jpegdma_get_format_idx(struct jpegdma_ctx *ctx,
+ struct v4l2_format *f)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++)
+ if (formats[i].fourcc == f->fmt.pix.pixelformat)
+ break;
+
+ if (i == ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ return i;
+}
+
+/*
+ * msm_jpegdma_fill_size_from_ctx - Fill size configuration from dma ctx.
+ * @ctx: Pointer to dma ctx.
+ * @size: Size config.
+ */
+static void msm_jpegdma_fill_size_from_ctx(struct jpegdma_ctx *ctx,
+ struct msm_jpegdma_size_config *size)
+{
+
+ size->in_size.top = ctx->crop.top;
+ size->in_size.left = ctx->crop.left;
+ size->in_size.width = ctx->crop.width;
+ size->in_size.height = ctx->crop.height;
+ size->in_size.scanline = ctx->format_out.fmt.pix.height;
+ size->in_size.stride = ctx->format_out.fmt.pix.bytesperline;
+
+ size->out_size.top = 0;
+ size->out_size.left = 0;
+ size->out_size.width = ctx->format_cap.fmt.pix.width;
+ size->out_size.height = ctx->format_cap.fmt.pix.height;
+ size->out_size.scanline = ctx->format_cap.fmt.pix.height;
+ size->out_size.stride = ctx->format_cap.fmt.pix.bytesperline;
+}
+
+/*
+ * msm_jpegdma_align_format - Align jpeg dma format.
+ * @f: v4l2 format.
+ * @format_idx: format lookup index.
+ */
+static void msm_jpegdma_align_format(struct v4l2_format *f, int format_idx)
+{
+ unsigned int size_image;
+ int i;
+
+ if (f->fmt.pix.width > MSM_JPEGDMA_MAX_WIDTH)
+ f->fmt.pix.width = MSM_JPEGDMA_MAX_WIDTH;
+
+ if (f->fmt.pix.width < MSM_JPEGDMA_MIN_WIDTH)
+ f->fmt.pix.width = MSM_JPEGDMA_MIN_WIDTH;
+
+ if (f->fmt.pix.height > MSM_JPEGDMA_MAX_HEIGHT)
+ f->fmt.pix.height = MSM_JPEGDMA_MAX_HEIGHT;
+
+ if (f->fmt.pix.height < MSM_JPEGDMA_MIN_HEIGHT)
+ f->fmt.pix.height = MSM_JPEGDMA_MIN_HEIGHT;
+
+ if (formats[format_idx].h_align > 1)
+ f->fmt.pix.width &= ~(formats[format_idx].h_align - 1);
+
+ if (formats[format_idx].v_align > 1)
+ f->fmt.pix.height &= ~(formats[format_idx].v_align - 1);
+
+ if (f->fmt.pix.bytesperline < f->fmt.pix.width)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline,
+ MSM_JPEGDMA_STRIDE_ALIGN);
+
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+ size_image = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+ if (formats[format_idx].num_planes > 1)
+ for (i = 1; i < formats[format_idx].num_planes; i++)
+ size_image += (f->fmt.pix.bytesperline *
+ (f->fmt.pix.height / formats[format_idx].colplane_v));
+
+ f->fmt.pix.sizeimage = size_image;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+}
+
+/*
+ * msm_jpegdma_config_ok - Check if jpeg dma format is ok for processing.
+ * @ctx: Pointer to dma ctx.
+ */
+static int msm_jpegdma_config_ok(struct jpegdma_ctx *ctx)
+{
+ int ret;
+ int cap_idx;
+ int out_idx;
+ struct msm_jpegdma_size_config size;
+
+ cap_idx = msm_jpegdma_get_format_idx(ctx, &ctx->format_cap);
+ if (cap_idx < 0)
+ return 0;
+
+ out_idx = msm_jpegdma_get_format_idx(ctx, &ctx->format_out);
+ if (out_idx < 0)
+ return 0;
+
+ /* jpeg dma can not convert formats */
+ if (cap_idx != out_idx)
+ return 0;
+
+ msm_jpegdma_fill_size_from_ctx(ctx, &size);
+
+ size.format = formats[ctx->format_idx];
+
+ ret = msm_jpegdma_hw_check_config(ctx->jdma_device, &size);
+ if (ret < 0)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * msm_jpegdma_update_hw_config - Update dma hw configuration.
+ * @ctx: Pointer to dma ctx.
+ */
+static int msm_jpegdma_update_hw_config(struct jpegdma_ctx *ctx)
+{
+ struct msm_jpegdma_size_config size;
+ int idx;
+ int ret = 0;
+
+ if (msm_jpegdma_config_ok(ctx)) {
+ size.fps = ctx->timeperframe.denominator /
+ ctx->timeperframe.numerator;
+
+ size.format = formats[ctx->format_idx];
+
+ msm_jpegdma_fill_size_from_ctx(ctx, &size);
+
+ idx = msm_jpegdma_get_next_config_idx(ctx);
+
+ ret = msm_jpegdma_hw_set_config(ctx->jdma_device,
+ &size, &ctx->plane_config[idx]);
+ if (ret < 0)
+ dev_err(ctx->jdma_device->dev, "Can not get hw cfg\n");
+ else
+ ctx->pending_config = 1;
+ }
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_queue_setup - vb2_ops queue_setup callback.
+ * @q: Pointer to vb2 queue struct.
+ * @fmt: Pointer to v4l2 format struct (NULL is valid argument).
+ * @num_buffers: Pointer to the number of buffers requested.
+ * @num_planes: Pointer to number of planes requested.
+ * @sizes: Array containing sizes of planes.
+ * @alloc_ctxs: Array of allocated contexts for each plane.
+ */
+static int msm_jpegdma_queue_setup(struct vb2_queue *q,
+ const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct jpegdma_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (fmt == NULL) {
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ sizes[0] = ctx->format_out.fmt.pix.sizeimage;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ sizes[0] = ctx->format_cap.fmt.pix.sizeimage;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ sizes[0] = fmt->fmt.pix.sizeimage;
+ }
+
+ *num_planes = 1;
+ alloc_ctxs[0] = ctx->jdma_device;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_buf_queue - vb2_ops buf_queue callback.
+ * @vb: Pointer to vb2 buffer struct.
+ */
+static void msm_jpegdma_buf_queue(struct vb2_buffer *vb)
+{
+ struct jpegdma_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+/*
+ * msm_jpegdma_start_streaming - vb2_ops start_streaming callback.
+ * @q: Pointer to vb2 queue struct.
+ * @count: Number of buffer queued before stream on call.
+ */
+static int msm_jpegdma_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct jpegdma_ctx *ctx = vb2_get_drv_priv(q);
+ int ret;
+
+ ret = msm_jpegdma_hw_get(ctx->jdma_device);
+ if (ret < 0) {
+ dev_err(ctx->jdma_device->dev, "Fail to get dma hw\n");
+ return ret;
+ }
+ if (!atomic_read(&ctx->active)) {
+ ret = msm_jpegdma_update_hw_config(ctx);
+  if (ret < 0) {
+   dev_err(ctx->jdma_device->dev, "Fail to configure hw\n");
+   msm_jpegdma_hw_put(ctx->jdma_device);
+   return ret;
+  }
+ atomic_set(&ctx->active, 1);
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_stop_streaming - vb2_ops stop_streaming callback.
+ * @q: Pointer to vb2 queue struct.
+ */
+static void msm_jpegdma_stop_streaming(struct vb2_queue *q)
+{
+ struct jpegdma_ctx *ctx = vb2_get_drv_priv(q);
+ unsigned long time;
+ int ret = 0;
+
+ atomic_set(&ctx->active, 0);
+
+ time = wait_for_completion_timeout(&ctx->completion,
+ msecs_to_jiffies(MSM_JPEGDMA_STREAM_OFF_TIMEOUT_MS));
+ if (!time) {
+ dev_err(ctx->jdma_device->dev, "Ctx wait timeout\n");
+ ret = -ETIME;
+ }
+ msm_jpegdma_hw_put(ctx->jdma_device);
+}
+
+/* Videobuf2 queue callbacks. */
+static struct vb2_ops msm_jpegdma_vb2_q_ops = {
+ .queue_setup = msm_jpegdma_queue_setup,
+ .buf_queue = msm_jpegdma_buf_queue,
+ .start_streaming = msm_jpegdma_start_streaming,
+ .stop_streaming = msm_jpegdma_stop_streaming,
+};
+
+/*
+ * msm_jpegdma_get_userptr - Map and get a buffer handle for a userptr buffer.
+ * @alloc_ctx: Context allocated in queue_setup.
+ * @vaddr: Virtual address passed from userspace (in our case an ion fd).
+ * @size: Size of the buffer.
+ * @write: True if the buffer will be used for writing the data.
+ */
+static void *msm_jpegdma_get_userptr(void *alloc_ctx,
+ unsigned long vaddr, unsigned long size, int write)
+{
+ struct msm_jpegdma_device *dma = alloc_ctx;
+ struct msm_jpegdma_buf_handle *buf;
+ int ret;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = msm_jpegdma_hw_map_buffer(dma, vaddr, buf);
+ if (ret < 0 || buf->size < size)
+ goto error;
+
+ return buf;
+error:
+ kzfree(buf);
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * msm_jpegdma_put_userptr - Unmap and free the buffer handle.
+ * @buf_priv: Buffer handle allocated in the get_userptr callback.
+ */
+static void msm_jpegdma_put_userptr(void *buf_priv)
+{
+ if (IS_ERR_OR_NULL(buf_priv))
+ return;
+
+ msm_jpegdma_hw_unmap_buffer(buf_priv);
+
+ kzfree(buf_priv);
+}
+
+/* Videobuf2 memory callbacks. */
+static struct vb2_mem_ops msm_jpegdma_vb2_mem_ops = {
+ .get_userptr = msm_jpegdma_get_userptr,
+ .put_userptr = msm_jpegdma_put_userptr,
+};
+
+/*
+ * msm_jpegdma_queue_init - Source and destination queue init callback.
+ * @priv: Pointer to jpegdma ctx.
+ * @src_vq: vb2 source queue.
+ * @dst_vq: vb2 destination queue.
+ */
+static int msm_jpegdma_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct jpegdma_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->mem_ops = &msm_jpegdma_vb2_mem_ops;
+ src_vq->ops = &msm_jpegdma_vb2_q_ops;
+ src_vq->buf_struct_size = sizeof(struct vb2_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret) {
+ dev_err(ctx->jdma_device->dev, "Can not init src queue\n");
+ return ret;
+ }
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->mem_ops = &msm_jpegdma_vb2_mem_ops;
+ dst_vq->ops = &msm_jpegdma_vb2_q_ops;
+ dst_vq->buf_struct_size = sizeof(struct vb2_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ dev_err(ctx->jdma_device->dev, "Can not init dst queue\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_open - Fd device open method.
+ * @file: Pointer to file struct.
+ */
+static int msm_jpegdma_open(struct file *file)
+{
+ struct msm_jpegdma_device *device = video_drvdata(file);
+ struct video_device *video = video_devdata(file);
+ struct jpegdma_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mutex_init(&ctx->lock);
+ ctx->jdma_device = device;
+ dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma open\n");
+ /* Set ctx defaults */
+ ctx->timeperframe.numerator = 1;
+ ctx->timeperframe.denominator = MSM_JPEGDMA_DEFAULT_FPS;
+ atomic_set(&ctx->active, 0);
+
+ v4l2_fh_init(&ctx->fh, video);
+
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(device->m2m_dev,
+ ctx, msm_jpegdma_queue_init);
+ if (IS_ERR_OR_NULL(ctx->m2m_ctx)) {
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_m2m_init;
+ }
+ ret = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_JPEG,
+ CAM_AHB_SVS_VOTE);
+ if (ret < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto ahb_vote_fail;
+ }
+ init_completion(&ctx->completion);
+ complete_all(&ctx->completion);
+ dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma open success\n");
+
+ return 0;
+
+ahb_vote_fail:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+error_m2m_init:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+/*
+ * msm_jpegdma_release - Fd device release method.
+ * @file: Pointer to file struct.
+ */
+static int msm_jpegdma_release(struct file *file)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(file->private_data);
+
+ /* release all the resources */
+ msm_jpegdma_hw_put(ctx->jdma_device);
+ atomic_set(&ctx->active, 0);
+ complete_all(&ctx->completion);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_JPEG,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_poll - Fd device poll method.
+ * @file: Pointer to file struct.
+ * @wait: Pointer to poll table struct.
+ */
+static unsigned int msm_jpegdma_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(file->private_data);
+
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+/* Dma device file operations callbacks */
+static const struct v4l2_file_operations fd_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_jpegdma_open,
+ .release = msm_jpegdma_release,
+ .poll = msm_jpegdma_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+/*
+ * msm_jpegdma_querycap - V4l2 ioctl query capability handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @cap: Pointer to v4l2_capability struct need to be filled.
+ */
+static int msm_jpegdma_querycap(struct file *file,
+ void *fh, struct v4l2_capability *cap)
+{
+ cap->bus_info[0] = 0;
+ strlcpy(cap->driver, MSM_JPEGDMA_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, MSM_JPEGDMA_DRV_NAME, sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_CAPTURE;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_enum_fmt_vid_cap - V4l2 ioctl enumerate capture format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_fmtdesc struct need to be filled.
+ */
+static int msm_jpegdma_enum_fmt_vid_cap(struct file *file,
+ void *fh, struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].fourcc;
+ strlcpy(f->description, formats[f->index].name,
+ sizeof(f->description));
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_enum_fmt_vid_out - V4l2 ioctl enumerate output format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_fmtdesc struct need to be filled.
+ */
+static int msm_jpegdma_enum_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].fourcc;
+ strlcpy(f->description, formats[f->index].name,
+ sizeof(f->description));
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_g_fmt_cap - V4l2 ioctl get capture format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct need to be filled.
+ */
+static int msm_jpegdma_g_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ *f = ctx->format_cap;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_g_fmt_out - V4l2 ioctl get output format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct need to be filled.
+ */
+static int msm_jpegdma_g_fmt_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ *f = ctx->format_out;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_try_fmt_vid_cap - V4l2 ioctl try capture format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_jpegdma_try_fmt_vid_cap(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ msm_jpegdma_align_format(f, ctx->format_idx);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_try_fmt_vid_out - V4l2 ioctl try output format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_jpegdma_try_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ msm_jpegdma_align_format(f, ctx->format_idx);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_s_fmt_vid_cap - V4l2 ioctl set capture format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_jpegdma_s_fmt_vid_cap(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret;
+
+ ret = msm_jpegdma_get_format_idx(ctx, f);
+ if (ret < 0)
+ return -EINVAL;
+
+ ctx->format_idx = ret;
+
+ msm_jpegdma_align_format(f, ctx->format_idx);
+
+ /* Initialize crop to the full output frame */
+ ctx->crop.top = 0;
+ ctx->crop.left = 0;
+ ctx->crop.width = ctx->format_out.fmt.pix.width;
+ ctx->crop.height = ctx->format_out.fmt.pix.height;
+
+ ctx->format_cap = *f;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_s_fmt_vid_out - V4l2 ioctl set output format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_jpegdma_s_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret;
+
+ ret = msm_jpegdma_get_format_idx(ctx, f);
+ if (ret < 0)
+ return -EINVAL;
+
+ ctx->format_idx = ret;
+
+ msm_jpegdma_align_format(f, ctx->format_idx);
+
+ /* Initialize crop */
+ ctx->crop.top = 0;
+ ctx->crop.left = 0;
+ ctx->crop.width = f->fmt.pix.width;
+ ctx->crop.height = f->fmt.pix.height;
+
+ ctx->format_out = *f;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_reqbufs - V4l2 ioctl request buffers handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @req: Pointer to v4l2_requestbuffer struct.
+ */
+static int msm_jpegdma_reqbufs(struct file *file,
+ void *fh, struct v4l2_requestbuffers *req)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req);
+}
+
+/*
+ * msm_jpegdma_qbuf - V4l2 ioctl queue buffer handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf: Pointer to v4l2_buffer struct.
+ */
+static int msm_jpegdma_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret;
+
+ mutex_lock(&ctx->lock);
+
+ ret = v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+ if (ret < 0)
+ dev_err(ctx->jdma_device->dev, "QBuf fail\n");
+
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_dqbuf - V4l2 ioctl dequeue buffer handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf: Pointer to v4l2_buffer struct.
+ */
+static int msm_jpegdma_dqbuf(struct file *file,
+ void *fh, struct v4l2_buffer *buf)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+/*
+ * msm_jpegdma_streamon - V4l2 ioctl stream on handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf_type: V4l2 buffer type.
+ */
+static int msm_jpegdma_streamon(struct file *file,
+ void *fh, enum v4l2_buf_type buf_type)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret;
+
+ if (!msm_jpegdma_config_ok(ctx))
+ return -EINVAL;
+
+ mutex_lock(&ctx->lock);
+
+ ret = v4l2_m2m_streamon(file, ctx->m2m_ctx, buf_type);
+ if (ret < 0)
+ dev_err(ctx->jdma_device->dev, "Stream on fail\n");
+
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_streamoff - V4l2 ioctl stream off handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf_type: V4l2 buffer type.
+ */
+static int msm_jpegdma_streamoff(struct file *file,
+ void *fh, enum v4l2_buf_type buf_type)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret;
+
+ ret = v4l2_m2m_streamoff(file, ctx->m2m_ctx, buf_type);
+ if (ret < 0)
+ dev_err(ctx->jdma_device->dev, "Stream off fails\n");
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_cropcap - V4l2 ioctl crop capabilities.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_cropcap struct need to be set.
+ */
+static int msm_jpegdma_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *a)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ struct v4l2_format *format;
+
+ switch (a->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ format = &ctx->format_out;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ format = &ctx->format_cap;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ a->bounds.top = 0;
+ a->bounds.left = 0;
+ a->bounds.width = format->fmt.pix.width;
+ a->bounds.height = format->fmt.pix.height;
+
+ a->defrect = ctx->crop;
+
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_g_crop - V4l2 ioctl get crop.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @crop: Pointer to v4l2_crop struct need to be set.
+ */
+static int msm_jpegdma_g_crop(struct file *file, void *fh,
+ struct v4l2_crop *crop)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ switch (crop->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ crop->c = ctx->crop;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ crop->c.left = 0;
+ crop->c.top = 0;
+ crop->c.width = ctx->format_cap.fmt.pix.width;
+ crop->c.height = ctx->format_cap.fmt.pix.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * msm_jpegdma_s_crop - V4l2 ioctl set crop.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @crop: Pointer to v4l2_crop struct need to be set.
+ */
+static int msm_jpegdma_s_crop(struct file *file, void *fh,
+ const struct v4l2_crop *crop)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret = 0;
+
+ /* Crop is supported only for input buffers */
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (crop->c.left < 0 || crop->c.top < 0 ||
+ crop->c.height < 0 || crop->c.width < 0)
+ return -EINVAL;
+
+ /* Upscale is not supported */
+ if (crop->c.width < ctx->format_cap.fmt.pix.width)
+ return -EINVAL;
+
+ if (crop->c.height < ctx->format_cap.fmt.pix.height)
+ return -EINVAL;
+
+ if (crop->c.width + crop->c.left > ctx->format_out.fmt.pix.width)
+ return -EINVAL;
+
+ if (crop->c.height + crop->c.top > ctx->format_out.fmt.pix.height)
+ return -EINVAL;
+
+ if (crop->c.width % formats[ctx->format_idx].h_align)
+ return -EINVAL;
+
+ if (crop->c.left % formats[ctx->format_idx].h_align)
+ return -EINVAL;
+
+ if (crop->c.height % formats[ctx->format_idx].v_align)
+ return -EINVAL;
+
+ if (crop->c.top % formats[ctx->format_idx].v_align)
+ return -EINVAL;
+
+ mutex_lock(&ctx->lock);
+
+ ctx->crop = crop->c;
+ if (atomic_read(&ctx->active))
+ ret = msm_jpegdma_update_hw_config(ctx);
+
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_g_parm - V4l2 ioctl get parm.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_streamparm struct need to be filled.
+ */
+static int msm_jpegdma_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ /* Get param is supported only for input buffers */
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ a->parm.output.capability = 0;
+ a->parm.output.extendedmode = 0;
+ a->parm.output.outputmode = 0;
+ a->parm.output.writebuffers = 0;
+ a->parm.output.timeperframe = ctx->timeperframe;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_s_parm - V4l2 ioctl set parm.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_streamparm struct need to be set.
+ */
+static int msm_jpegdma_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ /* Set param is supported only for input buffers */
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (!a->parm.output.timeperframe.numerator ||
+ !a->parm.output.timeperframe.denominator)
+ return -EINVAL;
+
+ /* Frame rate change is not supported while streaming */
+ if (atomic_read(&ctx->active))
+ return -EINVAL;
+
+ ctx->timeperframe = a->parm.output.timeperframe;
+ return 0;
+}
+
+/* V4l2 ioctl handlers */
+static const struct v4l2_ioctl_ops fd_ioctl_ops = {
+ .vidioc_querycap = msm_jpegdma_querycap,
+ .vidioc_enum_fmt_vid_out = msm_jpegdma_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = msm_jpegdma_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_out = msm_jpegdma_g_fmt_out,
+ .vidioc_g_fmt_vid_cap = msm_jpegdma_g_fmt_cap,
+ .vidioc_try_fmt_vid_out = msm_jpegdma_try_fmt_vid_out,
+ .vidioc_try_fmt_vid_cap = msm_jpegdma_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_out = msm_jpegdma_s_fmt_vid_out,
+ .vidioc_s_fmt_vid_cap = msm_jpegdma_s_fmt_vid_cap,
+ .vidioc_reqbufs = msm_jpegdma_reqbufs,
+ .vidioc_qbuf = msm_jpegdma_qbuf,
+ .vidioc_dqbuf = msm_jpegdma_dqbuf,
+ .vidioc_streamon = msm_jpegdma_streamon,
+ .vidioc_streamoff = msm_jpegdma_streamoff,
+ .vidioc_cropcap = msm_jpegdma_cropcap,
+ .vidioc_g_crop = msm_jpegdma_g_crop,
+ .vidioc_s_crop = msm_jpegdma_s_crop,
+ .vidioc_g_parm = msm_jpegdma_g_parm,
+ .vidioc_s_parm = msm_jpegdma_s_parm,
+};
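+
+/*
+ * Illustrative only, not part of the driver: a typical user-space client
+ * of this mem2mem node sets formats with VIDIOC_S_FMT on the OUTPUT and
+ * CAPTURE queues, optionally restricts the source with VIDIOC_S_CROP on
+ * the OUTPUT queue, allocates and queues buffers with VIDIOC_REQBUFS and
+ * VIDIOC_QBUF, starts both queues with VIDIOC_STREAMON, and collects each
+ * processed frame with VIDIOC_DQBUF on the CAPTURE queue.
+ */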
+
+/*
+ * msm_jpegdma_process_buffers - Start dma processing.
+ * @ctx: Pointer dma context.
+ * @src_buf: Pointer to Vb2 source buffer.
+ * @dst_buf: Pointer to Vb2 destination buffer.
+ */
+static void msm_jpegdma_process_buffers(struct jpegdma_ctx *ctx,
+ struct vb2_buffer *src_buf, struct vb2_buffer *dst_buf)
+{
+ struct msm_jpegdma_buf_handle *buf_handle;
+ struct msm_jpegdma_addr addr;
+ int plane_idx;
+ int config_idx;
+
+ buf_handle = dst_buf->planes[0].mem_priv;
+ addr.out_addr = buf_handle->addr;
+
+ buf_handle = src_buf->planes[0].mem_priv;
+ addr.in_addr = buf_handle->addr;
+
+ plane_idx = ctx->plane_idx;
+ config_idx = ctx->config_idx;
+ msm_jpegdma_hw_start(ctx->jdma_device, &addr,
+ &ctx->plane_config[config_idx].plane[plane_idx],
+ &ctx->plane_config[config_idx].speed);
+}
+
+/*
+ * msm_jpegdma_device_run - Dma device run.
+ * @priv: Pointer dma context.
+ */
+static void msm_jpegdma_device_run(void *priv)
+{
+ struct vb2_buffer *src_buf;
+ struct vb2_buffer *dst_buf;
+ struct jpegdma_ctx *ctx = priv;
+
+ dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma device run E\n");
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf == NULL || dst_buf == NULL) {
+ dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
+ return;
+ }
+
+ if (ctx->pending_config) {
+ msm_jpegdma_schedule_next_config(ctx);
+ ctx->pending_config = 0;
+ }
+
+ msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
+ dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma device run X\n");
+}
+
+/*
+ * msm_jpegdma_job_abort - Dma abort job.
+ * @priv: Pointer dma context.
+ */
+static void msm_jpegdma_job_abort(void *priv)
+{
+ struct jpegdma_ctx *ctx = priv;
+
+ msm_jpegdma_hw_abort(ctx->jdma_device);
+ v4l2_m2m_job_finish(ctx->jdma_device->m2m_dev, ctx->m2m_ctx);
+}
+
+/*
+ * msm_jpegdma_job_ready - Dma check if job is ready
+ * @priv: Pointer dma context.
+ */
+static int msm_jpegdma_job_ready(void *priv)
+{
+ struct jpegdma_ctx *ctx = priv;
+
+ if (atomic_read(&ctx->active)) {
+ init_completion(&ctx->completion);
+ return 1;
+ }
+ return 0;
+}
+
+/* V4l2 mem2mem handlers */
+static struct v4l2_m2m_ops msm_jpegdma_m2m_ops = {
+ .device_run = msm_jpegdma_device_run,
+ .job_abort = msm_jpegdma_job_abort,
+ .job_ready = msm_jpegdma_job_ready,
+};
+
+/*
+ * msm_jpegdma_isr_processing_done - Invoked by dma_hw when processing is done.
+ * @dma: Pointer dma device.
+ */
+void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma)
+{
+ struct vb2_buffer *src_buf;
+ struct vb2_buffer *dst_buf;
+ struct jpegdma_ctx *ctx;
+
+ mutex_lock(&dma->lock);
+ ctx = v4l2_m2m_get_curr_priv(dma->m2m_dev);
+ if (ctx) {
+ mutex_lock(&ctx->lock);
+ ctx->plane_idx++;
+ if (ctx->plane_idx >= formats[ctx->format_idx].num_planes) {
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ if (src_buf == NULL || dst_buf == NULL) {
+ dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
+ mutex_unlock(&ctx->lock);
+ mutex_unlock(&dma->lock);
+ return;
+ }
+ complete_all(&ctx->completion);
+ ctx->plane_idx = 0;
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(ctx->jdma_device->m2m_dev,
+ ctx->m2m_ctx);
+ } else {
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf == NULL || dst_buf == NULL) {
+ dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
+ mutex_unlock(&ctx->lock);
+ mutex_unlock(&dma->lock);
+ return;
+ }
+ msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
+ }
+ mutex_unlock(&ctx->lock);
+ }
+ mutex_unlock(&dma->lock);
+}
+
+/*
+ * jpegdma_probe - Dma device probe method.
+ * @pdev: Pointer Dma platform device.
+ */
+static int jpegdma_probe(struct platform_device *pdev)
+{
+ struct msm_jpegdma_device *jpegdma;
+ int ret;
+
+ dev_dbg(&pdev->dev, "jpeg v4l2 DMA probed\n");
+ /* Jpeg dma device struct */
+ jpegdma = kzalloc(sizeof(struct msm_jpegdma_device), GFP_KERNEL);
+ if (!jpegdma)
+ return -ENOMEM;
+
+ mutex_init(&jpegdma->lock);
+
+ init_completion(&jpegdma->hw_reset_completion);
+ init_completion(&jpegdma->hw_halt_completion);
+ jpegdma->dev = &pdev->dev;
+ jpegdma->pdev = pdev;
+
+ if (pdev->dev.of_node)
+ of_property_read_u32(pdev->dev.of_node, "cell-index",
+ &pdev->id);
+
+ /* Get resources */
+ ret = msm_jpegdma_hw_get_mem_resources(pdev, jpegdma);
+ if (ret < 0)
+ goto error_mem_resources;
+
+ /* get all the regulators */
+ ret = msm_camera_get_regulator_info(pdev, &jpegdma->dma_vdd,
+ &jpegdma->num_reg);
+ if (ret < 0)
+ goto error_get_regulators;
+
+ /* get all the clocks */
+ ret = msm_camera_get_clk_info(pdev, &jpegdma->jpeg_clk_info,
+ &jpegdma->clk, &jpegdma->num_clk);
+ if (ret < 0)
+ goto error_get_clocks;
+
+ ret = msm_jpegdma_hw_get_qos(jpegdma);
+ if (ret < 0)
+ goto error_qos_get;
+
+ ret = msm_jpegdma_hw_get_vbif(jpegdma);
+ if (ret < 0)
+ goto error_vbif_get;
+
+ ret = msm_jpegdma_hw_get_prefetch(jpegdma);
+ if (ret < 0)
+ goto error_prefetch_get;
+
+ /* get the irq resource */
+ jpegdma->irq = msm_camera_get_irq(pdev, "jpeg");
+ if (!jpegdma->irq) {
+ ret = -ENODEV;
+ goto error_hw_get_irq;
+ }
+
+ switch (pdev->id) {
+ case 3:
+ jpegdma->bus_client = CAM_BUS_CLIENT_JPEG_DMA;
+ break;
+ default:
+ pr_err("%s: invalid cell id: %d\n",
+ __func__, pdev->id);
+ ret = -EINVAL;
+ goto error_reg_bus;
+ }
+
+ /* register bus client */
+ ret = msm_camera_register_bus_client(pdev,
+ jpegdma->bus_client);
+ if (ret < 0) {
+ pr_err("Fail to register bus client\n");
+ ret = -EINVAL;
+ goto error_reg_bus;
+ }
+
+ ret = msm_jpegdma_hw_get_capabilities(jpegdma);
+ if (ret < 0)
+ goto error_hw_get_cap;
+
+ /* mem2mem device */
+ jpegdma->m2m_dev = v4l2_m2m_init(&msm_jpegdma_m2m_ops);
+ if (IS_ERR(jpegdma->m2m_dev)) {
+ dev_err(&pdev->dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(jpegdma->m2m_dev);
+ goto error_m2m_init;
+ }
+
+ /* v4l2 device */
+ ret = v4l2_device_register(&pdev->dev, &jpegdma->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ goto error_v4l2_register;
+ }
+
+ jpegdma->video.fops = &fd_fops;
+ jpegdma->video.ioctl_ops = &fd_ioctl_ops;
+ jpegdma->video.minor = -1;
+ jpegdma->video.release = video_device_release;
+ jpegdma->video.v4l2_dev = &jpegdma->v4l2_dev;
+ jpegdma->video.vfl_dir = VFL_DIR_M2M;
+ jpegdma->video.vfl_type = VFL_TYPE_GRABBER;
+ strlcpy(jpegdma->video.name, MSM_JPEGDMA_DRV_NAME,
+ sizeof(jpegdma->video.name));
+
+ ret = video_register_device(&jpegdma->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register video device\n");
+ goto error_video_register;
+ }
+
+ video_set_drvdata(&jpegdma->video, jpegdma);
+
+ platform_set_drvdata(pdev, jpegdma);
+
+ dev_dbg(&pdev->dev, "jpeg v4l2 DMA probe success\n");
+ return 0;
+
+error_video_register:
+ v4l2_device_unregister(&jpegdma->v4l2_dev);
+error_v4l2_register:
+ v4l2_m2m_release(jpegdma->m2m_dev);
+error_m2m_init:
+error_hw_get_cap:
+ msm_camera_unregister_bus_client(jpegdma->bus_client);
+error_reg_bus:
+error_hw_get_irq:
+ msm_jpegdma_hw_put_prefetch(jpegdma);
+error_prefetch_get:
+ msm_jpegdma_hw_put_vbif(jpegdma);
+error_vbif_get:
+ msm_jpegdma_hw_put_qos(jpegdma);
+error_qos_get:
+ msm_camera_put_clk_info(pdev, &jpegdma->jpeg_clk_info,
+ &jpegdma->clk, jpegdma->num_clk);
+error_get_clocks:
+ msm_camera_put_regulators(pdev, &jpegdma->dma_vdd,
+ jpegdma->num_reg);
+error_get_regulators:
+ msm_jpegdma_hw_release_mem_resources(jpegdma);
+error_mem_resources:
+ kfree(jpegdma);
+ return ret;
+}
+
+/*
+ * jpegdma_device_remove - Jpegdma device remove method.
+ * @pdev: Pointer jpegdma platform device.
+ */
+static int jpegdma_device_remove(struct platform_device *pdev)
+{
+ struct msm_jpegdma_device *dma;
+
+ dma = platform_get_drvdata(pdev);
+ if (dma == NULL) {
+ dev_err(&pdev->dev, "Can not get jpeg dma drvdata\n");
+ return 0;
+ }
+ video_unregister_device(&dma->video);
+ v4l2_device_unregister(&dma->v4l2_dev);
+ v4l2_m2m_release(dma->m2m_dev);
+ /* unregister bus client */
+ msm_camera_unregister_bus_client(dma->bus_client);
+ /* release all the regulators */
+ msm_camera_put_regulators(dma->pdev, &dma->dma_vdd,
+ dma->num_reg);
+ /* release all the clocks */
+ msm_camera_put_clk_info(dma->pdev, &dma->jpeg_clk_info,
+ &dma->clk, dma->num_clk);
+ msm_jpegdma_hw_release_mem_resources(dma);
+ kfree(dma);
+
+ return 0;
+}
+
+/* Device tree match struct */
+static const struct of_device_id msm_jpegdma_dt_match[] = {
+ {.compatible = "qcom,jpegdma"},
+ {}
+};
+
+/* Jpeg dma platform driver definition */
+static struct platform_driver jpegdma_driver = {
+ .probe = jpegdma_probe,
+ .remove = jpegdma_device_remove,
+ .driver = {
+ .name = MSM_JPEGDMA_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_jpegdma_dt_match,
+ },
+};
+
+static int __init msm_jpegdma_init_module(void)
+{
+ return platform_driver_register(&jpegdma_driver);
+}
+
+static void __exit msm_jpegdma_exit_module(void)
+{
+ platform_driver_unregister(&jpegdma_driver);
+}
+
+module_init(msm_jpegdma_init_module);
+module_exit(msm_jpegdma_exit_module);
+MODULE_DESCRIPTION("MSM JPEG DMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.h b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.h
new file mode 100644
index 000000000000..ebdaf345121b
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.h
@@ -0,0 +1,374 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_JPEG_DMA_DEV_H__
+#define __MSM_JPEG_DMA_DEV_H__
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/msm-bus.h>
+#include "cam_soc_api.h"
+
+/* Max number of clocks defined in device tree */
+#define MSM_JPEGDMA_MAX_CLK 10
+/* Core clock index */
+#define MSM_JPEGDMA_CORE_CLK 0
+/* Max number of regulators defined in device tree */
+#define MSM_JPEGDMA_MAX_REGULATOR_NUM 3
+/* Max number of planes supported */
+#define MSM_JPEGDMA_MAX_PLANES 3
+/* Max number of hw pipes supported */
+#define MSM_JPEGDMA_MAX_PIPES 2
+/* Max number of hw configurations supported */
+#define MSM_JPEGDMA_MAX_CONFIGS 2
+/* Dma default fps */
+#define MSM_JPEGDMA_DEFAULT_FPS 30
+
+/* Dma input output size limitations */
+#define MSM_JPEGDMA_MAX_WIDTH 65536
+#define MSM_JPEGDMA_MIN_WIDTH 32
+#define MSM_JPEGDMA_MAX_HEIGHT 65536
+#define MSM_JPEGDMA_MIN_HEIGHT 32
+#define MSM_JPEGDMA_STRIDE_ALIGN 8
+
+/*
+ * enum msm_jpegdma_plane_type - Dma plane type.
+ * @JPEGDMA_PLANE_TYPE_Y: Y plane type.
+ * @JPEGDMA_PLANE_TYPE_CR: Chroma CR plane.
+ * @JPEGDMA_PLANE_TYPE_CB: Chroma CB plane.
+ * @JPEGDMA_PLANE_TYPE_CBCR: Interleaved CbCr plane.
+ */
+enum msm_jpegdma_plane_type {
+ JPEGDMA_PLANE_TYPE_Y,
+ JPEGDMA_PLANE_TYPE_CR,
+ JPEGDMA_PLANE_TYPE_CB,
+ JPEGDMA_PLANE_TYPE_CBCR,
+};
+
+/*
+ * struct msm_jpegdma_format - Dma format.
+ * @name: Format name.
+ * @fourcc: v4l2 fourcc code.
+ * @depth: Number of bits per pixel.
+ * @num_planes: number of planes.
+ * @colplane_h: Color plane horizontal subsample.
+ * @colplane_v: Color plane vertical subsample.
+ * @h_align: Horizontal align.
+ * @v_align: Vertical align.
+ * @planes: Array with plane types.
+ */
+struct msm_jpegdma_format {
+ char *name;
+ u32 fourcc;
+ int depth;
+ int num_planes;
+ int colplane_h;
+ int colplane_v;
+ int h_align;
+ int v_align;
+ enum msm_jpegdma_plane_type planes[MSM_JPEGDMA_MAX_PLANES];
+};
+
+/*
+ * struct msm_jpegdma_size - Dma size.
+ * @top: Top position.
+ * @left: Left position.
+ * @width: Width.
+ * @height: Height.
+ * @scanline: Number of lines per plane.
+ * @stride: Stride bytes per line.
+ */
+struct msm_jpegdma_size {
+ unsigned int top;
+ unsigned int left;
+ unsigned int width;
+ unsigned int height;
+ unsigned int scanline;
+ unsigned int stride;
+};
+
+/*
+ * struct msm_jpegdma_size_config - Dma engine size configuration.
+ * @in_size: Input size.
+ * @out_size: Output size.
+ * @format: Format.
+ * @fps: Requested frames per second.
+ */
+struct msm_jpegdma_size_config {
+ struct msm_jpegdma_size in_size;
+ struct msm_jpegdma_size out_size;
+ struct msm_jpegdma_format format;
+ unsigned int fps;
+};
+
+/*
+ * struct msm_jpegdma_block - Dma hw block.
+ * @div: Block divider.
+ * @width: Block width.
+ * @reg_val: Block register value.
+ */
+struct msm_jpegdma_block {
+ unsigned int div;
+ unsigned int width;
+ unsigned int reg_val;
+};
+
+/*
+ * struct msm_jpegdma_block_config - Dma hw block configuration.
+ * @block: Block settings.
+ * @blocks_per_row: Blocks per row.
+ * @blocks_per_col: Blocks per column.
+ * @h_step: Horizontal step value
+ * @v_step: Vertical step value
+ * @h_step_last: Last horizontal step.
+ * @v_step_last: Last vertical step.
+ */
+struct msm_jpegdma_block_config {
+ struct msm_jpegdma_block block;
+ unsigned int blocks_per_row;
+ unsigned int blocks_per_col;
+ unsigned int h_step;
+ unsigned int v_step;
+ unsigned int h_step_last;
+ unsigned int v_step_last;
+};
+
+/*
+ * msm_jpegdma_scale - Dma hw scale configuration.
+ * @enable: Scale enable.
+ * @hor_scale: Horizontal scale factor in Q21 format.
+ * @ver_scale: Vertical scale factor in Q21 format.
+ */
+struct msm_jpegdma_scale {
+ int enable;
+ unsigned int hor_scale;
+ unsigned int ver_scale;
+};
+
+/*
+ * struct msm_jpegdma_config - Dma hw configuration.
+ * @size_cfg: Size configuration.
+ * @scale_cfg: Scale configuration
+ * @block_cfg: Block configuration.
+ * @phase: Starting phase.
+ * @in_offset: Input offset.
+ * @out_offset: Output offset.
+ */
+struct msm_jpegdma_config {
+ struct msm_jpegdma_size_config size_cfg;
+ struct msm_jpegdma_scale scale_cfg;
+ struct msm_jpegdma_block_config block_cfg;
+ unsigned int phase;
+ unsigned int in_offset;
+ unsigned int out_offset;
+};
+
+/*
+ * struct msm_jpegdma_speed - Dma processing speed and bus bandwidth.
+ * @bus_ab: Bus average bandwidth.
+ * @bus_ib: Bus instantaneous bandwidth.
+ * @core_clock: Core clock freq.
+ */
+struct msm_jpegdma_speed {
+ u64 bus_ab;
+ u64 bus_ib;
+ u64 core_clock;
+};
+
+/*
+ * struct msm_jpegdma_plane - Dma plane configuration.
+ * @active_pipes: Number of active pipes.
+ * @config: Plane configurations.
+ * @type: Plane type.
+ */
+struct msm_jpegdma_plane {
+ unsigned int active_pipes;
+ struct msm_jpegdma_config config[MSM_JPEGDMA_MAX_PIPES];
+ enum msm_jpegdma_plane_type type;
+};
+
+/*
+ * struct msm_jpegdma_plane_config - Per-plane dma configuration.
+ * @num_planes: Number of planes.
+ * @plane: Plane configuration.
+ * @speed: Processing speed.
+ */
+struct msm_jpegdma_plane_config {
+ unsigned int num_planes;
+ struct msm_jpegdma_plane plane[MSM_JPEGDMA_MAX_PLANES];
+ struct msm_jpegdma_speed speed;
+};
+
+/*
+ * struct msm_jpegdma_addr - Contain input output address.
+ * @in_addr: Input dma address.
+ * @out_addr: Output dma address.
+ */
+struct msm_jpegdma_addr {
+ u32 in_addr;
+ u32 out_addr;
+};
+
+/*
+ * struct msm_jpegdma_buf_handle - Structure containing dma buffer information.
+ * @fd: Ion fd from which this buffer is imported.
+ * @dma: Pointer to jpeg dma device.
+ * @size: Size of the buffer.
+ * @addr: Address of the mmu mapped buffer, programmed into the dma hw.
+ */
+struct msm_jpegdma_buf_handle {
+ int fd;
+ struct msm_jpegdma_device *dma;
+ unsigned long size;
+ ion_phys_addr_t addr;
+};
+
+/*
+ * struct jpegdma_ctx - Per open file handle context.
+ * @lock: Lock protecting dma ctx.
+ * @jdma_device: Pointer to dma device.
+ * @active: Set if context is active.
+ * @completion: Context processing completion.
+ * @fh: V4l2 file handle.
+ * @m2m_ctx: Memory to memory context.
+ * @format_cap: Current capture format.
+ * @format_out: Current output format.
+ * @crop: Current crop.
+ * @timeperframe: Time per frame in seconds.
+ * @config_idx: Plane configuration active index.
+ * @plane_config: Array of plane configurations.
+ * @pending_config: Flag set if there is pending plane configuration.
+ * @plane_idx: Processing plane index.
+ * @format_idx: Current format index.
+ */
+struct jpegdma_ctx {
+ struct mutex lock;
+ struct msm_jpegdma_device *jdma_device;
+ atomic_t active;
+ struct completion completion;
+ struct v4l2_fh fh;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_format format_cap;
+ struct v4l2_format format_out;
+ struct v4l2_rect crop;
+ struct v4l2_fract timeperframe;
+
+ unsigned int config_idx;
+ struct msm_jpegdma_plane_config plane_config[MSM_JPEGDMA_MAX_CONFIGS];
+ unsigned int pending_config;
+
+ unsigned int plane_idx;
+ unsigned int format_idx;
+};
+
+/*
+ * struct jpegdma_reg_cfg - Register value configuration.
+ * @reg: Register offset.
+ * @val: Register value.
+ */
+struct jpegdma_reg_cfg {
+ unsigned int reg;
+ unsigned int val;
+};
+
+/*
+ * enum msm_jpegdma_mem_resources - jpegdma device iomem resources.
+ * @MSM_JPEGDMA_IOMEM_CORE: Index of jpegdma core registers.
+ * @MSM_JPEGDMA_IOMEM_VBIF: Index of jpegdma vbif registers.
+ * @MSM_JPEGDMA_IOMEM_LAST: Not valid.
+ */
+enum msm_jpegdma_mem_resources {
+ MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_IOMEM_VBIF,
+ MSM_JPEGDMA_IOMEM_LAST
+};
+
+/*
+ * struct msm_jpegdma_device - Jpeg dma device structure.
+ * @lock: Lock protecting dma device.
+ * @ref_count: Device reference count.
+ * @irq_num: Jpeg dma irq number.
+ * @iomem_base: Array of register mappings used by dma device.
+ * @irq: Jpeg dma irq resource.
+ * @dma_vdd: Array of regulators attached to the device.
+ * @num_reg: Number of regulators attached to the device.
+ * @clk: Array of clock resources used by dma device.
+ * @num_clk: Number of clocks attached to the device.
+ * @jpeg_clk_info: Array of clock info.
+ * @vbif_regs_num: Number of vbif regs.
+ * @vbif_regs: Array of vbif regs need to be set.
+ * @qos_regs_num: Number of qos regs.
+ * @qos_regs: Array of qos regs need to be set.
+ * @prefetch_regs_num: Number of mmu prefetch regs.
+ * @prefetch_regs: Array of mmu prefetch regs need to be set.
+ * @bus_client: Memory access bus client.
+ * @bus_vectors: Bus vector.
+ * @bus_paths: Bus path.
+ * @bus_scale_data: Memory access bus scale data.
+ * @iommu_hndl: Dma device iommu handle.
+ * @iommu_attached_cnt: Iommu attached devices reference count.
+ * @iommu_dev: Pointer to Ion iommu device.
+ * @dev: Pointer to device struct.
+ * @v4l2_dev: V4l2 device.
+ * @video: Video device.
+ * @m2m_dev: Memory to memory device.
+ * @hw_num_pipes: Number of dma hw pipes.
+ * @hw_reset_completion: Dma reset completion.
+ * @hw_halt_completion: Dma halt completion.
+ * @active_clock_rate: Currently active core clock rate.
+ * @pdev: Pointer to dma platform device.
+ */
+struct msm_jpegdma_device {
+ struct mutex lock;
+ int ref_count;
+
+ int irq_num;
+ void __iomem *iomem_base[MSM_JPEGDMA_IOMEM_LAST];
+
+ struct resource *irq;
+ struct msm_cam_regulator *dma_vdd;
+ int num_reg;
+
+ struct clk **clk;
+ size_t num_clk;
+ struct msm_cam_clk_info *jpeg_clk_info;
+
+ unsigned int vbif_regs_num;
+ struct jpegdma_reg_cfg *vbif_regs;
+ unsigned int qos_regs_num;
+ struct jpegdma_reg_cfg *qos_regs;
+ unsigned int prefetch_regs_num;
+ struct jpegdma_reg_cfg *prefetch_regs;
+
+ enum cam_bus_client bus_client;
+ struct msm_bus_vectors bus_vectors;
+ struct msm_bus_paths bus_paths;
+ struct msm_bus_scale_pdata bus_scale_data;
+
+ int iommu_hndl;
+ unsigned int iommu_attached_cnt;
+
+ struct device *iommu_dev;
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct video_device video;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ int hw_num_pipes;
+ struct completion hw_reset_completion;
+ struct completion hw_halt_completion;
+ u64 active_clock_rate;
+ struct platform_device *pdev;
+};
+
+void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma);
+
+#endif /* __MSM_JPEG_DMA_DEV_H__ */
diff --git a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_hw.c b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_hw.c
new file mode 100644
index 000000000000..3202699aec85
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_hw.c
@@ -0,0 +1,1776 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <linux/iommu.h>
+#include <linux/msm_ion.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <media/videobuf2-core.h>
+
+#include "msm_camera_io_util.h"
+#include "cam_smmu_api.h"
+#include "msm_jpeg_dma_dev.h"
+#include "msm_jpeg_dma_hw.h"
+#include "msm_jpeg_dma_regs.h"
+
+/* Jpeg dma scale unity */
+#define MSM_JPEGDMA_SCALE_UNI (1 << 21)
+/* Jpeg dma bw numerator */
+#define MSM_JPEGDMA_BW_NUM 38
+/* Jpeg dma bw denominator */
+#define MSM_JPEGDMA_BW_DEN 10
+/* Jpeg bus client name */
+#define MSM_JPEGDMA_BUS_CLIENT_NAME "msm_jpeg_dma"
+/* Jpeg dma engine timeout in ms */
+#define MSM_JPEGDMA_TIMEOUT_MS 500
+/* Jpeg dma smmu name */
+#define MSM_JPEGDMA_SMMU_NAME "jpeg_dma"
+
+static const struct msm_jpegdma_block msm_jpegdma_block_sel[] = {
+ {
+ .div = 0x3C0000,
+ .width = 256,
+ .reg_val = 4,
+ },
+ {
+ .div = 0x7C0000,
+ .width = 128,
+ .reg_val = 3,
+ },
+ {
+ .div = 0xFC0000,
+ .width = 64,
+ .reg_val = 2,
+ },
+ {
+ .div = 0x1FC0000,
+ .width = 32,
+ .reg_val = 1,
+ },
+ {
+ .div = 0x4000000,
+ .width = 16,
+ .reg_val = 0,
+ },
+};
+
+/*
+ * msm_jpegdma_hw_read_reg - dma read from register.
+ * @dma: Pointer to dma device.
+ * @base_idx: dma memory resource index.
+ * @reg: Register addr need to be read from.
+ */
+static inline u32 msm_jpegdma_hw_read_reg(struct msm_jpegdma_device *dma,
+ enum msm_jpegdma_mem_resources base_idx, u32 reg)
+{
+ return msm_camera_io_r(dma->iomem_base[base_idx] + reg);
+}
+
+/*
+ * msm_jpegdma_hw_write_reg - dma write to register.
+ * @dma: Pointer to dma device.
+ * @base_idx: dma memory resource index.
+ * @reg: Register addr need to be read from.
+ * @value: Value to be written.
+ */
+static inline void msm_jpegdma_hw_write_reg(struct msm_jpegdma_device *dma,
+ enum msm_jpegdma_mem_resources base_idx, u32 reg, u32 value)
+{
+ pr_debug("%s:%d]%pK %08x\n", __func__, __LINE__,
+ dma->iomem_base[base_idx] + reg,
+ value);
+ msm_camera_io_w(value, dma->iomem_base[base_idx] + reg);
+}
+
+/*
+ * msm_jpegdma_hw_enable_irq - Enable dma interrupts.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_enable_irq(struct msm_jpegdma_device *dma)
+{
+ u32 reg;
+
+ reg = MSM_JPEGDMA_IRQ_MASK_SESSION_DONE |
+ MSM_JPEGDMA_IRQ_MASK_AXI_HALT |
+ MSM_JPEGDMA_IRQ_MASK_RST_DONE;
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_IRQ_MASK_ADDR, reg);
+}
+
+/*
+ * msm_jpegdma_hw_disable_irq - Disable dma interrupts.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_disable_irq(struct msm_jpegdma_device *dma)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_IRQ_MASK_ADDR, 0);
+}
+
+/*
+ * msm_jpegdma_hw_clear_irq - Clear dma interrupts.
+ * @dma: Pointer to dma device.
+ * @status: Status to clear.
+ */
+static void msm_jpegdma_hw_clear_irq(struct msm_jpegdma_device *dma,
+ u32 status)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_IRQ_CLEAR_ADDR, status);
+}
+
+/*
+ * msm_jpegdma_hw_get_irq_status - Get dma irq status
+ * @dma: Pointer to dma device.
+ */
+static u32 msm_jpegdma_hw_get_irq_status(struct msm_jpegdma_device *dma)
+{
+ return msm_jpegdma_hw_read_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_IRQ_STATUS);
+}
+
+/*
+ * msm_jpegdma_hw_get_num_pipes - Get number of dma pipes
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_get_num_pipes(struct msm_jpegdma_device *dma)
+{
+ int num_pipes;
+ u32 reg;
+
+ reg = msm_jpegdma_hw_read_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_HW_CAPABILITY);
+
+ num_pipes = (reg & MSM_JPEGDMA_HW_CAPABILITY_NUM_PIPES_BMSK) >>
+ MSM_JPEGDMA_HW_CAPABILITY_NUM_PIPES_SHFT;
+
+ return num_pipes;
+}
+
+/*
+ * msm_jpegdma_hw_reset - Reset jpeg dma core.
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_reset(struct msm_jpegdma_device *dma)
+{
+ unsigned long time;
+
+ init_completion(&dma->hw_reset_completion);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_HW_JPEGDMA_RESET, MSM_HW_JPEGDMA_RESET_DEFAULT);
+
+ time = wait_for_completion_timeout(&dma->hw_reset_completion,
+ msecs_to_jiffies(MSM_JPEGDMA_TIMEOUT_MS));
+ if (!time) {
+ dev_err(dma->dev, "Jpeg dma detection reset timeout\n");
+ return -ETIME;
+ }
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_halt - Halt jpeg dma core.
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_halt(struct msm_jpegdma_device *dma)
+{
+ unsigned long time;
+
+ init_completion(&dma->hw_halt_completion);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_CMD_ADDR, 0x4);
+
+ time = wait_for_completion_timeout(&dma->hw_halt_completion,
+ msecs_to_jiffies(MSM_JPEGDMA_TIMEOUT_MS));
+ if (!time) {
+ dev_err(dma->dev, "Jpeg dma detection halt timeout\n");
+ return -ETIME;
+ }
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_run - Enable dma processing.
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_run(struct msm_jpegdma_device *dma)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_CMD_ADDR, 0x1);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_core_config - Set jpeg dma core configuration.
+ * @dma: Pointer to dma device.
+ * @num_pipes: Number of pipes.
+ * @scale_0: Scaler 0 enable.
+ * @scale_1: Scaler 1 enable.
+ */
+static int msm_jpegdma_hw_core_config(struct msm_jpegdma_device *dma,
+ int num_pipes, int scale_0, int scale_1)
+{
+ u32 reg;
+
+ reg = (scale_0 << MSM_JPEGDMA_CORE_CFG_SCALE_0_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_TEST_BUS_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_BRIDGE_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_WE_0_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_FE_0_ENABLE_SHFT);
+
+ /* Enable read write ports for second pipe */
+ if (num_pipes > 1) {
+ reg |= (scale_1 << MSM_JPEGDMA_CORE_CFG_SCALE_1_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_WE_1_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_FE_1_ENABLE_SHFT);
+ }
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_CORE_CFG_ADDR, reg);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_0_block - Fetch engine 0 block configuration.
+ * @dma: Pointer to dma device.
+ * @block_config: Pointer to block configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_fe_0_block(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_block_config *block_config,
+ enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ switch (plane_type) {
+ case JPEGDMA_PLANE_TYPE_Y:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_Y <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CB:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CB <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CR:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CR <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CBCR:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CBCR <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ default:
+ dev_err(dma->dev, "Unsupported plane type %d\n", plane_type);
+ return -EINVAL;
+ }
+
+ reg |= (block_config->block.reg_val <<
+ MSM_JPEGDMA_FE_CFG_BLOCK_WIDTH_SHFT) |
+ (0x1 << MSM_JPEGDMA_FE_CFG_MAL_BOUNDARY_SHFT) |
+ (0x1 << MSM_JPEGDMA_FE_CFG_MAL_EN_SHFT) |
+ (0xF << MSM_JPEGDMA_FE_CFG_BURST_LENGTH_MAX_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_0_CFG_ADDR, reg);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_1_block - Fetch engine 1 block configuration.
+ * @dma: Pointer to dma device.
+ * @block_config: Pointer to block configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_fe_1_block(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_block_config *block_config,
+ enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ switch (plane_type) {
+ case JPEGDMA_PLANE_TYPE_Y:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_Y <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CB:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CB <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CR:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CR <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CBCR:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CBCR <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ default:
+ dev_err(dma->dev, "Unsupported plane type %d\n", plane_type);
+ return -EINVAL;
+ }
+
+ reg |= (block_config->block.reg_val <<
+ MSM_JPEGDMA_FE_CFG_BLOCK_WIDTH_SHFT) |
+ (0xF << MSM_JPEGDMA_FE_CFG_BURST_LENGTH_MAX_SHFT) |
+ (0x1 << MSM_JPEGDMA_FE_CFG_MAL_EN_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_1_CFG_ADDR, reg);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_0_phase - Fetch engine 0 phase configuration.
+ * @dma: Pointer to dma device.
+ * @phase: Fetch engine 0 phase.
+ */
+static int msm_jpegdma_hw_fe_0_phase(struct msm_jpegdma_device *dma, int phase)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_HINIT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_HINIT_INT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_VINIT_INT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_VINIT_INT_ADDR, phase);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_1_phase - Fetch engine 1 phase configuration.
+ * @dma: Pointer to dma device.
+ * @phase: Fetch engine 1 phase.
+ */
+static int msm_jpegdma_hw_fe_1_phase(struct msm_jpegdma_device *dma, int phase)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_HINIT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_HINIT_INT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_VINIT_INT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_VINIT_INT_ADDR, phase);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_0_size - Fetch engine 0 size configuration.
+ * @dma: Pointer to dma device.
+ * @size: Pointer to size configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_fe_0_size(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size *size, enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ reg = (size->width + size->left - 1) |
+ ((size->height + size->top - 1) <<
+ MSM_JPEGDMA_FE_RD_BUFFER_SIZE_HEIGHT_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_BUFFER_SIZE_0_ADDR, reg);
+
+ if (size->left && plane_type == JPEGDMA_PLANE_TYPE_CBCR)
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_HINIT_INT_ADDR, size->left / 2);
+ else
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_HINIT_INT_ADDR, size->left);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_VINIT_INT_ADDR, size->top);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_STRIDE_ADDR, size->stride);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_1_size - Fetch engine 1 size configuration.
+ * @dma: Pointer to dma device.
+ * @size: Pointer to size configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_fe_1_size(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size *size, enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ reg = (size->width + size->left - 1) |
+ ((size->height + size->top - 1) <<
+ MSM_JPEGDMA_FE_RD_BUFFER_SIZE_HEIGHT_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_BUFFER_SIZE_1_ADDR, reg);
+
+ if (size->left && plane_type == JPEGDMA_PLANE_TYPE_CBCR)
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_HINIT_INT_ADDR, size->left / 2);
+ else
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_HINIT_INT_ADDR, size->left);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_VINIT_INT_ADDR, size->top);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_STRIDE_ADDR, size->stride);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_0_addr - Set fetch engine 0 address.
+ * @dma: Pointer to dma device.
+ * @addr: Fetch engine address.
+ */
+static int msm_jpegdma_hw_fe_0_addr(struct msm_jpegdma_device *dma, u32 addr)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_CMD_ADDR, MSM_JPEGDMA_CMD_CLEAR_READ_PLN_QUEUES);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_PNTR_ADDR, addr);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_1_addr - Set fetch engine 1 address.
+ * @dma: Pointer to dma device.
+ * @addr: Fetch engine address.
+ */
+static int msm_jpegdma_hw_fe_1_addr(struct msm_jpegdma_device *dma, u32 addr)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_PNTR_ADDR, addr);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_0_block - Write engine 0 block configuration.
+ * @dma: Pointer to dma device.
+ * @block_config: Pointer to block configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_we_0_block(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_block_config *block,
+ enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ reg = (0xF << MSM_JPEGDMA_WE_CFG_BURST_LENGTH_MAX_SHFT) |
+ (0x1 << MSM_JPEGDMA_WE_CFG_MAL_BOUNDARY_SHFT) |
+ (0x1 << MSM_JPEGDMA_WE_CFG_MAL_EN_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_CFG_ADDR, reg);
+
+ reg = ((block->blocks_per_row - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_0_BLOCKS_PER_ROW_SHFT) |
+ (block->blocks_per_col - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_CFG_0_ADDR, reg);
+
+ reg = ((block->h_step_last - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_1_LAST_H_STEP_SHFT) |
+ (block->h_step - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_CFG_1_ADDR, reg);
+
+ reg = ((block->v_step_last - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_2_LAST_V_STEP_SHFT) |
+ (block->v_step - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_CFG_2_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_CFG_3_ADDR, 0x0);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_1_block - Write engine 1 block configuration.
+ * @dma: Pointer to dma device.
+ * @block_config: Pointer to block configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_we_1_block(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_block_config *block,
+ enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ reg = ((block->blocks_per_row - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_0_BLOCKS_PER_ROW_SHFT) |
+ (block->blocks_per_col - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_CFG_0_ADDR, reg);
+
+ reg = ((block->h_step_last - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_1_LAST_H_STEP_SHFT) |
+ (block->h_step - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_CFG_1_ADDR, reg);
+
+ reg = ((block->v_step_last - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_2_LAST_V_STEP_SHFT) |
+ (block->v_step - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_CFG_2_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_CFG_3_ADDR, 0x0);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_0_size - Write engine 0 size configuration.
+ * @dma: Pointer to dma device.
+ * @size: Pointer to size configuration.
+ */
+static int msm_jpegdma_hw_we_0_size(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size *size)
+{
+ u32 reg;
+
+ reg = (size->width) | ((size->height) <<
+ MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_HEIGHT_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_0_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_STRIDE_ADDR, size->stride);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_1_size - Write engine 1 size configuration.
+ * @dma: Pointer to dma device.
+ * @size: Pointer to size configuration.
+ */
+static int msm_jpegdma_hw_we_1_size(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size *size)
+{
+ u32 reg;
+
+ reg = (size->width) | ((size->height) <<
+ MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_HEIGHT_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_1_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_STRIDE_ADDR, size->stride);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_0_addr - Set write engine 0 address.
+ * @dma: Pointer to dma device.
+ * @addr: Write engine address.
+ */
+static int msm_jpegdma_hw_we_0_addr(struct msm_jpegdma_device *dma, u32 addr)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_CMD_ADDR, MSM_JPEGDMA_CMD_CLEAR_WRITE_PLN_QUEUES);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_PNTR_ADDR, addr);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_1_addr - Set write engine 1 address.
+ * @dma: Pointer to dma device.
+ * @addr: Write engine address.
+ */
+static int msm_jpegdma_hw_we_1_addr(struct msm_jpegdma_device *dma, u32 addr)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_PNTR_ADDR, addr);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_scale_0_config - Scale configuration for 0 pipeline.
+ * @dma: Pointer to dma device.
+ * @scale: Scale configuration.
+ */
+static int msm_jpegdma_hw_scale_0_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_scale *scale)
+{
+ u32 reg;
+ u32 h_down_en;
+ u32 v_down_en;
+
+ h_down_en = (scale->hor_scale == MSM_JPEGDMA_SCALE_UNI) ? 0 : 1;
+ v_down_en = (scale->ver_scale == MSM_JPEGDMA_SCALE_UNI) ? 0 : 1;
+
+ reg = (h_down_en << MSM_JPEGDMA_PP_SCALE_CFG_HSCALE_ENABLE_SHFT) |
+ (v_down_en << MSM_JPEGDMA_PP_SCALE_CFG_VSCALE_ENABLE_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_0_SCALE_CFG_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_0_SCALE_PHASEH_STEP_ADDR, scale->hor_scale);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_0_SCALE_PHASEV_STEP_ADDR, scale->ver_scale);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_scale_1_config - Scale configuration for 1 pipeline.
+ * @dma: Pointer to dma device.
+ * @scale: Scale configuration.
+ */
+static int msm_jpegdma_hw_scale_1_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_scale *scale)
+{
+ u32 reg;
+ u32 h_down_en;
+ u32 v_down_en;
+
+ h_down_en = (scale->hor_scale == MSM_JPEGDMA_SCALE_UNI) ? 0 : 1;
+ v_down_en = (scale->ver_scale == MSM_JPEGDMA_SCALE_UNI) ? 0 : 1;
+
+ reg = (h_down_en << MSM_JPEGDMA_PP_SCALE_CFG_HSCALE_ENABLE_SHFT) |
+ (v_down_en << MSM_JPEGDMA_PP_SCALE_CFG_VSCALE_ENABLE_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_1_SCALE_CFG_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_1_SCALE_PHASEH_STEP_ADDR, scale->hor_scale);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_1_SCALE_PHASEV_STEP_ADDR, scale->ver_scale);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_config_qos - Configure qos registers.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_config_qos(struct msm_jpegdma_device *dma)
+{
+ int i;
+
+ if (!dma->qos_regs_num)
+ return;
+
+ for (i = 0; i < dma->qos_regs_num; i++)
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ dma->qos_regs[i].reg, dma->qos_regs[i].val);
+}
+
+/*
+ * msm_jpegdma_hw_config_vbif - Configure the vbif interface.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_config_vbif(struct msm_jpegdma_device *dma)
+{
+ int i;
+
+ if (!dma->vbif_regs_num)
+ return;
+
+ for (i = 0; i < dma->vbif_regs_num; i++)
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_VBIF,
+ dma->vbif_regs[i].reg, dma->vbif_regs[i].val);
+}
+
+/*
+ * msm_jpegdma_hw_config_mmu_prefetch - Configure mmu prefetch registers.
+ * @dma: Pointer to dma device.
+ * @min_addr: Pointer to jpeg dma addr, containing min addrs of the plane.
+ * @max_addr: Pointer to jpeg dma addr, containing max addrs of the plane.
+ */
+static void msm_jpegdma_hw_config_mmu_prefetch(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_addr *min_addr,
+ struct msm_jpegdma_addr *max_addr)
+{
+ int i;
+
+ if (!dma->prefetch_regs_num)
+ return;
+
+ for (i = 0; i < dma->prefetch_regs_num; i++)
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_VBIF,
+ dma->prefetch_regs[i].reg, dma->prefetch_regs[i].val);
+
+ if (min_addr != NULL && max_addr != NULL) {
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN, min_addr->in_addr);
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX, max_addr->in_addr);
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN, min_addr->out_addr);
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX, max_addr->out_addr);
+ }
+}
+
+/*
+ * msm_jpegdma_hw_calc_speed - Calculate speed based on framerate and size.
+ * @dma: Pointer to dma device.
+ * @size: Dma user size configuration.
+ * @speed: Calculated speed.
+ */
+static int msm_jpegdma_hw_calc_speed(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size,
+ struct msm_jpegdma_speed *speed)
+{
+ u64 width;
+ u64 height;
+ long real_clock;
+ u64 calc_rate;
+
+ width = size->in_size.width + size->in_size.left;
+ height = size->in_size.height + size->in_size.top;
+
+ calc_rate = (width * height * size->format.depth * size->fps) / 16;
+ real_clock = clk_round_rate(dma->clk[MSM_JPEGDMA_CORE_CLK], calc_rate);
+ if (real_clock < 0) {
+ dev_err(dma->dev, "Can not round core clock\n");
+ return -EINVAL;
+ }
+
+ speed->bus_ab = calc_rate * 2;
+ speed->bus_ib = (real_clock *
+ (MSM_JPEGDMA_BW_NUM + MSM_JPEGDMA_BW_DEN - 1)) /
+ MSM_JPEGDMA_BW_DEN;
+ speed->core_clock = real_clock;
+ dev_dbg(dma->dev, "Speed core clk %llu ab %llu ib %llu fps %d\n",
+ speed->core_clock, speed->bus_ab, speed->bus_ib, size->fps);
+
+ return 0;
+}
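+
+/*
+ * Worked example, illustrative only (sizes and format depth assumed):
+ * a 1920x1080 input with no crop offset, a 12 bpp format and 30 fps gives
+ *   calc_rate = (1920 * 1080 * 12 * 30) / 16 = 46656000,
+ *   bus_ab    = 2 * calc_rate = 93312000,
+ *   bus_ib    ~ 4.7 * clk_round_rate(core_clk, calc_rate)
+ * per the MSM_JPEGDMA_BW_NUM/MSM_JPEGDMA_BW_DEN factors above.
+ */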
+
+/*
+ * msm_jpegdma_hw_set_speed - Configure clock and bus bandwidth based on
+ * requested speed and dma clients.
+ * @dma: Pointer to dma device.
+ * @size: Jpeg dma size configuration.
+ * @speed: Requested dma speed.
+ */
+static int msm_jpegdma_hw_set_speed(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size,
+ struct msm_jpegdma_speed *speed)
+{
+ struct msm_jpegdma_speed new_sp;
+ struct msm_jpegdma_size_config new_size;
+ int ret;
+
+ if (dma->active_clock_rate >= speed->core_clock)
+ return 0;
+
+ new_sp = *speed;
+ if (dma->ref_count > 2) {
+ new_size = *size;
+ new_size.fps = size->fps * ((dma->ref_count + 1) / 2);
+ ret = msm_jpegdma_hw_calc_speed(dma, &new_size, &new_sp);
+ if (ret < 0)
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(dma->clk[MSM_JPEGDMA_CORE_CLK], new_sp.core_clock);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail Core clock rate %d\n", ret);
+ return -EINVAL;
+ }
+ dma->active_clock_rate = speed->core_clock;
+
+ dma->bus_vectors.ab = new_sp.bus_ab;
+ dma->bus_vectors.ib = new_sp.bus_ib;
+
+ ret = msm_bus_scale_client_update_request(dma->bus_client, 0);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail bus scale update %d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
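+
+/*
+ * Note, illustrative only: with more than two references on the device the
+ * clock request is scaled up for sharing, e.g. ref_count = 4 recomputes the
+ * speed with fps * ((4 + 1) / 2), i.e. twice the requested frame rate.
+ */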
+
+/*
+ * msm_jpegdma_hw_add_plane_offset - Add plane offset to all pipelines.
+ * @plane: Jpeg dma plane configuration.
+ * @in_offset: Input plane offset.
+ * @out_offset: Output plane offset.
+ */
+static int msm_jpegdma_hw_add_plane_offset(struct msm_jpegdma_plane *plane,
+ unsigned int in_offset, unsigned int out_offset)
+{
+ int i;
+
+ for (i = 0; i < plane->active_pipes; i++) {
+ plane->config[i].in_offset += in_offset;
+ plane->config[i].out_offset += out_offset;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_calc_config - Calculate plane configuration.
+ * @size_cfg: Size configuration.
+ * @plane: Plane configuration to be calculated.
+ */
+static int msm_jpegdma_hw_calc_config(struct msm_jpegdma_size_config *size_cfg,
+ struct msm_jpegdma_plane *plane)
+{
+ u64 scale_hor, scale_ver, phase;
+ u64 in_width, in_height;
+ u64 out_width, out_height;
+ struct msm_jpegdma_config *config;
+ int i;
+
+ if (!size_cfg->out_size.width || !size_cfg->out_size.height)
+ return -EINVAL;
+
+ config = &plane->config[0];
+ config->scale_cfg.enable = 0;
+
+ in_width = size_cfg->in_size.width;
+ out_width = size_cfg->out_size.width;
+ scale_hor = (in_width * MSM_JPEGDMA_SCALE_UNI) / out_width;
+ if (scale_hor != MSM_JPEGDMA_SCALE_UNI)
+ config->scale_cfg.enable = 1;
+
+ in_height = size_cfg->in_size.height;
+ out_height = size_cfg->out_size.height;
+ scale_ver = (in_height * MSM_JPEGDMA_SCALE_UNI) / out_height;
+ if (scale_ver != MSM_JPEGDMA_SCALE_UNI)
+ config->scale_cfg.enable = 1;
+
+ config->scale_cfg.ver_scale = scale_ver;
+ config->scale_cfg.hor_scale = scale_hor;
+
+ for (i = 0; i < ARRAY_SIZE(msm_jpegdma_block_sel); i++)
+ if (scale_hor <= msm_jpegdma_block_sel[i].div)
+ break;
+
+ if (i == ARRAY_SIZE(msm_jpegdma_block_sel))
+ return -EINVAL;
+
+ config->block_cfg.block = msm_jpegdma_block_sel[i];
+
+ if (plane->active_pipes > 1) {
+ phase = (out_height * scale_ver + (plane->active_pipes - 1)) /
+ plane->active_pipes;
+ phase &= (MSM_JPEGDMA_SCALE_UNI - 1);
+ out_height = (out_height + (plane->active_pipes - 1)) /
+ plane->active_pipes;
+ in_height = (out_height * scale_ver) / MSM_JPEGDMA_SCALE_UNI;
+ }
+
+ config->block_cfg.blocks_per_row = out_width /
+ config->block_cfg.block.width;
+
+ config->block_cfg.blocks_per_col = out_height;
+
+ config->block_cfg.h_step = config->block_cfg.block.width;
+
+ config->block_cfg.h_step_last = out_width %
+ config->block_cfg.block.width;
+ if (!config->block_cfg.h_step_last)
+ config->block_cfg.h_step_last = config->block_cfg.h_step;
+ else
+ config->block_cfg.blocks_per_row++;
+
+ config->block_cfg.v_step = 1;
+ config->block_cfg.v_step_last = 1;
+
+ config->size_cfg = *size_cfg;
+ config->size_cfg.in_size.width = in_width;
+ config->size_cfg.in_size.height = in_height;
+ config->size_cfg.out_size.width = out_width;
+ config->size_cfg.out_size.height = out_height;
+ config->in_offset = 0;
+ config->out_offset = 0;
+
+ if (plane->active_pipes > 1) {
+ plane->config[1] = *config;
+ /* Recalculate offset for second pipe */
+ plane->config[1].in_offset =
+ config->size_cfg.in_size.scanline *
+ config->size_cfg.in_size.stride;
+
+ plane->config[1].out_offset =
+ config->size_cfg.out_size.scanline *
+ config->size_cfg.out_size.stride;
+
+ plane->config[1].phase = phase;
+ }
+
+ return 0;
+}
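+
+/*
+ * Worked example, illustrative only (sizes assumed): downscaling a
+ * 4096-pixel wide plane to 1024 pixels yields
+ *   scale_hor = (4096 << 21) / 1024 = 4 * MSM_JPEGDMA_SCALE_UNI = 0x800000,
+ * which enables the scaler and selects the 64-pixel entry of
+ * msm_jpegdma_block_sel (the first divider >= 0x800000 is 0xFC0000).
+ */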
+
+/*
+ * msm_jpegdma_hw_check_config - Check if a configuration for the given size is possible.
+ * @dma: Pointer to dma device.
+ * @size_cfg: Size configuration.
+ */
+int msm_jpegdma_hw_check_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size_cfg)
+{
+ u64 in_width, in_height;
+ u64 out_width, out_height;
+ u64 scale;
+
+ if (!size_cfg->out_size.width || !size_cfg->out_size.height)
+ return -EINVAL;
+
+ in_width = size_cfg->in_size.width;
+ out_width = size_cfg->out_size.width;
+ scale = ((in_width * MSM_JPEGDMA_SCALE_UNI)) / out_width;
+ if (scale < MSM_JPEGDMA_SCALE_UNI)
+ return -EINVAL;
+
+
+ in_height = size_cfg->in_size.height;
+ out_height = size_cfg->out_size.height;
+ scale = (in_height * MSM_JPEGDMA_SCALE_UNI) / out_height;
+ if (scale < MSM_JPEGDMA_SCALE_UNI)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_set_config - Set dma configuration based on size.
+ * @dma: Pointer to dma device.
+ * @size_cfg: Size configuration.
+ * @plane_cfg: Calculated plane configuration.
+ */
+int msm_jpegdma_hw_set_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size_cfg,
+ struct msm_jpegdma_plane_config *plane_cfg)
+{
+ unsigned int in_offset;
+ unsigned int out_offset;
+ struct msm_jpegdma_size_config plane_size;
+ int ret;
+ int i;
+
+ if (!size_cfg->format.colplane_h || !size_cfg->format.colplane_v)
+ return -EINVAL;
+
+ ret = msm_jpegdma_hw_calc_speed(dma, size_cfg, &plane_cfg->speed);
+ if (ret < 0)
+ return -EINVAL;
+
+ dma->active_clock_rate = 0;
+
+ plane_cfg->plane[0].active_pipes = dma->hw_num_pipes;
+ plane_cfg->plane[0].type = size_cfg->format.planes[0];
+ msm_jpegdma_hw_calc_config(size_cfg, &plane_cfg->plane[0]);
+ if (size_cfg->format.num_planes == 1)
+ return 0;
+
+ in_offset = size_cfg->in_size.scanline *
+ size_cfg->in_size.stride;
+ out_offset = size_cfg->out_size.scanline *
+ size_cfg->out_size.stride;
+
+ memset(&plane_size, 0x00, sizeof(plane_size));
+ for (i = 1; i < size_cfg->format.num_planes; i++) {
+ plane_cfg->plane[i].active_pipes = dma->hw_num_pipes;
+ plane_cfg->plane[i].type = size_cfg->format.planes[i];
+
+ if (size_cfg->in_size.top)
+ plane_size.in_size.top = size_cfg->in_size.top /
+ size_cfg->format.colplane_v;
+
+ if (size_cfg->in_size.left)
+ plane_size.in_size.left = size_cfg->in_size.left /
+ size_cfg->format.colplane_h;
+
+ plane_size.in_size.width = size_cfg->in_size.width /
+ size_cfg->format.colplane_h;
+ plane_size.in_size.height = size_cfg->in_size.height /
+ size_cfg->format.colplane_v;
+ plane_size.in_size.scanline = size_cfg->in_size.scanline /
+ size_cfg->format.colplane_v;
+
+ plane_size.in_size.stride = size_cfg->in_size.stride;
+
+ plane_size.out_size.width = size_cfg->out_size.width /
+ size_cfg->format.colplane_h;
+ plane_size.out_size.height = size_cfg->out_size.height /
+ size_cfg->format.colplane_v;
+ plane_size.out_size.scanline = size_cfg->out_size.scanline /
+ size_cfg->format.colplane_v;
+
+ plane_size.out_size.stride = size_cfg->out_size.stride;
+
+ plane_size.format = size_cfg->format;
+ plane_size.fps = size_cfg->fps;
+
+ msm_jpegdma_hw_calc_config(&plane_size,
+ &plane_cfg->plane[i]);
+
+ msm_jpegdma_hw_add_plane_offset(&plane_cfg->plane[i],
+ in_offset, out_offset);
+
+ in_offset += (plane_size.in_size.scanline *
+ plane_size.in_size.stride);
+ out_offset += (plane_size.out_size.scanline *
+ plane_size.out_size.stride);
+ }
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_start - Start dma processing.
+ * @dma: Pointer to dma device.
+ * @addr: Input and output buffer addresses.
+ * @plane: Plane configuration.
+ * @speed: Clock and bus bandwidth configuration.
+ */
+int msm_jpegdma_hw_start(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_addr *addr,
+ struct msm_jpegdma_plane *plane,
+ struct msm_jpegdma_speed *speed)
+{
+ struct msm_jpegdma_config *cfg;
+ struct msm_jpegdma_addr prefetch_max_addr;
+ unsigned int prefetch_in_size;
+ unsigned int prefetch_out_size;
+
+ int ret;
+
+ if (!plane->active_pipes)
+ return -EINVAL;
+
+ if (plane->active_pipes > MSM_JPEGDMA_MAX_PIPES)
+ return -EINVAL;
+ ret = msm_jpegdma_hw_set_speed(dma, &plane->config[0].size_cfg, speed);
+ if (ret < 0)
+ return -EINVAL;
+
+ msm_jpegdma_hw_core_config(dma, plane->active_pipes,
+ plane->config[0].scale_cfg.enable,
+ plane->config[1].scale_cfg.enable);
+
+ cfg = &plane->config[0];
+ msm_jpegdma_hw_scale_0_config(dma, &cfg->scale_cfg);
+
+ msm_jpegdma_hw_fe_0_block(dma, &cfg->block_cfg, plane->type);
+ msm_jpegdma_hw_fe_0_phase(dma, cfg->phase);
+ msm_jpegdma_hw_fe_0_size(dma, &cfg->size_cfg.in_size, plane->type);
+ msm_jpegdma_hw_fe_0_addr(dma, addr->in_addr + cfg->in_offset);
+ prefetch_in_size = cfg->size_cfg.in_size.stride *
+ cfg->size_cfg.in_size.scanline;
+
+ msm_jpegdma_hw_we_0_block(dma, &cfg->block_cfg, plane->type);
+ msm_jpegdma_hw_we_0_size(dma, &cfg->size_cfg.out_size);
+ msm_jpegdma_hw_we_0_addr(dma, addr->out_addr + cfg->out_offset);
+ prefetch_out_size = cfg->size_cfg.out_size.stride *
+ cfg->size_cfg.out_size.scanline;
+
+ if (plane->active_pipes > 1) {
+ cfg = &plane->config[1];
+ msm_jpegdma_hw_scale_1_config(dma, &cfg->scale_cfg);
+
+ msm_jpegdma_hw_fe_1_block(dma, &cfg->block_cfg, plane->type);
+ msm_jpegdma_hw_fe_1_phase(dma, cfg->phase);
+ msm_jpegdma_hw_fe_1_size(dma, &cfg->size_cfg.in_size,
+ plane->type);
+ msm_jpegdma_hw_fe_1_addr(dma, addr->in_addr + cfg->in_offset);
+ prefetch_in_size += (cfg->size_cfg.in_size.stride *
+ cfg->size_cfg.in_size.scanline);
+
+ msm_jpegdma_hw_we_1_block(dma, &cfg->block_cfg, plane->type);
+ msm_jpegdma_hw_we_1_size(dma, &cfg->size_cfg.out_size);
+ msm_jpegdma_hw_we_1_addr(dma, addr->out_addr + cfg->out_offset);
+ prefetch_out_size += (cfg->size_cfg.out_size.stride *
+ cfg->size_cfg.out_size.scanline);
+ }
+
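+	/* Program the MMU prefetch range to span each buffer from its base
+	 * address to its last byte (base + total plane size - 1).
+	 */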
+ if (prefetch_in_size > 0 && prefetch_out_size > 0) {
+ prefetch_max_addr.in_addr = addr->in_addr +
+ (prefetch_in_size - 1);
+ prefetch_max_addr.out_addr = addr->out_addr +
+ (prefetch_out_size - 1);
+ msm_jpegdma_hw_config_mmu_prefetch(dma, addr,
+ &prefetch_max_addr);
+ }
+
+ msm_jpegdma_hw_run(dma);
+
+ return 1;
+}
+
+/*
+ * msm_jpegdma_hw_abort - Abort dma processing.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_abort(struct msm_jpegdma_device *dma)
+{
+ int ret;
+
+ ret = msm_jpegdma_hw_halt(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to halt hw\n");
+ return ret;
+ }
+
+ ret = msm_jpegdma_hw_reset(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to reset hw\n");
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_irq - Dma irq handler.
+ * @irq: Irq number.
+ * @dev_id: Pointer to dma device.
+ */
+static irqreturn_t msm_jpegdma_hw_irq(int irq, void *dev_id)
+{
+ struct msm_jpegdma_device *dma = dev_id;
+
+ u32 irq_status;
+
+ irq_status = msm_jpegdma_hw_get_irq_status(dma);
+ msm_jpegdma_hw_clear_irq(dma, irq_status);
+
+ if (irq_status & MSM_JPEGDMA_IRQ_STATUS_RST_DONE) {
+ dev_dbg(dma->dev, "Jpeg v4l2 dma IRQ reset done\n");
+ complete_all(&dma->hw_reset_completion);
+ }
+
+ if (irq_status & MSM_JPEGDMA_IRQ_STATUS_AXI_HALT) {
+ dev_dbg(dma->dev, "Jpeg v4l2 dma IRQ AXI halt\n");
+ complete_all(&dma->hw_halt_completion);
+ }
+
+ if (irq_status & MSM_JPEGDMA_IRQ_STATUS_SESSION_DONE) {
+ dev_dbg(dma->dev, "Jpeg v4l2 dma IRQ session done\n");
+ msm_jpegdma_isr_processing_done(dma);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * msm_jpegdma_hw_request_irq - Request dma irq.
+ * @pdev: Pointer to platform device.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_request_irq(struct platform_device *pdev,
+ struct msm_jpegdma_device *dma)
+{
+ int ret;
+
+ dma->irq_num = platform_get_irq(pdev, 0);
+ if (dma->irq_num < 0) {
+ dev_err(dma->dev, "Can not get dma core irq resource\n");
+ ret = -ENODEV;
+ goto error_irq;
+ }
+
+ ret = request_threaded_irq(dma->irq_num, NULL,
+ msm_jpegdma_hw_irq, IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev), dma);
+ if (ret) {
+ dev_err(dma->dev, "Can not claim wrapper IRQ %d\n",
+ dma->irq_num);
+ goto error_irq;
+ }
+
+ return 0;
+
+error_irq:
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_release_mem_resources - Releases memory resources.
+ * @dma: Pointer to dma device.
+ */
+void msm_jpegdma_hw_release_mem_resources(struct msm_jpegdma_device *dma)
+{
+ int i, reserve_mem_flag;
+ char *dev_name;
+
+	/* Release memory resources */
+ for (i = 0; i < MSM_JPEGDMA_IOMEM_LAST; i++) {
+
+ switch (i) {
+ case MSM_JPEGDMA_IOMEM_CORE:
+ dev_name = "jpeg_hw";
+ reserve_mem_flag = true;
+ break;
+ case MSM_JPEGDMA_IOMEM_VBIF:
+ dev_name = "jpeg_vbif";
+ reserve_mem_flag = false;
+ break;
+ default:
+ pr_err("%s: Invalid device : %d\n", __func__, i);
+ return;
+ }
+ /* release the device address */
+ msm_camera_put_reg_base(dma->pdev, dma->iomem_base[i], dev_name,
+ reserve_mem_flag);
+ }
+}
+
+/*
+ * msm_jpegdma_hw_get_mem_resources - Get memory resources.
+ * @pdev: Pointer to dma platform device.
+ * @dma: Pointer to dma device.
+ *
+ * Get and ioremap platform memory resources.
+ */
+int msm_jpegdma_hw_get_mem_resources(struct platform_device *pdev,
+ struct msm_jpegdma_device *dma)
+{
+ int i;
+ int ret = 0;
+ char *dev_name;
+ int reserve_mem_flag;
+
+ /* Prepare memory resources */
+ for (i = 0; i < MSM_JPEGDMA_IOMEM_LAST; i++) {
+
+ switch (i) {
+ case MSM_JPEGDMA_IOMEM_CORE:
+ dev_name = "jpeg_hw";
+ reserve_mem_flag = true;
+ break;
+ case MSM_JPEGDMA_IOMEM_VBIF:
+ dev_name = "jpeg_vbif";
+ reserve_mem_flag = false;
+ break;
+ default:
+ pr_err("%s: Invalid device : %d\n", __func__, i);
+ return -EINVAL;
+ }
+ /* get the device address base */
+ dma->iomem_base[i] =
+ msm_camera_get_reg_base(pdev, dev_name,
+ reserve_mem_flag);
+ if (!dma->iomem_base[i]) {
+ dev_err(dma->dev, "%s can not remap region\n",
+ dev_name);
+ ret = -ENODEV;
+ break;
+ }
+ }
+
+ if (ret < 0)
+ msm_jpegdma_hw_release_mem_resources(dma);
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_get_qos - Get dma qos settings from device-tree.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_get_qos(struct msm_jpegdma_device *dma)
+{
+ int i, j;
+ int ret;
+ unsigned int cnt;
+ const void *property;
+
+ property = of_get_property(dma->dev->of_node,
+ "qcom,qos-reg-settings", &cnt);
+ if (!property || !cnt) {
+ dev_dbg(dma->dev, "Missing qos settings\n");
+ return 0;
+ }
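+	/* of_get_property() returns the property length in bytes; each
+	 * register/value entry is a 32-bit cell.
+	 */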
+ cnt /= 4;
+
+ dma->qos_regs = kzalloc((sizeof(*dma->qos_regs) * cnt), GFP_KERNEL);
+ if (!dma->qos_regs)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < cnt; i += 2, j++) {
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,qos-reg-settings", i,
+ &dma->qos_regs[j].reg);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read qos reg %d\n", j);
+ goto error;
+ }
+
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,qos-reg-settings", i + 1,
+ &dma->qos_regs[j].val);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read qos setting %d\n", j);
+ goto error;
+ }
+ dev_dbg(dma->dev, "Qos idx %d, reg %x val %x\n", j,
+ dma->qos_regs[j].reg, dma->qos_regs[j].val);
+ }
+ dma->qos_regs_num = cnt;
+
+ return 0;
+error:
+ kfree(dma->qos_regs);
+ dma->qos_regs = NULL;
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_put_qos - Free dma qos settings.
+ * @dma: Pointer to dma device.
+ */
+void msm_jpegdma_hw_put_qos(struct msm_jpegdma_device *dma)
+{
+ kfree(dma->qos_regs);
+ dma->qos_regs = NULL;
+}
+
+/*
+ * msm_jpegdma_hw_get_vbif - Get dma vbif settings from device-tree.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_get_vbif(struct msm_jpegdma_device *dma)
+{
+ int i, j;
+ int ret;
+ unsigned int cnt;
+ const void *property;
+
+ property = of_get_property(dma->dev->of_node, "qcom,vbif-reg-settings",
+ &cnt);
+ if (!property || !cnt) {
+ dev_dbg(dma->dev, "Missing vbif settings\n");
+ return 0;
+ }
+ cnt /= 4;
+
+ dma->vbif_regs = kzalloc((sizeof(*dma->vbif_regs) * cnt), GFP_KERNEL);
+ if (!dma->vbif_regs)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < cnt; i += 2, j++) {
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,vbif-reg-settings", i,
+ &dma->vbif_regs[j].reg);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read vbif reg %d\n", j);
+ goto error;
+ }
+
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,vbif-reg-settings", i + 1,
+ &dma->vbif_regs[j].val);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read vbif setting %d\n", j);
+ goto error;
+ }
+
+ dev_dbg(dma->dev, "Vbif idx %d, reg %x val %x\n", j,
+ dma->vbif_regs[j].reg, dma->vbif_regs[j].val);
+ }
+ dma->vbif_regs_num = cnt;
+
+ return 0;
+error:
+ kfree(dma->vbif_regs);
+ dma->vbif_regs = NULL;
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_put_vbif - Free dma vbif settings.
+ * @dma: Pointer to dma device.
+ */
+void msm_jpegdma_hw_put_vbif(struct msm_jpegdma_device *dma)
+{
+ kfree(dma->vbif_regs);
+ dma->vbif_regs = NULL;
+}
+
+/*
+ * msm_jpegdma_hw_get_prefetch - Get dma prefetch settings from device-tree.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_get_prefetch(struct msm_jpegdma_device *dma)
+{
+ int i, j;
+ int ret;
+ unsigned int cnt;
+ const void *property;
+
+ property = of_get_property(dma->dev->of_node,
+ "qcom,prefetch-reg-settings", &cnt);
+ if (!property || !cnt) {
+ dev_dbg(dma->dev, "Missing prefetch settings\n");
+ return 0;
+ }
+ cnt /= 4;
+
+ dma->prefetch_regs = kcalloc(cnt, sizeof(*dma->prefetch_regs),
+ GFP_KERNEL);
+ if (!dma->prefetch_regs)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < cnt; i += 2, j++) {
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,prefetch-reg-settings", i,
+ &dma->prefetch_regs[j].reg);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read prefetch reg %d\n", j);
+ goto error;
+ }
+
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,prefetch-reg-settings", i + 1,
+ &dma->prefetch_regs[j].val);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read prefetch setting %d\n",
+ j);
+ goto error;
+ }
+
+ dev_dbg(dma->dev, "Prefetch idx %d, reg %x val %x\n", j,
+ dma->prefetch_regs[j].reg, dma->prefetch_regs[j].val);
+ }
+ dma->prefetch_regs_num = cnt;
+
+ return 0;
+error:
+ kfree(dma->prefetch_regs);
+ dma->prefetch_regs = NULL;
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_put_prefetch - free prefetch settings.
+ * @dma: Pointer to dma device.
+ */
+void msm_jpegdma_hw_put_prefetch(struct msm_jpegdma_device *dma)
+{
+ kfree(dma->prefetch_regs);
+ dma->prefetch_regs = NULL;
+}
+
+/*
+ * msm_jpegdma_hw_get_capabilities - Power up the dma hw briefly to read its
+ * capabilities (number of pipes).
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_get_capabilities(struct msm_jpegdma_device *dma)
+{
+ int ret = 0;
+
+ mutex_lock(&dma->lock);
+
+ /* enable all the regulators */
+ ret = msm_camera_regulator_enable(dma->dma_vdd,
+ dma->num_reg, true);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to enable regulators\n");
+ goto error_regulators_get;
+ }
+
+ /* enable all the clocks */
+ ret = msm_camera_clk_enable(&dma->pdev->dev,
+ dma->jpeg_clk_info, dma->clk,
+ dma->num_clk, true);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to enable clocks\n");
+ goto error_clocks;
+ }
+
+ dma->hw_num_pipes = msm_jpegdma_hw_get_num_pipes(dma);
+
+ /* disable all the clocks */
+ msm_camera_clk_enable(&dma->pdev->dev,
+ dma->jpeg_clk_info, dma->clk,
+ dma->num_clk, false);
+ /* disable all the regulators */
+ msm_camera_regulator_enable(dma->dma_vdd, dma->num_reg, false);
+
+ mutex_unlock(&dma->lock);
+
+ return 0;
+
+error_clocks:
+ msm_camera_regulator_enable(dma->dma_vdd, dma->num_reg, false);
+error_regulators_get:
+ mutex_unlock(&dma->lock);
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_get - Get dma hw for performing any hw operation.
+ * @dma: Pointer to dma device.
+ *
+ * Prepare dma hw for operation. The reference count is protected by
+ * the dma device mutex.
+ */
+int msm_jpegdma_hw_get(struct msm_jpegdma_device *dma)
+{
+ int ret;
+
+ mutex_lock(&dma->lock);
+ if (dma->ref_count == 0) {
+
+ dev_dbg(dma->dev, "msm_jpegdma_hw_get E\n");
+ /* enable all the regulators */
+ ret = msm_camera_regulator_enable(dma->dma_vdd,
+ dma->num_reg, true);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to enable regulators\n");
+ goto error_regulators_get;
+ }
+
+ /* enable all the clocks */
+ ret = msm_camera_clk_enable(&dma->pdev->dev,
+ dma->jpeg_clk_info, dma->clk,
+ dma->num_clk, true);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to enable clocks\n");
+ goto error_clocks;
+ }
+
+ /* update the bus vector with valid bw */
+ msm_camera_update_bus_vector(dma->bus_client, 1);
+ msm_jpegdma_hw_config_qos(dma);
+ msm_jpegdma_hw_config_vbif(dma);
+
+ msm_jpegdma_hw_enable_irq(dma);
+
+ ret = msm_jpegdma_hw_reset(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to reset hw\n");
+ goto error_hw_reset;
+ }
+ msm_jpegdma_hw_config_qos(dma);
+ msm_jpegdma_hw_config_mmu_prefetch(dma, NULL, NULL);
+ msm_jpegdma_hw_enable_irq(dma);
+ }
+ dma->ref_count++;
+ dev_dbg(dma->dev, "msm_jpegdma_hw_get X\n");
+ mutex_unlock(&dma->lock);
+
+ return 0;
+
+error_hw_reset:
+ msm_jpegdma_hw_disable_irq(dma);
+ msm_camera_clk_enable(&dma->pdev->dev, dma->jpeg_clk_info,
+ dma->clk, dma->num_clk, false);
+error_clocks:
+ msm_camera_regulator_enable(dma->dma_vdd, dma->num_reg, false);
+error_regulators_get:
+ mutex_unlock(&dma->lock);
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_put - Put dma hw.
+ * @dma: Pointer to dma device.
+ *
+ * Release dma hw. The reference count is protected by
+ * the dma device mutex.
+ */
+void msm_jpegdma_hw_put(struct msm_jpegdma_device *dma)
+{
+ mutex_lock(&dma->lock);
+ WARN_ON(dma->ref_count == 0);
+
+ if (--dma->ref_count == 0) {
+ msm_jpegdma_hw_halt(dma);
+ msm_jpegdma_hw_disable_irq(dma);
+ /* release the irq */
+ msm_camera_unregister_irq(dma->pdev,
+ dma->irq, dma);
+ /* update the bus vector with zeroth vector */
+ msm_camera_update_bus_vector(dma->bus_client, 0);
+ /* disable all the clocks */
+ msm_camera_clk_enable(&dma->pdev->dev, dma->jpeg_clk_info,
+ dma->clk, dma->num_clk, false);
+ /* disable all the regulators */
+ msm_camera_regulator_enable(dma->dma_vdd, dma->num_reg, false);
+ }
+ /* Reset clock rate, need to be updated on next processing */
+ dma->active_clock_rate = -1;
+ mutex_unlock(&dma->lock);
+}
+
+/*
+ * msm_jpegdma_hw_attach_iommu - Attach iommu to jpeg dma engine.
+ * @dma: Pointer to dma device.
+ *
+ * The iommu attach reference count is protected by
+ * the dma device mutex.
+ */
+static int msm_jpegdma_hw_attach_iommu(struct msm_jpegdma_device *dma)
+{
+ int ret;
+
+ mutex_lock(&dma->lock);
+
+ if (dma->iommu_attached_cnt == UINT_MAX) {
+ dev_err(dma->dev, "Max count reached! can not attach iommu\n");
+		ret = -EINVAL;
+		goto error;
+ }
+
+ if (dma->iommu_attached_cnt == 0) {
+ ret = cam_smmu_get_handle(MSM_JPEGDMA_SMMU_NAME,
+ &dma->iommu_hndl);
+ if (ret < 0) {
+ dev_err(dma->dev, "Smmu get handle failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ ret = cam_smmu_ops(dma->iommu_hndl, CAM_SMMU_ATTACH);
+ if (ret < 0) {
+ dev_err(dma->dev, "Can not attach smmu.\n");
+ goto error_attach;
+ }
+ }
+ dma->iommu_attached_cnt++;
+ mutex_unlock(&dma->lock);
+
+ return 0;
+error_attach:
+ cam_smmu_destroy_handle(dma->iommu_hndl);
+error:
+ mutex_unlock(&dma->lock);
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_detach_iommu - Detach iommu from jpeg dma engine.
+ * @dma: Pointer to dma device.
+ *
+ * The iommu detach reference count is protected by
+ * the dma device mutex.
+ */
+static void msm_jpegdma_hw_detach_iommu(struct msm_jpegdma_device *dma)
+{
+ mutex_lock(&dma->lock);
+ if (dma->iommu_attached_cnt == 0) {
+ dev_err(dma->dev, "There is no attached device\n");
+ mutex_unlock(&dma->lock);
+ return;
+ }
+
+ if (--dma->iommu_attached_cnt == 0) {
+ cam_smmu_ops(dma->iommu_hndl, CAM_SMMU_DETACH);
+ cam_smmu_destroy_handle(dma->iommu_hndl);
+ }
+ mutex_unlock(&dma->lock);
+}
+
+/*
+ * msm_jpegdma_hw_map_buffer - Map buffer to dma hw mmu.
+ * @dma: Pointer to dma device.
+ * @fd: Ion fd.
+ * @buf: dma buffer handle, for storing mapped buffer information.
+ *
+ * It will map ion fd to dma hw smmu.
+ */
+int msm_jpegdma_hw_map_buffer(struct msm_jpegdma_device *dma, int fd,
+ struct msm_jpegdma_buf_handle *buf)
+{
+ int ret;
+
+ if (!dma || fd < 0)
+ return -EINVAL;
+
+ ret = msm_jpegdma_hw_attach_iommu(dma);
+ if (ret < 0)
+ goto error;
+
+ buf->dma = dma;
+ buf->fd = fd;
+
+ ret = cam_smmu_get_phy_addr(dma->iommu_hndl, buf->fd,
+ CAM_SMMU_MAP_RW, &buf->addr, &buf->size);
+ if (ret < 0) {
+ dev_err(dma->dev, "Can not get physical address\n");
+ goto error_get_phy;
+ }
+
+ return buf->size;
+
+error_get_phy:
+ msm_jpegdma_hw_detach_iommu(dma);
+error:
+ return -ENOMEM;
+}
+
+/*
+ * msm_jpegdma_hw_unmap_buffer - Unmap buffer from dma hw mmu.
+ * @buf: dma buffer handle, for storing mapped buffer information.
+ */
+void msm_jpegdma_hw_unmap_buffer(struct msm_jpegdma_buf_handle *buf)
+{
+ if (buf->size && buf->dma) {
+ cam_smmu_put_phy_addr(buf->dma->iommu_hndl,
+ buf->fd);
+ msm_jpegdma_hw_detach_iommu(buf->dma);
+ buf->size = 0;
+ }
+ buf->fd = -1;
+ buf->dma = NULL;
+}
diff --git a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_hw.h b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_hw.h
new file mode 100644
index 000000000000..60c257703464
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_hw.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_JPEG_DMA_HW_H__
+#define __MSM_JPEG_DMA_HW_H__
+
+#include "msm_jpeg_dma_dev.h"
+
+int msm_jpegdma_hw_check_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size_cfg);
+
+int msm_jpegdma_hw_set_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size_cfg,
+ struct msm_jpegdma_plane_config *plane_cfg);
+
+int msm_jpegdma_hw_start(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_addr *addr,
+ struct msm_jpegdma_plane *plane,
+ struct msm_jpegdma_speed *speed);
+
+int msm_jpegdma_hw_abort(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_update_bus_data(struct msm_jpegdma_device *dma,
+ u64 ab, u64 ib);
+
+int msm_jpegdma_hw_handle_irq(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_request_irq(struct platform_device *pdev,
+ struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_release_irq(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_release_mem_resources(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_mem_resources(struct platform_device *pdev,
+ struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_regulators(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_put_regulators(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_clocks(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_put_clocks(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_qos(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_put_qos(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_vbif(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_put_vbif(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_prefetch(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_put_prefetch(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_capabilities(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_put(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_map_buffer(struct msm_jpegdma_device *dma, int fd,
+ struct msm_jpegdma_buf_handle *buf);
+
+void msm_jpegdma_hw_unmap_buffer(struct msm_jpegdma_buf_handle *buf);
+
+#endif /* __MSM_JPEG_DMA_HW_H__ */
diff --git a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_regs.h b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_regs.h
new file mode 100644
index 000000000000..83a79717754f
--- /dev/null
+++ b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_regs.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_JPEGDMA_REGS_H__
+#define __MSM_JPEGDMA_REGS_H__
+
+#define MSM_JPEGDMA_HW_REVISION 0x00
+#define MSM_JPEGDMA_HW_CAPABILITY 0x04
+#define MSM_JPEGDMA_HW_CAPABILITY_NUM_PIPES_BMSK 0x06
+#define MSM_JPEGDMA_HW_CAPABILITY_NUM_PIPES_SHFT 0x01
+
+#define MSM_JPEGDMA_IRQ_MASK_ADDR 0x0C
+#define MSM_JPEGDMA_IRQ_MASK_SESSION_DONE (1 << 0)
+#define MSM_JPEGDMA_IRQ_MASK_RD_BUF_DONE (1 << 1)
+#define MSM_JPEGDMA_IRQ_MASK_WR_BUF_DONE (1 << 5)
+#define MSM_JPEGDMA_IRQ_MASK_AXI_HALT (1 << 9)
+#define MSM_JPEGDMA_IRQ_MASK_RST_DONE (1 << 10)
+
+#define MSM_JPEGDMA_IRQ_STATUS 0x10
+#define MSM_JPEGDMA_IRQ_STATUS_SESSION_DONE (1 << 0)
+#define MSM_JPEGDMA_IRQ_STATUS_RD_BUF_DONE (1 << 1)
+#define MSM_JPEGDMA_IRQ_STATUS_WR_BUF_DONE (1 << 5)
+#define MSM_JPEGDMA_IRQ_STATUS_AXI_HALT (1 << 9)
+#define MSM_JPEGDMA_IRQ_STATUS_RST_DONE (1 << 10)
+
+#define MSM_JPEGDMA_IRQ_CLEAR_ADDR 0x14
+#define MSM_JPEGDMA_IRQ_CLEAR_BMSK 0xFFFFFFFF
+
+#define MSM_JPEGDMA_CORE_CFG_ADDR 0x18
+#define MSM_JPEGDMA_CMD_ADDR 0x1C
+
+#define MSM_JPEGDMA_CORE_CFG_TEST_BUS_ENABLE_SHFT 19
+#define MSM_JPEGDMA_CORE_CFG_BRIDGE_ENABLE_SHFT 6
+#define MSM_JPEGDMA_CORE_CFG_SCALE_1_ENABLE_SHFT 5
+#define MSM_JPEGDMA_CORE_CFG_SCALE_0_ENABLE_SHFT 4
+
+#define MSM_JPEGDMA_CORE_CFG_WE_1_ENABLE_SHFT 0x03
+#define MSM_JPEGDMA_CORE_CFG_WE_0_ENABLE_SHFT 0x02
+#define MSM_JPEGDMA_CORE_CFG_FE_1_ENABLE_SHFT 0x01
+#define MSM_JPEGDMA_CORE_CFG_FE_0_ENABLE_SHFT 0x00
+
+#define MSM_JPEGDMA_FE_0_CFG_ADDR 0x2C
+#define MSM_JPEGDMA_FE_1_CFG_ADDR 0x70
+#define MSM_JPEGDMA_FE_CFG_MAL_BOUNDARY_SHFT 25
+#define MSM_JPEGDMA_FE_CFG_MAL_EN_SHFT 21
+#define MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CBCR 0x03
+#define MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CR 0x02
+#define MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CB 0x01
+#define MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_Y 0x00
+#define MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT 19
+#define MSM_JPEGDMA_FE_CFG_BLOCK_WIDTH_SHFT 0x04
+#define MSM_JPEGDMA_FE_CFG_BURST_LENGTH_MAX_SHFT 0x00
+
+#define MSM_JPEGDMA_FE_RD_0_PNTR_ADDR 0x34
+#define MSM_JPEGDMA_FE_RD_1_PNTR_ADDR 0x78
+#define MSM_JPEGDMA_FE_RD_BUFFER_SIZE_0_ADDR 0x44
+#define MSM_JPEGDMA_FE_RD_BUFFER_SIZE_1_ADDR 0x88
+#define MSM_JPEGDMA_FE_RD_BUFFER_SIZE_HEIGHT_SHFT 16
+#define MSM_JPEGDMA_FE_RD_0_STRIDE_ADDR 0x48
+#define MSM_JPEGDMA_FE_RD_1_STRIDE_ADDR 0x8C
+#define MSM_JPEGDMA_FE_RD_0_HINIT_ADDR 0x4C
+#define MSM_JPEGDMA_FE_RD_1_HINIT_ADDR 0x90
+#define MSM_JPEGDMA_FE_RD_0_HINIT_INT_ADDR 0x50
+#define MSM_JPEGDMA_FE_RD_1_HINIT_INT_ADDR 0x94
+#define MSM_JPEGDMA_FE_RD_0_VINIT_INT_ADDR 0x58
+#define MSM_JPEGDMA_FE_RD_1_VINIT_INT_ADDR 0x9C
+
+#define MSM_JPEGDMA_WE_CFG_ADDR 0xB8
+#define MSM_JPEGDMA_WE_CFG_MAL_BOUNDARY_SHFT 0x08
+#define MSM_JPEGDMA_WE_CFG_MAL_EN_SHFT 0x07
+#define MSM_JPEGDMA_WE_CFG_BURST_LENGTH_MAX_SHFT 0x00
+#define MSM_JPEGDMA_WE_PLN_0_WR_PNTR_ADDR 0xBC
+#define MSM_JPEGDMA_WE_PLN_1_WR_PNTR_ADDR 0xEC
+#define MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_0_ADDR 0xC4
+#define MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_1_ADDR 0xF4
+#define MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_HEIGHT_SHFT 16
+#define MSM_JPEGDMA_WE_PLN_0_WR_STRIDE_ADDR 0xC8
+#define MSM_JPEGDMA_WE_PLN_1_WR_STRIDE_ADDR 0xF8
+#define MSM_JPEGDMA_WE_PLN_0_WR_CFG_0_ADDR 0xCC
+#define MSM_JPEGDMA_WE_PLN_1_WR_CFG_0_ADDR 0xFC
+#define MSM_JPEGDMA_WE_PLN_WR_CFG_0_BLOCKS_PER_ROW_SHFT 16
+#define MSM_JPEGDMA_WE_PLN_0_WR_CFG_1_ADDR 0xD0
+#define MSM_JPEGDMA_WE_PLN_1_WR_CFG_1_ADDR 0x100
+#define MSM_JPEGDMA_WE_PLN_WR_CFG_1_LAST_H_STEP_SHFT 16
+#define MSM_JPEGDMA_WE_PLN_0_WR_CFG_2_ADDR 0xD4
+#define MSM_JPEGDMA_WE_PLN_1_WR_CFG_2_ADDR 0x104
+#define MSM_JPEGDMA_WE_PLN_WR_CFG_2_LAST_V_STEP_SHFT 16
+#define MSM_JPEGDMA_WE_PLN_0_WR_CFG_3_ADDR 0xD8
+#define MSM_JPEGDMA_WE_PLN_1_WR_CFG_3_ADDR 0x108
+
+#define MSM_JPEGDMA_PP_0_SCALE_PHASEV_STEP_ADDR 0x19C
+#define MSM_JPEGDMA_PP_1_SCALE_PHASEV_STEP_ADDR 0x1BC
+#define MSM_JPEGDMA_PP_0_SCALE_PHASEH_STEP_ADDR 0x194
+#define MSM_JPEGDMA_PP_1_SCALE_PHASEH_STEP_ADDR 0x1B4
+#define MSM_JPEGDMA_PP_0_SCALE_CFG_ADDR 0x188
+#define MSM_JPEGDMA_PP_1_SCALE_CFG_ADDR 0x1A8
+#define MSM_JPEGDMA_PP_SCALE_CFG_VSCALE_ENABLE_SHFT 0x05
+#define MSM_JPEGDMA_PP_SCALE_CFG_HSCALE_ENABLE_SHFT 0x04
+
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN 0x190
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX 0x198
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN 0x1A4
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX 0x1AC
+
+#define MSM_JPEGDMA_CMD_CLEAR_READ_PLN_QUEUES 0x030
+#define MSM_JPEGDMA_CMD_CLEAR_WRITE_PLN_QUEUES 0x300
+
+#define MSM_HW_JPEGDMA_RESET 0x08
+#define MSM_HW_JPEGDMA_RESET_DEFAULT 0x32083
+
+#define MSM_JPEGDMA_RESET_CMD_BMSK 0xFFFFFFFF
+
+#endif /* __MSM_JPEG_DMA_REGS_H__ */
diff --git a/drivers/media/platform/msm/ais/msm.c b/drivers/media/platform/msm/ais/msm.c
new file mode 100644
index 000000000000..e8859b7db5cb
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm.c
@@ -0,0 +1,1357 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/atomic.h>
+#include <linux/videodev2.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <media/v4l2-fh.h>
+#include "msm.h"
+#include "msm_vb2.h"
+#include "msm_sd.h"
+#include "cam_hw_ops.h"
+#include <media/ais/msm_ais_buf_mgr.h>
+
+
+static struct v4l2_device *msm_v4l2_dev;
+static struct list_head ordered_sd_list;
+
+static struct pm_qos_request msm_v4l2_pm_qos_request;
+
+static struct msm_queue_head *msm_session_q;
+
+/* This variable represents the daemon status:
+ * true = daemon present (default state)
+ * false = daemon is NOT present
+ */
+bool is_daemon_status = true;
+
+/* config node event queue */
+static struct v4l2_fh *msm_eventq;
+spinlock_t msm_eventq_lock;
+
+static struct pid *msm_pid;
+spinlock_t msm_pid_lock;
+
+/*
+ * It takes 20 bytes + NULL character to write the
+ * largest decimal value of a uint64_t
+ */
+#define LOGSYNC_PACKET_SIZE 21
+
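+/* Locked queue helpers: each macro below operates on a struct
+ * msm_queue_head under its spinlock with interrupts disabled.
+ */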
+#define msm_dequeue(queue, type, member) ({ \
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node = 0; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ __q->len--; \
+ node = list_first_entry(&__q->list, \
+ type, member); \
+ if ((node) && (&node->member) && (&node->member.next)) \
+ list_del_init(&node->member); \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+ node; \
+})
+
+#define msm_delete_sd_entry(queue, type, member, q_node) ({ \
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node = 0; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ list_for_each_entry(node, &__q->list, member) \
+ if (node->sd == q_node) { \
+ __q->len--; \
+ list_del_init(&node->member); \
+ kzfree(node); \
+ break; \
+ } \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+})
+
+#define msm_delete_entry(queue, type, member, q_node) ({ \
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node = 0; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ list_for_each_entry(node, &__q->list, member) \
+ if (node == q_node) { \
+ __q->len--; \
+ list_del_init(&node->member); \
+ kzfree(node); \
+ break; \
+ } \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+})
+
+#define msm_queue_drain(queue, type, member) do { \
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ while (!list_empty(&__q->list)) { \
+ __q->len--; \
+ node = list_first_entry(&__q->list, \
+ type, member); \
+ if (node) { \
+ if (&node->member) \
+ list_del_init(&node->member); \
+ kzfree(node); \
+ } \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+} while (0)
+
+typedef int (*msm_queue_func)(void *d1, void *d2);
+#define msm_queue_traverse_action(queue, type, member, func, data) do {\
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node = 0; \
+ msm_queue_func __f = (func); \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ list_for_each_entry(node, &__q->list, member) \
+ if (node && __f) { \
+ __f(node, data); \
+ } \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+} while (0)
+
+typedef int (*msm_queue_find_func)(void *d1, void *d2);
+#define msm_queue_find(queue, type, member, func, data) ({\
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node = 0; \
+ typeof(node) __ret = NULL; \
+ msm_queue_find_func __f = (func); \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ list_for_each_entry(node, &__q->list, member) \
+ if ((__f) && __f(node, data)) { \
+ __ret = node; \
+ break; \
+ } \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+ __ret; \
+})
+
+static void msm_init_queue(struct msm_queue_head *qhead)
+{
+ if (WARN_ON(!qhead))
+ return;
+
+ INIT_LIST_HEAD(&qhead->list);
+ spin_lock_init(&qhead->lock);
+ qhead->len = 0;
+ qhead->max = 0;
+}
+
+static void msm_enqueue(struct msm_queue_head *qhead,
+ struct list_head *entry)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qhead->lock, flags);
+ qhead->len++;
+ if (qhead->len > qhead->max)
+ qhead->max = qhead->len;
+ list_add_tail(entry, &qhead->list);
+ spin_unlock_irqrestore(&qhead->lock, flags);
+}
+
+void msm_cam_copy_v4l2_subdev_fops(struct v4l2_file_operations *d1)
+{
+ *d1 = v4l2_subdev_fops;
+}
+EXPORT_SYMBOL(msm_cam_copy_v4l2_subdev_fops);
+
+static const struct v4l2_file_operations *msm_cam_get_v4l2_subdev_fops_ptr(
+ void)
+{
+ return &v4l2_subdev_fops;
+}
+
+/* index = session id */
+static inline int __msm_queue_find_session(void *d1, void *d2)
+{
+ struct msm_session *session = d1;
+
+ return (session->session_id == *(unsigned int *)d2) ? 1 : 0;
+}
+
+static inline int __msm_queue_find_stream(void *d1, void *d2)
+{
+ struct msm_stream *stream = d1;
+
+ return (stream->stream_id == *(unsigned int *)d2) ? 1 : 0;
+}
+
+static inline int __msm_queue_find_command_ack_q(void *d1, void *d2)
+{
+ struct msm_command_ack *ack = d1;
+
+ return (ack->stream_id == *(unsigned int *)d2) ? 1 : 0;
+}
+
+static void msm_pm_qos_add_request(void)
+{
+ pr_info("%s: add request", __func__);
+ pm_qos_add_request(&msm_v4l2_pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+}
+
+static void msm_pm_qos_remove_request(void)
+{
+ pr_info("%s: remove request", __func__);
+ pm_qos_remove_request(&msm_v4l2_pm_qos_request);
+}
+
+void msm_pm_qos_update_request(int val)
+{
+ pr_info("%s: update request %d", __func__, val);
+ pm_qos_update_request(&msm_v4l2_pm_qos_request, val);
+}
+
+struct msm_session *msm_session_find(unsigned int session_id)
+{
+ struct msm_session *session;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (WARN_ON(!session))
+ return NULL;
+ return session;
+}
+EXPORT_SYMBOL(msm_session_find);
+
+int msm_create_stream(unsigned int session_id,
+ unsigned int stream_id, struct vb2_queue *q)
+{
+ struct msm_session *session;
+ struct msm_stream *stream;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return -EINVAL;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ stream->stream_id = stream_id;
+ stream->vb2_q = q;
+ spin_lock_init(&stream->stream_lock);
+ msm_enqueue(&session->stream_q, &stream->list);
+ session->stream_q.len++;
+
+ INIT_LIST_HEAD(&stream->queued_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_create_stream);
+
+void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
+{
+ struct msm_session *session = NULL;
+ struct msm_stream *stream = NULL;
+ unsigned long flags;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return;
+
+ stream = msm_queue_find(&session->stream_q, struct msm_stream,
+ list, __msm_queue_find_stream, &stream_id);
+ if (!stream)
+ return;
+ spin_lock_irqsave(&(session->stream_q.lock), flags);
+ list_del_init(&stream->list);
+ session->stream_q.len--;
+ kfree(stream);
+ stream = NULL;
+ spin_unlock_irqrestore(&(session->stream_q.lock), flags);
+}
+EXPORT_SYMBOL(msm_delete_stream);
+
+static void msm_sd_unregister_subdev(struct video_device *vdev)
+{
+ struct v4l2_subdev *sd = video_get_drvdata(vdev);
+
+ sd->devnode = NULL;
+ kzfree(vdev);
+}
+
+static inline int __msm_sd_register_subdev(struct v4l2_subdev *sd)
+{
+ int rc = 0;
+ struct video_device *vdev;
+
+ if (!msm_v4l2_dev || !sd || !sd->name[0])
+ return -EINVAL;
+
+ rc = v4l2_device_register_subdev(msm_v4l2_dev, sd);
+ if (rc < 0)
+ return rc;
+
+ /* Register a device node for every subdev marked with the
+ * V4L2_SUBDEV_FL_HAS_DEVNODE flag.
+ */
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+ return rc;
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ rc = -ENOMEM;
+ goto clean_up;
+ }
+
+ video_set_drvdata(vdev, sd);
+ strlcpy(vdev->name, sd->name, sizeof(vdev->name));
+ vdev->v4l2_dev = msm_v4l2_dev;
+ vdev->fops = msm_cam_get_v4l2_subdev_fops_ptr();
+ vdev->release = msm_sd_unregister_subdev;
+ rc = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
+ sd->owner);
+ if (rc < 0) {
+ kzfree(vdev);
+ goto clean_up;
+ }
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ sd->entity.info.dev.major = VIDEO_MAJOR;
+ sd->entity.info.dev.minor = vdev->minor;
+ sd->entity.name = video_device_node_name(vdev);
+#endif
+ sd->devnode = vdev;
+ return 0;
+
+clean_up:
+ if (sd->devnode)
+ video_unregister_device(sd->devnode);
+ return rc;
+}
+
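+/* Insert the subdev so that the list stays sorted by close_seq in
+ * ascending order; msm_close() walks this order when shutting down
+ * subdevs.
+ */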
+static void msm_add_sd_in_position(struct msm_sd_subdev *msm_subdev,
+ struct list_head *sd_list)
+{
+ struct msm_sd_subdev *temp_sd;
+
+ list_for_each_entry(temp_sd, sd_list, list) {
+ if (msm_subdev->close_seq < temp_sd->close_seq) {
+ list_add_tail(&msm_subdev->list, &temp_sd->list);
+ return;
+ }
+ }
+ list_add_tail(&msm_subdev->list, sd_list);
+}
+
+int msm_sd_register(struct msm_sd_subdev *msm_subdev)
+{
+ if (WARN_ON(!msm_subdev))
+ return -EINVAL;
+
+ if (WARN_ON(!msm_v4l2_dev) || WARN_ON(!msm_v4l2_dev->dev))
+ return -EIO;
+
+ msm_add_sd_in_position(msm_subdev, &ordered_sd_list);
+ return __msm_sd_register_subdev(&msm_subdev->sd);
+}
+EXPORT_SYMBOL(msm_sd_register);
+
+int msm_sd_unregister(struct msm_sd_subdev *msm_subdev)
+{
+ if (WARN_ON(!msm_subdev))
+ return -EINVAL;
+
+ v4l2_device_unregister_subdev(&msm_subdev->sd);
+ return 0;
+}
+EXPORT_SYMBOL(msm_sd_unregister);
+
+static struct v4l2_subdev *msm_sd_find(const char *name)
+{
+ unsigned long flags;
+ struct v4l2_subdev *subdev = NULL;
+ struct v4l2_subdev *subdev_out = NULL;
+
+ spin_lock_irqsave(&msm_v4l2_dev->lock, flags);
+ if (!list_empty(&msm_v4l2_dev->subdevs)) {
+ list_for_each_entry(subdev, &msm_v4l2_dev->subdevs, list)
+ if (!strcmp(name, subdev->name)) {
+ subdev_out = subdev;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&msm_v4l2_dev->lock, flags);
+
+ return subdev_out;
+}
+
+int msm_create_session(unsigned int session_id, struct video_device *vdev)
+{
+ struct msm_session *session = NULL;
+
+ if (!msm_session_q) {
+ pr_err("%s : session queue not available Line %d\n",
+ __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (session) {
+ pr_err("%s: Session exist session_id=%d\n",
+ __func__, session_id);
+ return -EINVAL;
+ }
+
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (!session)
+ return -ENOMEM;
+
+ session->session_id = session_id;
+ session->event_q.vdev = vdev;
+ msm_init_queue(&session->command_ack_q);
+ msm_init_queue(&session->stream_q);
+ msm_enqueue(msm_session_q, &session->list);
+ mutex_init(&session->lock);
+ mutex_init(&session->lock_q);
+ mutex_init(&session->close_lock);
+ return 0;
+}
+EXPORT_SYMBOL(msm_create_session);
+
+int msm_create_command_ack_q(unsigned int session_id, unsigned int stream_id)
+{
+ struct msm_session *session;
+ struct msm_command_ack *cmd_ack;
+
+ if (!msm_session_q) {
+ pr_err("%s : Session queue not available Line %d\n",
+ __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session) {
+ pr_err("%s : Session not found Line %d\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ mutex_lock(&session->lock);
+ cmd_ack = kzalloc(sizeof(*cmd_ack), GFP_KERNEL);
+ if (!cmd_ack) {
+ mutex_unlock(&session->lock);
+ pr_err("%s : memory not available Line %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ msm_init_queue(&cmd_ack->command_q);
+ INIT_LIST_HEAD(&cmd_ack->list);
+ init_completion(&cmd_ack->wait_complete);
+ cmd_ack->stream_id = stream_id;
+
+ msm_enqueue(&session->command_ack_q, &cmd_ack->list);
+ session->command_ack_q.len++;
+ mutex_unlock(&session->lock);
+ return 0;
+}
+EXPORT_SYMBOL(msm_create_command_ack_q);
+
+void msm_delete_command_ack_q(unsigned int session_id, unsigned int stream_id)
+{
+ struct msm_session *session;
+ struct msm_command_ack *cmd_ack;
+ unsigned long flags;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return;
+ mutex_lock(&session->lock);
+
+ cmd_ack = msm_queue_find(&session->command_ack_q,
+ struct msm_command_ack, list, __msm_queue_find_command_ack_q,
+ &stream_id);
+ if (!cmd_ack) {
+ mutex_unlock(&session->lock);
+ return;
+ }
+
+ msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
+
+ spin_lock_irqsave(&(session->command_ack_q.lock), flags);
+ list_del_init(&cmd_ack->list);
+ kzfree(cmd_ack);
+ session->command_ack_q.len--;
+ spin_unlock_irqrestore(&(session->command_ack_q.lock), flags);
+ mutex_unlock(&session->lock);
+}
+EXPORT_SYMBOL(msm_delete_command_ack_q);
+
+static inline int __msm_sd_close_subdevs(struct msm_sd_subdev *msm_sd,
+ struct msm_sd_close_ioctl *sd_close)
+{
+ struct v4l2_subdev *sd;
+
+ sd = &msm_sd->sd;
+ pr_debug("%s: Shutting down subdev %s", __func__, sd->name);
+
+ v4l2_subdev_call(sd, core, ioctl, MSM_SD_SHUTDOWN, sd_close);
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ return 0;
+}
+
+static inline int __msm_sd_notify_freeze_subdevs(struct msm_sd_subdev *msm_sd,
+ int enable)
+{
+ struct v4l2_subdev *sd;
+
+ sd = &msm_sd->sd;
+
+ if (enable)
+ v4l2_subdev_call(sd, core, ioctl, MSM_SD_NOTIFY_FREEZE, NULL);
+ else
+ v4l2_subdev_call(sd, core, ioctl, MSM_SD_UNNOTIFY_FREEZE, NULL);
+
+ return 0;
+}
+
+static inline int __msm_destroy_session_streams(void *d1, void *d2)
+{
+ struct msm_stream *stream = d1;
+ unsigned long flags;
+
+ pr_err("%s: Error: Destroyed list is not empty\n", __func__);
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ INIT_LIST_HEAD(&stream->queued_list);
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return 0;
+}
+
+static void msm_destroy_session_streams(struct msm_session *session)
+{
+
+ if (!session)
+ return;
+
+ msm_queue_traverse_action(&session->stream_q, struct msm_stream, list,
+ __msm_destroy_session_streams, NULL);
+
+ msm_queue_drain(&session->stream_q, struct msm_stream, list);
+}
+
+static inline int __msm_remove_session_cmd_ack_q(void *d1, void *d2)
+{
+ struct msm_command_ack *cmd_ack = d1;
+
+ if (!(&cmd_ack->command_q))
+ return 0;
+
+ msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
+
+ return 0;
+}
+
+static void msm_remove_session_cmd_ack_q(struct msm_session *session)
+{
+ if ((!session) || !(&session->command_ack_q))
+ return;
+
+ mutex_lock(&session->lock);
+ /* to ensure error handling purpose, it needs to detach all subdevs
+ * which are being connected to streams
+ */
+ msm_queue_traverse_action(&session->command_ack_q,
+ struct msm_command_ack, list,
+ __msm_remove_session_cmd_ack_q, NULL);
+
+ msm_queue_drain(&session->command_ack_q, struct msm_command_ack, list);
+
+ mutex_unlock(&session->lock);
+}
+
+int msm_destroy_session(unsigned int session_id)
+{
+ struct msm_session *session;
+ struct v4l2_subdev *buf_mgr_subdev;
+ struct msm_sd_close_ioctl session_info;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return -EINVAL;
+
+ msm_destroy_session_streams(session);
+ msm_remove_session_cmd_ack_q(session);
+ mutex_destroy(&session->lock);
+ mutex_destroy(&session->lock_q);
+ mutex_destroy(&session->close_lock);
+ msm_delete_entry(msm_session_q, struct msm_session,
+ list, session);
+ buf_mgr_subdev = msm_sd_find("msm_buf_mngr");
+ if (buf_mgr_subdev) {
+ session_info.session = session_id;
+ session_info.stream = 0;
+ v4l2_subdev_call(buf_mgr_subdev, core, ioctl,
+ MSM_SD_SHUTDOWN, &session_info);
+ } else {
+ pr_err("%s: Buff manger device node is NULL\n", __func__);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_destroy_session);
+
+static int __msm_close_destry_session_notify_apps(void *d1, void *d2)
+{
+ struct v4l2_event event;
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event.u.data[0];
+ struct msm_session *session = d1;
+
+ event.type = MSM_CAMERA_V4L2_EVENT_TYPE;
+ event.id = MSM_CAMERA_MSM_NOTIFY;
+ event_data->command = MSM_CAMERA_PRIV_SHUTDOWN;
+
+ v4l2_event_queue(session->event_q.vdev, &event);
+
+ return 0;
+}
+
+static int __msm_wakeup_all_cmdack_session_stream(void *d1, void *d2)
+{
+ struct msm_stream *stream = d1;
+ struct msm_session *session = d2;
+ struct msm_command_ack *cmd_ack = NULL;
+ unsigned long spin_flags = 0;
+
+ cmd_ack = msm_queue_find(&session->command_ack_q,
+ struct msm_command_ack, list,
+ __msm_queue_find_command_ack_q,
+ &stream->stream_id);
+ if (cmd_ack) {
+ spin_lock_irqsave(&(session->command_ack_q.lock),
+ spin_flags);
+ complete(&cmd_ack->wait_complete);
+ spin_unlock_irqrestore(&(session->command_ack_q.lock),
+ spin_flags);
+ }
+ return 0;
+}
+
+static int __msm_close_wakeup_all_cmdack_session(void *d1, void *d2)
+{
+ struct msm_stream *stream = NULL;
+ struct msm_session *session = d1;
+
+ stream = msm_queue_find(&session->stream_q, struct msm_stream,
+ list, __msm_wakeup_all_cmdack_session_stream, d1);
+ return 0;
+}
+
+static long msm_private_ioctl(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ int rc = 0;
+ struct msm_v4l2_event_data *event_data = arg;
+ struct v4l2_event event;
+ struct msm_session *session;
+ unsigned int session_id;
+ unsigned int stream_id;
+ unsigned long spin_flags = 0;
+ struct msm_sd_subdev *msm_sd;
+
+ if (cmd == MSM_CAM_V4L2_IOCTL_DAEMON_DISABLED) {
+ is_daemon_status = false;
+ return 0;
+ }
+
+ memset(&event, 0, sizeof(struct v4l2_event));
+ session_id = event_data->session_id;
+ stream_id = event_data->stream_id;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+
+ if (!session)
+ return -EINVAL;
+
+ switch (cmd) {
+ case MSM_CAM_V4L2_IOCTL_NOTIFY: {
+ if (WARN_ON(!session->event_q.vdev)) {
+ rc = -EFAULT;
+ break;
+ }
+ event.type = event_data->v4l2_event_type;
+ event.id = event_data->v4l2_event_id;
+ memcpy(&event.u.data, event_data,
+ sizeof(struct msm_v4l2_event_data));
+ v4l2_event_queue(session->event_q.vdev,
+ &event);
+ }
+ break;
+
+ case MSM_CAM_V4L2_IOCTL_CMD_ACK: {
+ struct msm_command_ack *cmd_ack;
+ struct msm_command *ret_cmd;
+
+ ret_cmd = kzalloc(sizeof(*ret_cmd), GFP_KERNEL);
+ if (!ret_cmd) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ cmd_ack = msm_queue_find(&session->command_ack_q,
+ struct msm_command_ack, list,
+ __msm_queue_find_command_ack_q,
+ &stream_id);
+ if (WARN_ON(!cmd_ack)) {
+ kzfree(ret_cmd);
+ rc = -EFAULT;
+ break;
+ }
+
+ spin_lock_irqsave(&(session->command_ack_q.lock),
+ spin_flags);
+ event.type = event_data->v4l2_event_type;
+ event.id = event_data->v4l2_event_id;
+ memcpy(&event.u.data, event_data,
+ sizeof(struct msm_v4l2_event_data));
+ memcpy(&ret_cmd->event, &event, sizeof(struct v4l2_event));
+ msm_enqueue(&cmd_ack->command_q, &ret_cmd->list);
+ complete(&cmd_ack->wait_complete);
+ spin_unlock_irqrestore(&(session->command_ack_q.lock),
+ spin_flags);
+ }
+ break;
+
+ case MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG: {
+ if (event_data->status) {
+ pr_err("%s:Notifying subdevs about potential sof freeze\n",
+ __func__);
+ } else {
+ pr_err("%s:Notifying subdevs about sof recover\n",
+ __func__);
+ }
+
+ if (!list_empty(&msm_v4l2_dev->subdevs)) {
+ list_for_each_entry(msm_sd, &ordered_sd_list, list)
+ __msm_sd_notify_freeze_subdevs(msm_sd,
+ event_data->status);
+ }
+ }
+ break;
+
+ case MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR:
+ /* send v4l2_event to HAL next*/
+ msm_queue_traverse_action(msm_session_q,
+ struct msm_session, list,
+ __msm_close_destry_session_notify_apps, NULL);
+ break;
+
+ default:
+ rc = -ENOTTY;
+ break;
+ }
+
+ return rc;
+}
+
+static int msm_unsubscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+static int msm_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_subscribe(fh, sub, 5, NULL);
+}
+
+static const struct v4l2_ioctl_ops g_msm_ioctl_ops = {
+ .vidioc_subscribe_event = msm_subscribe_event,
+ .vidioc_unsubscribe_event = msm_unsubscribe_event,
+ .vidioc_default = msm_private_ioctl,
+};
+
+static unsigned int msm_poll(struct file *f,
+ struct poll_table_struct *pll_table)
+{
+ int rc = 0;
+ struct v4l2_fh *eventq = f->private_data;
+
+ if (WARN_ON(!eventq))
+ return 0;
+
+ poll_wait(f, &eventq->wait, pll_table);
+
+ if (v4l2_event_pending(eventq))
+ rc = POLLIN | POLLRDNORM;
+
+ return rc;
+}
+
+static void msm_print_event_error(struct v4l2_event *event)
+{
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event->u.data[0];
+
+ pr_err("Evt_type=%x Evt_id=%d Evt_cmd=%x\n", event->type,
+ event->id, event_data->command);
+ pr_err("Evt_session_id=%d Evt_stream_id=%d Evt_arg=%d\n",
+ event_data->session_id, event_data->stream_id,
+ event_data->arg_value);
+}
+
+/* something seriously wrong if msm_close is triggered
+ * !!! user space imaging server is shutdown !!!
+ */
+int msm_post_event(struct v4l2_event *event, int timeout)
+{
+ int rc = 0;
+ struct video_device *vdev;
+ struct msm_session *session;
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event->u.data[0];
+ struct msm_command_ack *cmd_ack;
+ struct msm_command *cmd;
+ int session_id, stream_id;
+ unsigned long flags = 0;
+
+ session_id = event_data->session_id;
+ stream_id = event_data->stream_id;
+
+ spin_lock_irqsave(&msm_eventq_lock, flags);
+ if (!msm_eventq) {
+ spin_unlock_irqrestore(&msm_eventq_lock, flags);
+ pr_err("%s : msm event queue not available Line %d\n",
+ __func__, __LINE__);
+ return -ENODEV;
+ }
+ spin_unlock_irqrestore(&msm_eventq_lock, flags);
+
+ vdev = msm_eventq->vdev;
+
+ /* send to imaging server and wait for ACK */
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (WARN_ON(!session)) {
+ pr_err("%s : session not found Line %d\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+ mutex_lock(&session->lock);
+ cmd_ack = msm_queue_find(&session->command_ack_q,
+ struct msm_command_ack, list,
+ __msm_queue_find_command_ack_q, &stream_id);
+ if (WARN_ON(!cmd_ack)) {
+ mutex_unlock(&session->lock);
+ pr_err("%s : cmd_ack not found Line %d\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+
+ /* re-init wait_complete */
+ reinit_completion(&cmd_ack->wait_complete);
+
+ v4l2_event_queue(vdev, event);
+
+ if (timeout < 0) {
+ mutex_unlock(&session->lock);
+ pr_debug("%s : timeout cannot be negative Line %d\n",
+ __func__, __LINE__);
+ return rc;
+ }
+
+ /* should wait on session based condition */
+ rc = wait_for_completion_timeout(&cmd_ack->wait_complete,
+ msecs_to_jiffies(timeout));
+
+ if (list_empty_careful(&cmd_ack->command_q.list)) {
+ if (!rc) {
+ pr_err("%s: Timed out\n", __func__);
+ msm_print_event_error(event);
+ mutex_unlock(&session->lock);
+ return -ETIMEDOUT;
+ }
+ pr_err("%s: Error: No timeout but list empty!", __func__);
+ msm_print_event_error(event);
+ mutex_unlock(&session->lock);
+ return -EINVAL;
+ }
+
+ cmd = msm_dequeue(&cmd_ack->command_q,
+ struct msm_command, list);
+ if (!cmd) {
+ mutex_unlock(&session->lock);
+ pr_err("%s : cmd dequeue failed Line %d\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ event_data = (struct msm_v4l2_event_data *)cmd->event.u.data;
+
+ /* compare cmd_ret and event */
+ if (WARN_ON(event->type != cmd->event.type) ||
+ WARN_ON(event->id != cmd->event.id)) {
+ pr_err("%s : Either event type or id didnot match Line %d\n",
+ __func__, __LINE__);
+ pr_err("%s : event->type %d event->id %d\n", __func__,
+ event->type, event->id);
+ pr_err("%s : cmd->event.type %d cmd->event.id %d\n", __func__,
+ cmd->event.type, cmd->event.id);
+ rc = -EINVAL;
+ }
+
+ *event = cmd->event;
+
+ kzfree(cmd);
+ mutex_unlock(&session->lock);
+ return rc;
+}
+EXPORT_SYMBOL(msm_post_event);
+
+static int msm_close(struct file *filep)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ struct msm_sd_close_ioctl sd_close;
+ struct msm_sd_subdev *msm_sd;
+
+ /* stop all hardware blocks immediately */
+ if (!list_empty(&msm_v4l2_dev->subdevs))
+ list_for_each_entry(msm_sd, &ordered_sd_list, list)
+ __msm_sd_close_subdevs(msm_sd, &sd_close);
+
+ /* remove msm_v4l2_pm_qos_request */
+ msm_pm_qos_remove_request();
+
+ /* send v4l2_event to HAL next*/
+ msm_queue_traverse_action(msm_session_q, struct msm_session, list,
+ __msm_close_destry_session_notify_apps, NULL);
+
+ msm_queue_traverse_action(msm_session_q, struct msm_session, list,
+ __msm_close_wakeup_all_cmdack_session, NULL);
+
+ spin_lock_irqsave(&msm_eventq_lock, flags);
+ msm_eventq = NULL;
+ spin_unlock_irqrestore(&msm_eventq_lock, flags);
+ v4l2_fh_release(filep);
+
+ spin_lock_irqsave(&msm_pid_lock, flags);
+ put_pid(msm_pid);
+ msm_pid = NULL;
+ spin_unlock_irqrestore(&msm_pid_lock, flags);
+
+ atomic_set(&pvdev->opened, 0);
+
+ return rc;
+}
+
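+/* Swap two list entries; assumes l1 immediately precedes l2 so that
+ * after the swap l2 comes first.
+ */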
+static inline void msm_list_switch(struct list_head *l1,
+ struct list_head *l2)
+{
+ l1->next = l2->next;
+ l2->prev = l1->prev;
+ l1->prev->next = l2;
+ l2->next->prev = l1;
+ l1->prev = l2;
+ l2->next = l1;
+}
+
+static int msm_open(struct file *filep)
+{
+ int rc;
+ unsigned long flags;
+ struct msm_video_device *pvdev = video_drvdata(filep);
+
+ if (WARN_ON(!pvdev))
+ return -EIO;
+
+ /* !!! only ONE open is allowed !!! */
+ if (atomic_read(&pvdev->opened))
+ return -EBUSY;
+
+ atomic_set(&pvdev->opened, 1);
+
+ spin_lock_irqsave(&msm_pid_lock, flags);
+ msm_pid = get_pid(task_pid(current));
+ spin_unlock_irqrestore(&msm_pid_lock, flags);
+
+ /* create event queue */
+ rc = v4l2_fh_open(filep);
+ if (rc < 0)
+ return rc;
+
+ spin_lock_irqsave(&msm_eventq_lock, flags);
+ msm_eventq = filep->private_data;
+ spin_unlock_irqrestore(&msm_eventq_lock, flags);
+
+ /* register msm_v4l2_pm_qos_request */
+ msm_pm_qos_add_request();
+
+ return rc;
+}
+
+static struct v4l2_file_operations msm_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_open,
+ .poll = msm_poll,
+ .release = msm_close,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = video_ioctl2,
+#endif
+};
+
+struct msm_stream *msm_get_stream(unsigned int session_id,
+ unsigned int stream_id)
+{
+ struct msm_session *session;
+ struct msm_stream *stream;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return ERR_PTR(-EINVAL);
+
+ stream = msm_queue_find(&session->stream_q, struct msm_stream,
+ list, __msm_queue_find_stream, &stream_id);
+
+ if (!stream)
+ return ERR_PTR(-EINVAL);
+
+ return stream;
+}
+EXPORT_SYMBOL(msm_get_stream);
+
+struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id,
+ unsigned int stream_id)
+{
+ struct msm_session *session;
+ struct msm_stream *stream;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return NULL;
+
+ stream = msm_queue_find(&session->stream_q, struct msm_stream,
+ list, __msm_queue_find_stream, &stream_id);
+ if (!stream)
+ return NULL;
+
+ return stream->vb2_q;
+}
+EXPORT_SYMBOL(msm_get_stream_vb2q);
+
+struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q)
+{
+ struct msm_session *session;
+ struct msm_stream *stream;
+ unsigned long flags1;
+ unsigned long flags2;
+
+ spin_lock_irqsave(&msm_session_q->lock, flags1);
+ list_for_each_entry(session, &(msm_session_q->list), list) {
+ spin_lock_irqsave(&(session->stream_q.lock), flags2);
+ list_for_each_entry(
+ stream, &(session->stream_q.list), list) {
+ if (stream->vb2_q == q) {
+ spin_unlock_irqrestore
+ (&(session->stream_q.lock), flags2);
+ spin_unlock_irqrestore
+ (&msm_session_q->lock, flags1);
+ return stream;
+ }
+ }
+ spin_unlock_irqrestore(&(session->stream_q.lock), flags2);
+ }
+ spin_unlock_irqrestore(&msm_session_q->lock, flags1);
+ return NULL;
+}
+EXPORT_SYMBOL(msm_get_stream_from_vb2q);
+
+#ifdef CONFIG_COMPAT
+long msm_copy_camera_private_ioctl_args(unsigned long arg,
+ struct msm_camera_private_ioctl_arg *k_ioctl,
+ void __user **tmp_compat_ioctl_ptr)
+{
+ struct msm_camera_private_ioctl_arg up_ioctl;
+
+ if (WARN_ON(!arg || !k_ioctl || !tmp_compat_ioctl_ptr))
+ return -EIO;
+
+ if (copy_from_user(&up_ioctl,
+ (struct msm_camera_private_ioctl_arg *)arg,
+ sizeof(struct msm_camera_private_ioctl_arg)))
+ return -EFAULT;
+
+ k_ioctl->id = up_ioctl.id;
+ k_ioctl->size = up_ioctl.size;
+ k_ioctl->result = up_ioctl.result;
+ k_ioctl->reserved = up_ioctl.reserved;
+ *tmp_compat_ioctl_ptr = compat_ptr(up_ioctl.ioctl_ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_copy_camera_private_ioctl_args);
+#endif
+
+static void msm_sd_notify(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg)
+{
+ int rc = 0;
+ struct v4l2_subdev *subdev = NULL;
+
+ if (WARN_ON(!sd) || WARN_ON(!arg))
+ return;
+
+ /* Check if subdev exists before processing*/
+ if (!msm_sd_find(sd->name))
+ return;
+
+ switch (notification) {
+ case MSM_SD_NOTIFY_GET_SD: {
+ struct msm_sd_req_sd *get_sd = arg;
+
+ get_sd->subdev = msm_sd_find(get_sd->name);
+ /* TODO: might need to add ref count on ret_sd */
+ }
+ break;
+
+ case MSM_SD_NOTIFY_PUT_SD: {
+ struct msm_sd_req_sd *put_sd = arg;
+
+ subdev = msm_sd_find(put_sd->name);
+ }
+ break;
+
+ case MSM_SD_NOTIFY_REQ_CB: {
+ struct msm_sd_req_vb2_q *req_sd = arg;
+
+ rc = msm_vb2_request_cb(req_sd);
+ if (rc < 0)
+ return;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
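+/*
+ * debugfs write handler for the logsync node: userspace writes a log
+ * sequence number, which is only parsed and printed here, presumably to
+ * help correlate kernel and userspace logs.
+ */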
+static ssize_t write_logsync(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char lbuf[LOGSYNC_PACKET_SIZE] = {0};
+ uint64_t seq_num = 0;
+
+ if (copy_from_user(lbuf, buf, min(count, sizeof(lbuf) - 1)))
+ return -EFAULT;
+
+ if (kstrtoull(lbuf, 0, &seq_num) < 0)
+ pr_err("LOGSYNC (Kernel): Bad or malformed sequence number\n");
+ else
+ pr_debug("LOGSYNC (Kernel): seq_num = %llu\n", seq_num);
+
+ return count;
+}
+
+
+static const struct file_operations logsync_fops = {
+ .write = write_logsync,
+};
+
+static int msm_probe(struct platform_device *pdev)
+{
+ struct msm_video_device *pvdev = NULL;
+ static struct dentry *cam_debugfs_root;
+ int rc = 0;
+
+ msm_v4l2_dev = kzalloc(sizeof(*msm_v4l2_dev),
+ GFP_KERNEL);
+ if (!msm_v4l2_dev) {
+ rc = -ENOMEM;
+ goto probe_end;
+ }
+
+ pvdev = kzalloc(sizeof(struct msm_video_device),
+ GFP_KERNEL);
+ if (!pvdev) {
+ rc = -ENOMEM;
+ goto pvdev_fail;
+ }
+
+ pvdev->vdev = video_device_alloc();
+ if (!pvdev->vdev) {
+ rc = -ENOMEM;
+ goto video_fail;
+ }
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ msm_v4l2_dev->mdev = kzalloc(sizeof(struct media_device),
+ GFP_KERNEL);
+ if (!msm_v4l2_dev->mdev) {
+ rc = -ENOMEM;
+ goto mdev_fail;
+ }
+ strlcpy(msm_v4l2_dev->mdev->model, MSM_CONFIGURATION_NAME,
+ sizeof(msm_v4l2_dev->mdev->model));
+ msm_v4l2_dev->mdev->dev = &(pdev->dev);
+
+ rc = media_device_register(msm_v4l2_dev->mdev);
+ if (WARN_ON(rc < 0))
+ goto media_fail;
+
+ rc = media_entity_init(&pvdev->vdev->entity, 0, NULL, 0);
+ if (WARN_ON(rc < 0))
+ goto entity_fail;
+
+ pvdev->vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
+ pvdev->vdev->entity.group_id = QCAMERA_VNODE_GROUP_ID;
+#endif
+
+ msm_v4l2_dev->notify = msm_sd_notify;
+
+ pvdev->vdev->v4l2_dev = msm_v4l2_dev;
+
+ rc = v4l2_device_register(&(pdev->dev), pvdev->vdev->v4l2_dev);
+ if (WARN_ON(rc < 0))
+ goto register_fail;
+
+ strlcpy(pvdev->vdev->name, "msm-config", sizeof(pvdev->vdev->name));
+ pvdev->vdev->release = video_device_release;
+ pvdev->vdev->fops = &msm_fops;
+ pvdev->vdev->ioctl_ops = &g_msm_ioctl_ops;
+ pvdev->vdev->minor = -1;
+ pvdev->vdev->vfl_type = VFL_TYPE_GRABBER;
+ rc = video_register_device(pvdev->vdev,
+ VFL_TYPE_GRABBER, -1);
+ if (WARN_ON(rc < 0))
+ goto v4l2_fail;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ /* FIXME: How to get rid of this mess? */
+ pvdev->vdev->entity.name = video_device_node_name(pvdev->vdev);
+#endif
+
+ atomic_set(&pvdev->opened, 0);
+ video_set_drvdata(pvdev->vdev, pvdev);
+
+ msm_session_q = kzalloc(sizeof(*msm_session_q), GFP_KERNEL);
+ if (!msm_session_q) {
+ rc = -ENOMEM;
+ goto v4l2_fail;
+ }
+
+ msm_init_queue(msm_session_q);
+ spin_lock_init(&msm_eventq_lock);
+ spin_lock_init(&msm_pid_lock);
+ INIT_LIST_HEAD(&ordered_sd_list);
+
+ cam_debugfs_root = debugfs_create_dir(MSM_CAM_LOGSYNC_FILE_BASEDIR,
+ NULL);
+ if (!cam_debugfs_root) {
+ pr_warn("NON-FATAL: failed to create logsync base directory\n");
+ } else {
+ if (!debugfs_create_file(MSM_CAM_LOGSYNC_FILE_NAME,
+ 0666,
+ cam_debugfs_root,
+ NULL,
+ &logsync_fops))
+ pr_warn("NON-FATAL: failed to create logsync debugfs file\n");
+ }
+
+ rc = cam_ahb_clk_init(pdev);
+ if (rc < 0) {
+ pr_err("%s: failed to register ahb clocks\n", __func__);
+ goto v4l2_fail;
+ }
+
+ goto probe_end;
+
+v4l2_fail:
+ v4l2_device_unregister(pvdev->vdev->v4l2_dev);
+register_fail:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&pvdev->vdev->entity);
+entity_fail:
+ media_device_unregister(msm_v4l2_dev->mdev);
+media_fail:
+ kzfree(msm_v4l2_dev->mdev);
+mdev_fail:
+#endif
+ video_device_release(pvdev->vdev);
+video_fail:
+ kzfree(pvdev);
+pvdev_fail:
+ kzfree(msm_v4l2_dev);
+probe_end:
+ return rc;
+}
+
+static const struct of_device_id msm_dt_match[] = {
+ {.compatible = "qcom,msm-cam"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_dt_match);
+
+static struct platform_driver msm_driver = {
+ .probe = msm_probe,
+ .driver = {
+ .name = "msm",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_dt_match,
+ },
+};
+
+static int __init msm_init(void)
+{
+ return platform_driver_register(&msm_driver);
+}
+
+static void __exit msm_exit(void)
+{
+ platform_driver_unregister(&msm_driver);
+}
+
+
+module_init(msm_init);
+module_exit(msm_exit);
+MODULE_DESCRIPTION("MSM V4L2 Camera");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/msm.h b/drivers/media/platform/msm/ais/msm.h
new file mode 100644
index 000000000000..d8b2d5871fc2
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm.h
@@ -0,0 +1,145 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_H
+#define _MSM_H
+
+#include <linux/version.h>
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <linux/pm_qos.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/ais/msm_ais.h>
+
+/* Set the MAX timeout to 6.5 seconds: in certain use cases (e.g. long
+ * exposure) the backend can run at ~0.6 fps, and the ISP needs up to
+ * 2 frames (around 3 seconds) to stop the hardware, so 6.5 seconds
+ * leaves headroom on top of that.
+ */
+#define MSM_POST_EVT_TIMEOUT 6500
+#define MSM_POST_EVT_NOTIMEOUT 0xFFFFFFFF
+#define MSM_CAMERA_STREAM_CNT_BITS 32
+
+#define CAMERA_DISABLE_PC_LATENCY 100
+#define CAMERA_ENABLE_PC_LATENCY PM_QOS_DEFAULT_VALUE
+
+extern bool is_daemon_status;
+
+struct msm_video_device {
+ struct video_device *vdev;
+ atomic_t opened;
+};
+
+struct msm_queue_head {
+ struct list_head list;
+ spinlock_t lock;
+ int len;
+ int max;
+};
+
+/** msm_event:
+ *
+ * event sent by imaging server
+ **/
+struct msm_event {
+ struct video_device *vdev;
+ atomic_t on_heap;
+};
+
+struct msm_command {
+ struct list_head list;
+ struct v4l2_event event;
+ atomic_t on_heap;
+};
+
+/** struct msm_command_ack
+ *
+ * Entry of command_ack_q, created once per open operation.
+ *
+ * Its command_q holds struct msm_command entries.
+ */
+struct msm_command_ack {
+ struct list_head list;
+ struct msm_queue_head command_q;
+ struct completion wait_complete;
+ int stream_id;
+};
+
+struct msm_v4l2_subdev {
+ /* FIXME: for session close and error handling such
+ * as daemon shutdown
+ */
+ int close_sequence;
+};
+
+struct msm_session {
+ struct list_head list;
+
+ /* session index */
+ unsigned int session_id;
+
+ /* event queue sent by imaging server */
+ struct msm_event event_q;
+
+ /* ACKs from the imaging server: one object of type
+ * struct msm_command_ack per open, on the assumption
+ * that the application can send a command on every
+ * opened video node
+ */
+ struct msm_queue_head command_ack_q;
+
+ /* real streams (either data or metadata) owned by one
+ * session; entries are of type struct msm_stream
+ */
+ struct msm_queue_head stream_q;
+ struct mutex lock;
+ struct mutex lock_q;
+ struct mutex close_lock;
+};
+
+static inline bool msm_is_daemon_present(void)
+{
+ return is_daemon_status;
+}
+
+void msm_pm_qos_update_request(int val);
+int msm_post_event(struct v4l2_event *event, int timeout);
+int msm_create_session(unsigned int session, struct video_device *vdev);
+int msm_destroy_session(unsigned int session_id);
+
+int msm_create_stream(unsigned int session_id,
+ unsigned int stream_id, struct vb2_queue *q);
+void msm_delete_stream(unsigned int session_id, unsigned int stream_id);
+int msm_create_command_ack_q(unsigned int session_id, unsigned int stream_id);
+void msm_delete_command_ack_q(unsigned int session_id, unsigned int stream_id);
+struct msm_stream *msm_get_stream(unsigned int session_id,
+ unsigned int stream_id);
+struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id,
+ unsigned int stream_id);
+struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q);
+struct msm_session *msm_session_find(unsigned int session_id);
+#ifdef CONFIG_COMPAT
+long msm_copy_camera_private_ioctl_args(unsigned long arg,
+ struct msm_camera_private_ioctl_arg *k_ioctl,
+ void __user **tmp_compat_ioctl_ptr);
+#endif
+#endif /*_MSM_H */
diff --git a/drivers/media/platform/msm/ais/msm_buf_mgr/Makefile b/drivers/media/platform/msm/ais/msm_buf_mgr/Makefile
new file mode 100644
index 000000000000..ec75622d193a
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm_buf_mgr/Makefile
@@ -0,0 +1,2 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+obj-$(CONFIG_MSM_AIS) += msm_generic_buf_mgr.o
diff --git a/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c
new file mode 100644
index 000000000000..675bf6b24b03
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c
@@ -0,0 +1,896 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-BUFMGR %s:%d " fmt, __func__, __LINE__
+
+#include "msm_generic_buf_mgr.h"
+
+static struct msm_buf_mngr_device *msm_buf_mngr_dev;
+
+struct v4l2_subdev *msm_buf_mngr_get_subdev(void)
+{
+ return &msm_buf_mngr_dev->subdev.sd;
+}
+
+static int32_t msm_buf_mngr_hdl_cont_get_buf(struct msm_buf_mngr_device *dev,
+ struct msm_buf_mngr_info *buf_info)
+{
+ unsigned int i;
+ struct msm_buf_mngr_user_buf_cont_info *cbuf, *cont_save;
+
+ list_for_each_entry_safe(cbuf, cont_save, &dev->cont_qhead, entry) {
+ if ((cbuf->sessid == buf_info->session_id) &&
+ (cbuf->index == buf_info->index) &&
+ (cbuf->strid == buf_info->stream_id)) {
+ buf_info->user_buf.buf_cnt = cbuf->paddr->buf_cnt;
+ if (buf_info->user_buf.buf_cnt >
+ MSM_CAMERA_MAX_USER_BUFF_CNT) {
+ pr_err("Invalid cnt%d,%d,%d\n",
+ cbuf->paddr->buf_cnt,
+ buf_info->session_id,
+ buf_info->stream_id);
+ return -EINVAL;
+ }
+ for (i = 0 ; i < buf_info->user_buf.buf_cnt; i++) {
+ buf_info->user_buf.buf_idx[i] =
+ cbuf->paddr->buf_idx[i];
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+static int32_t msm_buf_mngr_get_buf(struct msm_buf_mngr_device *dev,
+ void *argp)
+{
+ unsigned long flags;
+ int32_t rc = 0;
+ struct msm_buf_mngr_info *buf_info =
+ (struct msm_buf_mngr_info *)argp;
+ struct msm_get_bufs *new_entry =
+ kzalloc(sizeof(struct msm_get_bufs), GFP_KERNEL);
+
+ if (!new_entry) {
+ pr_err("%s:No mem\n", __func__);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&new_entry->entry);
+ new_entry->vb2_v4l2_buf = dev->vb2_ops.get_buf(buf_info->session_id,
+ buf_info->stream_id);
+ if (!new_entry->vb2_v4l2_buf) {
+ pr_debug("%s:Get buf is null\n", __func__);
+ kfree(new_entry);
+ return -EINVAL;
+ }
+ new_entry->session_id = buf_info->session_id;
+ new_entry->stream_id = buf_info->stream_id;
+ new_entry->index = new_entry->vb2_v4l2_buf->vb2_buf.index;
+ spin_lock_irqsave(&dev->buf_q_spinlock, flags);
+ list_add_tail(&new_entry->entry, &dev->buf_qhead);
+ spin_unlock_irqrestore(&dev->buf_q_spinlock, flags);
+ buf_info->index = new_entry->vb2_v4l2_buf->vb2_buf.index;
+ if (buf_info->type == MSM_CAMERA_BUF_MNGR_BUF_USER) {
+ mutex_lock(&dev->cont_mutex);
+ if (!list_empty(&dev->cont_qhead)) {
+ rc = msm_buf_mngr_hdl_cont_get_buf(dev, buf_info);
+ } else {
+ pr_err("Nothing mapped in user buf for %d,%d\n",
+ buf_info->session_id, buf_info->stream_id);
+ rc = -EINVAL;
+ }
+ mutex_unlock(&dev->cont_mutex);
+ }
+ return rc;
+}
+
+static int32_t msm_buf_mngr_get_buf_by_idx(struct msm_buf_mngr_device *dev,
+ void *argp)
+{
+ unsigned long flags;
+ int32_t rc = 0;
+ struct msm_buf_mngr_info *buf_info =
+ (struct msm_buf_mngr_info *)argp;
+ struct msm_get_bufs *new_entry =
+ kzalloc(sizeof(struct msm_get_bufs), GFP_KERNEL);
+
+ if (!new_entry) {
+ pr_err("%s:No mem\n", __func__);
+ return -ENOMEM;
+ }
+ if (!buf_info) {
+ kfree(new_entry);
+ return -EIO;
+ }
+
+ INIT_LIST_HEAD(&new_entry->entry);
+ new_entry->vb2_v4l2_buf = dev->vb2_ops.get_buf_by_idx(
+ buf_info->session_id, buf_info->stream_id, buf_info->index);
+ if (!new_entry->vb2_v4l2_buf) {
+ pr_debug("%s:Get buf is null\n", __func__);
+ kfree(new_entry);
+ return -EINVAL;
+ }
+ new_entry->session_id = buf_info->session_id;
+ new_entry->stream_id = buf_info->stream_id;
+ new_entry->index = new_entry->vb2_v4l2_buf->vb2_buf.index;
+ spin_lock_irqsave(&dev->buf_q_spinlock, flags);
+ list_add_tail(&new_entry->entry, &dev->buf_qhead);
+ spin_unlock_irqrestore(&dev->buf_q_spinlock, flags);
+ if (buf_info->type == MSM_CAMERA_BUF_MNGR_BUF_USER) {
+ mutex_lock(&dev->cont_mutex);
+ if (!list_empty(&dev->cont_qhead)) {
+ rc = msm_buf_mngr_hdl_cont_get_buf(dev, buf_info);
+ } else {
+ pr_err("Nothing mapped in user buf for %d,%d\n",
+ buf_info->session_id, buf_info->stream_id);
+ rc = -EINVAL;
+ }
+ mutex_unlock(&dev->cont_mutex);
+ }
+ return rc;
+}
+
+static int32_t msm_buf_mngr_buf_done(struct msm_buf_mngr_device *buf_mngr_dev,
+ struct msm_buf_mngr_info *buf_info)
+{
+ unsigned long flags;
+ struct msm_get_bufs *bufs, *save;
+ int32_t ret = -EINVAL;
+
+ spin_lock_irqsave(&buf_mngr_dev->buf_q_spinlock, flags);
+ list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
+ if ((bufs->session_id == buf_info->session_id) &&
+ (bufs->stream_id == buf_info->stream_id) &&
+ (bufs->vb2_v4l2_buf->vb2_buf.index ==
+ buf_info->index)) {
+ bufs->vb2_v4l2_buf->sequence = buf_info->frame_id;
+ bufs->vb2_v4l2_buf->timestamp = buf_info->timestamp;
+ ret = buf_mngr_dev->vb2_ops.buf_done
+ (bufs->vb2_v4l2_buf,
+ buf_info->session_id,
+ buf_info->stream_id,
+ buf_info->frame_id,
+ &buf_info->timestamp,
+ buf_info->reserved);
+ list_del_init(&bufs->entry);
+ kfree(bufs);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&buf_mngr_dev->buf_q_spinlock, flags);
+ return ret;
+}
+
+
+static int32_t msm_buf_mngr_put_buf(struct msm_buf_mngr_device *buf_mngr_dev,
+ struct msm_buf_mngr_info *buf_info)
+{
+ unsigned long flags;
+ struct msm_get_bufs *bufs, *save;
+ int32_t ret = -EINVAL;
+
+ spin_lock_irqsave(&buf_mngr_dev->buf_q_spinlock, flags);
+ list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
+ if ((bufs->session_id == buf_info->session_id) &&
+ (bufs->stream_id == buf_info->stream_id) &&
+ (bufs->vb2_v4l2_buf->vb2_buf.index ==
+ buf_info->index)) {
+ ret = buf_mngr_dev->vb2_ops.put_buf(bufs->vb2_v4l2_buf,
+ buf_info->session_id, buf_info->stream_id);
+ list_del_init(&bufs->entry);
+ kfree(bufs);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&buf_mngr_dev->buf_q_spinlock, flags);
+ return ret;
+}
+
+static int32_t msm_generic_buf_mngr_flush(
+ struct msm_buf_mngr_device *buf_mngr_dev,
+ struct msm_buf_mngr_info *buf_info)
+{
+ unsigned long flags;
+ struct msm_get_bufs *bufs, *save;
+ int32_t ret = -EINVAL;
+ struct timeval ts;
+
+ spin_lock_irqsave(&buf_mngr_dev->buf_q_spinlock, flags);
+ /*
+ * Sanity check on the client buf list: remove any buf mgr
+ * queue entries that are still present
+ */
+ list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
+ if ((bufs->session_id == buf_info->session_id) &&
+ (bufs->stream_id == buf_info->stream_id)) {
+ ret = buf_mngr_dev->vb2_ops.buf_done(bufs->vb2_v4l2_buf,
+ buf_info->session_id,
+ buf_info->stream_id, 0, &ts, 0);
+ pr_err("Bufs not flushed: str_id = %d buf_index = %d ret = %d\n",
+ buf_info->stream_id, bufs->vb2_v4l2_buf->vb2_buf.index,
+ ret);
+ list_del_init(&bufs->entry);
+ kfree(bufs);
+ }
+ }
+ spin_unlock_irqrestore(&buf_mngr_dev->buf_q_spinlock, flags);
+ /* Flush the remaining vb2 buffers in stream list */
+ ret = buf_mngr_dev->vb2_ops.flush_buf(buf_info->session_id,
+ buf_info->stream_id);
+ return ret;
+}
+
+static int32_t msm_buf_mngr_find_cont_stream(struct msm_buf_mngr_device *dev,
+ uint32_t *cnt, uint32_t *tstream,
+ struct msm_sd_close_ioctl *session)
+{
+ struct msm_buf_mngr_user_buf_cont_info *cont_bufs, *cont_save;
+ int32_t ret = -1;
+
+ list_for_each_entry_safe(cont_bufs,
+ cont_save, &dev->cont_qhead, entry) {
+ if (cont_bufs->sessid == session->session) {
+ *cnt = cont_bufs->cnt;
+ *tstream = cont_bufs->strid;
+ return 0;
+ }
+ }
+ return ret;
+}
+
+static void msm_buf_mngr_contq_listdel(struct msm_buf_mngr_device *dev,
+ uint32_t session, int32_t stream,
+ bool unmap, uint32_t cnt)
+{
+ struct msm_buf_mngr_user_buf_cont_info *cont_bufs, *cont_save;
+
+ list_for_each_entry_safe(cont_bufs,
+ cont_save, &dev->cont_qhead, entry) {
+ if ((cont_bufs->sessid == session) &&
+ (cont_bufs->strid == stream)) {
+ if ((cnt == 1) && (unmap == 1)) {
+ ion_unmap_kernel(dev->ion_client,
+ cont_bufs->ion_handle);
+ ion_free(dev->ion_client,
+ cont_bufs->ion_handle);
+ }
+ list_del_init(&cont_bufs->entry);
+ kfree(cont_bufs);
+ cnt--;
+ }
+ }
+ if (cnt != 0)
+ pr_err("Buffers pending cnt = %d\n", cnt);
+}
+
+static void msm_buf_mngr_contq_cleanup(struct msm_buf_mngr_device *dev,
+ struct msm_sd_close_ioctl *session)
+{
+ int32_t stream = -1, found = -1;
+ uint32_t cnt = 0;
+
+ do {
+ found = msm_buf_mngr_find_cont_stream(dev, &cnt,
+ &stream, session);
+ if (found == -1)
+ break;
+ msm_buf_mngr_contq_listdel(dev, session->session,
+ stream, 1, cnt);
+ } while (found == 0);
+}
+
+static void msm_buf_mngr_sd_shutdown(struct msm_buf_mngr_device *dev,
+ struct msm_sd_close_ioctl *session)
+{
+ unsigned long flags;
+ struct msm_get_bufs *bufs, *save;
+
+ if (WARN_ON(!dev) || WARN_ON(!session))
+ return;
+
+ spin_lock_irqsave(&dev->buf_q_spinlock, flags);
+ if (!list_empty(&dev->buf_qhead)) {
+ list_for_each_entry_safe(bufs,
+ save, &dev->buf_qhead, entry) {
+ pr_info("%s: Delete invalid bufs =%pK, session_id=%u, bufs->ses_id=%d, str_id=%d, idx=%d\n",
+ __func__, (void *)bufs, session->session,
+ bufs->session_id, bufs->stream_id,
+ bufs->index);
+ if (session->session == bufs->session_id) {
+ list_del_init(&bufs->entry);
+ kfree(bufs);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dev->buf_q_spinlock, flags);
+ mutex_lock(&dev->cont_mutex);
+ if (!list_empty(&dev->cont_qhead))
+ msm_buf_mngr_contq_cleanup(dev, session);
+ mutex_unlock(&dev->cont_mutex);
+}
+
+static int msm_buf_mngr_handle_cont_cmd(struct msm_buf_mngr_device *dev,
+ struct msm_buf_mngr_main_cont_info
+ *cont_cmd)
+{
+ int rc = 0, i = 0;
+ struct ion_handle *ion_handle = NULL;
+ struct msm_camera_user_buf_cont_t *iaddr, *temp_addr;
+ struct msm_buf_mngr_user_buf_cont_info *new_entry, *bufs, *save;
+ size_t size;
+
+ if ((cont_cmd->cmd >= MSM_CAMERA_BUF_MNGR_CONT_MAX) ||
+ (cont_cmd->cmd < 0) ||
+ (cont_cmd->cnt > VB2_MAX_FRAME) ||
+ (cont_cmd->cont_fd < 0)) {
+ pr_debug("Invalid arg passed Cmd:%d, cnt:%d, fd:%d\n",
+ cont_cmd->cmd, cont_cmd->cnt,
+ cont_cmd->cont_fd);
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->cont_mutex);
+
+ if (cont_cmd->cmd == MSM_CAMERA_BUF_MNGR_CONT_MAP) {
+ if (!list_empty(&dev->cont_qhead)) {
+ list_for_each_entry_safe(bufs,
+ save, &dev->cont_qhead, entry) {
+ if ((bufs->sessid == cont_cmd->session_id) &&
+ (bufs->strid == cont_cmd->stream_id)) {
+ pr_err("Map exist %d,%d unmap first\n",
+ cont_cmd->session_id,
+ cont_cmd->stream_id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+ }
+ ion_handle = ion_import_dma_buf(dev->ion_client,
+ cont_cmd->cont_fd);
+ if (IS_ERR_OR_NULL(ion_handle)) {
+ pr_err("Failed to create ion handle for fd %d\n",
+ cont_cmd->cont_fd);
+ rc = -EINVAL;
+ goto end;
+ }
+ if (ion_handle_get_size(dev->ion_client,
+ ion_handle, &size) < 0) {
+ pr_err("Get ion size failed\n");
+ rc = -EINVAL;
+ goto free_ion_handle;
+ }
+ if ((size == 0) || (size <
+ (sizeof(struct msm_camera_user_buf_cont_t) *
+ cont_cmd->cnt))) {
+ pr_err("Invalid or zero size ION buffer %zu\n", size);
+ rc = -EINVAL;
+ goto free_ion_handle;
+ }
+ iaddr = ion_map_kernel(dev->ion_client, ion_handle);
+ if (IS_ERR_OR_NULL(iaddr)) {
+ pr_err("Mapping cont buff failed\n");
+ rc = -EINVAL;
+ goto free_ion_handle;
+ }
+ for (i = 0; i < cont_cmd->cnt; i++) {
+ temp_addr = iaddr + i;
+ if (temp_addr->buf_cnt >
+ MSM_CAMERA_MAX_USER_BUFF_CNT) {
+ pr_err("%s:Invalid buf_cnt:%d for cont:%d\n",
+ __func__, temp_addr->buf_cnt, i);
+ rc = -EINVAL;
+ goto free_list;
+ }
+ new_entry = kzalloc(sizeof(
+ struct msm_buf_mngr_user_buf_cont_info),
+ GFP_KERNEL);
+ if (!new_entry) {
+ pr_err("%s:No mem\n", __func__);
+ rc = -ENOMEM;
+ goto free_list;
+ }
+ INIT_LIST_HEAD(&new_entry->entry);
+ new_entry->sessid = cont_cmd->session_id;
+ new_entry->strid = cont_cmd->stream_id;
+ new_entry->index = i;
+ new_entry->main_fd = cont_cmd->cont_fd;
+ new_entry->ion_handle = ion_handle;
+ new_entry->cnt = cont_cmd->cnt;
+ new_entry->paddr = temp_addr;
+ list_add_tail(&new_entry->entry, &dev->cont_qhead);
+ }
+ goto end;
+ } else if (cont_cmd->cmd == MSM_CAMERA_BUF_MNGR_CONT_UNMAP) {
+ if (!list_empty(&dev->cont_qhead)) {
+ msm_buf_mngr_contq_listdel(dev, cont_cmd->session_id,
+ cont_cmd->stream_id, 1, cont_cmd->cnt);
+ } else {
+ pr_err("Nothing mapped for %d,%d\n",
+ cont_cmd->session_id, cont_cmd->stream_id);
+ rc = -EINVAL;
+ }
+ goto end;
+ }
+
+free_list:
+ if (i != 0) {
+ if (!list_empty(&dev->cont_qhead)) {
+ msm_buf_mngr_contq_listdel(dev, cont_cmd->session_id,
+ cont_cmd->stream_id, 0, i);
+ }
+ }
+ ion_unmap_kernel(dev->ion_client, ion_handle);
+free_ion_handle:
+ ion_free(dev->ion_client, ion_handle);
+end:
+ mutex_unlock(&dev->cont_mutex);
+ return rc;
+}
+
+static int msm_generic_buf_mngr_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_buf_mngr_device *buf_mngr_dev = v4l2_get_subdevdata(sd);
+
+ if (!buf_mngr_dev) {
+ pr_err("%s buf manager device NULL\n", __func__);
+ rc = -ENODEV;
+ return rc;
+ }
+ return rc;
+}
+
+static int msm_generic_buf_mngr_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_buf_mngr_device *buf_mngr_dev = v4l2_get_subdevdata(sd);
+
+ if (!buf_mngr_dev) {
+ pr_err("%s buf manager device NULL\n", __func__);
+ rc = -ENODEV;
+ return rc;
+ }
+ return rc;
+}
+
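+/*
+ * Kernel-space entry point mirroring the buffer manager ioctls
+ * (get/put/done and the private GET_BUF_BY_IDX command); clients that
+ * registered through msm_cam_buf_mgr_register_ops() call back into the
+ * buffer manager through this function.
+ */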
+int msm_cam_buf_mgr_ops(unsigned int cmd, void *argp)
+{
+ int rc = 0;
+
+ if (!msm_buf_mngr_dev)
+ return -ENODEV;
+ if (!argp)
+ return -EINVAL;
+
+ switch (cmd) {
+ case VIDIOC_MSM_BUF_MNGR_GET_BUF:
+ rc = msm_buf_mngr_get_buf(msm_buf_mngr_dev, argp);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
+ rc = msm_buf_mngr_buf_done(msm_buf_mngr_dev, argp);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_PUT_BUF:
+ rc = msm_buf_mngr_put_buf(msm_buf_mngr_dev, argp);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_IOCTL_CMD: {
+ struct msm_camera_private_ioctl_arg *k_ioctl = argp;
+
+ switch (k_ioctl->id) {
+ case MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX: {
+ struct msm_buf_mngr_info *tmp = NULL;
+
+ if (!k_ioctl->ioctl_ptr)
+ return -EINVAL;
+ if (k_ioctl->size != sizeof(struct msm_buf_mngr_info))
+ return -EINVAL;
+
+ MSM_CAM_GET_IOCTL_ARG_PTR(&tmp, &k_ioctl->ioctl_ptr,
+ sizeof(tmp));
+ rc = msm_buf_mngr_get_buf_by_idx(msm_buf_mngr_dev,
+ tmp);
+ }
+ break;
+ default:
+ pr_debug("unimplemented id %d", k_ioctl->id);
+ return -EINVAL;
+ }
+ break;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return rc;
+}
+
+int msm_cam_buf_mgr_register_ops(struct msm_cam_buf_mgr_req_ops *cb_struct)
+{
+ if (!msm_buf_mngr_dev)
+ return -ENODEV;
+ if (!cb_struct)
+ return -EINVAL;
+
+ cb_struct->msm_cam_buf_mgr_ops = msm_cam_buf_mgr_ops;
+ return 0;
+}
+
+static long msm_buf_mngr_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int32_t rc = 0;
+ struct msm_buf_mngr_device *buf_mngr_dev = v4l2_get_subdevdata(sd);
+ void __user *argp = (void __user *)arg;
+
+ if (!buf_mngr_dev) {
+ pr_err("%s buf manager device NULL\n", __func__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_BUF_MNGR_IOCTL_CMD: {
+ struct msm_camera_private_ioctl_arg k_ioctl, *ptr;
+
+ if (!arg)
+ return -EINVAL;
+ ptr = arg;
+ k_ioctl = *ptr;
+ switch (k_ioctl.id) {
+ case MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX: {
+ struct msm_buf_mngr_info buf_info, *tmp = NULL;
+
+ if (k_ioctl.size != sizeof(struct msm_buf_mngr_info))
+ return -EINVAL;
+ if (!k_ioctl.ioctl_ptr)
+ return -EINVAL;
+
+ MSM_CAM_GET_IOCTL_ARG_PTR(&tmp, &k_ioctl.ioctl_ptr,
+ sizeof(tmp));
+ if (copy_from_user(&buf_info, tmp,
+ sizeof(struct msm_buf_mngr_info))) {
+ return -EFAULT;
+ }
+ MSM_CAM_GET_IOCTL_ARG_PTR(&k_ioctl.ioctl_ptr,
+ &buf_info, sizeof(void *));
+ argp = &k_ioctl;
+ rc = msm_cam_buf_mgr_ops(cmd, argp);
+ }
+ break;
+ default:
+ pr_debug("unimplemented id %d", k_ioctl.id);
+ return -EINVAL;
+ }
+ }
+ break;
+ case VIDIOC_MSM_BUF_MNGR_GET_BUF:
+ case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
+ case VIDIOC_MSM_BUF_MNGR_PUT_BUF:
+ rc = msm_cam_buf_mgr_ops(cmd, argp);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_INIT:
+ rc = msm_generic_buf_mngr_open(sd, NULL);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_DEINIT:
+ rc = msm_generic_buf_mngr_close(sd, NULL);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ break;
+ case VIDIOC_MSM_BUF_MNGR_FLUSH:
+ rc = msm_generic_buf_mngr_flush(buf_mngr_dev, argp);
+ break;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ break;
+ case MSM_SD_SHUTDOWN:
+ msm_buf_mngr_sd_shutdown(buf_mngr_dev, argp);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_CONT_CMD:
+ rc = msm_buf_mngr_handle_cont_cmd(buf_mngr_dev, argp);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_camera_buf_mgr_fetch_buf_info(
+ struct msm_buf_mngr_info32_t *buf_info32,
+ struct msm_buf_mngr_info *buf_info, unsigned long arg)
+{
+ if (!arg || !buf_info32 || !buf_info)
+ return -EINVAL;
+
+ if (copy_from_user(buf_info32, (void __user *)arg,
+ sizeof(struct msm_buf_mngr_info32_t)))
+ return -EFAULT;
+
+ buf_info->session_id = buf_info32->session_id;
+ buf_info->stream_id = buf_info32->stream_id;
+ buf_info->frame_id = buf_info32->frame_id;
+ buf_info->index = buf_info32->index;
+ buf_info->timestamp.tv_sec = (long) buf_info32->timestamp.tv_sec;
+ buf_info->timestamp.tv_usec = (long) buf_info32->
+ timestamp.tv_usec;
+ buf_info->reserved = buf_info32->reserved;
+ buf_info->type = buf_info32->type;
+ return 0;
+}
+
+static long msm_camera_buf_mgr_update_buf_info(
+ struct msm_buf_mngr_info32_t *buf_info32,
+ struct msm_buf_mngr_info *buf_info, unsigned long arg)
+{
+ if (!arg || !buf_info32 || !buf_info)
+ return -EINVAL;
+
+ buf_info32->session_id = buf_info->session_id;
+ buf_info32->stream_id = buf_info->stream_id;
+ buf_info32->index = buf_info->index;
+ buf_info32->timestamp.tv_sec = (int32_t) buf_info->
+ timestamp.tv_sec;
+ buf_info32->timestamp.tv_usec = (int32_t) buf_info->timestamp.
+ tv_usec;
+ buf_info32->reserved = buf_info->reserved;
+ buf_info32->type = buf_info->type;
+ buf_info32->user_buf.buf_cnt = buf_info->user_buf.buf_cnt;
+ memcpy(&buf_info32->user_buf.buf_idx,
+ &buf_info->user_buf.buf_idx,
+ sizeof(buf_info->user_buf.buf_idx));
+ if (copy_to_user((void __user *)arg, buf_info32,
+ sizeof(struct msm_buf_mngr_info32_t)))
+ return -EFAULT;
+ return 0;
+}
+static long msm_camera_buf_mgr_internal_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ long rc = 0;
+ struct msm_camera_private_ioctl_arg k_ioctl;
+ void __user *tmp_compat_ioctl_ptr = NULL;
+
+ rc = msm_copy_camera_private_ioctl_args(arg,
+ &k_ioctl, &tmp_compat_ioctl_ptr);
+ if (rc < 0) {
+ pr_err("Subdev cmd %d failed\n", cmd);
+ return rc;
+ }
+
+ switch (k_ioctl.id) {
+ case MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX: {
+ struct msm_buf_mngr_info32_t buf_info32;
+ struct msm_buf_mngr_info buf_info;
+
+ if (k_ioctl.size != sizeof(struct msm_buf_mngr_info32_t)) {
+ pr_err("Invalid size for id %d with size %d",
+ k_ioctl.id, k_ioctl.size);
+ return -EINVAL;
+ }
+ if (!tmp_compat_ioctl_ptr) {
+ pr_err("Invalid ptr for id %d", k_ioctl.id);
+ return -EINVAL;
+ }
+ k_ioctl.ioctl_ptr = (__u64)&buf_info;
+ rc = msm_camera_buf_mgr_fetch_buf_info(&buf_info32, &buf_info,
+ (unsigned long)tmp_compat_ioctl_ptr);
+ if (rc < 0) {
+ pr_err("Fetch buf info failed for cmd=%d", cmd);
+ return rc;
+ }
+ rc = v4l2_subdev_call(sd, core, ioctl, cmd, &k_ioctl);
+ if (rc < 0) {
+ pr_err("Subdev cmd %d failed for id %d", cmd,
+ k_ioctl.id);
+ return rc;
+ }
+ }
+ break;
+ default:
+ pr_debug("unimplemented id %d", k_ioctl.id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static long msm_bmgr_subdev_fops_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ int32_t rc = 0;
+
+ /* Convert 32-bit ioctl IDs to their 64-bit equivalents;
+ * commands that need no conversion are passed through
+ * unchanged.
+ */
+ switch (cmd) {
+ case VIDIOC_MSM_BUF_MNGR_GET_BUF32:
+ cmd = VIDIOC_MSM_BUF_MNGR_GET_BUF;
+ break;
+ case VIDIOC_MSM_BUF_MNGR_BUF_DONE32:
+ cmd = VIDIOC_MSM_BUF_MNGR_BUF_DONE;
+ break;
+ case VIDIOC_MSM_BUF_MNGR_PUT_BUF32:
+ cmd = VIDIOC_MSM_BUF_MNGR_PUT_BUF;
+ break;
+ case VIDIOC_MSM_BUF_MNGR_CONT_CMD:
+ break;
+ case VIDIOC_MSM_BUF_MNGR_FLUSH32:
+ cmd = VIDIOC_MSM_BUF_MNGR_FLUSH;
+ break;
+ case VIDIOC_MSM_BUF_MNGR_IOCTL_CMD:
+ break;
+ default:
+ pr_debug("unsupported compat type\n");
+ return -ENOIOCTLCMD;
+ }
+
+ switch (cmd) {
+ case VIDIOC_MSM_BUF_MNGR_GET_BUF:
+ case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
+ case VIDIOC_MSM_BUF_MNGR_FLUSH:
+ case VIDIOC_MSM_BUF_MNGR_PUT_BUF: {
+ struct msm_buf_mngr_info32_t buf_info32;
+ struct msm_buf_mngr_info buf_info;
+
+ rc = msm_camera_buf_mgr_fetch_buf_info(&buf_info32, &buf_info,
+ arg);
+ if (rc < 0) {
+ pr_err("Fetch buf info failed for cmd=%d\n", cmd);
+ return rc;
+ }
+ rc = v4l2_subdev_call(sd, core, ioctl, cmd, &buf_info);
+ if (rc < 0) {
+ pr_debug("Subdev cmd %d fail\n", cmd);
+ return rc;
+ }
+ rc = msm_camera_buf_mgr_update_buf_info(&buf_info32, &buf_info,
+ arg);
+ if (rc < 0) {
+ pr_err("Update buf info failed for cmd=%d\n", cmd);
+ return rc;
+ }
+ }
+ break;
+ case VIDIOC_MSM_BUF_MNGR_IOCTL_CMD: {
+ rc = msm_camera_buf_mgr_internal_compat_ioctl(file, cmd, arg);
+ if (rc < 0) {
+ pr_debug("Subdev cmd %d fail\n", cmd);
+ return rc;
+ }
+ }
+ break;
+ case VIDIOC_MSM_BUF_MNGR_CONT_CMD: {
+ struct msm_buf_mngr_main_cont_info cont_cmd;
+
+ if (copy_from_user(&cont_cmd, (void __user *)arg,
+ sizeof(struct msm_buf_mngr_main_cont_info)))
+ return -EFAULT;
+ rc = v4l2_subdev_call(sd, core, ioctl, cmd, &cont_cmd);
+ if (rc < 0) {
+ pr_debug("Subdev cmd %d fail\n", cmd);
+ return rc;
+ }
+ }
+ break;
+ default:
+ pr_debug("unsupported compat type\n");
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+#endif
+
+static struct v4l2_subdev_core_ops msm_buf_mngr_subdev_core_ops = {
+ .ioctl = msm_buf_mngr_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_internal_ops
+ msm_generic_buf_mngr_subdev_internal_ops = {
+ .open = msm_generic_buf_mngr_open,
+ .close = msm_generic_buf_mngr_close,
+};
+
+static const struct v4l2_subdev_ops msm_buf_mngr_subdev_ops = {
+ .core = &msm_buf_mngr_subdev_core_ops,
+};
+
+static struct v4l2_file_operations msm_buf_v4l2_subdev_fops;
+
+static long msm_bmgr_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+}
+
+
+static long msm_buf_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_bmgr_subdev_do_ioctl);
+}
+
+static int32_t __init msm_buf_mngr_init(void)
+{
+ int32_t rc = 0;
+
+ msm_buf_mngr_dev = kzalloc(sizeof(*msm_buf_mngr_dev),
+ GFP_KERNEL);
+ if (!msm_buf_mngr_dev)
+ return -ENOMEM;
+
+ /* Sub-dev */
+ v4l2_subdev_init(&msm_buf_mngr_dev->subdev.sd,
+ &msm_buf_mngr_subdev_ops);
+ msm_cam_copy_v4l2_subdev_fops(&msm_buf_v4l2_subdev_fops);
+ msm_buf_v4l2_subdev_fops.unlocked_ioctl = msm_buf_subdev_fops_ioctl;
+#ifdef CONFIG_COMPAT
+ msm_buf_v4l2_subdev_fops.compat_ioctl32 =
+ msm_bmgr_subdev_fops_compat_ioctl;
+#endif
+ snprintf(msm_buf_mngr_dev->subdev.sd.name,
+ ARRAY_SIZE(msm_buf_mngr_dev->subdev.sd.name), "msm_buf_mngr");
+ msm_buf_mngr_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ v4l2_set_subdevdata(&msm_buf_mngr_dev->subdev.sd, msm_buf_mngr_dev);
+
+ media_entity_init(&msm_buf_mngr_dev->subdev.sd.entity, 0, NULL, 0);
+ msm_buf_mngr_dev->subdev.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ msm_buf_mngr_dev->subdev.sd.entity.group_id =
+ MSM_CAMERA_SUBDEV_BUF_MNGR;
+ msm_buf_mngr_dev->subdev.sd.internal_ops =
+ &msm_generic_buf_mngr_subdev_internal_ops;
+ msm_buf_mngr_dev->subdev.close_seq = MSM_SD_CLOSE_4TH_CATEGORY;
+ rc = msm_sd_register(&msm_buf_mngr_dev->subdev);
+ if (rc != 0) {
+ pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+ goto end;
+ }
+
+ msm_buf_mngr_dev->subdev.sd.devnode->fops = &msm_buf_v4l2_subdev_fops;
+
+ v4l2_subdev_notify(&msm_buf_mngr_dev->subdev.sd, MSM_SD_NOTIFY_REQ_CB,
+ &msm_buf_mngr_dev->vb2_ops);
+
+ INIT_LIST_HEAD(&msm_buf_mngr_dev->buf_qhead);
+ spin_lock_init(&msm_buf_mngr_dev->buf_q_spinlock);
+
+ mutex_init(&msm_buf_mngr_dev->cont_mutex);
+ INIT_LIST_HEAD(&msm_buf_mngr_dev->cont_qhead);
+ msm_buf_mngr_dev->ion_client =
+ msm_ion_client_create("msm_cam_generic_buf_mgr");
+ if (!msm_buf_mngr_dev->ion_client) {
+ pr_err("%s: Failed to create ion client\n", __func__);
+ rc = -EBADFD;
+ }
+
+end:
+ return rc;
+}
+
+static void __exit msm_buf_mngr_exit(void)
+{
+ msm_sd_unregister(&msm_buf_mngr_dev->subdev);
+ mutex_destroy(&msm_buf_mngr_dev->cont_mutex);
+ kfree(msm_buf_mngr_dev);
+}
+
+module_init(msm_buf_mngr_init);
+module_exit(msm_buf_mngr_exit);
+MODULE_DESCRIPTION("MSM Buffer Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.h b/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.h
new file mode 100644
index 000000000000..2dca256bf8f8
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUF_GENERIC_MNGR_H__
+#define __MSM_BUF_GENERIC_MNGR_H__
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais.h>
+#include <media/ais/msm_ais_buf_mgr.h>
+
+#include "msm.h"
+#include "msm_sd.h"
+
+struct msm_get_bufs {
+ struct list_head entry;
+ struct vb2_v4l2_buffer *vb2_v4l2_buf;
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t index;
+};
+
+struct msm_buf_mngr_device {
+ struct list_head buf_qhead;
+ spinlock_t buf_q_spinlock;
+ struct ion_client *ion_client;
+ struct msm_sd_subdev subdev;
+ struct msm_sd_req_vb2_q vb2_ops;
+ struct list_head cont_qhead;
+ struct mutex cont_mutex;
+};
+
+struct msm_buf_mngr_user_buf_cont_info {
+ struct list_head entry;
+ uint32_t sessid;
+ uint32_t strid;
+ uint32_t index;
+ int32_t main_fd;
+ struct msm_camera_user_buf_cont_t *paddr;
+ uint32_t cnt;
+ struct ion_handle *ion_handle;
+};
+
+/* kernel-space functions */
+struct msm_cam_buf_mgr_req_ops {
+ int (*msm_cam_buf_mgr_ops)(unsigned int cmd, void *argp);
+};
+
+/* API for a client to register its callback structure. The cb_struct is
+ * assumed to be allocated by the client.
+ */
+int msm_cam_buf_mgr_register_ops(struct msm_cam_buf_mgr_req_ops *cb_struct);
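+/*
+ * Illustrative usage only (hypothetical client code, not part of this
+ * driver), where buf_info is a struct msm_buf_mngr_info filled in by the
+ * caller:
+ *
+ *	static struct msm_cam_buf_mgr_req_ops buf_mgr_ops;
+ *
+ *	if (!msm_cam_buf_mgr_register_ops(&buf_mgr_ops))
+ *		buf_mgr_ops.msm_cam_buf_mgr_ops(VIDIOC_MSM_BUF_MNGR_GET_BUF,
+ *						&buf_info);
+ */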
+#endif
diff --git a/drivers/media/platform/msm/ais/msm_sd.h b/drivers/media/platform/msm/ais/msm_sd.h
new file mode 100644
index 000000000000..87d41bfcdcf1
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm_sd.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_SD_H
+#define _MSM_SD_H
+
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais.h>
+
+/* NOTE: this header file should ONLY be included by subdev drivers */
+
+struct msm_sd_close_ioctl {
+ unsigned int session;
+ unsigned int stream;
+};
+
+#define MSM_SD_CLOSE_STREAM \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 26, struct msm_sd_close_ioctl)
+
+#define MSM_SD_CLOSE_SESSION \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 27, struct msm_sd_close_ioctl)
+
+#define MSM_SD_CLOSE_SESSION_AND_STREAM \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 28, struct msm_sd_close_ioctl)
+
+#define MSM_SD_SHUTDOWN \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 29, struct msm_sd_close_ioctl)
+
+#define MSM_SD_NOTIFY_FREEZE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 30, struct msm_sd_close_ioctl)
+
+#define MSM_SD_UNNOTIFY_FREEZE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 31, struct msm_sd_close_ioctl)
+/*
+ * This is used to install the close sequence in msm_sd_register().
+ * During msm_close(), the proper close sequence is triggered.
+ * For example:
+ *
+ * close_sequence = 0x00100001 (ISP)
+ * close_sequence = 0x00100002 (ISP)
+ * close_sequence = 0x00100003 (ISP)
+ * close_sequence = 0x00200001 (sensor)
+ * close_sequence = 0x00200002 (sensor)
+ * close_sequence = 0x00200003 (sensor)
+ */
+#define MSM_SD_CLOSE_1ST_CATEGORY 0x00010000
+#define MSM_SD_CLOSE_2ND_CATEGORY 0x00020000
+#define MSM_SD_CLOSE_3RD_CATEGORY 0x00030000
+#define MSM_SD_CLOSE_4TH_CATEGORY 0x00040000
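+
+/*
+ * Illustrative only: a subdev that should be closed in the second
+ * category, first within that category, could register with
+ *
+ *	msm_subdev->close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x1;
+ *
+ * (the generic buffer manager in this series uses
+ * MSM_SD_CLOSE_4TH_CATEGORY in msm_buf_mngr_init()).
+ */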
+
+struct msm_sd_subdev {
+ struct v4l2_subdev sd;
+ int close_seq;
+ struct list_head list;
+};
+
+struct msm_sd_req_sd {
+ char *name;
+ struct v4l2_subdev *subdev;
+};
+
+struct msm_sd_req_vb2_q {
+ struct vb2_v4l2_buffer * (*get_buf)(int session_id,
+ unsigned int stream_id);
+ struct vb2_queue * (*get_vb2_queue)(int session_id,
+ unsigned int stream_id);
+ struct vb2_v4l2_buffer * (*get_buf_by_idx)(int session_id,
+ unsigned int stream_id, uint32_t index);
+ int (*put_buf)(struct vb2_v4l2_buffer *vb2_buf, int session_id,
+ unsigned int stream_id);
+ int (*buf_done)(struct vb2_v4l2_buffer *vb2_v4l2_buf, int session_id,
+ unsigned int stream_id, uint32_t sequence, struct timeval *ts,
+ uint32_t reserved);
+ int (*flush_buf)(int session_id, unsigned int stream_id);
+};
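+
+/*
+ * The ops above are filled in by the vb2 layer (msm_vb2_request_cb()) when
+ * a subdev asks for them via the MSM_SD_NOTIFY_REQ_CB notification; see
+ * msm_sd_notify() in msm.c and msm_buf_mngr_init() for the two ends of
+ * that handshake.
+ */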
+
+#define MSM_SD_NOTIFY_GET_SD 0x00000001
+#define MSM_SD_NOTIFY_PUT_SD 0x00000002
+#define MSM_SD_NOTIFY_REQ_CB 0x00000003
+
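+/*
+ * memcpy-based helper used to move pointer values into and out of the
+ * private ioctl argument (ioctl_ptr); len is the size of the pointer
+ * being copied (see msm_cam_buf_mgr_ops() and msm_buf_mngr_subdev_ioctl()).
+ */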
+#define MSM_CAM_GET_IOCTL_ARG_PTR(ptr, \
+ ioctl_ptr, len) memcpy(ptr, ioctl_ptr, len)
+
+int msm_sd_register(struct msm_sd_subdev *msm_subdev);
+int msm_sd_unregister(struct msm_sd_subdev *sd);
+struct v4l2_subdev *msm_sd_get_subdev(struct v4l2_subdev *sd,
+ const char *get_name);
+void msm_sd_put_subdev(struct v4l2_subdev *sd, struct v4l2_subdev *put);
+void msm_cam_copy_v4l2_subdev_fops(struct v4l2_file_operations *d1);
+
+#endif /*_MSM_SD_H */
diff --git a/drivers/media/platform/msm/ais/msm_vb2/Makefile b/drivers/media/platform/msm/ais/msm_vb2/Makefile
new file mode 100644
index 000000000000..96b5b4e5b0e3
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm_vb2/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_vb2
+obj-$(CONFIG_MSM_AIS) += msm_vb2.o
diff --git a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c
new file mode 100644
index 000000000000..280bf4ebb596
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c
@@ -0,0 +1,438 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-VB2 %s:%d " fmt, __func__, __LINE__
+#include "msm_vb2.h"
+
+static int msm_vb2_queue_setup(struct vb2_queue *q,
+ const void *parg,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ int i;
+ struct msm_v4l2_format_data *data = q->drv_priv;
+
+ if (!data) {
+ pr_err("%s: drv_priv NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (data->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (WARN_ON(data->num_planes > VIDEO_MAX_PLANES))
+ return -EINVAL;
+
+ *num_planes = data->num_planes;
+
+ for (i = 0; i < data->num_planes; i++)
+ sizes[i] = data->plane_sizes[i];
+ } else {
+ pr_err("%s: Unsupported buf type :%d\n", __func__,
+ data->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int msm_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct msm_stream *stream;
+ struct msm_vb2_buffer *msm_vb2_buf;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
+ if (!stream) {
+ pr_err("%s: Couldn't find stream\n", __func__);
+ return -EINVAL;
+ }
+ msm_vb2_buf = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
+ msm_vb2_buf->in_freeq = 0;
+
+ return 0;
+}
+
+static void msm_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct msm_vb2_buffer *msm_vb2;
+ struct msm_stream *stream;
+ unsigned long flags;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ msm_vb2 = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
+ if (!msm_vb2) {
+ pr_err("%s:%d] vb2_buf NULL", __func__, __LINE__);
+ return;
+ }
+
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
+ if (!stream) {
+ pr_err("%s:%d] NULL stream", __func__, __LINE__);
+ return;
+ }
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ list_add_tail(&msm_vb2->list, &stream->queued_list);
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+}
+
+static void msm_vb2_buf_finish(struct vb2_buffer *vb)
+{
+ struct msm_vb2_buffer *msm_vb2;
+ struct msm_stream *stream;
+ unsigned long flags;
+ struct msm_vb2_buffer *msm_vb2_entry, *temp;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ msm_vb2 = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
+ if (!msm_vb2) {
+ pr_err("%s:%d] vb2_buf NULL", __func__, __LINE__);
+ return;
+ }
+
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
+ if (!stream) {
+ pr_err("%s:%d] NULL stream", __func__, __LINE__);
+ return;
+ }
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ list_for_each_entry_safe(msm_vb2_entry, temp, &(stream->queued_list),
+ list) {
+ if (msm_vb2_entry == msm_vb2) {
+ list_del_init(&msm_vb2_entry->list);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+}
+
+static void msm_vb2_stop_stream(struct vb2_queue *q)
+{
+ struct msm_vb2_buffer *msm_vb2, *temp;
+ struct msm_stream *stream;
+ unsigned long flags;
+ struct vb2_v4l2_buffer *vb2_v4l2_buf;
+
+ stream = msm_get_stream_from_vb2q(q);
+ if (!stream) {
+ pr_err_ratelimited("%s:%d] NULL stream", __func__, __LINE__);
+ return;
+ }
+
+ /*
+ * Release all the buffers enqueued to driver
+ * when streamoff is issued
+ */
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ list_for_each_entry_safe(msm_vb2, temp, &(stream->queued_list),
+ list) {
+ vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf);
+ if (vb2_v4l2_buf->vb2_buf.state == VB2_BUF_STATE_DONE)
+ continue;
+ vb2_buffer_done(&vb2_v4l2_buf->vb2_buf,
+ VB2_BUF_STATE_DONE);
+ msm_vb2->in_freeq = 0;
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+}
+
+static struct vb2_ops msm_vb2_get_q_op = {
+ .queue_setup = msm_vb2_queue_setup,
+ .buf_init = msm_vb2_buf_init,
+ .buf_queue = msm_vb2_buf_queue,
+ .buf_finish = msm_vb2_buf_finish,
+ .stop_streaming = msm_vb2_stop_stream,
+};
+
+
+struct vb2_ops *msm_vb2_get_q_ops(void)
+{
+ return &msm_vb2_get_q_op;
+}
+
+static void *msm_vb2_dma_contig_get_userptr(void *alloc_ctx,
+ unsigned long vaddr, unsigned long size,
+ enum dma_data_direction dma_dir)
+{
+ struct msm_vb2_private_data *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+ priv->vaddr = (void *)vaddr;
+ priv->size = size;
+ priv->alloc_ctx = alloc_ctx;
+ return priv;
+}
+
+static void msm_vb2_dma_contig_put_userptr(void *buf_priv)
+{
+ kzfree(buf_priv);
+}
+
+static struct vb2_mem_ops msm_vb2_get_q_mem_op = {
+ .get_userptr = msm_vb2_dma_contig_get_userptr,
+ .put_userptr = msm_vb2_dma_contig_put_userptr,
+};
+
+struct vb2_mem_ops *msm_vb2_get_q_mem_ops(void)
+{
+ return &msm_vb2_get_q_mem_op;
+}
+
+static struct vb2_queue *msm_vb2_get_queue(int session_id,
+ unsigned int stream_id)
+{
+ return msm_get_stream_vb2q(session_id, stream_id);
+}
+
+static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id,
+ unsigned int stream_id)
+{
+ struct msm_stream *stream;
+ struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
+ struct msm_vb2_buffer *msm_vb2 = NULL;
+ unsigned long flags;
+
+ stream = msm_get_stream(session_id, stream_id);
+ if (IS_ERR_OR_NULL(stream))
+ return NULL;
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+
+ if (!stream->vb2_q) {
+ pr_err("%s: stream q not available\n", __func__);
+ goto end;
+ }
+
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf);
+ if (vb2_v4l2_buf->vb2_buf.state != VB2_BUF_STATE_ACTIVE)
+ continue;
+
+ if (msm_vb2->in_freeq)
+ continue;
+
+ msm_vb2->in_freeq = 1;
+ goto end;
+ }
+ msm_vb2 = NULL;
+ vb2_v4l2_buf = NULL;
+end:
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return vb2_v4l2_buf;
+}
+
+static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id,
+ unsigned int stream_id, uint32_t index)
+{
+ struct msm_stream *stream;
+ struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
+ struct msm_vb2_buffer *msm_vb2 = NULL;
+ unsigned long flags;
+
+ stream = msm_get_stream(session_id, stream_id);
+ if (IS_ERR_OR_NULL(stream))
+ return NULL;
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+
+ if (!stream->vb2_q) {
+ pr_err("%s: stream q not available\n", __func__);
+ goto end;
+ }
+
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf);
+ if ((vb2_v4l2_buf->vb2_buf.index != index) || msm_vb2->in_freeq
+ || vb2_v4l2_buf->vb2_buf.state != VB2_BUF_STATE_ACTIVE)
+ continue;
+
+ msm_vb2->in_freeq = 1;
+ goto end;
+ }
+ msm_vb2 = NULL;
+ vb2_v4l2_buf = NULL;
+end:
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return vb2_v4l2_buf;
+}
+
+static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
+ unsigned int stream_id)
+{
+ struct msm_stream *stream;
+ struct msm_vb2_buffer *msm_vb2;
+ struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
+ int rc = 0;
+ unsigned long flags;
+
+ stream = msm_get_stream(session_id, stream_id);
+ if (IS_ERR_OR_NULL(stream))
+ return -EINVAL;
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ if (vb) {
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf);
+ if (vb2_v4l2_buf == vb)
+ break;
+ }
+ if (vb2_v4l2_buf != vb) {
+ pr_err("VB buffer is INVALID vb=%pK, ses_id=%d, str_id=%d\n",
+ vb, session_id, stream_id);
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return -EINVAL;
+ }
+ msm_vb2 =
+ container_of(vb2_v4l2_buf, struct msm_vb2_buffer,
+ vb2_v4l2_buf);
+ if (msm_vb2->in_freeq) {
+ msm_vb2->in_freeq = 0;
+ rc = 0;
+ } else
+ rc = -EINVAL;
+ } else {
+ pr_err(" VB buffer is null for ses_id=%d, str_id=%d\n",
+ session_id, stream_id);
+ rc = -EINVAL;
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return rc;
+}
+
+static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
+ unsigned int stream_id, uint32_t sequence,
+ struct timeval *ts, uint32_t reserved)
+{
+ unsigned long flags;
+ struct msm_vb2_buffer *msm_vb2;
+ struct msm_stream *stream;
+ struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
+ int rc = 0;
+
+ stream = msm_get_stream(session_id, stream_id);
+ if (IS_ERR_OR_NULL(stream))
+ return -EINVAL;
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ if (vb) {
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf);
+ if (vb2_v4l2_buf == vb)
+ break;
+ }
+ if (vb2_v4l2_buf != vb) {
+ pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n",
+ session_id, stream_id, vb);
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return -EINVAL;
+ }
+ msm_vb2 = container_of(vb2_v4l2_buf,
+ struct msm_vb2_buffer, vb2_v4l2_buf);
+ /* put buf before buf done */
+ if (msm_vb2->in_freeq) {
+ vb2_v4l2_buf->sequence = sequence;
+ vb2_v4l2_buf->timestamp = *ts;
+ vb2_buffer_done(&vb2_v4l2_buf->vb2_buf,
+ VB2_BUF_STATE_DONE);
+ msm_vb2->in_freeq = 0;
+ rc = 0;
+ } else
+ rc = -EINVAL;
+ } else {
+ pr_err(" VB buffer is NULL for ses_id=%d, str_id=%d\n",
+ session_id, stream_id);
+ rc = -EINVAL;
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return rc;
+}
+
+long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id,
+ uint32_t index)
+{
+ struct msm_stream *stream;
+ struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
+ struct msm_vb2_buffer *msm_vb2 = NULL;
+ unsigned long flags;
+ long rc = -EINVAL;
+
+ stream = msm_get_stream(session_id, stream_id);
+ if (IS_ERR_OR_NULL(stream))
+ return rc;
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+
+ if (!stream->vb2_q) {
+ pr_err("%s: stream q not available\n", __func__);
+ goto end;
+ }
+
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf);
+ if ((vb2_v4l2_buf->vb2_buf.index != index)
+ || vb2_v4l2_buf->vb2_buf.state != VB2_BUF_STATE_ACTIVE)
+ continue;
+
+ if (!msm_vb2->in_freeq) {
+ vb2_buffer_done(&vb2_v4l2_buf->vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ rc = 0;
+ } else {
+ rc = -EINVAL;
+ }
+ break;
+ }
+
+end:
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL(msm_vb2_return_buf_by_idx);
+
+static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
+{
+ unsigned long flags;
+ struct msm_vb2_buffer *msm_vb2;
+ struct msm_stream *stream;
+ struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
+
+ stream = msm_get_stream(session_id, stream_id);
+ if (IS_ERR_OR_NULL(stream))
+ return -EINVAL;
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf);
+ /* Do buf done for all buffers*/
+ vb2_buffer_done(&vb2_v4l2_buf->vb2_buf, VB2_BUF_STATE_DONE);
+ msm_vb2->in_freeq = 0;
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return 0;
+}
+
+
+int msm_vb2_request_cb(struct msm_sd_req_vb2_q *req)
+{
+ if (!req) {
+ pr_err("%s: suddev is null\n", __func__);
+ return -EINVAL;
+ }
+
+ req->get_buf = msm_vb2_get_buf;
+ req->get_buf_by_idx = msm_vb2_get_buf_by_idx;
+ req->get_vb2_queue = msm_vb2_get_queue;
+ req->put_buf = msm_vb2_put_buf;
+ req->buf_done = msm_vb2_buf_done;
+ req->flush_buf = msm_vb2_flush_buf;
+ return 0;
+}
+
diff --git a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.h b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.h
new file mode 100644
index 000000000000..3dbb21332857
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_VB_H
+#define _MSM_VB_H
+
+#include <linux/version.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <linux/pm_qos.h>
+#include <linux/wakelock.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/ais/msm_ais.h>
+#include <media/videobuf2-core.h>
+#include "msm.h"
+#include "msm_sd.h"
+
+struct msm_vb2_buffer {
+ /*
+ * The vb2 buffer has to be first in the structure
+ * because both the V4L2 framework and this driver
+ * cast msm_vb2_buffer directly to a vb2 buffer.
+ */
+ struct vb2_v4l2_buffer vb2_v4l2_buf;
+ struct list_head list;
+ int in_freeq;
+};
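+
+/*
+ * Note: in_freeq is set when a buffer is handed out through
+ * msm_vb2_get_buf()/msm_vb2_get_buf_by_idx() and cleared again when the
+ * buffer is returned via put_buf, buf_done or a flush.
+ */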
+
+struct msm_vb2_private_data {
+ void *vaddr;
+ unsigned long size;
+ /* allocation context passed in at get_userptr time */
+ void *alloc_ctx;
+};
+
+struct msm_stream {
+ struct list_head list;
+
+ /* stream index per session, same
+ * as stream_id but set through s_parm
+ */
+ unsigned int stream_id;
+ /* vb2 buffer handling */
+ struct vb2_queue *vb2_q;
+ spinlock_t stream_lock;
+ struct list_head queued_list;
+};
+
+struct vb2_ops *msm_vb2_get_q_ops(void);
+struct vb2_mem_ops *msm_vb2_get_q_mem_ops(void);
+int msm_vb2_request_cb(struct msm_sd_req_vb2_q *req_sd);
+long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id,
+ uint32_t index);
+
+#endif /*_MSM_VB_H */
diff --git a/drivers/media/platform/msm/ais/pproc/Makefile b/drivers/media/platform/msm/ais/pproc/Makefile
new file mode 100644
index 000000000000..bc6b5fff9ead
--- /dev/null
+++ b/drivers/media/platform/msm/ais/pproc/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MSM_AIS_CPP) += cpp/
diff --git a/drivers/media/platform/msm/ais/pproc/cpp/Makefile b/drivers/media/platform/msm/ais/pproc/cpp/Makefile
new file mode 100644
index 000000000000..5edd3b5b5e78
--- /dev/null
+++ b/drivers/media/platform/msm/ais/pproc/cpp/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/isp/
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_buf_mgr/
+obj-$(CONFIG_MSM_AIS_CPP) += msm_cpp_soc.o msm_cpp.o
diff --git a/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c
new file mode 100644
index 000000000000..c9cb0080436a
--- /dev/null
+++ b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c
@@ -0,0 +1,4497 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "MSM-CPP %s:%d " fmt, __func__, __LINE__
+
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/proc_fs.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/msmb_camera.h>
+#include <media/msmb_generic_buf_mgr.h>
+#include <media/msmb_pproc.h>
+#include "msm_cpp.h"
+#include "msm_isp_util.h"
+#include "msm_camera_io_util.h"
+#include <linux/debugfs.h>
+#include "cam_smmu_api.h"
+
+#define MSM_CPP_DRV_NAME "msm_cpp"
+
+#define MSM_CPP_MAX_BUFF_QUEUE 16
+
+#define CONFIG_MSM_CPP_DBG 0
+
+#define ENABLE_CPP_LOW 0
+
+#define CPP_CMD_TIMEOUT_MS 300
+#define MSM_CPP_INVALID_OFFSET 0x00000000
+#define MSM_CPP_NOMINAL_CLOCK 266670000
+#define MSM_CPP_TURBO_CLOCK 320000000
+
+#define CPP_FW_VERSION_1_2_0 0x10020000
+#define CPP_FW_VERSION_1_4_0 0x10040000
+#define CPP_FW_VERSION_1_6_0 0x10060000
+#define CPP_FW_VERSION_1_8_0 0x10080000
+#define CPP_FW_VERSION_1_10_0 0x10100000
+
+/* dump the frame command before writing to the hardware */
+#define MSM_CPP_DUMP_FRM_CMD 0
+
+#define CPP_CLK_INFO_MAX 16
+
+#define MSM_CPP_IRQ_MASK_VAL 0x7c8
+
+#define CPP_GDSCR_SW_COLLAPSE_ENABLE 0xFFFFFFFE
+#define CPP_GDSCR_SW_COLLAPSE_DISABLE 0xFFFFFFFD
+#define CPP_GDSCR_HW_CONTROL_ENABLE 0x2
+#define CPP_GDSCR_HW_CONTROL_DISABLE 0x1
+#define PAYLOAD_NUM_PLANES 3
+#define TNR_MASK 0x4
+#define UBWC_MASK 0x20
+#define CDS_MASK 0x40
+#define MMU_PF_MASK 0x80
+#define POP_FRONT 1
+#define POP_BACK 0
+#define BATCH_DUP_MASK 0x100
+
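+/*
+ * Batch-on-preview helpers: when a batch buffer is used on the preview
+ * stream with duplicate output enabled, swap in the duplicate
+ * identity / buffer index in place of the current one.
+ */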
+#define IS_BATCH_BUFFER_ON_PREVIEW(new_frame) \
+ (((new_frame->batch_info.batch_mode == BATCH_MODE_PREVIEW) && \
+ new_frame->duplicate_output) ? 1 : 0)
+
+#define SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(new_frame, iden, swap_iden) { \
+ if (IS_BATCH_BUFFER_ON_PREVIEW(new_frame)) \
+ iden = swap_iden; \
+}
+
+#define SWAP_BUF_INDEX_FOR_BATCH_ON_PREVIEW(new_frame, buff_mgr_info, \
+ cur_index, swap_index) { \
+ if (IS_BATCH_BUFFER_ON_PREVIEW(new_frame)) \
+ buff_mgr_info.index = swap_index; \
+ else \
+ buff_mgr_info.index = cur_index; \
+}
+
+/*
+ * Default buffer index for get buf - 0xFFFFFFFF.
+ * 0 is a valid index, so when userspace passes no
+ * valid index, use the last buffer from the queue.
+ */
+#define DEFAULT_OUTPUT_BUF_INDEX 0xFFFFFFFF
+#define IS_DEFAULT_OUTPUT_BUF_INDEX(index) \
+ ((index == DEFAULT_OUTPUT_BUF_INDEX) ? 1 : 0)
+
+static struct msm_cpp_vbif_data cpp_vbif;
+
+static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
+ uint32_t buff_mgr_ops, uint32_t ids, void *arg);
+
+static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev,
+ struct msm_queue_cmd *frame_qcmd);
+static int msm_cpp_send_command_to_hardware(struct cpp_device *cpp_dev,
+ uint32_t *cmd_msg, uint32_t payload_size);
+
+static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev,
+ bool status);
+static int msm_cpp_buffer_private_ops(struct cpp_device *cpp_dev,
+ uint32_t buff_mgr_ops, uint32_t id, void *arg);
+static void msm_cpp_set_micro_irq_mask(struct cpp_device *cpp_dev,
+ uint8_t enable, uint32_t irq_mask);
+static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev,
+ int queue_len);
+static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info);
+static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *frame_info);
+static int32_t msm_cpp_reset_vbif_and_load_fw(struct cpp_device *cpp_dev);
+
+#if CONFIG_MSM_CPP_DBG
+#define CPP_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CPP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define CPP_LOW(fmt, args...) do { \
+ if (ENABLE_CPP_LOW) \
+ pr_info(fmt, ##args); \
+ } while (0)
+
+#define ERR_USER_COPY(to) pr_err("copy %s user\n", \
+ ((to) ? "to" : "from"))
+#define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
+
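+/*
+ * Dequeue a command from the front (POP_FRONT) or back (POP_BACK) of a
+ * device queue under its spinlock; evaluates to NULL when the queue is
+ * empty.
+ */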
+#define msm_dequeue(queue, member, pop_dir) ({ \
+ unsigned long flags; \
+ struct msm_device_queue *__q = (queue); \
+ struct msm_queue_cmd *qcmd = 0; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ __q->len--; \
+ qcmd = pop_dir ? list_first_entry(&__q->list, \
+ struct msm_queue_cmd, member) : \
+ list_last_entry(&__q->list, \
+ struct msm_queue_cmd, member); \
+ list_del_init(&qcmd->member); \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+ qcmd; \
+})
+
+#define MSM_CPP_MAX_TIMEOUT_TRIAL 1
+
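+/*
+ * Watchdog state for frames currently submitted to hardware;
+ * processed_frame[] is guarded by processed_frame_lock.
+ */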
+struct msm_cpp_timer_data_t {
+ struct cpp_device *cpp_dev;
+ struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
+ spinlock_t processed_frame_lock;
+};
+
+struct msm_cpp_timer_t {
+ atomic_t used;
+ struct msm_cpp_timer_data_t data;
+ struct timer_list cpp_timer;
+};
+
+struct msm_cpp_timer_t cpp_timer;
+static void msm_cpp_set_vbif_reg_values(struct cpp_device *cpp_dev);
+
+
+void msm_cpp_vbif_register_error_handler(void *dev,
+ enum cpp_vbif_client client,
+ int (*client_vbif_error_handler)(void *, uint32_t))
+{
+ if (dev == NULL || client >= VBIF_CLIENT_MAX) {
+ pr_err("%s: Fail to register handler! dev = %pK,client %d\n",
+ __func__, dev, client);
+ return;
+ }
+
+ if (client_vbif_error_handler != NULL) {
+ cpp_vbif.dev[client] = dev;
+ cpp_vbif.err_handler[client] = client_vbif_error_handler;
+ } else {
+ /* handler == NULL means this is an unregister request */
+ cpp_vbif.dev[client] = NULL;
+ cpp_vbif.err_handler[client] = NULL;
+ }
+}
+static int msm_cpp_init_bandwidth_mgr(struct cpp_device *cpp_dev)
+{
+ int rc = 0;
+
+ rc = msm_camera_register_bus_client(cpp_dev->pdev, CAM_BUS_CLIENT_CPP);
+ if (rc < 0) {
+ pr_err("Fail to register bus client\n");
+ return -ENOENT;
+ }
+
+ rc = msm_camera_update_bus_bw(CAM_BUS_CLIENT_CPP, 0, 0);
+ if (rc < 0) {
+ msm_camera_unregister_bus_client(CAM_BUS_CLIENT_CPP);
+ pr_err("Fail bus scale update %d\n", rc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int msm_cpp_update_bandwidth(struct cpp_device *cpp_dev,
+ uint64_t ab, uint64_t ib)
+{
+ int rc;
+
+ rc = msm_camera_update_bus_bw(CAM_BUS_CLIENT_CPP, ab, ib);
+ if (rc < 0) {
+ pr_err("Fail bus scale update %d\n", rc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void msm_cpp_deinit_bandwidth_mgr(struct cpp_device *cpp_dev)
+{
+ int rc = 0;
+
+ rc = msm_camera_unregister_bus_client(CAM_BUS_CLIENT_CPP);
+ if (rc < 0) {
+ pr_err("Failed to unregister %d\n", rc);
+ return;
+ }
+}
+
+static int msm_cpp_update_bandwidth_setting(struct cpp_device *cpp_dev,
+ uint64_t ab, uint64_t ib) {
+ int rc;
+
+ if (cpp_dev->bus_master_flag)
+ rc = msm_cpp_update_bandwidth(cpp_dev, ab, ib);
+ else
+ rc = msm_isp_update_bandwidth(ISP_CPP, ab, ib);
+ return rc;
+}
+
+static void msm_queue_init(struct msm_device_queue *queue, const char *name)
+{
+ CPP_DBG("E\n");
+ spin_lock_init(&queue->lock);
+ queue->len = 0;
+ queue->max = 0;
+ queue->name = name;
+ INIT_LIST_HEAD(&queue->list);
+ init_waitqueue_head(&queue->wait);
+}
+
+static void msm_enqueue(struct msm_device_queue *queue,
+ struct list_head *entry)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ queue->len++;
+ if (queue->len > queue->max) {
+ queue->max = queue->len;
+ pr_debug("queue %s new max is %d\n", queue->name, queue->max);
+ }
+ list_add_tail(entry, &queue->list);
+ wake_up(&queue->wait);
+ CPP_DBG("woke up %s\n", queue->name);
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
+
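+/* Free every command still pending on the given device queue, under its lock. */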
+#define msm_cpp_empty_list(queue, member) { \
+ unsigned long flags; \
+ struct msm_queue_cmd *qcmd = NULL; \
+ if (queue) { \
+ spin_lock_irqsave(&queue->lock, flags); \
+ while (!list_empty(&queue->list)) { \
+ queue->len--; \
+ qcmd = list_first_entry(&queue->list, \
+ struct msm_queue_cmd, member); \
+ list_del_init(&qcmd->member); \
+ kfree(qcmd); \
+ } \
+ spin_unlock_irqrestore(&queue->lock, flags); \
+ } \
+}
+
+static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev,
+ uint8_t put_buf);
+static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin);
+static void cpp_timer_callback(unsigned long data);
+
+uint8_t induce_error;
+static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev);
+
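+/* Push one word into the microcontroller RX FIFO (host to micro). */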
+static void msm_cpp_write(u32 data, void __iomem *cpp_base)
+{
+ msm_camera_io_w((data), cpp_base + MSM_CPP_MICRO_FIFO_RX_DATA);
+}
+
+static void msm_cpp_clear_timer(struct cpp_device *cpp_dev)
+{
+ uint32_t i = 0;
+
+ if (atomic_read(&cpp_timer.used)) {
+ atomic_set(&cpp_timer.used, 0);
+ del_timer(&cpp_timer.cpp_timer);
+ for (i = 0; i < MAX_CPP_PROCESSING_FRAME; i++)
+ cpp_timer.data.processed_frame[i] = NULL;
+ cpp_dev->timeout_trial_cnt = 0;
+ }
+}
+
+static void msm_cpp_timer_queue_update(struct cpp_device *cpp_dev)
+{
+ uint32_t i;
+ unsigned long flags;
+
+ CPP_DBG("Frame done qlen %d\n", cpp_dev->processing_q.len);
+ if (cpp_dev->processing_q.len <= 1) {
+ msm_cpp_clear_timer(cpp_dev);
+ } else {
+ spin_lock_irqsave(&cpp_timer.data.processed_frame_lock, flags);
+ for (i = 0; i < cpp_dev->processing_q.len - 1; i++)
+ cpp_timer.data.processed_frame[i] =
+ cpp_timer.data.processed_frame[i + 1];
+ cpp_timer.data.processed_frame[i] = NULL;
+ cpp_dev->timeout_trial_cnt = 0;
+ spin_unlock_irqrestore(&cpp_timer.data.processed_frame_lock,
+ flags);
+
+ mod_timer(&cpp_timer.cpp_timer,
+ jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
+ }
+}
+
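+/*
+ * Pop one word from the microcontroller TX FIFO; returns 0xDEADBEEF if
+ * no data shows up after a few retries.
+ */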
+static uint32_t msm_cpp_read(void __iomem *cpp_base)
+{
+ uint32_t tmp, retry = 0;
+
+ do {
+ tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_TX_STAT);
+ } while (((tmp & 0x2) == 0x0) && (retry++ < 10));
+ if (retry < 10) {
+ tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_TX_DATA);
+ CPP_DBG("Read data: 0%x\n", tmp);
+ } else {
+ CPP_DBG("Read failed\n");
+ tmp = 0xDEADBEEF;
+ }
+
+ return tmp;
+}
+
+static struct msm_cpp_buff_queue_info_t *msm_cpp_get_buff_queue_entry(
+ struct cpp_device *cpp_dev, uint32_t session_id, uint32_t stream_id)
+{
+ uint32_t i = 0;
+ struct msm_cpp_buff_queue_info_t *buff_queue_info = NULL;
+
+ for (i = 0; i < cpp_dev->num_buffq; i++) {
+ if ((cpp_dev->buff_queue[i].used == 1) &&
+ (cpp_dev->buff_queue[i].session_id == session_id) &&
+ (cpp_dev->buff_queue[i].stream_id == stream_id)) {
+ buff_queue_info = &cpp_dev->buff_queue[i];
+ break;
+ }
+ }
+
+ if (buff_queue_info == NULL) {
+ CPP_DBG("buffer queue entry for sess:%d strm:%d not found\n",
+ session_id, stream_id);
+ }
+ return buff_queue_info;
+}
+
+static unsigned long msm_cpp_get_phy_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
+ uint8_t native_buff, int32_t *fd)
+{
+ unsigned long phy_add = 0;
+ struct list_head *buff_head;
+ struct msm_cpp_buffer_map_list_t *buff, *save;
+
+ if (native_buff)
+ buff_head = &buff_queue_info->native_buff_head;
+ else
+ buff_head = &buff_queue_info->vb2_buff_head;
+
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ if (buff->map_info.buff_info.index == buff_index) {
+ phy_add = buff->map_info.phy_addr;
+ *fd = buff->map_info.buff_info.fd;
+ break;
+ }
+ }
+
+ return phy_add;
+}
+
+static unsigned long msm_cpp_queue_buffer_info(struct cpp_device *cpp_dev,
+ struct msm_cpp_buff_queue_info_t *buff_queue,
+ struct msm_cpp_buffer_info_t *buffer_info)
+{
+ struct list_head *buff_head;
+ struct msm_cpp_buffer_map_list_t *buff, *save;
+ int rc = 0;
+
+ if (buffer_info->native_buff)
+ buff_head = &buff_queue->native_buff_head;
+ else
+ buff_head = &buff_queue->vb2_buff_head;
+
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ if (buff->map_info.buff_info.index == buffer_info->index) {
+ pr_err("error buffer index already queued\n");
+ goto error;
+ }
+ }
+
+ buff = kzalloc(
+ sizeof(struct msm_cpp_buffer_map_list_t), GFP_KERNEL);
+ if (!buff)
+ goto error;
+
+ buff->map_info.buff_info = *buffer_info;
+
+ buff->map_info.buf_fd = buffer_info->fd;
+ rc = cam_smmu_get_phy_addr(cpp_dev->iommu_hdl, buffer_info->fd,
+ CAM_SMMU_MAP_RW, &buff->map_info.phy_addr,
+ (size_t *)&buff->map_info.len);
+ if (rc < 0) {
+ pr_err("ION mmap failed\n");
+ kzfree(buff);
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&buff->entry);
+ list_add_tail(&buff->entry, buff_head);
+
+ return buff->map_info.phy_addr;
+error:
+ return 0;
+}
+
+static void msm_cpp_dequeue_buffer_info(struct cpp_device *cpp_dev,
+ struct msm_cpp_buffer_map_list_t *buff)
+{
+ int ret = -1;
+
+ ret = cam_smmu_put_phy_addr(cpp_dev->iommu_hdl, buff->map_info.buf_fd);
+ if (ret < 0)
+ pr_err("Error: cannot put the iommu handle back to ion fd\n");
+
+ list_del_init(&buff->entry);
+ kzfree(buff);
+}
+
+static unsigned long msm_cpp_fetch_buffer_info(struct cpp_device *cpp_dev,
+ struct msm_cpp_buffer_info_t *buffer_info, uint32_t session_id,
+ uint32_t stream_id, int32_t *fd)
+{
+ unsigned long phy_addr = 0;
+ struct msm_cpp_buff_queue_info_t *buff_queue_info;
+ uint8_t native_buff = buffer_info->native_buff;
+
+ buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev, session_id,
+ stream_id);
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
+ session_id, stream_id);
+ return phy_addr;
+ }
+
+ phy_addr = msm_cpp_get_phy_addr(cpp_dev, buff_queue_info,
+ buffer_info->index, native_buff, fd);
+ if ((phy_addr == 0) && (native_buff)) {
+ phy_addr = msm_cpp_queue_buffer_info(cpp_dev, buff_queue_info,
+ buffer_info);
+ *fd = buffer_info->fd;
+ }
+ return phy_addr;
+}
+
+static int32_t msm_cpp_dequeue_buff_info_list(struct cpp_device *cpp_dev,
+ struct msm_cpp_buff_queue_info_t *buff_queue_info)
+{
+ struct msm_cpp_buffer_map_list_t *buff, *save;
+ struct list_head *buff_head;
+
+ buff_head = &buff_queue_info->native_buff_head;
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ msm_cpp_dequeue_buffer_info(cpp_dev, buff);
+ }
+
+ buff_head = &buff_queue_info->vb2_buff_head;
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ msm_cpp_dequeue_buffer_info(cpp_dev, buff);
+ }
+
+ return 0;
+}
+
+static int32_t msm_cpp_dequeue_buff(struct cpp_device *cpp_dev,
+ struct msm_cpp_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
+ uint8_t native_buff)
+{
+ struct msm_cpp_buffer_map_list_t *buff, *save;
+ struct list_head *buff_head;
+
+ if (native_buff)
+ buff_head = &buff_queue_info->native_buff_head;
+ else
+ buff_head = &buff_queue_info->vb2_buff_head;
+
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ if (buff->map_info.buff_info.index == buff_index) {
+ msm_cpp_dequeue_buffer_info(cpp_dev, buff);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int32_t msm_cpp_add_buff_queue_entry(struct cpp_device *cpp_dev,
+ uint16_t session_id, uint16_t stream_id)
+{
+ uint32_t i;
+ struct msm_cpp_buff_queue_info_t *buff_queue_info;
+
+ for (i = 0; i < cpp_dev->num_buffq; i++) {
+ if (cpp_dev->buff_queue[i].used == 0) {
+ buff_queue_info = &cpp_dev->buff_queue[i];
+ buff_queue_info->used = 1;
+ buff_queue_info->session_id = session_id;
+ buff_queue_info->stream_id = stream_id;
+ INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
+ INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
+ return 0;
+ }
+ }
+ pr_err("buffer queue full. error for sessionid: %d streamid: %d\n",
+ session_id, stream_id);
+ return -EINVAL;
+}
+
+static int32_t msm_cpp_free_buff_queue_entry(struct cpp_device *cpp_dev,
+ uint32_t session_id, uint32_t stream_id)
+{
+ struct msm_cpp_buff_queue_info_t *buff_queue_info;
+
+ buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev, session_id,
+ stream_id);
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
+ session_id, stream_id);
+ return -EINVAL;
+ }
+
+ buff_queue_info->used = 0;
+ buff_queue_info->session_id = 0;
+ buff_queue_info->stream_id = 0;
+ INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
+ INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
+ return 0;
+}
+
+static int32_t msm_cpp_create_buff_queue(struct cpp_device *cpp_dev,
+ uint32_t num_buffq)
+{
+ struct msm_cpp_buff_queue_info_t *buff_queue;
+
+ buff_queue = kzalloc(
+ sizeof(struct msm_cpp_buff_queue_info_t) * num_buffq,
+ GFP_KERNEL);
+ if (!buff_queue) {
+ pr_err("Buff queue allocation failure\n");
+ return -ENOMEM;
+ }
+
+ if (cpp_dev->buff_queue) {
+ pr_err("Buff queue not empty\n");
+ kzfree(buff_queue);
+ return -EINVAL;
+ }
+ cpp_dev->buff_queue = buff_queue;
+ cpp_dev->num_buffq = num_buffq;
+ return 0;
+}
+
+static void msm_cpp_delete_buff_queue(struct cpp_device *cpp_dev)
+{
+ uint32_t i;
+
+ for (i = 0; i < cpp_dev->num_buffq; i++) {
+ if (cpp_dev->buff_queue[i].used == 1) {
+ pr_warn("Queue not free sessionid: %d, streamid: %d\n",
+ cpp_dev->buff_queue[i].session_id,
+ cpp_dev->buff_queue[i].stream_id);
+ msm_cpp_dequeue_buff_info_list
+ (cpp_dev, &cpp_dev->buff_queue[i]);
+ msm_cpp_free_buff_queue_entry(cpp_dev,
+ cpp_dev->buff_queue[i].session_id,
+ cpp_dev->buff_queue[i].stream_id);
+ }
+ }
+ kzfree(cpp_dev->buff_queue);
+ cpp_dev->buff_queue = NULL;
+ cpp_dev->num_buffq = 0;
+}
+
+static int32_t msm_cpp_poll(void __iomem *cpp_base, u32 val)
+{
+ uint32_t tmp, retry = 0;
+ int32_t rc = 0;
+
+ do {
+ tmp = msm_cpp_read(cpp_base);
+ if (tmp != 0xDEADBEEF)
+ CPP_LOW("poll: 0%x\n", tmp);
+ usleep_range(200, 250);
+ } while ((tmp != val) && (retry++ < MSM_CPP_POLL_RETRIES));
+ if (retry < MSM_CPP_POLL_RETRIES) {
+ CPP_LOW("Poll finished\n");
+ } else {
+ pr_err("Poll failed: expect: 0x%x\n", val);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static int32_t msm_cpp_poll_rx_empty(void __iomem *cpp_base)
+{
+ uint32_t tmp, retry = 0;
+ int32_t rc = 0;
+
+ tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_RX_STAT);
+ while (((tmp & 0x2) != 0x0) && (retry++ < MSM_CPP_POLL_RETRIES)) {
+ /*
+ * The usleep range below was chosen experimentally and is the
+ * smallest value that works. The sleep leaves enough time for
+ * the microcontroller to read the rx fifo.
+ */
+ usleep_range(200, 300);
+ tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_RX_STAT);
+ }
+
+ if (retry < MSM_CPP_POLL_RETRIES) {
+ CPP_LOW("Poll rx empty\n");
+ } else {
+ pr_err("Poll rx empty failed\n");
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *frame_info)
+{
+ int32_t s_base, p_base;
+ uint32_t rd_off, wr0_off, wr1_off, wr2_off, wr3_off;
+ uint32_t wr0_mdata_off, wr1_mdata_off, wr2_mdata_off, wr3_mdata_off;
+ uint32_t rd_ref_off, wr_ref_off;
+ uint32_t s_size, p_size;
+ uint8_t tnr_enabled, ubwc_enabled, cds_en;
+ int32_t i = 0;
+ uint32_t *cpp_frame_msg;
+
+ cpp_frame_msg = frame_info->cpp_cmd_msg;
+
+ /* Update stripe/plane size and base offsets */
+ s_base = cpp_dev->payload_params.stripe_base;
+ s_size = cpp_dev->payload_params.stripe_size;
+ p_base = cpp_dev->payload_params.plane_base;
+ p_size = cpp_dev->payload_params.plane_size;
+
+ /* Fetch engine Offset */
+ rd_off = cpp_dev->payload_params.rd_pntr_off;
+ /* Write engine offsets */
+ wr0_off = cpp_dev->payload_params.wr_0_pntr_off;
+ wr1_off = wr0_off + 1;
+ wr2_off = wr1_off + 1;
+ wr3_off = wr2_off + 1;
+ /* Reference engine offsets */
+ rd_ref_off = cpp_dev->payload_params.rd_ref_pntr_off;
+ wr_ref_off = cpp_dev->payload_params.wr_ref_pntr_off;
+ /* Meta data offsets */
+ wr0_mdata_off =
+ cpp_dev->payload_params.wr_0_meta_data_wr_pntr_off;
+ wr1_mdata_off = (wr0_mdata_off + 1);
+ wr2_mdata_off = (wr1_mdata_off + 1);
+ wr3_mdata_off = (wr2_mdata_off + 1);
+
+ tnr_enabled = ((frame_info->feature_mask & TNR_MASK) >> 2);
+ ubwc_enabled = ((frame_info->feature_mask & UBWC_MASK) >> 5);
+ cds_en = ((frame_info->feature_mask & CDS_MASK) >> 6);
+
+ for (i = 0; i < frame_info->num_strips; i++) {
+ pr_err("stripe %d: in %x, out1 %x out2 %x, out3 %x, out4 %x\n",
+ i, cpp_frame_msg[s_base + rd_off + i * s_size],
+ cpp_frame_msg[s_base + wr0_off + i * s_size],
+ cpp_frame_msg[s_base + wr1_off + i * s_size],
+ cpp_frame_msg[s_base + wr2_off + i * s_size],
+ cpp_frame_msg[s_base + wr3_off + i * s_size]);
+
+ if (tnr_enabled) {
+ pr_err("stripe %d: read_ref %x, write_ref %x\n", i,
+ cpp_frame_msg[s_base + rd_ref_off + i * s_size],
+ cpp_frame_msg[s_base + wr_ref_off + i * s_size]
+ );
+ }
+
+ if (cds_en) {
+ pr_err("stripe %d:, dsdn_off %x\n", i,
+ cpp_frame_msg[s_base + rd_ref_off + i * s_size]
+ );
+ }
+
+ if (ubwc_enabled) {
+ pr_err("stripe %d: metadata %x, %x, %x, %x\n", i,
+ cpp_frame_msg[s_base + wr0_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr1_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr2_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr3_mdata_off +
+ i * s_size]
+ );
+ }
+
+ }
+ return 0;
+}
+
+static void msm_cpp_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+{
+ struct cpp_device *cpp_dev = NULL;
+ struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
+ int32_t i = 0, queue_len = 0;
+ struct msm_device_queue *queue = NULL;
+ int32_t rc = 0;
+
+ if (token) {
+ cpp_dev = token;
+ disable_irq(cpp_dev->irq->start);
+ if (atomic_read(&cpp_timer.used)) {
+ atomic_set(&cpp_timer.used, 0);
+ del_timer_sync(&cpp_timer.cpp_timer);
+ }
+ mutex_lock(&cpp_dev->mutex);
+ tasklet_kill(&cpp_dev->cpp_tasklet);
+ rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ if (rc < 0) {
+ pr_err("load fw failure %d-retry\n", rc);
+ rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
+ if (rc < 0) {
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ return;
+ }
+ }
+ queue = &cpp_timer.data.cpp_dev->processing_q;
+ queue_len = queue->len;
+ if (!queue_len) {
+ pr_err("%s:%d: Invalid queuelen\n", __func__, __LINE__);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ return;
+ }
+ for (i = 0; i < queue_len; i++) {
+ if (cpp_timer.data.processed_frame[i]) {
+ processed_frame[i] =
+ cpp_timer.data.processed_frame[i];
+ pr_err("Fault on identity=0x%x, frame_id=%03d\n",
+ processed_frame[i]->identity,
+ processed_frame[i]->frame_id);
+ msm_cpp_dump_addr(cpp_dev, processed_frame[i]);
+ msm_cpp_dump_frame_cmd(processed_frame[i]);
+ }
+ }
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ }
+}
+
+static int cpp_init_mem(struct cpp_device *cpp_dev)
+{
+ int rc = 0;
+ int iommu_hdl;
+
+ if (cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_5_0_0 ||
+ cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_5_1_0)
+ rc = cam_smmu_get_handle("cpp_0", &iommu_hdl);
+ else
+ rc = cam_smmu_get_handle("cpp", &iommu_hdl);
+
+ if (rc < 0)
+ return -ENODEV;
+
+ cpp_dev->iommu_hdl = iommu_hdl;
+ cam_smmu_reg_client_page_fault_handler(
+ cpp_dev->iommu_hdl,
+ msm_cpp_iommu_fault_handler, cpp_dev);
+ return 0;
+}
+
+
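+/*
+ * Top-half IRQ handler: on the TX FIFO interrupt (0x8) drain the FIFO
+ * into a tasklet queue command and schedule the tasklet; on error
+ * interrupts (0x7C0) dump the micro debug registers; always clear the
+ * IRQ status before returning.
+ */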
+static irqreturn_t msm_cpp_irq(int irq_num, void *data)
+{
+ unsigned long flags;
+ uint32_t tx_level;
+ uint32_t irq_status;
+ uint32_t i;
+ uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
+ struct cpp_device *cpp_dev = data;
+ struct msm_cpp_tasklet_queue_cmd *queue_cmd;
+
+ irq_status = msm_camera_io_r(cpp_dev->base + MSM_CPP_MICRO_IRQGEN_STAT);
+
+ if (irq_status & 0x8) {
+ tx_level = msm_camera_io_r(cpp_dev->base +
+ MSM_CPP_MICRO_FIFO_TX_STAT) >> 2;
+ for (i = 0; i < tx_level; i++) {
+ tx_fifo[i] = msm_camera_io_r(cpp_dev->base +
+ MSM_CPP_MICRO_FIFO_TX_DATA);
+ }
+ spin_lock_irqsave(&cpp_dev->tasklet_lock, flags);
+ queue_cmd = &cpp_dev->tasklet_queue_cmd[cpp_dev->taskletq_idx];
+ if (queue_cmd->cmd_used) {
+ pr_err("%s:%d] cpp tasklet queue overflow tx %d rc %x",
+ __func__, __LINE__, tx_level, irq_status);
+ list_del(&queue_cmd->list);
+ } else {
+ atomic_add(1, &cpp_dev->irq_cnt);
+ }
+ queue_cmd->irq_status = irq_status;
+ queue_cmd->tx_level = tx_level;
+ memset(&queue_cmd->tx_fifo[0], 0, sizeof(queue_cmd->tx_fifo));
+ for (i = 0; i < tx_level; i++)
+ queue_cmd->tx_fifo[i] = tx_fifo[i];
+
+ queue_cmd->cmd_used = 1;
+ cpp_dev->taskletq_idx =
+ (cpp_dev->taskletq_idx + 1) % MSM_CPP_TASKLETQ_SIZE;
+ list_add_tail(&queue_cmd->list, &cpp_dev->tasklet_q);
+ spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
+
+ tasklet_schedule(&cpp_dev->cpp_tasklet);
+ } else if (irq_status & 0x7C0) {
+ pr_debug("irq_status: 0x%x\n", irq_status);
+ pr_debug("DEBUG_SP: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x40));
+ pr_debug("DEBUG_T: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x44));
+ pr_debug("DEBUG_N: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x48));
+ pr_debug("DEBUG_R: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x4C));
+ pr_debug("DEBUG_OPPC: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x50));
+ pr_debug("DEBUG_MO: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x54));
+ pr_debug("DEBUG_TIMER0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x60));
+ pr_debug("DEBUG_TIMER1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x64));
+ pr_debug("DEBUG_GPI: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x70));
+ pr_debug("DEBUG_GPO: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x74));
+ pr_debug("DEBUG_T0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x80));
+ pr_debug("DEBUG_R0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x84));
+ pr_debug("DEBUG_T1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x88));
+ pr_debug("DEBUG_R1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x8C));
+ }
+ msm_camera_io_w(irq_status, cpp_dev->base + MSM_CPP_MICRO_IRQGEN_CLR);
+ return IRQ_HANDLED;
+}
+
+void msm_cpp_do_tasklet(unsigned long data)
+{
+ unsigned long flags;
+ uint32_t irq_status;
+ uint32_t tx_level;
+ uint32_t msg_id, cmd_len;
+ uint32_t i;
+ uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
+ struct cpp_device *cpp_dev = (struct cpp_device *) data;
+ struct msm_cpp_tasklet_queue_cmd *queue_cmd;
+
+ while (atomic_read(&cpp_dev->irq_cnt)) {
+ spin_lock_irqsave(&cpp_dev->tasklet_lock, flags);
+ queue_cmd = list_first_entry(&cpp_dev->tasklet_q,
+ struct msm_cpp_tasklet_queue_cmd, list);
+
+ if (!queue_cmd) {
+ atomic_set(&cpp_dev->irq_cnt, 0);
+ spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
+ return;
+ }
+ atomic_sub(1, &cpp_dev->irq_cnt);
+ list_del(&queue_cmd->list);
+ queue_cmd->cmd_used = 0;
+ irq_status = queue_cmd->irq_status;
+ tx_level = queue_cmd->tx_level;
+ for (i = 0; i < tx_level; i++)
+ tx_fifo[i] = queue_cmd->tx_fifo[i];
+
+ spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
+
+ for (i = 0; i < tx_level; i++) {
+ if (tx_fifo[i] == MSM_CPP_MSG_ID_CMD) {
+ cmd_len = tx_fifo[i+1];
+ msg_id = tx_fifo[i+2];
+ if (msg_id == MSM_CPP_MSG_ID_FRAME_ACK) {
+ CPP_DBG("Frame done!!\n");
+ /* delete CPP timer */
+ CPP_DBG("delete timer.\n");
+ msm_cpp_timer_queue_update(cpp_dev);
+ msm_cpp_notify_frame_done(cpp_dev, 0);
+ } else if (msg_id ==
+ MSM_CPP_MSG_ID_FRAME_NACK) {
+ pr_err("NACK error from hw!!\n");
+ CPP_DBG("delete timer.\n");
+ msm_cpp_timer_queue_update(cpp_dev);
+ msm_cpp_notify_frame_done(cpp_dev, 0);
+ }
+ i += cmd_len + 2;
+ }
+ }
+ }
+}
+
+static int cpp_init_hardware(struct cpp_device *cpp_dev)
+{
+ int rc = 0;
+ uint32_t vbif_version;
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto ahb_vote_fail;
+ }
+
+ rc = msm_camera_regulator_enable(cpp_dev->cpp_vdd,
+ cpp_dev->num_reg, true);
+ if (rc < 0) {
+ pr_err("%s: failed to enable regulators\n", __func__);
+ goto reg_enable_failed;
+ }
+
+ rc = msm_cpp_set_micro_clk(cpp_dev);
+ if (rc < 0) {
+ pr_err("%s: set micro clk failed\n", __func__);
+ goto clk_failed;
+ }
+
+ rc = msm_camera_clk_enable(&cpp_dev->pdev->dev, cpp_dev->clk_info,
+ cpp_dev->cpp_clk, cpp_dev->num_clks, true);
+ if (rc < 0) {
+ pr_err("%s: clk enable failed\n", __func__);
+ goto clk_failed;
+ }
+
+ if (cpp_dev->state != CPP_STATE_BOOT) {
+ rc = msm_camera_register_irq(cpp_dev->pdev, cpp_dev->irq,
+ msm_cpp_irq, IRQF_TRIGGER_RISING, "cpp", cpp_dev);
+ if (rc < 0) {
+ pr_err("%s: irq request fail\n", __func__);
+ goto req_irq_fail;
+ }
+ rc = msm_cam_buf_mgr_register_ops(&cpp_dev->buf_mgr_ops);
+ if (rc < 0) {
+ pr_err("buf mngr req ops failed\n");
+ msm_camera_unregister_irq(cpp_dev->pdev,
+ cpp_dev->irq, cpp_dev);
+ goto req_irq_fail;
+ }
+ }
+
+ cpp_dev->hw_info.cpp_hw_version =
+ msm_camera_io_r(cpp_dev->cpp_hw_base);
+ if (cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_4_1_0) {
+ vbif_version = msm_camera_io_r(cpp_dev->vbif_base);
+ if (vbif_version == VBIF_VERSION_2_3_0)
+ cpp_dev->hw_info.cpp_hw_version = CPP_HW_VERSION_4_0_0;
+ }
+ pr_info("CPP HW Version: 0x%x\n", cpp_dev->hw_info.cpp_hw_version);
+ cpp_dev->hw_info.cpp_hw_caps =
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4);
+
+ rc = msm_update_freq_tbl(cpp_dev);
+ if (rc < 0)
+ goto pwr_collapse_reset;
+
+ pr_debug("CPP HW Caps: 0x%x\n", cpp_dev->hw_info.cpp_hw_caps);
+ msm_camera_io_w(0x1, cpp_dev->vbif_base + 0x4);
+ cpp_dev->taskletq_idx = 0;
+ atomic_set(&cpp_dev->irq_cnt, 0);
+ rc = msm_cpp_create_buff_queue(cpp_dev, MSM_CPP_MAX_BUFF_QUEUE);
+ if (rc < 0) {
+ pr_err("%s: create buff queue failed with err %d\n",
+ __func__, rc);
+ goto pwr_collapse_reset;
+ }
+ pr_err("stream_cnt:%d\n", cpp_dev->stream_cnt);
+ cpp_dev->stream_cnt = 0;
+ if (cpp_dev->fw_name_bin) {
+ msm_camera_enable_irq(cpp_dev->irq, false);
+ rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ if (rc < 0) {
+ pr_err("%s: load firmware failure %d-retry\n",
+ __func__, rc);
+ rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
+ if (rc < 0) {
+ msm_camera_enable_irq(cpp_dev->irq, true);
+ goto pwr_collapse_reset;
+ }
+ }
+ msm_camera_enable_irq(cpp_dev->irq, true);
+ msm_camera_io_w_mb(0x7C8, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_MASK);
+ msm_camera_io_w_mb(0xFFFF, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_CLR);
+ }
+
+ msm_cpp_set_vbif_reg_values(cpp_dev);
+ return rc;
+
+pwr_collapse_reset:
+ msm_cpp_update_gdscr_status(cpp_dev, false);
+ msm_camera_unregister_irq(cpp_dev->pdev, cpp_dev->irq, cpp_dev);
+req_irq_fail:
+ msm_camera_clk_enable(&cpp_dev->pdev->dev, cpp_dev->clk_info,
+ cpp_dev->cpp_clk, cpp_dev->num_clks, false);
+clk_failed:
+ msm_camera_regulator_enable(cpp_dev->cpp_vdd,
+ cpp_dev->num_reg, false);
+reg_enable_failed:
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ahb_vote_fail:
+ return rc;
+}
+
+static void cpp_release_hardware(struct cpp_device *cpp_dev)
+{
+ int32_t rc;
+
+ if (cpp_dev->state != CPP_STATE_BOOT) {
+ msm_camera_unregister_irq(cpp_dev->pdev, cpp_dev->irq, cpp_dev);
+ tasklet_kill(&cpp_dev->cpp_tasklet);
+ atomic_set(&cpp_dev->irq_cnt, 0);
+ }
+ msm_cpp_delete_buff_queue(cpp_dev);
+ msm_cpp_update_gdscr_status(cpp_dev, false);
+ msm_camera_clk_enable(&cpp_dev->pdev->dev, cpp_dev->clk_info,
+ cpp_dev->cpp_clk, cpp_dev->num_clks, false);
+ msm_camera_regulator_enable(cpp_dev->cpp_vdd, cpp_dev->num_reg, false);
+ if (cpp_dev->stream_cnt > 0) {
+ pr_warn("stream count active\n");
+ rc = msm_cpp_update_bandwidth_setting(cpp_dev, 0, 0);
+ }
+ cpp_dev->stream_cnt = 0;
+
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+}
+
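+/*
+ * Stream the firmware image into the microcontroller through the RX
+ * FIFO, then command a jump to the start address and wait for the
+ * ACK/trailer messages.
+ */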
+static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
+{
+ uint32_t i;
+ uint32_t *ptr_bin = NULL;
+ int32_t rc = 0;
+
+ if (!fw_name_bin) {
+ pr_err("%s:%d] invalid fw name", __func__, __LINE__);
+ rc = -EINVAL;
+ goto end;
+ }
+ pr_debug("%s:%d] FW file: %s\n", __func__, __LINE__, fw_name_bin);
+ if (cpp_dev->fw == NULL) {
+ pr_err("%s:%d] fw NULL", __func__, __LINE__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ ptr_bin = (uint32_t *)cpp_dev->fw->data;
+ if (!ptr_bin) {
+ pr_err("%s:%d] Fw bin NULL", __func__, __LINE__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ msm_camera_io_w(0x1, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
+ msm_camera_io_w(0x1, cpp_dev->base +
+ MSM_CPP_MICRO_BOOT_START);
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_CMD, rc);
+ goto end;
+ }
+
+ msm_camera_io_w(0xFFFFFFFF, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_CLR);
+
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll rx empty failed %d",
+ __func__, __LINE__, rc);
+ goto end;
+ }
+ /* Start firmware loading */
+ msm_cpp_write(MSM_CPP_CMD_FW_LOAD, cpp_dev->base);
+ msm_cpp_write(cpp_dev->fw->size, cpp_dev->base);
+ msm_cpp_write(MSM_CPP_START_ADDRESS, cpp_dev->base);
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll rx empty failed %d",
+ __func__, __LINE__, rc);
+ goto end;
+ }
+ for (i = 0; i < cpp_dev->fw->size/4; i++) {
+ msm_cpp_write(*ptr_bin, cpp_dev->base);
+ if (i % MSM_CPP_RX_FIFO_LEVEL == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll rx empty failed %d",
+ __func__, __LINE__, rc);
+ goto end;
+ }
+ }
+ ptr_bin++;
+ }
+ msm_camera_io_w_mb(0x00, cpp_dev->cpp_hw_base + 0xC);
+ rc = msm_cpp_update_gdscr_status(cpp_dev, true);
+ if (rc < 0)
+ pr_err("update cpp gdscr status failed\n");
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_OK);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_OK, rc);
+ goto end;
+ }
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_CMD, rc);
+ goto end;
+ }
+
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll rx empty failed %d",
+ __func__, __LINE__, rc);
+ goto end;
+ }
+ /* Trigger MC to jump to start address */
+ msm_cpp_write(MSM_CPP_CMD_EXEC_JUMP, cpp_dev->base);
+ msm_cpp_write(MSM_CPP_JUMP_ADDRESS, cpp_dev->base);
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_CMD, rc);
+ goto end;
+ }
+
+ rc = msm_cpp_poll(cpp_dev->base, 0x1);
+ if (rc) {
+ pr_err("%s:%d] poll command 0x1 failed %d", __func__, __LINE__,
+ rc);
+ goto end;
+ }
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_JUMP_ACK);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_JUMP_ACK, rc);
+ goto end;
+ }
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_TRAILER, rc);
+ }
+
+end:
+ return rc;
+}
+
+int32_t msm_cpp_reset_vbif_clients(struct cpp_device *cpp_dev)
+{
+ uint32_t i;
+
+ pr_warn("%s: handle vbif hang...\n", __func__);
+ for (i = 0; i < VBIF_CLIENT_MAX; i++) {
+ if (cpp_dev->vbif_data->err_handler[i] == NULL)
+ continue;
+
+ cpp_dev->vbif_data->err_handler[i](
+ cpp_dev->vbif_data->dev[i], CPP_VBIF_ERROR_HANG);
+ }
+ return 0;
+}
+
+int32_t msm_cpp_reset_vbif_and_load_fw(struct cpp_device *cpp_dev)
+{
+ int32_t rc = 0;
+
+ msm_cpp_reset_vbif_clients(cpp_dev);
+
+ rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ if (rc < 0)
+ pr_err("Reset and load fw failed %d\n", rc);
+
+ return rc;
+}
+
+int cpp_vbif_error_handler(void *dev, uint32_t vbif_error)
+{
+ struct cpp_device *cpp_dev = NULL;
+
+ if (dev == NULL || vbif_error >= CPP_VBIF_ERROR_MAX) {
+ pr_err("failed: dev %pK,vbif error %d\n", dev, vbif_error);
+ return -EINVAL;
+ }
+
+ cpp_dev = (struct cpp_device *) dev;
+
+ /* MMSS_A_CPP_IRQ_STATUS_0 = 0x10 */
+ pr_err("%s: before reset halt... read MMSS_A_CPP_IRQ_STATUS_0 = 0x%x",
+ __func__, msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10));
+
+ pr_err("%s: start reset bus bridge on FD + CPP!\n", __func__);
+ /* MMSS_A_CPP_RST_CMD_0 = 0x8, firmware reset = 0x3DF77 */
+ msm_camera_io_w(0x3DF77, cpp_dev->cpp_hw_base + 0x8);
+
+ /* MMSS_A_CPP_IRQ_STATUS_0 = 0x10 */
+ pr_err("%s: after reset halt... read MMSS_A_CPP_IRQ_STATUS_0 = 0x%x",
+ __func__, msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10));
+
+ return 0;
+}
+
+static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rc;
+ uint32_t i;
+ struct cpp_device *cpp_dev = NULL;
+
+ CPP_DBG("E\n");
+
+ if (!sd || !fh) {
+ pr_err("Wrong input parameters sd %pK fh %pK!",
+ sd, fh);
+ return -EINVAL;
+ }
+ cpp_dev = v4l2_get_subdevdata(sd);
+ if (!cpp_dev) {
+ pr_err("failed: cpp_dev %pK\n", cpp_dev);
+ return -EINVAL;
+ }
+ mutex_lock(&cpp_dev->mutex);
+ if (cpp_dev->cpp_open_cnt == MAX_ACTIVE_CPP_INSTANCE) {
+ pr_err("No free CPP instance\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].active == 0) {
+ cpp_dev->cpp_subscribe_list[i].active = 1;
+ cpp_dev->cpp_subscribe_list[i].vfh = &fh->vfh;
+ break;
+ }
+ }
+ if (i == MAX_ACTIVE_CPP_INSTANCE) {
+ pr_err("No free instance\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENODEV;
+ }
+
+ CPP_DBG("open %d %pK\n", i, &fh->vfh);
+ cpp_dev->cpp_open_cnt++;
+
+ msm_cpp_vbif_register_error_handler(cpp_dev,
+ VBIF_CLIENT_CPP, cpp_vbif_error_handler);
+
+ if (cpp_dev->cpp_open_cnt == 1) {
+ rc = cpp_init_hardware(cpp_dev);
+ if (rc < 0) {
+ cpp_dev->cpp_open_cnt--;
+ cpp_dev->cpp_subscribe_list[i].active = 0;
+ cpp_dev->cpp_subscribe_list[i].vfh = NULL;
+ mutex_unlock(&cpp_dev->mutex);
+ return rc;
+ }
+
+ rc = cpp_init_mem(cpp_dev);
+ if (rc < 0) {
+ pr_err("Error: init memory fail\n");
+ cpp_dev->cpp_open_cnt--;
+ cpp_dev->cpp_subscribe_list[i].active = 0;
+ cpp_dev->cpp_subscribe_list[i].vfh = NULL;
+ mutex_unlock(&cpp_dev->mutex);
+ return rc;
+ }
+ cpp_dev->state = CPP_STATE_IDLE;
+ }
+
+ mutex_unlock(&cpp_dev->mutex);
+ return 0;
+}
+
+static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ uint32_t i;
+ int rc = -1;
+ struct cpp_device *cpp_dev = NULL;
+ struct msm_device_queue *processing_q = NULL;
+ struct msm_device_queue *eventData_q = NULL;
+
+ if (!sd) {
+ pr_err("Wrong input sd parameter");
+ return -EINVAL;
+ }
+ cpp_dev = v4l2_get_subdevdata(sd);
+
+ if (!cpp_dev) {
+ pr_err("failed: cpp_dev %pK\n", cpp_dev);
+ return -EINVAL;
+ }
+
+ mutex_lock(&cpp_dev->mutex);
+
+ processing_q = &cpp_dev->processing_q;
+ eventData_q = &cpp_dev->eventData_q;
+
+ if (cpp_dev->cpp_open_cnt == 0) {
+ mutex_unlock(&cpp_dev->mutex);
+ return 0;
+ }
+
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].active == 1) {
+ cpp_dev->cpp_subscribe_list[i].active = 0;
+ cpp_dev->cpp_subscribe_list[i].vfh = NULL;
+ break;
+ }
+ }
+ if (i == MAX_ACTIVE_CPP_INSTANCE) {
+ pr_err("Invalid close\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENODEV;
+ }
+
+ cpp_dev->cpp_open_cnt--;
+ if (cpp_dev->cpp_open_cnt == 0) {
+ pr_debug("irq_status: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4));
+ pr_debug("DEBUG_SP: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x40));
+ pr_debug("DEBUG_T: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x44));
+ pr_debug("DEBUG_N: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x48));
+ pr_debug("DEBUG_R: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4C));
+ pr_debug("DEBUG_OPPC: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x50));
+ pr_debug("DEBUG_MO: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x54));
+ pr_debug("DEBUG_TIMER0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x60));
+ pr_debug("DEBUG_TIMER1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x64));
+ pr_debug("DEBUG_GPI: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x70));
+ pr_debug("DEBUG_GPO: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x74));
+ pr_debug("DEBUG_T0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x80));
+ pr_debug("DEBUG_R0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x84));
+ pr_debug("DEBUG_T1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x88));
+ pr_debug("DEBUG_R1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x8C));
+ msm_camera_io_w(0x0, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
+ msm_cpp_clear_timer(cpp_dev);
+ cpp_release_hardware(cpp_dev);
+ if (cpp_dev->iommu_state == CPP_IOMMU_STATE_ATTACHED) {
+ cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
+ rc = cam_smmu_ops(cpp_dev->iommu_hdl, CAM_SMMU_DETACH);
+ if (rc < 0)
+ pr_err("Error: Detach fail in release\n");
+ }
+ cam_smmu_destroy_handle(cpp_dev->iommu_hdl);
+ msm_cpp_empty_list(processing_q, list_frame);
+ msm_cpp_empty_list(eventData_q, list_eventdata);
+ cpp_dev->state = CPP_STATE_OFF;
+ }
+
+ /* unregister vbif error handler */
+ msm_cpp_vbif_register_error_handler(cpp_dev,
+ VBIF_CLIENT_CPP, NULL);
+ mutex_unlock(&cpp_dev->mutex);
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops msm_cpp_internal_ops = {
+ .open = cpp_open_node,
+ .close = cpp_close_node,
+};
+
+static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
+ uint32_t buff_mgr_ops, uint32_t ids,
+ void *arg)
+{
+ int rc = -EINVAL;
+
+ switch (buff_mgr_ops) {
+ case VIDIOC_MSM_BUF_MNGR_IOCTL_CMD: {
+ rc = msm_cpp_buffer_private_ops(cpp_dev, buff_mgr_ops,
+ ids, arg);
+ break;
+ }
+ case VIDIOC_MSM_BUF_MNGR_PUT_BUF:
+ case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
+ case VIDIOC_MSM_BUF_MNGR_GET_BUF:
+ default: {
+ struct msm_buf_mngr_info *buff_mgr_info =
+ (struct msm_buf_mngr_info *)arg;
+ rc = cpp_dev->buf_mgr_ops.msm_cam_buf_mgr_ops(buff_mgr_ops,
+ buff_mgr_info);
+ break;
+ }
+ }
+ if (rc < 0)
+ pr_debug("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+}
+
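+/*
+ * Dequeue the oldest processed frame, hand its output (and any
+ * duplicate) buffer back to the buffer manager as BUF_DONE or PUT_BUF,
+ * and post a CPP frame-done event to userspace.
+ */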
+static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev,
+ uint8_t put_buf)
+{
+ struct v4l2_event v4l2_evt;
+ struct msm_queue_cmd *frame_qcmd = NULL;
+ struct msm_queue_cmd *event_qcmd = NULL;
+ struct msm_cpp_frame_info_t *processed_frame = NULL;
+ struct msm_device_queue *queue = &cpp_dev->processing_q;
+ struct msm_buf_mngr_info buff_mgr_info;
+ int rc = 0;
+
+ frame_qcmd = msm_dequeue(queue, list_frame, POP_FRONT);
+ if (frame_qcmd) {
+ processed_frame = frame_qcmd->command;
+ do_gettimeofday(&(processed_frame->out_time));
+ kfree(frame_qcmd);
+ event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_ATOMIC);
+ if (!event_qcmd) {
+ pr_err("Insufficient memory\n");
+ return -ENOMEM;
+ }
+ atomic_set(&event_qcmd->on_heap, 1);
+ event_qcmd->command = processed_frame;
+ CPP_DBG("fid %d\n", processed_frame->frame_id);
+ msm_enqueue(&cpp_dev->eventData_q, &event_qcmd->list_eventdata);
+
+ if ((processed_frame->partial_frame_indicator != 0) &&
+ (processed_frame->last_payload == 0))
+ goto NOTIFY_FRAME_DONE;
+
+ if (!processed_frame->output_buffer_info[0].processed_divert &&
+ !processed_frame->output_buffer_info[0].native_buff &&
+ !processed_frame->we_disable) {
+
+ int32_t iden = processed_frame->identity;
+
+ SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(processed_frame,
+ iden, processed_frame->duplicate_identity);
+ memset(&buff_mgr_info, 0,
+ sizeof(struct msm_buf_mngr_info));
+
+ buff_mgr_info.session_id = ((iden >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id = (iden & 0xFFFF);
+ buff_mgr_info.frame_id = processed_frame->frame_id;
+ buff_mgr_info.timestamp = processed_frame->timestamp;
+ if (processed_frame->batch_info.batch_mode ==
+ BATCH_MODE_VIDEO ||
+ (IS_BATCH_BUFFER_ON_PREVIEW(
+ processed_frame))) {
+ buff_mgr_info.index =
+ processed_frame->batch_info.cont_idx;
+ } else {
+ buff_mgr_info.index = processed_frame->
+ output_buffer_info[0].index;
+ }
+ if (put_buf) {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ 0x0, &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("error putting buffer\n");
+ rc = -EINVAL;
+ }
+ } else {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ 0x0, &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("error putting buffer\n");
+ rc = -EINVAL;
+ }
+ }
+ }
+
+ if (processed_frame->duplicate_output &&
+ !processed_frame->
+ duplicate_buffer_info.processed_divert &&
+ !processed_frame->we_disable) {
+ int32_t iden = processed_frame->duplicate_identity;
+
+ SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(processed_frame,
+ iden, processed_frame->identity);
+
+ memset(&buff_mgr_info, 0,
+ sizeof(struct msm_buf_mngr_info));
+
+ buff_mgr_info.session_id = ((iden >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id = (iden & 0xFFFF);
+ buff_mgr_info.frame_id = processed_frame->frame_id;
+ buff_mgr_info.timestamp = processed_frame->timestamp;
+ buff_mgr_info.index =
+ processed_frame->duplicate_buffer_info.index;
+ if (put_buf) {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ 0x0, &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("error putting buffer\n");
+ rc = -EINVAL;
+ }
+ } else {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ 0x0, &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("error putting buffer\n");
+ rc = -EINVAL;
+ }
+ }
+ }
+NOTIFY_FRAME_DONE:
+ v4l2_evt.id = processed_frame->inst_id;
+ v4l2_evt.type = V4L2_EVENT_CPP_FRAME_DONE;
+ v4l2_event_queue(cpp_dev->msm_sd.sd.devnode, &v4l2_evt);
+ }
+ return rc;
+}
+
+#if MSM_CPP_DUMP_FRM_CMD
+static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info)
+{
+ int i, i1, i2;
+ struct cpp_device *cpp_dev = cpp_timer.data.cpp_dev;
+
+ CPP_DBG("-- start: cpp frame cmd for identity=0x%x, frame_id=%d --\n",
+ frame_info->identity, frame_info->frame_id);
+
+ CPP_DBG("msg[%03d] = 0x%08x\n", 0, 0x6);
+ /* send top level and plane level */
+ for (i = 0; i < cpp_dev->payload_params.stripe_base; i++)
+ CPP_DBG("msg[%03d] = 0x%08x\n", i,
+ frame_info->cpp_cmd_msg[i]);
+ /* send stripes */
+ i1 = cpp_dev->payload_params.stripe_base +
+ cpp_dev->payload_params.stripe_size *
+ frame_info->first_stripe_index;
+ i2 = cpp_dev->payload_params.stripe_size *
+ (frame_info->last_stripe_index -
+ frame_info->first_stripe_index + 1);
+ for (i = 0; i < i2; i++)
+ CPP_DBG("msg[%03d] = 0x%08x\n", i+i1,
+ frame_info->cpp_cmd_msg[i+i1]);
+ /* send trailer */
+ CPP_DBG("msg[%03d] = 0x%08x\n", i+i1, MSM_CPP_MSG_ID_TRAILER);
+ CPP_DBG("-- end: cpp frame cmd for identity=0x%x, frame_id=%d --\n",
+ frame_info->identity, frame_info->frame_id);
+ return 0;
+}
+#else
+static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info)
+{
+ return 0;
+}
+#endif
+
+static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev,
+ int queue_len) {
+ uint32_t i;
+
+ while (queue_len) {
+ msm_cpp_notify_frame_done(cpp_dev, 1);
+ queue_len--;
+ }
+ atomic_set(&cpp_timer.used, 0);
+ for (i = 0; i < MAX_CPP_PROCESSING_FRAME; i++)
+ cpp_timer.data.processed_frame[i] = NULL;
+}
+
+static void msm_cpp_set_micro_irq_mask(struct cpp_device *cpp_dev,
+ uint8_t enable, uint32_t irq_mask)
+{
+ msm_camera_io_w_mb(irq_mask, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_MASK);
+ msm_camera_io_w_mb(0xFFFF, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_CLR);
+ if (enable)
+ enable_irq(cpp_dev->irq->start);
+}
+
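+/*
+ * Watchdog worker: reload the firmware and resubmit every frame still
+ * in the processing queue, or flush the queue and release its buffers
+ * once the retry limit is reached.
+ */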
+static void msm_cpp_do_timeout_work(struct work_struct *work)
+{
+ uint32_t j = 0, i = 0, i1 = 0, i2 = 0;
+ int32_t queue_len = 0, rc = 0, fifo_counter = 0;
+ struct msm_device_queue *queue = NULL;
+ struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
+ struct cpp_device *cpp_dev = cpp_timer.data.cpp_dev;
+
+ pr_warn("cpp_timer_callback called. (jiffies=%lu)\n",
+ jiffies);
+ mutex_lock(&cpp_dev->mutex);
+
+ if (!work || (cpp_timer.data.cpp_dev->state != CPP_STATE_ACTIVE)) {
+ pr_err("Invalid work:%pK or state:%d\n", work,
+ cpp_timer.data.cpp_dev->state);
+ /* Do not flush queue here as it is not a fatal error */
+ goto end;
+ }
+ if (!atomic_read(&cpp_timer.used)) {
+ pr_warn("Delayed trigger, IRQ serviced\n");
+ /* Do not flush queue here as it is not a fatal error */
+ goto end;
+ }
+
+ msm_camera_enable_irq(cpp_timer.data.cpp_dev->irq, false);
+ /* make sure all the pending queued entries are scheduled */
+ tasklet_kill(&cpp_dev->cpp_tasklet);
+
+ queue = &cpp_timer.data.cpp_dev->processing_q;
+ queue_len = queue->len;
+ if (!queue_len) {
+ pr_err("%s:%d: irq serviced after timeout.Ignore timeout\n",
+ __func__, __LINE__);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ goto end;
+ }
+
+ pr_debug("Reloading firmware %d\n", queue_len);
+ rc = cpp_load_fw(cpp_timer.data.cpp_dev,
+ cpp_timer.data.cpp_dev->fw_name_bin);
+ if (rc) {
+ pr_warn("Firmware loading failed-retry\n");
+ rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
+ if (rc < 0) {
+ pr_err("Firmware loading failed\n");
+ goto error;
+ }
+ } else {
+ pr_debug("Firmware loading done\n");
+ }
+
+ if (!atomic_read(&cpp_timer.used)) {
+ pr_warn("Delayed trigger, IRQ serviced\n");
+ /* Do not flush queue here as it is not a fatal error */
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ cpp_dev->timeout_trial_cnt = 0;
+ goto end;
+ }
+
+ if (cpp_dev->timeout_trial_cnt >=
+ cpp_dev->max_timeout_trial_cnt) {
+ pr_warn("Max trial reached\n");
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ goto end;
+ }
+
+ for (i = 0; i < queue_len; i++) {
+ processed_frame[i] = cpp_timer.data.processed_frame[i];
+ if (!processed_frame[i]) {
+ pr_warn("process frame null , queue len %d", queue_len);
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev,
+ queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ goto end;
+ }
+ }
+
+ atomic_set(&cpp_timer.used, 1);
+ pr_warn("Starting timer to fire in %d ms. (jiffies=%lu)\n",
+ CPP_CMD_TIMEOUT_MS, jiffies);
+ mod_timer(&cpp_timer.cpp_timer,
+ jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
+
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+
+ for (i = 0; i < queue_len; i++) {
+ pr_warn("Rescheduling for identity=0x%x, frame_id=%03d\n",
+ processed_frame[i]->identity,
+ processed_frame[i]->frame_id);
+
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d: Reschedule payload failed %d\n",
+ __func__, __LINE__, rc);
+ goto error;
+ }
+ msm_cpp_write(0x6, cpp_dev->base);
+ fifo_counter++;
+ /* send top level and plane level */
+ for (j = 0; j < cpp_dev->payload_params.stripe_base; j++,
+ fifo_counter++) {
+ if (fifo_counter % MSM_CPP_RX_FIFO_LEVEL == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll failed %d rc %d",
+ __func__, __LINE__, j, rc);
+ goto error;
+ }
+ fifo_counter = 0;
+ }
+ msm_cpp_write(processed_frame[i]->cpp_cmd_msg[j],
+ cpp_dev->base);
+ }
+ if (rc) {
+ pr_err("%s:%d: Rescheduling plane info failed %d\n",
+ __func__, __LINE__, rc);
+ goto error;
+ }
+ /* send stripes */
+ i1 = cpp_dev->payload_params.stripe_base +
+ cpp_dev->payload_params.stripe_size *
+ processed_frame[i]->first_stripe_index;
+ i2 = cpp_dev->payload_params.stripe_size *
+ (processed_frame[i]->last_stripe_index -
+ processed_frame[i]->first_stripe_index + 1);
+ for (j = 0; j < i2; j++, fifo_counter++) {
+ if (fifo_counter % MSM_CPP_RX_FIFO_LEVEL == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll failed %d rc %d",
+ __func__, __LINE__, j, rc);
+ break;
+ }
+ fifo_counter = 0;
+ }
+ msm_cpp_write(processed_frame[i]->cpp_cmd_msg[j+i1],
+ cpp_dev->base);
+ }
+ if (rc) {
+ pr_err("%s:%d] Rescheduling stripe info failed %d\n",
+ __func__, __LINE__, rc);
+ goto error;
+ }
+ /* send trailer */
+
+ if (fifo_counter % MSM_CPP_RX_FIFO_LEVEL == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] Reschedule trailer failed %d\n",
+ __func__, __LINE__, rc);
+ goto error;
+ }
+ fifo_counter = 0;
+ }
+ msm_cpp_write(0xabcdefaa, cpp_dev->base);
+ pr_debug("After frame:%d write\n", i+1);
+ }
+
+ cpp_timer.data.cpp_dev->timeout_trial_cnt++;
+
+end:
+ mutex_unlock(&cpp_dev->mutex);
+ pr_debug("%s:%d] exit\n", __func__, __LINE__);
+ return;
+error:
+ cpp_dev->state = CPP_STATE_OFF;
+ /* flush the queue */
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev,
+ queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 0, 0x0);
+ cpp_dev->timeout_trial_cnt = 0;
+ mutex_unlock(&cpp_dev->mutex);
+ pr_debug("%s:%d] exit\n", __func__, __LINE__);
+}
+
+void cpp_timer_callback(unsigned long data)
+{
+ struct msm_cpp_work_t *work =
+ cpp_timer.data.cpp_dev->work;
+ queue_work(cpp_timer.data.cpp_dev->timer_wq,
+ (struct work_struct *)work);
+}
+
+static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev,
+ struct msm_queue_cmd *frame_qcmd)
+{
+ unsigned long flags;
+ uint32_t i, i1, i2;
+ int32_t rc = -EAGAIN;
+ struct msm_cpp_frame_info_t *process_frame;
+ struct msm_queue_cmd *qcmd = NULL;
+ uint32_t queue_len = 0, fifo_counter = 0;
+
+ if (cpp_dev->processing_q.len < MAX_CPP_PROCESSING_FRAME) {
+ process_frame = frame_qcmd->command;
+ msm_cpp_dump_frame_cmd(process_frame);
+ spin_lock_irqsave(&cpp_timer.data.processed_frame_lock, flags);
+ msm_enqueue(&cpp_dev->processing_q,
+ &frame_qcmd->list_frame);
+ cpp_timer.data.processed_frame[cpp_dev->processing_q.len - 1] =
+ process_frame;
+ queue_len = cpp_dev->processing_q.len;
+ spin_unlock_irqrestore(&cpp_timer.data.processed_frame_lock,
+ flags);
+ atomic_set(&cpp_timer.used, 1);
+
+ CPP_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
+ CPP_CMD_TIMEOUT_MS, jiffies);
+ if (mod_timer(&cpp_timer.cpp_timer,
+ (jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS))) != 0)
+ CPP_DBG("Timer has not expired yet\n");
+
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d: Scheduling payload failed %d",
+ __func__, __LINE__, rc);
+ goto dequeue_frame;
+ }
+ msm_cpp_write(0x6, cpp_dev->base);
+ fifo_counter++;
+ /* send top level and plane level */
+ for (i = 0; i < cpp_dev->payload_params.stripe_base; i++,
+ fifo_counter++) {
+ if ((fifo_counter % MSM_CPP_RX_FIFO_LEVEL) == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc)
+ break;
+ fifo_counter = 0;
+ }
+ msm_cpp_write(process_frame->cpp_cmd_msg[i],
+ cpp_dev->base);
+ }
+ if (rc) {
+ pr_err("%s:%d: Scheduling plane info failed %d\n",
+ __func__, __LINE__, rc);
+ goto dequeue_frame;
+ }
+ /* send stripes */
+ i1 = cpp_dev->payload_params.stripe_base +
+ cpp_dev->payload_params.stripe_size *
+ process_frame->first_stripe_index;
+ i2 = cpp_dev->payload_params.stripe_size *
+ (process_frame->last_stripe_index -
+ process_frame->first_stripe_index + 1);
+ for (i = 0; i < i2; i++, fifo_counter++) {
+ if ((fifo_counter % MSM_CPP_RX_FIFO_LEVEL) == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc)
+ break;
+ fifo_counter = 0;
+ }
+ msm_cpp_write(process_frame->cpp_cmd_msg[i+i1],
+ cpp_dev->base);
+ }
+ if (rc) {
+ pr_err("%s:%d: Scheduling stripe info failed %d\n",
+ __func__, __LINE__, rc);
+ goto dequeue_frame;
+ }
+ /* send trailer */
+ if ((fifo_counter % MSM_CPP_RX_FIFO_LEVEL) == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s: Scheduling trailer failed %d\n",
+ __func__, rc);
+ goto dequeue_frame;
+ }
+ fifo_counter = 0;
+ }
+ msm_cpp_write(MSM_CPP_MSG_ID_TRAILER, cpp_dev->base);
+
+ do_gettimeofday(&(process_frame->in_time));
+ rc = 0;
+ } else {
+ pr_err("process queue full. drop frame\n");
+ goto end;
+ }
+
+dequeue_frame:
+ if (rc < 0) {
+ qcmd = msm_dequeue(&cpp_dev->processing_q, list_frame,
+ POP_BACK);
+ if (!qcmd)
+ pr_warn("%s:%d: no queue cmd\n", __func__, __LINE__);
+ spin_lock_irqsave(&cpp_timer.data.processed_frame_lock,
+ flags);
+ queue_len = cpp_dev->processing_q.len;
+ spin_unlock_irqrestore(
+ &cpp_timer.data.processed_frame_lock, flags);
+ if (queue_len == 0) {
+ atomic_set(&cpp_timer.used, 0);
+ del_timer(&cpp_timer.cpp_timer);
+ }
+ }
+end:
+ return rc;
+}
+
+static int msm_cpp_send_command_to_hardware(struct cpp_device *cpp_dev,
+ uint32_t *cmd_msg, uint32_t payload_size)
+{
+ uint32_t i;
+ int rc = 0;
+
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll rx empty failed %d",
+ __func__, __LINE__, rc);
+ goto end;
+ }
+
+ for (i = 0; i < payload_size; i++) {
+ msm_cpp_write(cmd_msg[i], cpp_dev->base);
+ if (i % MSM_CPP_RX_FIFO_LEVEL == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll rx empty failed %d",
+ __func__, __LINE__, rc);
+ goto end;
+ }
+ }
+ }
+end:
+ return rc;
+}
+
+static int msm_cpp_flush_frames(struct cpp_device *cpp_dev)
+{
+ return 0;
+}
+
+static struct msm_cpp_frame_info_t *msm_cpp_get_frame(
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ uint32_t *cpp_frame_msg;
+ struct msm_cpp_frame_info_t *new_frame = NULL;
+ int32_t rc = 0;
+
+ new_frame = kzalloc(sizeof(struct msm_cpp_frame_info_t), GFP_KERNEL);
+
+ if (!new_frame) {
+ rc = -ENOMEM;
+ goto no_mem_err;
+ }
+
+ rc = (copy_from_user(new_frame, (void __user *)ioctl_ptr->ioctl_ptr,
+ sizeof(struct msm_cpp_frame_info_t)) ? -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ goto frame_err;
+ }
+
+ if ((new_frame->msg_len == 0) ||
+ (new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
+ pr_err("%s:%d: Invalid frame len:%d\n", __func__,
+ __LINE__, new_frame->msg_len);
+ goto frame_err;
+ }
+
+ cpp_frame_msg = kcalloc(new_frame->msg_len, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!cpp_frame_msg)
+ goto frame_err;
+
+ rc = (copy_from_user(cpp_frame_msg,
+ (void __user *)new_frame->cpp_cmd_msg,
+ sizeof(uint32_t) * new_frame->msg_len) ? -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ goto frame_msg_err;
+ }
+ new_frame->cpp_cmd_msg = cpp_frame_msg;
+ return new_frame;
+
+frame_msg_err:
+ kfree(cpp_frame_msg);
+frame_err:
+ kfree(new_frame);
+no_mem_err:
+ return NULL;
+}
+
+static int msm_cpp_check_buf_type(struct msm_buf_mngr_info *buff_mgr_info,
+ struct msm_cpp_frame_info_t *new_frame)
+{
+ int32_t num_output_bufs = 0;
+ uint32_t i = 0;
+
+ if (buff_mgr_info->type == MSM_CAMERA_BUF_MNGR_BUF_USER) {
+ new_frame->batch_info.cont_idx =
+ buff_mgr_info->index;
+ num_output_bufs = buff_mgr_info->user_buf.buf_cnt;
+ if (buff_mgr_info->user_buf.buf_cnt <
+ new_frame->batch_info.batch_size) {
+ /* Less bufs than Input buffer */
+ num_output_bufs = buff_mgr_info->user_buf.buf_cnt;
+ } else {
+ /* More or equal bufs as Input buffer */
+ num_output_bufs = new_frame->batch_info.batch_size;
+ }
+ if (num_output_bufs > MSM_OUTPUT_BUF_CNT)
+ return 0;
+ for (i = 0; i < num_output_bufs; i++) {
+ new_frame->output_buffer_info[i].index =
+ buff_mgr_info->user_buf.buf_idx[i];
+ }
+ } else {
+ /* For non-group case use first buf slot */
+ new_frame->output_buffer_info[0].index = buff_mgr_info->index;
+ num_output_bufs = 1;
+ }
+
+ return num_output_bufs;
+}
+
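+/*
+ * Patch the per-stripe (and, when MMU prefetch is enabled, per-plane)
+ * pointer fields in the frame message with the physical base addresses
+ * of the input, output and TNR scratch buffers.
+ */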
+static void msm_cpp_update_frame_msg_phy_address(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *new_frame, unsigned long in_phyaddr,
+ unsigned long out_phyaddr0, unsigned long out_phyaddr1,
+ unsigned long tnr_scratch_buffer0, unsigned long tnr_scratch_buffer1)
+{
+ int32_t stripe_base, plane_base;
+ uint32_t rd_pntr_off, wr_0_pntr_off, wr_1_pntr_off,
+ wr_2_pntr_off, wr_3_pntr_off;
+ uint32_t wr_0_meta_data_wr_pntr_off, wr_1_meta_data_wr_pntr_off,
+ wr_2_meta_data_wr_pntr_off, wr_3_meta_data_wr_pntr_off;
+ uint32_t rd_ref_pntr_off, wr_ref_pntr_off;
+ uint32_t stripe_size, plane_size;
+ uint32_t fe_mmu_pf_ptr_off, ref_fe_mmu_pf_ptr_off, we_mmu_pf_ptr_off,
+ dup_we_mmu_pf_ptr_off, ref_we_mmu_pf_ptr_off;
+ uint8_t tnr_enabled, ubwc_enabled, mmu_pf_en, cds_en;
+ int32_t i = 0;
+ uint32_t *cpp_frame_msg;
+
+ cpp_frame_msg = new_frame->cpp_cmd_msg;
+
+ /* Update stripe/plane size and base offsets */
+ stripe_base = cpp_dev->payload_params.stripe_base;
+ stripe_size = cpp_dev->payload_params.stripe_size;
+ plane_base = cpp_dev->payload_params.plane_base;
+ plane_size = cpp_dev->payload_params.plane_size;
+
+ /* Fetch engine Offset */
+ rd_pntr_off = cpp_dev->payload_params.rd_pntr_off;
+ /* Write engine offsets */
+ wr_0_pntr_off = cpp_dev->payload_params.wr_0_pntr_off;
+ wr_1_pntr_off = wr_0_pntr_off + 1;
+ wr_2_pntr_off = wr_1_pntr_off + 1;
+ wr_3_pntr_off = wr_2_pntr_off + 1;
+ /* Reference engine offsets */
+ rd_ref_pntr_off = cpp_dev->payload_params.rd_ref_pntr_off;
+ wr_ref_pntr_off = cpp_dev->payload_params.wr_ref_pntr_off;
+ /* Meta data offsets */
+ wr_0_meta_data_wr_pntr_off =
+ cpp_dev->payload_params.wr_0_meta_data_wr_pntr_off;
+ wr_1_meta_data_wr_pntr_off = (wr_0_meta_data_wr_pntr_off + 1);
+ wr_2_meta_data_wr_pntr_off = (wr_1_meta_data_wr_pntr_off + 1);
+ wr_3_meta_data_wr_pntr_off = (wr_2_meta_data_wr_pntr_off + 1);
+ /* MMU PF offsets */
+ fe_mmu_pf_ptr_off = cpp_dev->payload_params.fe_mmu_pf_ptr_off;
+ ref_fe_mmu_pf_ptr_off = cpp_dev->payload_params.ref_fe_mmu_pf_ptr_off;
+ we_mmu_pf_ptr_off = cpp_dev->payload_params.we_mmu_pf_ptr_off;
+ dup_we_mmu_pf_ptr_off = cpp_dev->payload_params.dup_we_mmu_pf_ptr_off;
+ ref_we_mmu_pf_ptr_off = cpp_dev->payload_params.ref_we_mmu_pf_ptr_off;
+
+ pr_debug("%s: feature_mask 0x%x\n", __func__, new_frame->feature_mask);
+
+ /* Update individual module status from feature mask */
+ tnr_enabled = ((new_frame->feature_mask & TNR_MASK) >> 2);
+ ubwc_enabled = ((new_frame->feature_mask & UBWC_MASK) >> 5);
+ cds_en = ((new_frame->feature_mask & CDS_MASK) >> 6);
+ mmu_pf_en = ((new_frame->feature_mask & MMU_PF_MASK) >> 7);
+
+ /*
+ * Update the stripe based addresses for fetch/write/reference engines.
+ * Update meta data offset for ubwc.
+ * Update ref engine address for cds / tnr.
+ */
+ for (i = 0; i < new_frame->num_strips; i++) {
+ cpp_frame_msg[stripe_base + rd_pntr_off + i * stripe_size] +=
+ (uint32_t) in_phyaddr;
+ cpp_frame_msg[stripe_base + wr_0_pntr_off + i * stripe_size] +=
+ (uint32_t) out_phyaddr0;
+ cpp_frame_msg[stripe_base + wr_1_pntr_off + i * stripe_size] +=
+ (uint32_t) out_phyaddr1;
+ cpp_frame_msg[stripe_base + wr_2_pntr_off + i * stripe_size] +=
+ (uint32_t) out_phyaddr0;
+ cpp_frame_msg[stripe_base + wr_3_pntr_off + i * stripe_size] +=
+ (uint32_t) out_phyaddr1;
+ if (tnr_enabled) {
+ cpp_frame_msg[stripe_base + rd_ref_pntr_off +
+ i * stripe_size] +=
+ (uint32_t)tnr_scratch_buffer0;
+ cpp_frame_msg[stripe_base + wr_ref_pntr_off +
+ i * stripe_size] +=
+ (uint32_t)tnr_scratch_buffer1;
+ } else if (cds_en) {
+ cpp_frame_msg[stripe_base + rd_ref_pntr_off +
+ i * stripe_size] +=
+ (uint32_t)in_phyaddr;
+ }
+ if (ubwc_enabled) {
+ cpp_frame_msg[stripe_base + wr_0_meta_data_wr_pntr_off +
+ i * stripe_size] += (uint32_t) out_phyaddr0;
+ cpp_frame_msg[stripe_base + wr_1_meta_data_wr_pntr_off +
+ i * stripe_size] += (uint32_t) out_phyaddr1;
+ cpp_frame_msg[stripe_base + wr_2_meta_data_wr_pntr_off +
+ i * stripe_size] += (uint32_t) out_phyaddr0;
+ cpp_frame_msg[stripe_base + wr_3_meta_data_wr_pntr_off +
+ i * stripe_size] += (uint32_t) out_phyaddr1;
+ }
+ }
+
+ if (!mmu_pf_en)
+ goto exit;
+
+ /* Update mmu prefetch related plane specific address */
+ for (i = 0; i < PAYLOAD_NUM_PLANES; i++) {
+ cpp_frame_msg[plane_base + fe_mmu_pf_ptr_off +
+ i * plane_size] += (uint32_t)in_phyaddr;
+ cpp_frame_msg[plane_base + fe_mmu_pf_ptr_off +
+ i * plane_size + 1] += (uint32_t)in_phyaddr;
+ cpp_frame_msg[plane_base + ref_fe_mmu_pf_ptr_off +
+ i * plane_size] += (uint32_t)tnr_scratch_buffer0;
+ cpp_frame_msg[plane_base + ref_fe_mmu_pf_ptr_off +
+ i * plane_size + 1] += (uint32_t)tnr_scratch_buffer0;
+ cpp_frame_msg[plane_base + we_mmu_pf_ptr_off +
+ i * plane_size] += (uint32_t)out_phyaddr0;
+ cpp_frame_msg[plane_base + we_mmu_pf_ptr_off +
+ i * plane_size + 1] += (uint32_t)out_phyaddr0;
+ cpp_frame_msg[plane_base + dup_we_mmu_pf_ptr_off +
+ i * plane_size] += (uint32_t)out_phyaddr1;
+ cpp_frame_msg[plane_base + dup_we_mmu_pf_ptr_off +
+ i * plane_size + 1] += (uint32_t)out_phyaddr1;
+ cpp_frame_msg[plane_base + ref_we_mmu_pf_ptr_off +
+ i * plane_size] += (uint32_t)tnr_scratch_buffer1;
+ cpp_frame_msg[plane_base + ref_we_mmu_pf_ptr_off +
+ i * plane_size + 1] += (uint32_t)tnr_scratch_buffer1;
+ }
+exit:
+ return;
+}
+
+static int32_t msm_cpp_set_group_buffer_duplicate(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *new_frame, unsigned long out_phyaddr,
+ uint32_t num_output_bufs)
+{
+ uint32_t *set_group_buffer_w_duplication = NULL;
+ uint32_t *ptr;
+ unsigned long out_phyaddr0, out_phyaddr1, distance;
+ int32_t rc = 0;
+ uint32_t set_group_buffer_len, set_group_buffer_len_bytes,
+ dup_frame_off, ubwc_enabled, j, i = 0;
+
+ do {
+ int iden = new_frame->identity;
+
+ set_group_buffer_len =
+ cpp_dev->payload_params.set_group_buffer_len;
+ if (!set_group_buffer_len) {
+ pr_err("%s: invalid set group buffer cmd len %d\n",
+ __func__, set_group_buffer_len);
+ rc = -EINVAL;
+ break;
+ }
+
+ /*
+ * Add 4 words to the MSM_CPP_CMD_GROUP_BUFFER_DUP payload length:
+ * one each for the header, the length field, the trailer and the
+ * MSM_CPP_CMD_GROUP_BUFFER_DUP prefix before the payload.
+ */
+ set_group_buffer_len += 4;
+ set_group_buffer_len_bytes = set_group_buffer_len *
+ sizeof(uint32_t);
+ set_group_buffer_w_duplication =
+ kzalloc(set_group_buffer_len_bytes, GFP_KERNEL);
+ if (!set_group_buffer_w_duplication) {
+ pr_err("%s: set group buffer data alloc failed\n",
+ __func__);
+ rc = -ENOMEM;
+ break;
+ }
+
+ memset(set_group_buffer_w_duplication, 0x0,
+ set_group_buffer_len_bytes);
+ dup_frame_off =
+ cpp_dev->payload_params.dup_frame_indicator_off;
+ /* Offset by 1 because the command word is prefixed to the payload. */
+ dup_frame_off += 1;
+ ubwc_enabled = ((new_frame->feature_mask & UBWC_MASK) >> 5);
+ ptr = set_group_buffer_w_duplication;
+ /* create and send Set Group Buffer with Duplicate command */
+ *ptr++ = MSM_CPP_CMD_GROUP_BUFFER_DUP;
+ *ptr++ = MSM_CPP_MSG_ID_CMD;
+ /*
+ * This field is the value read from DT and is the length of the
+ * actual data in the payload.
+ */
+ *ptr++ = cpp_dev->payload_params.set_group_buffer_len;
+ *ptr++ = MSM_CPP_CMD_GROUP_BUFFER_DUP;
+ *ptr++ = 0;
+ out_phyaddr0 = out_phyaddr;
+
+ SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(new_frame,
+ iden, new_frame->duplicate_identity);
+
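+ /*
+ * For each output buffer after the first, write the distance to the
+ * previous buffer once per payload plane, followed by the same
+ * distances for the UBWC meta data planes (zero when UBWC is
+ * disabled).
+ */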
+ for (i = 1; i < num_output_bufs; i++) {
+ out_phyaddr1 = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->output_buffer_info[i],
+ ((iden >> 16) & 0xFFFF),
+ (iden & 0xFFFF),
+ &new_frame->output_buffer_info[i].fd);
+ if (!out_phyaddr1) {
+ pr_err("%s: error getting o/p phy addr\n",
+ __func__);
+ rc = -EINVAL;
+ break;
+ }
+ distance = out_phyaddr1 - out_phyaddr0;
+ out_phyaddr0 = out_phyaddr1;
+ for (j = 0; j < PAYLOAD_NUM_PLANES; j++)
+ *ptr++ = distance;
+
+ for (j = 0; j < PAYLOAD_NUM_PLANES; j++)
+ *ptr++ = ubwc_enabled ? distance : 0;
+ }
+ if (rc)
+ break;
+
+ if (new_frame->duplicate_output)
+ set_group_buffer_w_duplication[dup_frame_off] =
+ 1 << new_frame->batch_info.pick_preview_idx;
+ else
+ set_group_buffer_w_duplication[dup_frame_off] = 0;
+
+ /*
+ * The CPP message id trailer goes at the last index of the set
+ * group buffer command, i.e. its length minus 1.
+ */
+ set_group_buffer_w_duplication[set_group_buffer_len - 1] =
+ MSM_CPP_MSG_ID_TRAILER;
+ rc = msm_cpp_send_command_to_hardware(cpp_dev,
+ set_group_buffer_w_duplication, set_group_buffer_len);
+ if (rc < 0) {
+ pr_err("%s: Send Command Error rc %d\n", __func__, rc);
+ break;
+ }
+
+ } while (0);
+
+ kfree(set_group_buffer_w_duplication);
+ return rc;
+}
+
+static int32_t msm_cpp_set_group_buffer(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *new_frame, unsigned long out_phyaddr,
+ uint32_t num_output_bufs)
+{
+ uint32_t set_group_buffer_len;
+ uint32_t *set_group_buffer = NULL;
+ uint32_t *ptr;
+ unsigned long out_phyaddr0, out_phyaddr1, distance;
+ int32_t rc = 0;
+ uint32_t set_group_buffer_len_bytes, i = 0;
+ bool batching_valid = false;
+
+ if ((IS_BATCH_BUFFER_ON_PREVIEW(new_frame)) ||
+ new_frame->batch_info.batch_mode == BATCH_MODE_VIDEO)
+ batching_valid = true;
+
+ if (!batching_valid) {
+ pr_debug("%s: batch mode %d, batching valid %d\n",
+ __func__, new_frame->batch_info.batch_mode,
+ batching_valid);
+ return rc;
+ }
+
+ if (new_frame->batch_info.batch_size <= 1) {
+ pr_debug("%s: batch size is invalid %d\n", __func__,
+ new_frame->batch_info.batch_size);
+ return rc;
+ }
+
+ if ((new_frame->feature_mask & BATCH_DUP_MASK) >> 8) {
+ return msm_cpp_set_group_buffer_duplicate(cpp_dev, new_frame,
+ out_phyaddr, num_output_bufs);
+ }
+
+ if (new_frame->duplicate_output) {
+ pr_err("cannot support duplication enable\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
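+ /*
+ * Payload length: 2 fixed words (the command prefix and a zero word)
+ * plus 3 distance words for each output buffer after the first.
+ */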
+ set_group_buffer_len =
+ 2 + 3 * (num_output_bufs - 1);
+ /*
+ * Add 4 words to the MSM_CPP_CMD_GROUP_BUFFER payload length: one
+ * each for the header, the length field, the trailer and the
+ * MSM_CPP_CMD_GROUP_BUFFER prefix before the payload.
+ */
+ set_group_buffer_len += 4;
+ set_group_buffer_len_bytes = set_group_buffer_len *
+ sizeof(uint32_t);
+ set_group_buffer =
+ kzalloc(set_group_buffer_len_bytes, GFP_KERNEL);
+ if (!set_group_buffer) {
+ pr_err("%s: set group buffer data alloc failed\n",
+ __func__);
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ memset(set_group_buffer, 0x0,
+ set_group_buffer_len_bytes);
+ ptr = set_group_buffer;
+ /* Create and send Set Group Buffer */
+ *ptr++ = MSM_CPP_CMD_GROUP_BUFFER;
+ *ptr++ = MSM_CPP_MSG_ID_CMD;
+ /*
+ * This field is the length of the actual data in the payload
+ * (total length minus the 4 command framing words).
+ */
+ *ptr++ = set_group_buffer_len - 4;
+ *ptr++ = MSM_CPP_CMD_GROUP_BUFFER;
+ *ptr++ = 0;
+ out_phyaddr0 = out_phyaddr;
+
+ for (i = 1; i < num_output_bufs; i++) {
+ out_phyaddr1 =
+ msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->output_buffer_info[i],
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF),
+ &new_frame->output_buffer_info[i].fd);
+ if (!out_phyaddr1) {
+ pr_err("%s: error getting o/p phy addr\n",
+ __func__);
+ rc = -EINVAL;
+ goto free_and_exit;
+ }
+ distance = out_phyaddr1 - out_phyaddr0;
+ out_phyaddr0 = out_phyaddr1;
+ *ptr++ = distance;
+ *ptr++ = distance;
+ *ptr++ = distance;
+ }
+ if (rc)
+ goto free_and_exit;
+
+ /*
+ * The CPP message id trailer goes at the last index of the set
+ * group buffer command, i.e. its length minus 1.
+ */
+ set_group_buffer[set_group_buffer_len - 1] =
+ MSM_CPP_MSG_ID_TRAILER;
+ rc = msm_cpp_send_command_to_hardware(cpp_dev,
+ set_group_buffer, set_group_buffer_len);
+ if (rc < 0)
+ pr_err("Send Command Error rc %d\n", rc);
+
+free_and_exit:
+ kfree(set_group_buffer);
+exit:
+ return rc;
+}
+
+static int msm_cpp_cfg_frame(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *new_frame)
+{
+ int32_t rc = 0;
+ struct msm_queue_cmd *frame_qcmd = NULL;
+ uint32_t *cpp_frame_msg;
+ unsigned long in_phyaddr, out_phyaddr0 = (unsigned long)NULL;
+ unsigned long out_phyaddr1;
+ unsigned long tnr_scratch_buffer0, tnr_scratch_buffer1;
+ uint16_t num_stripes = 0;
+ struct msm_buf_mngr_info buff_mgr_info, dup_buff_mgr_info;
+ int32_t in_fd;
+ int32_t num_output_bufs = 1;
+ uint32_t stripe_base = 0;
+ uint32_t stripe_size;
+ uint8_t tnr_enabled;
+ enum msm_camera_buf_mngr_buf_type buf_type =
+ MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
+ uint32_t ioctl_cmd, idx;
+ uint32_t op_index, dup_index;
+
+ stripe_base = cpp_dev->payload_params.stripe_base;
+ stripe_size = cpp_dev->payload_params.stripe_size;
+
+ if (!new_frame) {
+ pr_err("%s: Frame is Null\n", __func__);
+ return -EINVAL;
+ }
+
+ if (cpp_dev->state == CPP_STATE_OFF) {
+ pr_err("%s: cpp state is off, return fatal error\n", __func__);
+ return -EINVAL;
+ }
+
+ cpp_frame_msg = new_frame->cpp_cmd_msg;
+
+ if (cpp_frame_msg == NULL ||
+ (new_frame->msg_len < MSM_CPP_MIN_FRAME_LENGTH)) {
+ pr_err("Length is not correct or frame message is missing\n");
+ return -EINVAL;
+ }
+
+ if (cpp_frame_msg[new_frame->msg_len - 1] !=
+ MSM_CPP_MSG_ID_TRAILER) {
+ pr_err("Invalid frame message\n");
+ return -EINVAL;
+ }
+
+ /* Stripe index starts at zero */
+ if ((!new_frame->num_strips) ||
+ (new_frame->first_stripe_index >= new_frame->num_strips) ||
+ (new_frame->last_stripe_index >= new_frame->num_strips) ||
+ (new_frame->first_stripe_index >
+ new_frame->last_stripe_index)) {
+ pr_err("Invalid frame message, #stripes=%d, stripe indices=[%d,%d]\n",
+ new_frame->num_strips,
+ new_frame->first_stripe_index,
+ new_frame->last_stripe_index);
+ return -EINVAL;
+ }
+
+ if (!stripe_size) {
+ pr_err("Invalid frame message, invalid stripe_size (%d)!\n",
+ stripe_size);
+ return -EINVAL;
+ }
+
+ if ((stripe_base == UINT_MAX) ||
+ (new_frame->num_strips >
+ (UINT_MAX - 1 - stripe_base) / stripe_size)) {
+ pr_err("Invalid frame message, num_strips %d is large\n",
+ new_frame->num_strips);
+ return -EINVAL;
+ }
+
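+ /*
+ * The message must be exactly stripe_base header words, num_strips
+ * stripes of stripe_size words each, and one trailer word.
+ */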
+ if ((stripe_base + new_frame->num_strips * stripe_size + 1) !=
+ new_frame->msg_len) {
+ pr_err("Invalid frame message,len=%d,expected=%d\n",
+ new_frame->msg_len,
+ (stripe_base +
+ new_frame->num_strips * stripe_size + 1));
+ return -EINVAL;
+ }
+
+ if (cpp_dev->iommu_state != CPP_IOMMU_STATE_ATTACHED) {
+ pr_err("IOMMU is not attached\n");
+ return -EAGAIN;
+ }
+
+ in_phyaddr = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->input_buffer_info,
+ ((new_frame->input_buffer_info.identity >> 16) & 0xFFFF),
+ (new_frame->input_buffer_info.identity & 0xFFFF), &in_fd);
+ if (!in_phyaddr) {
+ pr_err("%s: error gettting input physical address\n", __func__);
+ rc = -EINVAL;
+ goto frame_msg_err;
+ }
+
+ op_index = new_frame->output_buffer_info[0].index;
+ dup_index = new_frame->duplicate_buffer_info.index;
+
+ if (new_frame->we_disable == 0) {
+ int32_t iden = new_frame->identity;
+
+ if ((new_frame->output_buffer_info[0].native_buff == 0) &&
+ (new_frame->first_payload)) {
+ memset(&buff_mgr_info, 0,
+ sizeof(struct msm_buf_mngr_info));
+ if ((new_frame->batch_info.batch_mode ==
+ BATCH_MODE_VIDEO) ||
+ (IS_BATCH_BUFFER_ON_PREVIEW(new_frame)))
+ buf_type = MSM_CAMERA_BUF_MNGR_BUF_USER;
+
+ SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(new_frame,
+ iden, new_frame->duplicate_identity);
+
+ /*
+ * Swap the input buffer index for batch mode with
+ * buffer on preview
+ */
+ SWAP_BUF_INDEX_FOR_BATCH_ON_PREVIEW(new_frame,
+ buff_mgr_info, op_index, dup_index);
+
+ buff_mgr_info.session_id = ((iden >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id = (iden & 0xFFFF);
+ buff_mgr_info.type = buf_type;
+
+ if (IS_DEFAULT_OUTPUT_BUF_INDEX(buff_mgr_info.index)) {
+ ioctl_cmd = VIDIOC_MSM_BUF_MNGR_GET_BUF;
+ idx = 0x0;
+ } else {
+ ioctl_cmd = VIDIOC_MSM_BUF_MNGR_IOCTL_CMD;
+ idx =
+ MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX;
+ }
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ ioctl_cmd, idx, &buff_mgr_info);
+ if (rc < 0) {
+ rc = -EAGAIN;
+ pr_debug("%s:get_buf err rc:%d, index %d\n",
+ __func__, rc,
+ new_frame->output_buffer_info[0].index);
+ goto frame_msg_err;
+ }
+ num_output_bufs =
+ msm_cpp_check_buf_type(&buff_mgr_info,
+ new_frame);
+ if (!num_output_bufs) {
+ pr_err("%s: error getting buffer %d\n",
+ __func__, num_output_bufs);
+ rc = -EINVAL;
+ goto phyaddr_err;
+ }
+ }
+
+ out_phyaddr0 = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->output_buffer_info[0],
+ ((iden >> 16) & 0xFFFF),
+ (iden & 0xFFFF),
+ &new_frame->output_buffer_info[0].fd);
+ if (!out_phyaddr0) {
+ pr_err("%s: error gettting output physical address\n",
+ __func__);
+ rc = -EINVAL;
+ goto phyaddr_err;
+ }
+ }
+ out_phyaddr1 = out_phyaddr0;
+
+ /* get buffer for duplicate output */
+ if (new_frame->duplicate_output) {
+ int32_t iden = new_frame->duplicate_identity;
+
+ CPP_DBG("duplication enabled, dup_id=0x%x",
+ new_frame->duplicate_identity);
+
+ SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(new_frame,
+ iden, new_frame->identity);
+
+ memset(&dup_buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
+
+ /*
+ * Swap the input buffer index for batch mode with
+ * buffer on preview
+ */
+ SWAP_BUF_INDEX_FOR_BATCH_ON_PREVIEW(new_frame,
+ dup_buff_mgr_info, dup_index, op_index);
+
+ dup_buff_mgr_info.session_id = ((iden >> 16) & 0xFFFF);
+ dup_buff_mgr_info.stream_id = (iden & 0xFFFF);
+ dup_buff_mgr_info.type =
+ MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
+ if (IS_DEFAULT_OUTPUT_BUF_INDEX(dup_buff_mgr_info.index)) {
+ ioctl_cmd = VIDIOC_MSM_BUF_MNGR_GET_BUF;
+ idx = 0x0;
+ } else {
+ ioctl_cmd = VIDIOC_MSM_BUF_MNGR_IOCTL_CMD;
+ idx = MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX;
+ }
+ rc = msm_cpp_buffer_ops(cpp_dev, ioctl_cmd, idx,
+ &dup_buff_mgr_info);
+ if (rc < 0) {
+ rc = -EAGAIN;
+ pr_debug("%s: get_buf err rc:%d, index %d\n",
+ __func__, rc,
+ new_frame->duplicate_buffer_info.index);
+ goto phyaddr_err;
+ }
+ new_frame->duplicate_buffer_info.index =
+ dup_buff_mgr_info.index;
+ out_phyaddr1 = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->duplicate_buffer_info,
+ ((iden >> 16) & 0xFFFF),
+ (iden & 0xFFFF),
+ &new_frame->duplicate_buffer_info.fd);
+ if (!out_phyaddr1) {
+ pr_err("error gettting output physical address\n");
+ rc = -EINVAL;
+ msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ 0x0, &dup_buff_mgr_info);
+ goto phyaddr_err;
+ }
+ /* set duplicate enable bit */
+ cpp_frame_msg[5] |= 0x1;
+ CPP_DBG("out_phyaddr1= %08x\n", (uint32_t)out_phyaddr1);
+ }
+
+ tnr_enabled = ((new_frame->feature_mask & TNR_MASK) >> 2);
+ if (tnr_enabled) {
+ tnr_scratch_buffer0 = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->tnr_scratch_buffer_info[0],
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF),
+ &new_frame->tnr_scratch_buffer_info[0].fd);
+ if (!tnr_scratch_buffer0) {
+ pr_err("error getting scratch buffer physical address\n");
+ rc = -EINVAL;
+ goto phyaddr_err;
+ }
+
+ tnr_scratch_buffer1 = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->tnr_scratch_buffer_info[1],
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF),
+ &new_frame->tnr_scratch_buffer_info[1].fd);
+ if (!tnr_scratch_buffer1) {
+ pr_err("error getting scratch buffer physical address\n");
+ rc = -EINVAL;
+ goto phyaddr_err;
+ }
+ } else {
+ tnr_scratch_buffer0 = 0;
+ tnr_scratch_buffer1 = 0;
+ }
+
+ msm_cpp_update_frame_msg_phy_address(cpp_dev, new_frame,
+ in_phyaddr, out_phyaddr0, out_phyaddr1,
+ tnr_scratch_buffer0, tnr_scratch_buffer1);
+ if (tnr_enabled) {
+ cpp_frame_msg[10] = tnr_scratch_buffer1 -
+ tnr_scratch_buffer0;
+ }
+
+ rc = msm_cpp_set_group_buffer(cpp_dev, new_frame, out_phyaddr0,
+ num_output_bufs);
+ if (rc) {
+ pr_err("%s: set group buffer failure %d\n", __func__, rc);
+ goto phyaddr_err;
+ }
+
+ num_stripes = new_frame->last_stripe_index -
+ new_frame->first_stripe_index + 1;
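+ /*
+ * Recompute word 1 of the frame message for the number of stripes
+ * actually being processed (first to last stripe index).
+ */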
+ cpp_frame_msg[1] = stripe_base - 2 + num_stripes * stripe_size;
+
+ frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+ if (!frame_qcmd) {
+ rc = -ENOMEM;
+ goto qcmd_err;
+ }
+
+ atomic_set(&frame_qcmd->on_heap, 1);
+ frame_qcmd->command = new_frame;
+ rc = msm_cpp_send_frame_to_hardware(cpp_dev, frame_qcmd);
+ if (rc < 0) {
+ pr_err("%s: error cannot send frame to hardware\n", __func__);
+ rc = -EINVAL;
+ goto qcmd_err;
+ }
+
+ return rc;
+qcmd_err:
+ kfree(frame_qcmd);
+phyaddr_err:
+ if (new_frame->output_buffer_info[0].native_buff == 0)
+ msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ 0x0, &buff_mgr_info);
+frame_msg_err:
+ kfree(cpp_frame_msg);
+ kfree(new_frame);
+ return rc;
+}
+
+static int msm_cpp_cfg(struct cpp_device *cpp_dev,
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ struct msm_cpp_frame_info_t *frame = NULL;
+ struct msm_cpp_frame_info_t k_frame_info;
+ int32_t rc = 0;
+ uint32_t i = 0;
+ uint32_t num_buff = sizeof(k_frame_info.output_buffer_info) /
+ sizeof(struct msm_cpp_buffer_info_t);
+
+ if (copy_from_user(&k_frame_info,
+ (void __user *)ioctl_ptr->ioctl_ptr,
+ sizeof(k_frame_info)))
+ return -EFAULT;
+
+ frame = msm_cpp_get_frame(ioctl_ptr);
+ if (!frame) {
+ pr_err("%s: Error allocating frame\n", __func__);
+ rc = -EINVAL;
+ } else {
+ rc = msm_cpp_cfg_frame(cpp_dev, frame);
+ if (rc >= 0) {
+ for (i = 0; i < num_buff; i++) {
+ k_frame_info.output_buffer_info[i] =
+ frame->output_buffer_info[i];
+ }
+ }
+ }
+
+ ioctl_ptr->trans_code = rc;
+
+ if (copy_to_user((void __user *)k_frame_info.status, &rc,
+ sizeof(int32_t)))
+ pr_err("error cannot copy error\n");
+
+
+ if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
+ &k_frame_info, sizeof(k_frame_info))) {
+ pr_err("Error: cannot copy k_frame_info");
+ return -EFAULT;
+ }
+
+ return rc;
+}
+
+void msm_cpp_clean_queue(struct cpp_device *cpp_dev)
+{
+ struct msm_queue_cmd *frame_qcmd = NULL;
+ struct msm_cpp_frame_info_t *processed_frame = NULL;
+ struct msm_device_queue *queue = NULL;
+
+ while (cpp_dev->processing_q.len) {
+ pr_debug("queue len:%d\n", cpp_dev->processing_q.len);
+ queue = &cpp_dev->processing_q;
+ frame_qcmd = msm_dequeue(queue, list_frame, POP_FRONT);
+ if (frame_qcmd) {
+ processed_frame = frame_qcmd->command;
+ kfree(frame_qcmd);
+ if (processed_frame)
+ kfree(processed_frame->cpp_cmd_msg);
+ kfree(processed_frame);
+ }
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static int msm_cpp_copy_from_ioctl_ptr(void *dst_ptr,
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ int ret;
+
+ if ((ioctl_ptr->ioctl_ptr == NULL) || (ioctl_ptr->len == 0)) {
+ pr_err("%s: Wrong ioctl_ptr %pK / len %zu\n", __func__,
+ ioctl_ptr, ioctl_ptr->len);
+ return -EINVAL;
+ }
+
+ /* For a compat task the source pointer is already in kernel space */
+ if (is_compat_task()) {
+ memcpy(dst_ptr, ioctl_ptr->ioctl_ptr, ioctl_ptr->len);
+ ret = 0;
+ } else {
+ ret = copy_from_user(dst_ptr,
+ (void __user *)ioctl_ptr->ioctl_ptr, ioctl_ptr->len);
+ if (ret)
+ pr_err("Copy from user fail %d\n", ret);
+ }
+ return ret ? -EFAULT : 0;
+}
+#else
+static int msm_cpp_copy_from_ioctl_ptr(void *dst_ptr,
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ int ret;
+
+ if ((ioctl_ptr->ioctl_ptr == NULL) || (ioctl_ptr->len == 0)) {
+ pr_err("%s: Wrong ioctl_ptr %pK / len %zu\n", __func__,
+ ioctl_ptr, ioctl_ptr->len);
+ return -EINVAL;
+ }
+
+ ret = copy_from_user(dst_ptr,
+ (void __user *)ioctl_ptr->ioctl_ptr, ioctl_ptr->len);
+ if (ret)
+ pr_err("Copy from user fail %d\n", ret);
+
+ return ret ? -EFAULT : 0;
+}
+#endif
+
+static int32_t msm_cpp_fw_version(struct cpp_device *cpp_dev)
+{
+ int32_t rc = 0;
+
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll rx empty failed %d",
+ __func__, __LINE__, rc);
+ goto end;
+ }
+ /* Get Firmware Version */
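+ /*
+ * Five words are written: the command id, the MSM_CPP_MSG_ID_CMD
+ * header, a length of one, the command id again as the payload,
+ * and the trailer.
+ */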
+ msm_cpp_write(MSM_CPP_CMD_GET_FW_VER, cpp_dev->base);
+ msm_cpp_write(MSM_CPP_MSG_ID_CMD, cpp_dev->base);
+ msm_cpp_write(0x1, cpp_dev->base);
+ msm_cpp_write(MSM_CPP_CMD_GET_FW_VER, cpp_dev->base);
+ msm_cpp_write(MSM_CPP_MSG_ID_TRAILER, cpp_dev->base);
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_CMD, rc);
+ goto end;
+ }
+ rc = msm_cpp_poll(cpp_dev->base, 0x2);
+ if (rc) {
+ pr_err("%s:%d] poll command 0x2 failed %d", __func__, __LINE__,
+ rc);
+ goto end;
+ }
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_FW_VER);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_FW_VER, rc);
+ goto end;
+ }
+
+ cpp_dev->fw_version = msm_cpp_read(cpp_dev->base);
+ pr_debug("CPP FW Version: 0x%08x\n", cpp_dev->fw_version);
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_TRAILER, rc);
+ }
+
+end:
+
+ return rc;
+}
+
+static int msm_cpp_validate_input(unsigned int cmd, void *arg,
+ struct msm_camera_v4l2_ioctl_t **ioctl_ptr)
+{
+ switch (cmd) {
+ case MSM_SD_SHUTDOWN:
+ case MSM_SD_NOTIFY_FREEZE:
+ case MSM_SD_UNNOTIFY_FREEZE:
+ break;
+ default: {
+ if (ioctl_ptr == NULL) {
+ pr_err("Wrong ioctl_ptr for cmd %u\n", cmd);
+ return -EINVAL;
+ }
+
+ *ioctl_ptr = arg;
+ if ((*ioctl_ptr == NULL) ||
+ (*ioctl_ptr)->ioctl_ptr == NULL) {
+ pr_err("Error invalid ioctl argument cmd %u", cmd);
+ return -EINVAL;
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct cpp_device *cpp_dev = NULL;
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = NULL;
+ int rc = 0;
+
+ if (sd == NULL) {
+ pr_err("sd %pK\n", sd);
+ return -EINVAL;
+ }
+ cpp_dev = v4l2_get_subdevdata(sd);
+ if (cpp_dev == NULL) {
+ pr_err("cpp_dev is null\n");
+ return -EINVAL;
+ }
+
+ if (_IOC_DIR(cmd) == _IOC_NONE) {
+ pr_err("Invalid ioctl/subdev cmd %u", cmd);
+ return -EINVAL;
+ }
+
+ rc = msm_cpp_validate_input(cmd, arg, &ioctl_ptr);
+ if (rc != 0) {
+ pr_err("input validation failed\n");
+ return rc;
+ }
+ mutex_lock(&cpp_dev->mutex);
+
+ CPP_DBG("E cmd: 0x%x\n", cmd);
+ switch (cmd) {
+ case VIDIOC_MSM_CPP_GET_HW_INFO: {
+ CPP_DBG("VIDIOC_MSM_CPP_GET_HW_INFO\n");
+ if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
+ &cpp_dev->hw_info,
+ sizeof(struct cpp_hw_info))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+ break;
+ }
+
+ case VIDIOC_MSM_CPP_LOAD_FIRMWARE: {
+ CPP_DBG("VIDIOC_MSM_CPP_LOAD_FIRMWARE\n");
+ if (cpp_dev->is_firmware_loaded == 0) {
+ if (cpp_dev->fw_name_bin != NULL) {
+ kfree(cpp_dev->fw_name_bin);
+ cpp_dev->fw_name_bin = NULL;
+ }
+ if (cpp_dev->fw) {
+ release_firmware(cpp_dev->fw);
+ cpp_dev->fw = NULL;
+ }
+ if ((ioctl_ptr->len == 0) ||
+ (ioctl_ptr->len > MSM_CPP_MAX_FW_NAME_LEN)) {
+ pr_err("ioctl_ptr->len is 0\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ cpp_dev->fw_name_bin = kzalloc(ioctl_ptr->len+1,
+ GFP_KERNEL);
+ if (!cpp_dev->fw_name_bin) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ if (ioctl_ptr->ioctl_ptr == NULL) {
+ pr_err("ioctl_ptr->ioctl_ptr=NULL\n");
+ kfree(cpp_dev->fw_name_bin);
+ cpp_dev->fw_name_bin = NULL;
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ rc = (copy_from_user(cpp_dev->fw_name_bin,
+ (void __user *)ioctl_ptr->ioctl_ptr,
+ ioctl_ptr->len) ? -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ kfree(cpp_dev->fw_name_bin);
+ cpp_dev->fw_name_bin = NULL;
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ *(cpp_dev->fw_name_bin+ioctl_ptr->len) = '\0';
+ rc = request_firmware(&cpp_dev->fw,
+ cpp_dev->fw_name_bin,
+ &cpp_dev->pdev->dev);
+ if (rc) {
+ dev_err(&cpp_dev->pdev->dev,
+ "Fail to loc blob %s dev %pK, rc:%d\n",
+ cpp_dev->fw_name_bin,
+ &cpp_dev->pdev->dev, rc);
+ kfree(cpp_dev->fw_name_bin);
+ cpp_dev->fw_name_bin = NULL;
+ cpp_dev->fw = NULL;
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ msm_camera_enable_irq(cpp_dev->irq, false);
+ rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ if (rc < 0) {
+ pr_err("%s: load firmware failure %d-retry\n",
+ __func__, rc);
+ rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
+ if (rc < 0) {
+ enable_irq(cpp_dev->irq->start);
+ mutex_unlock(&cpp_dev->mutex);
+ return rc;
+ }
+ }
+ rc = msm_cpp_fw_version(cpp_dev);
+ if (rc < 0) {
+ pr_err("%s: get firmware failure %d\n",
+ __func__, rc);
+ enable_irq(cpp_dev->irq->start);
+ mutex_unlock(&cpp_dev->mutex);
+ return rc;
+ }
+ msm_camera_enable_irq(cpp_dev->irq, true);
+ cpp_dev->is_firmware_loaded = 1;
+ }
+ break;
+ }
+ case VIDIOC_MSM_CPP_CFG:
+ CPP_DBG("VIDIOC_MSM_CPP_CFG\n");
+ rc = msm_cpp_cfg(cpp_dev, ioctl_ptr);
+ break;
+ case VIDIOC_MSM_CPP_FLUSH_QUEUE:
+ CPP_DBG("VIDIOC_MSM_CPP_FLUSH_QUEUE\n");
+ rc = msm_cpp_flush_frames(cpp_dev);
+ break;
+ case VIDIOC_MSM_CPP_DELETE_STREAM_BUFF:
+ case VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO:
+ case VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO: {
+ uint32_t j;
+ struct msm_cpp_stream_buff_info_t *u_stream_buff_info = NULL;
+ struct msm_cpp_stream_buff_info_t k_stream_buff_info;
+ struct msm_cpp_buff_queue_info_t *buff_queue_info = NULL;
+
+ memset(&k_stream_buff_info, 0, sizeof(k_stream_buff_info));
+ CPP_DBG("VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO\n");
+ if (sizeof(struct msm_cpp_stream_buff_info_t) !=
+ ioctl_ptr->len) {
+ pr_err("%s:%d: invalid length\n", __func__, __LINE__);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ u_stream_buff_info = kzalloc(ioctl_ptr->len, GFP_KERNEL);
+ if (!u_stream_buff_info) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = msm_cpp_copy_from_ioctl_ptr(u_stream_buff_info,
+ ioctl_ptr);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ kfree(u_stream_buff_info);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ k_stream_buff_info.num_buffs = u_stream_buff_info->num_buffs;
+ k_stream_buff_info.identity = u_stream_buff_info->identity;
+
+ if (k_stream_buff_info.num_buffs > MSM_CAMERA_MAX_STREAM_BUF) {
+ pr_err("%s:%d: unexpected large num buff requested\n",
+ __func__, __LINE__);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ if (u_stream_buff_info->num_buffs != 0) {
+ k_stream_buff_info.buffer_info =
+ kzalloc(k_stream_buff_info.num_buffs *
+ sizeof(struct msm_cpp_buffer_info_t),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(k_stream_buff_info.buffer_info)) {
+ pr_err("%s:%d: malloc error\n",
+ __func__, __LINE__);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = (copy_from_user(k_stream_buff_info.buffer_info,
+ (void __user *)u_stream_buff_info->buffer_info,
+ k_stream_buff_info.num_buffs *
+ sizeof(struct msm_cpp_buffer_info_t)) ?
+ -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ kfree(k_stream_buff_info.buffer_info);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ }
+
+ buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev,
+ (k_stream_buff_info.identity >> 16) & 0xFFFF,
+ k_stream_buff_info.identity & 0xFFFF);
+
+ if (buff_queue_info == NULL) {
+ if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF)
+ goto STREAM_BUFF_END;
+
+ rc = msm_cpp_add_buff_queue_entry(cpp_dev,
+ ((k_stream_buff_info.identity >> 16) & 0xFFFF),
+ (k_stream_buff_info.identity & 0xFFFF));
+
+ if (rc)
+ goto STREAM_BUFF_END;
+
+ if (cpp_dev->stream_cnt == 0) {
+ cpp_dev->state = CPP_STATE_ACTIVE;
+ msm_cpp_clear_timer(cpp_dev);
+ msm_cpp_clean_queue(cpp_dev);
+ }
+ cpp_dev->stream_cnt++;
+ CPP_DBG("stream_cnt:%d\n", cpp_dev->stream_cnt);
+ }
+ buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev,
+ ((k_stream_buff_info.identity >> 16) & 0xFFFF),
+ (k_stream_buff_info.identity & 0xFFFF));
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry identity:%d\n",
+ k_stream_buff_info.identity);
+ kfree(k_stream_buff_info.buffer_info);
+ kfree(u_stream_buff_info);
+ cpp_dev->stream_cnt--;
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF) {
+ for (j = 0; j < k_stream_buff_info.num_buffs; j++) {
+ msm_cpp_dequeue_buff(cpp_dev, buff_queue_info,
+ k_stream_buff_info.buffer_info[j].index,
+ k_stream_buff_info.buffer_info[j].native_buff);
+ }
+ } else {
+ for (j = 0; j < k_stream_buff_info.num_buffs; j++) {
+ msm_cpp_queue_buffer_info(cpp_dev,
+ buff_queue_info,
+ &k_stream_buff_info.buffer_info[j]);
+ }
+ }
+
+STREAM_BUFF_END:
+ kfree(k_stream_buff_info.buffer_info);
+ kfree(u_stream_buff_info);
+
+ break;
+ }
+ case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO: {
+ uint32_t identity;
+ struct msm_cpp_buff_queue_info_t *buff_queue_info;
+
+ CPP_DBG("VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO\n");
+ if (ioctl_ptr->len != sizeof(uint32_t)) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = msm_cpp_copy_from_ioctl_ptr(&identity, ioctl_ptr);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev,
+ ((identity >> 16) & 0xFFFF), (identity & 0xFFFF));
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for identity:%d\n",
+ identity);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ msm_cpp_dequeue_buff_info_list(cpp_dev, buff_queue_info);
+ rc = msm_cpp_free_buff_queue_entry(cpp_dev,
+ buff_queue_info->session_id,
+ buff_queue_info->stream_id);
+ if (cpp_dev->stream_cnt > 0) {
+ cpp_dev->stream_cnt--;
+ pr_debug("stream_cnt:%d\n", cpp_dev->stream_cnt);
+ if (cpp_dev->stream_cnt == 0) {
+ rc = msm_cpp_update_bandwidth_setting(cpp_dev,
+ 0, 0);
+ if (rc < 0)
+ pr_err("Bandwidth Reset Failed!\n");
+ cpp_dev->state = CPP_STATE_IDLE;
+ msm_cpp_clear_timer(cpp_dev);
+ msm_cpp_clean_queue(cpp_dev);
+ }
+ } else {
+ pr_err("error: stream count underflow %d\n",
+ cpp_dev->stream_cnt);
+ }
+ break;
+ }
+ case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD: {
+ struct msm_device_queue *queue = &cpp_dev->eventData_q;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_cpp_frame_info_t *process_frame;
+
+ CPP_DBG("VIDIOC_MSM_CPP_GET_EVENTPAYLOAD\n");
+ event_qcmd = msm_dequeue(queue, list_eventdata, POP_FRONT);
+ if (!event_qcmd) {
+ pr_err("no queue cmd available");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ process_frame = event_qcmd->command;
+ CPP_DBG("fid %d\n", process_frame->frame_id);
+ if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
+ process_frame,
+ sizeof(struct msm_cpp_frame_info_t))) {
+ mutex_unlock(&cpp_dev->mutex);
+ kfree(process_frame->cpp_cmd_msg);
+ kfree(process_frame);
+ kfree(event_qcmd);
+ return -EFAULT;
+ }
+
+ kfree(process_frame->cpp_cmd_msg);
+ kfree(process_frame);
+ kfree(event_qcmd);
+ break;
+ }
+ case VIDIOC_MSM_CPP_SET_CLOCK: {
+ int32_t msm_cpp_core_clk_idx;
+ struct msm_cpp_clock_settings_t clock_settings;
+ unsigned long clock_rate = 0;
+
+ CPP_DBG("VIDIOC_MSM_CPP_SET_CLOCK\n");
+ if (ioctl_ptr->len == 0) {
+ pr_err("ioctl_ptr->len is 0\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ if (ioctl_ptr->ioctl_ptr == NULL) {
+ pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ if (ioctl_ptr->len != sizeof(struct msm_cpp_clock_settings_t)) {
+ pr_err("Not valid ioctl_ptr->len\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = msm_cpp_copy_from_ioctl_ptr(&clock_settings, ioctl_ptr);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ if (clock_settings.clock_rate > 0) {
+ msm_cpp_core_clk_idx = msm_cpp_get_clock_index(cpp_dev,
+ "cpp_core_clk");
+ if (msm_cpp_core_clk_idx < 0) {
+ pr_err(" Fail to get clock index\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ rc = msm_cpp_update_bandwidth_setting(cpp_dev,
+ clock_settings.avg,
+ clock_settings.inst);
+ if (rc < 0) {
+ pr_err("Bandwidth Set Failed!\n");
+ rc = msm_cpp_update_bandwidth_setting(cpp_dev,
+ 0, 0);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ clock_rate = msm_cpp_set_core_clk(cpp_dev,
+ clock_settings.clock_rate,
+ msm_cpp_core_clk_idx);
+ if (rc < 0) {
+ pr_err("Fail to set core clk\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ if (clock_rate != clock_settings.clock_rate)
+ pr_err("clock rate differ from settings\n");
+ msm_isp_util_update_clk_rate(clock_settings.clock_rate);
+ }
+ break;
+ }
+ case MSM_SD_NOTIFY_FREEZE:
+ break;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ break;
+ case MSM_SD_SHUTDOWN:
+ CPP_DBG("MSM_SD_SHUTDOWN\n");
+ mutex_unlock(&cpp_dev->mutex);
+ pr_warn("shutdown cpp node. open cnt:%d\n",
+ cpp_dev->cpp_open_cnt);
+
+ if (atomic_read(&cpp_timer.used))
+ pr_debug("Timer state not cleared\n");
+
+ while (cpp_dev->cpp_open_cnt != 0)
+ cpp_close_node(sd, NULL);
+ mutex_lock(&cpp_dev->mutex);
+ rc = 0;
+ break;
+ case VIDIOC_MSM_CPP_QUEUE_BUF: {
+ struct msm_pproc_queue_buf_info queue_buf_info;
+
+ CPP_DBG("VIDIOC_MSM_CPP_QUEUE_BUF\n");
+
+ if (ioctl_ptr->len != sizeof(struct msm_pproc_queue_buf_info)) {
+ pr_err("%s: Not valid ioctl_ptr->len\n", __func__);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ rc = msm_cpp_copy_from_ioctl_ptr(&queue_buf_info, ioctl_ptr);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ break;
+ }
+
+ if (queue_buf_info.is_buf_dirty) {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ 0x0, &queue_buf_info.buff_mgr_info);
+ } else {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ 0x0, &queue_buf_info.buff_mgr_info);
+ }
+ if (rc < 0) {
+ pr_err("error in buf done\n");
+ rc = -EINVAL;
+ }
+
+ break;
+ }
+ case VIDIOC_MSM_CPP_POP_STREAM_BUFFER: {
+ struct msm_buf_mngr_info buff_mgr_info;
+ struct msm_cpp_frame_info_t frame_info;
+ uint32_t ioctl_cmd, idx;
+
+ if (ioctl_ptr->ioctl_ptr == NULL ||
+ (ioctl_ptr->len !=
+ sizeof(struct msm_cpp_frame_info_t))) {
+ rc = -EINVAL;
+ break;
+ }
+
+ rc = msm_cpp_copy_from_ioctl_ptr(&frame_info, ioctl_ptr);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ break;
+ }
+
+ memset(&buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
+ buff_mgr_info.session_id =
+ ((frame_info.identity >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id = (frame_info.identity & 0xFFFF);
+ buff_mgr_info.type =
+ MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
+ if (IS_DEFAULT_OUTPUT_BUF_INDEX(
+ frame_info.output_buffer_info[0].index)) {
+ ioctl_cmd = VIDIOC_MSM_BUF_MNGR_GET_BUF;
+ idx = 0x0;
+ } else {
+ ioctl_cmd = VIDIOC_MSM_BUF_MNGR_IOCTL_CMD;
+ idx = MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX;
+ }
+ rc = msm_cpp_buffer_ops(cpp_dev, ioctl_cmd, idx,
+ &buff_mgr_info);
+ if (rc < 0) {
+ rc = -EAGAIN;
+ pr_err_ratelimited("POP: get_buf err rc:%d, index %d\n",
+ rc, frame_info.output_buffer_info[0].index);
+ break;
+ }
+ buff_mgr_info.frame_id = frame_info.frame_id;
+ rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ 0x0, &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("error in buf done\n");
+ rc = -EAGAIN;
+ }
+ break;
+ }
+ default:
+ pr_err_ratelimited("invalid value: cmd=0x%x\n", cmd);
+ break;
+ case VIDIOC_MSM_CPP_IOMMU_ATTACH: {
+ if (cpp_dev->iommu_state == CPP_IOMMU_STATE_DETACHED) {
+ rc = cam_smmu_ops(cpp_dev->iommu_hdl, CAM_SMMU_ATTACH);
+ if (rc < 0) {
+ pr_err("%s:%dError iommu_attach_device failed\n",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ cpp_dev->iommu_state = CPP_IOMMU_STATE_ATTACHED;
+ } else {
+ pr_err("%s:%d IOMMMU attach triggered in invalid state\n",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ }
+ break;
+ }
+ case VIDIOC_MSM_CPP_IOMMU_DETACH: {
+ if ((cpp_dev->iommu_state == CPP_IOMMU_STATE_ATTACHED) &&
+ (cpp_dev->stream_cnt == 0)) {
+ rc = cam_smmu_ops(cpp_dev->iommu_hdl, CAM_SMMU_DETACH);
+ if (rc < 0) {
+ pr_err("%s:%dError iommu atach failed\n",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
+ } else {
+ pr_err("%s:%d IOMMMU attach triggered in invalid state\n",
+ __func__, __LINE__);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&cpp_dev->mutex);
+ CPP_DBG("X\n");
+ return rc;
+}
+
+int msm_cpp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ CPP_DBG("Called\n");
+ return v4l2_event_subscribe(fh, sub, MAX_CPP_V4l2_EVENTS, NULL);
+}
+
+int msm_cpp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ CPP_DBG("Called\n");
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+static struct v4l2_subdev_core_ops msm_cpp_subdev_core_ops = {
+ .ioctl = msm_cpp_subdev_ioctl,
+ .subscribe_event = msm_cpp_subscribe_event,
+ .unsubscribe_event = msm_cpp_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_ops msm_cpp_subdev_ops = {
+ .core = &msm_cpp_subdev_core_ops,
+};
+
+static long msm_cpp_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ struct v4l2_fh *vfh = NULL;
+
+ if ((arg == NULL) || (file == NULL)) {
+ pr_err("Invalid input parameters arg %pK, file %pK\n",
+ arg, file);
+ return -EINVAL;
+ }
+ vdev = video_devdata(file);
+ sd = vdev_to_v4l2_subdev(vdev);
+
+ if (sd == NULL) {
+ pr_err("Invalid input parameter sd %pK\n", sd);
+ return -EINVAL;
+ }
+ vfh = file->private_data;
+
+ switch (cmd) {
+ case VIDIOC_DQEVENT:
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+
+ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
+
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+ case VIDIOC_MSM_CPP_GET_INST_INFO: {
+ uint32_t i;
+ struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+ struct msm_cpp_frame_info_t inst_info;
+
+ memset(&inst_info, 0, sizeof(struct msm_cpp_frame_info_t));
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].vfh == vfh) {
+ inst_info.inst_id = i;
+ break;
+ }
+ }
+ if (copy_to_user(
+ (void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
+ sizeof(struct msm_cpp_frame_info_t))) {
+ return -EFAULT;
+ }
+ }
+ break;
+ default:
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ }
+
+ return 0;
+}
+
+static long msm_cpp_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_cpp_subdev_do_ioctl);
+}
+
+#ifdef CONFIG_COMPAT
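+/*
+ * Translate a 32-bit user space msm_cpp_frame_info32_t into the 64-bit
+ * kernel layout, including a kernel copy of the command message. The
+ * caller owns the returned frame and its cpp_cmd_msg.
+ */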
+static struct msm_cpp_frame_info_t *get_64bit_cpp_frame_from_compat(
+ struct msm_camera_v4l2_ioctl_t *kp_ioctl)
+{
+ struct msm_cpp_frame_info32_t *new_frame32 = NULL;
+ struct msm_cpp_frame_info_t *new_frame = NULL;
+ uint32_t *cpp_frame_msg;
+ void *cpp_cmd_msg_64bit;
+ int32_t rc, i;
+
+ new_frame32 = kzalloc(sizeof(struct msm_cpp_frame_info32_t),
+ GFP_KERNEL);
+ if (!new_frame32)
+ goto no_mem32;
+ new_frame = kzalloc(sizeof(struct msm_cpp_frame_info_t), GFP_KERNEL);
+ if (!new_frame)
+ goto no_mem;
+
+ rc = (copy_from_user(new_frame32, (void __user *)kp_ioctl->ioctl_ptr,
+ sizeof(struct msm_cpp_frame_info32_t)) ? -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ goto frame_err;
+ }
+
+ new_frame->frame_id = new_frame32->frame_id;
+ new_frame->inst_id = new_frame32->inst_id;
+ new_frame->client_id = new_frame32->client_id;
+ new_frame->frame_type = new_frame32->frame_type;
+ new_frame->num_strips = new_frame32->num_strips;
+
+ new_frame->src_fd = new_frame32->src_fd;
+ new_frame->dst_fd = new_frame32->dst_fd;
+
+ new_frame->timestamp.tv_sec =
+ (unsigned long)new_frame32->timestamp.tv_sec;
+ new_frame->timestamp.tv_usec =
+ (unsigned long)new_frame32->timestamp.tv_usec;
+
+ new_frame->in_time.tv_sec =
+ (unsigned long)new_frame32->in_time.tv_sec;
+ new_frame->in_time.tv_usec =
+ (unsigned long)new_frame32->in_time.tv_usec;
+
+ new_frame->out_time.tv_sec =
+ (unsigned long)new_frame32->out_time.tv_sec;
+ new_frame->out_time.tv_usec =
+ (unsigned long)new_frame32->out_time.tv_usec;
+
+ new_frame->msg_len = new_frame32->msg_len;
+ new_frame->identity = new_frame32->identity;
+ new_frame->input_buffer_info = new_frame32->input_buffer_info;
+ new_frame->output_buffer_info[0] =
+ new_frame32->output_buffer_info[0];
+ new_frame->output_buffer_info[1] =
+ new_frame32->output_buffer_info[1];
+ new_frame->output_buffer_info[2] =
+ new_frame32->output_buffer_info[2];
+ new_frame->output_buffer_info[3] =
+ new_frame32->output_buffer_info[3];
+ new_frame->output_buffer_info[4] =
+ new_frame32->output_buffer_info[4];
+ new_frame->output_buffer_info[5] =
+ new_frame32->output_buffer_info[5];
+ new_frame->output_buffer_info[6] =
+ new_frame32->output_buffer_info[6];
+ new_frame->output_buffer_info[7] =
+ new_frame32->output_buffer_info[7];
+ new_frame->duplicate_buffer_info =
+ new_frame32->duplicate_buffer_info;
+ new_frame->tnr_scratch_buffer_info[0] =
+ new_frame32->tnr_scratch_buffer_info[0];
+ new_frame->tnr_scratch_buffer_info[1] =
+ new_frame32->tnr_scratch_buffer_info[1];
+ new_frame->duplicate_output = new_frame32->duplicate_output;
+ new_frame->we_disable = new_frame32->we_disable;
+ new_frame->duplicate_identity = new_frame32->duplicate_identity;
+ new_frame->feature_mask = new_frame32->feature_mask;
+ new_frame->partial_frame_indicator =
+ new_frame32->partial_frame_indicator;
+ new_frame->first_payload = new_frame32->first_payload;
+ new_frame->last_payload = new_frame32->last_payload;
+ new_frame->first_stripe_index = new_frame32->first_stripe_index;
+ new_frame->last_stripe_index = new_frame32->last_stripe_index;
+ new_frame->stripe_info_offset =
+ new_frame32->stripe_info_offset;
+ new_frame->stripe_info = new_frame32->stripe_info;
+ new_frame->batch_info.batch_mode =
+ new_frame32->batch_info.batch_mode;
+ new_frame->batch_info.batch_size =
+ new_frame32->batch_info.batch_size;
+ new_frame->batch_info.cont_idx =
+ new_frame32->batch_info.cont_idx;
+ for (i = 0; i < MAX_PLANES; i++)
+ new_frame->batch_info.intra_plane_offset[i] =
+ new_frame32->batch_info.intra_plane_offset[i];
+ new_frame->batch_info.pick_preview_idx =
+ new_frame32->batch_info.pick_preview_idx;
+
+ /* Convert the 32 bit pointer to 64 bit pointer */
+ new_frame->cookie = compat_ptr(new_frame32->cookie);
+ cpp_cmd_msg_64bit = compat_ptr(new_frame32->cpp_cmd_msg);
+ if ((new_frame->msg_len == 0) ||
+ (new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
+ pr_err("%s:%d: Invalid frame len:%d\n", __func__,
+ __LINE__, new_frame->msg_len);
+ goto frame_err;
+ }
+
+ cpp_frame_msg = kcalloc(new_frame->msg_len, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!cpp_frame_msg)
+ goto frame_err;
+
+ rc = (copy_from_user(cpp_frame_msg,
+ (void __user *)cpp_cmd_msg_64bit,
+ sizeof(uint32_t)*new_frame->msg_len) ? -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ goto frame_msg_err;
+ }
+ new_frame->cpp_cmd_msg = cpp_frame_msg;
+
+ kfree(new_frame32);
+ return new_frame;
+
+frame_msg_err:
+ kfree(cpp_frame_msg);
+frame_err:
+ kfree(new_frame);
+no_mem:
+ kfree(new_frame32);
+no_mem32:
+ return NULL;
+}
+
+static void get_compat_frame_from_64bit(struct msm_cpp_frame_info_t *frame,
+ struct msm_cpp_frame_info32_t *k32_frame)
+{
+ int32_t i;
+
+ k32_frame->frame_id = frame->frame_id;
+ k32_frame->inst_id = frame->inst_id;
+ k32_frame->client_id = frame->client_id;
+ k32_frame->frame_type = frame->frame_type;
+ k32_frame->num_strips = frame->num_strips;
+
+ k32_frame->src_fd = frame->src_fd;
+ k32_frame->dst_fd = frame->dst_fd;
+
+ k32_frame->timestamp.tv_sec = (uint32_t)frame->timestamp.tv_sec;
+ k32_frame->timestamp.tv_usec = (uint32_t)frame->timestamp.tv_usec;
+
+ k32_frame->in_time.tv_sec = (uint32_t)frame->in_time.tv_sec;
+ k32_frame->in_time.tv_usec = (uint32_t)frame->in_time.tv_usec;
+
+ k32_frame->out_time.tv_sec = (uint32_t)frame->out_time.tv_sec;
+ k32_frame->out_time.tv_usec = (uint32_t)frame->out_time.tv_usec;
+
+ k32_frame->msg_len = frame->msg_len;
+ k32_frame->identity = frame->identity;
+ k32_frame->input_buffer_info = frame->input_buffer_info;
+ k32_frame->output_buffer_info[0] = frame->output_buffer_info[0];
+ k32_frame->output_buffer_info[1] = frame->output_buffer_info[1];
+ k32_frame->output_buffer_info[2] = frame->output_buffer_info[2];
+ k32_frame->output_buffer_info[3] = frame->output_buffer_info[3];
+ k32_frame->output_buffer_info[4] = frame->output_buffer_info[4];
+ k32_frame->output_buffer_info[5] = frame->output_buffer_info[5];
+ k32_frame->output_buffer_info[6] = frame->output_buffer_info[6];
+ k32_frame->output_buffer_info[7] = frame->output_buffer_info[7];
+ k32_frame->duplicate_buffer_info = frame->duplicate_buffer_info;
+ k32_frame->duplicate_output = frame->duplicate_output;
+ k32_frame->we_disable = frame->we_disable;
+ k32_frame->duplicate_identity = frame->duplicate_identity;
+ k32_frame->feature_mask = frame->feature_mask;
+ k32_frame->cookie = ptr_to_compat(frame->cookie);
+ k32_frame->partial_frame_indicator = frame->partial_frame_indicator;
+ k32_frame->first_payload = frame->first_payload;
+ k32_frame->last_payload = frame->last_payload;
+ k32_frame->first_stripe_index = frame->first_stripe_index;
+ k32_frame->last_stripe_index = frame->last_stripe_index;
+ k32_frame->stripe_info_offset = frame->stripe_info_offset;
+ k32_frame->stripe_info = frame->stripe_info;
+ k32_frame->batch_info.batch_mode = frame->batch_info.batch_mode;
+ k32_frame->batch_info.batch_size = frame->batch_info.batch_size;
+ k32_frame->batch_info.cont_idx = frame->batch_info.cont_idx;
+ for (i = 0; i < MAX_PLANES; i++)
+ k32_frame->batch_info.intra_plane_offset[i] =
+ frame->batch_info.intra_plane_offset[i];
+ k32_frame->batch_info.pick_preview_idx =
+ frame->batch_info.pick_preview_idx;
+}
+
+static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct cpp_device *cpp_dev = NULL;
+
+ int32_t rc = 0;
+ struct msm_camera_v4l2_ioctl_t kp_ioctl;
+ struct msm_camera_v4l2_ioctl32_t up32_ioctl;
+ struct msm_cpp_clock_settings_t clock_settings;
+ struct msm_pproc_queue_buf_info k_queue_buf;
+ struct msm_cpp_stream_buff_info_t k_cpp_buff_info;
+ struct msm_cpp_frame_info32_t k32_frame_info;
+ struct msm_cpp_frame_info_t k64_frame_info;
+ uint32_t identity_k = 0;
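+ /*
+ * Commands that repoint kp_ioctl.ioctl_ptr at a kernel-side struct
+ * clear this flag so the 32-bit wrapper (whose ioctl_ptr then points
+ * at kernel memory) is not copied back to user space.
+ */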
+ bool is_copytouser_req = true;
+ void __user *up = (void __user *)arg;
+
+ if (sd == NULL) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return -EINVAL;
+ }
+ cpp_dev = v4l2_get_subdevdata(sd);
+ if (!vdev || !cpp_dev) {
+ pr_err("Invalid vdev %pK or cpp_dev %pK structures!",
+ vdev, cpp_dev);
+ return -EINVAL;
+ }
+ mutex_lock(&cpp_dev->mutex);
+ /*
+ * Copy the user space 32 bit ioctl struct into a kernel space
+ * compat struct.
+ */
+ if (copy_from_user(&up32_ioctl, (void __user *)up,
+ sizeof(up32_ioctl))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+
+ /* Convert the 32 bit compat ioctl struct to the 64 bit kernel struct */
+ kp_ioctl.id = up32_ioctl.id;
+ kp_ioctl.len = up32_ioctl.len;
+ kp_ioctl.trans_code = up32_ioctl.trans_code;
+ /* Convert the 32 bit pointer to 64 bit pointer */
+ kp_ioctl.ioctl_ptr = compat_ptr(up32_ioctl.ioctl_ptr);
+ if (!kp_ioctl.ioctl_ptr) {
+ pr_err("%s: Invalid ioctl pointer\n", __func__);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ /*
+ * Convert 32 bit IOCTL ID's to 64 bit IOCTL ID's
+ * except VIDIOC_MSM_CPP_CFG32, which needs special
+ * processing
+ */
+ switch (cmd) {
+ case VIDIOC_MSM_CPP_CFG32:
+ {
+ struct msm_cpp_frame_info32_t k32_frame_info;
+ struct msm_cpp_frame_info_t *cpp_frame = NULL;
+ int32_t *status;
+
+ if (copy_from_user(&k32_frame_info,
+ (void __user *)kp_ioctl.ioctl_ptr,
+ sizeof(k32_frame_info))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+ /* Get the cpp frame pointer */
+ cpp_frame = get_64bit_cpp_frame_from_compat(&kp_ioctl);
+
+ /* Configure the cpp frame */
+ if (cpp_frame) {
+ rc = msm_cpp_cfg_frame(cpp_dev, cpp_frame);
+ /* cpp_frame may already be freed by cfg_frame on error */
+ if (rc >= 0) {
+ k32_frame_info.output_buffer_info[0] =
+ cpp_frame->output_buffer_info[0];
+ k32_frame_info.output_buffer_info[1] =
+ cpp_frame->output_buffer_info[1];
+ }
+ } else {
+ pr_err("%s: Error getting frame\n", __func__);
+ mutex_unlock(&cpp_dev->mutex);
+ rc = -EINVAL;
+ }
+
+ kp_ioctl.trans_code = rc;
+
+ /* Convert the 32 bit pointer to 64 bit pointer */
+ status = compat_ptr(k32_frame_info.status);
+
+ if (copy_to_user((void __user *)status, &rc,
+ sizeof(int32_t)))
+ pr_err("error cannot copy error\n");
+
+ if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
+ &k32_frame_info,
+ sizeof(k32_frame_info))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+
+ cmd = VIDIOC_MSM_CPP_CFG;
+ break;
+ }
+ case VIDIOC_MSM_CPP_GET_HW_INFO32:
+ {
+ struct cpp_hw_info_32_t u32_cpp_hw_info;
+ uint32_t i;
+
+ u32_cpp_hw_info.cpp_hw_version =
+ cpp_dev->hw_info.cpp_hw_version;
+ u32_cpp_hw_info.cpp_hw_caps = cpp_dev->hw_info.cpp_hw_caps;
+ memset(&u32_cpp_hw_info.freq_tbl, 0x00,
+ sizeof(u32_cpp_hw_info.freq_tbl));
+ for (i = 0; i < cpp_dev->hw_info.freq_tbl_count; i++)
+ u32_cpp_hw_info.freq_tbl[i] =
+ cpp_dev->hw_info.freq_tbl[i];
+
+ u32_cpp_hw_info.freq_tbl_count =
+ cpp_dev->hw_info.freq_tbl_count;
+ if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
+ &u32_cpp_hw_info, sizeof(struct cpp_hw_info_32_t))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+
+ cmd = VIDIOC_MSM_CPP_GET_HW_INFO;
+ break;
+ }
+ case VIDIOC_MSM_CPP_LOAD_FIRMWARE32:
+ cmd = VIDIOC_MSM_CPP_LOAD_FIRMWARE;
+ break;
+ case VIDIOC_MSM_CPP_GET_INST_INFO32:
+ {
+ struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+ struct msm_cpp_frame_info32_t inst_info;
+ struct v4l2_fh *vfh = NULL;
+ uint32_t i;
+
+ vfh = file->private_data;
+ memset(&inst_info, 0, sizeof(struct msm_cpp_frame_info32_t));
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].vfh == vfh) {
+ inst_info.inst_id = i;
+ break;
+ }
+ }
+ if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
+ &inst_info, sizeof(struct msm_cpp_frame_info32_t))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+ cmd = VIDIOC_MSM_CPP_GET_INST_INFO;
+ break;
+ }
+ case VIDIOC_MSM_CPP_FLUSH_QUEUE32:
+ cmd = VIDIOC_MSM_CPP_FLUSH_QUEUE;
+ break;
+ case VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO32:
+ case VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO32:
+ case VIDIOC_MSM_CPP_DELETE_STREAM_BUFF32:
+ {
+ compat_uptr_t p;
+ struct msm_cpp_stream_buff_info32_t *u32_cpp_buff_info =
+ (struct msm_cpp_stream_buff_info32_t *)kp_ioctl.ioctl_ptr;
+
+ get_user(k_cpp_buff_info.identity,
+ &u32_cpp_buff_info->identity);
+ get_user(k_cpp_buff_info.num_buffs,
+ &u32_cpp_buff_info->num_buffs);
+ get_user(p, &u32_cpp_buff_info->buffer_info);
+ k_cpp_buff_info.buffer_info = compat_ptr(p);
+
+ kp_ioctl.ioctl_ptr = (void *)&k_cpp_buff_info;
+ if (is_compat_task()) {
+ if (kp_ioctl.len != sizeof(
+ struct msm_cpp_stream_buff_info32_t)) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ kp_ioctl.len =
+ sizeof(struct msm_cpp_stream_buff_info_t);
+ }
+ is_copytouser_req = false;
+ if (cmd == VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO32)
+ cmd = VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO;
+ else if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF32)
+ cmd = VIDIOC_MSM_CPP_DELETE_STREAM_BUFF;
+ else
+ cmd = VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO;
+ break;
+ }
+ case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO32: {
+ uint32_t *identity_u = (uint32_t *)kp_ioctl.ioctl_ptr;
+
+ get_user(identity_k, identity_u);
+ kp_ioctl.ioctl_ptr = (void *)&identity_k;
+ kp_ioctl.len = sizeof(uint32_t);
+ is_copytouser_req = false;
+ cmd = VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO;
+ break;
+ }
+ case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD32:
+ {
+ struct msm_device_queue *queue = &cpp_dev->eventData_q;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_cpp_frame_info_t *process_frame;
+ struct msm_cpp_frame_info32_t k32_process_frame;
+
+ CPP_DBG("VIDIOC_MSM_CPP_GET_EVENTPAYLOAD\n");
+ event_qcmd = msm_dequeue(queue, list_eventdata, POP_FRONT);
+ if (!event_qcmd) {
+ pr_err("no queue cmd available");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ process_frame = event_qcmd->command;
+
+ memset(&k32_process_frame, 0, sizeof(k32_process_frame));
+ get_compat_frame_from_64bit(process_frame, &k32_process_frame);
+
+ CPP_DBG("fid %d\n", process_frame->frame_id);
+ if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
+ &k32_process_frame,
+ sizeof(struct msm_cpp_frame_info32_t))) {
+ kfree(process_frame->cpp_cmd_msg);
+ kfree(process_frame);
+ kfree(event_qcmd);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+
+ kfree(process_frame->cpp_cmd_msg);
+ kfree(process_frame);
+ kfree(event_qcmd);
+ cmd = VIDIOC_MSM_CPP_GET_EVENTPAYLOAD;
+ break;
+ }
+ case VIDIOC_MSM_CPP_SET_CLOCK32:
+ {
+ struct msm_cpp_clock_settings32_t *clock_settings32 =
+ (struct msm_cpp_clock_settings32_t *)kp_ioctl.ioctl_ptr;
+ get_user(clock_settings.clock_rate,
+ &clock_settings32->clock_rate);
+ get_user(clock_settings.avg, &clock_settings32->avg);
+ get_user(clock_settings.inst, &clock_settings32->inst);
+ kp_ioctl.ioctl_ptr = (void *)&clock_settings;
+ if (is_compat_task()) {
+ if (kp_ioctl.len != sizeof(
+ struct msm_cpp_clock_settings32_t)) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ kp_ioctl.len = sizeof(struct msm_cpp_clock_settings_t);
+ }
+ is_copytouser_req = false;
+ cmd = VIDIOC_MSM_CPP_SET_CLOCK;
+ break;
+ }
+ case VIDIOC_MSM_CPP_QUEUE_BUF32:
+ {
+ struct msm_pproc_queue_buf_info32_t *u32_queue_buf =
+ (struct msm_pproc_queue_buf_info32_t *)kp_ioctl.ioctl_ptr;
+
+ get_user(k_queue_buf.is_buf_dirty,
+ &u32_queue_buf->is_buf_dirty);
+ get_user(k_queue_buf.buff_mgr_info.session_id,
+ &u32_queue_buf->buff_mgr_info.session_id);
+ get_user(k_queue_buf.buff_mgr_info.stream_id,
+ &u32_queue_buf->buff_mgr_info.stream_id);
+ get_user(k_queue_buf.buff_mgr_info.frame_id,
+ &u32_queue_buf->buff_mgr_info.frame_id);
+ get_user(k_queue_buf.buff_mgr_info.index,
+ &u32_queue_buf->buff_mgr_info.index);
+ get_user(k_queue_buf.buff_mgr_info.timestamp.tv_sec,
+ &u32_queue_buf->buff_mgr_info.timestamp.tv_sec);
+ get_user(k_queue_buf.buff_mgr_info.timestamp.tv_usec,
+ &u32_queue_buf->buff_mgr_info.timestamp.tv_usec);
+
+ kp_ioctl.ioctl_ptr = (void *)&k_queue_buf;
+ kp_ioctl.len = sizeof(struct msm_pproc_queue_buf_info);
+ is_copytouser_req = false;
+ cmd = VIDIOC_MSM_CPP_QUEUE_BUF;
+ break;
+ }
+ case VIDIOC_MSM_CPP_POP_STREAM_BUFFER32:
+ {
+ if (kp_ioctl.len != sizeof(struct msm_cpp_frame_info32_t)) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ kp_ioctl.len = sizeof(struct msm_cpp_frame_info_t);
+
+ if (copy_from_user(&k32_frame_info,
+ (void __user *)kp_ioctl.ioctl_ptr,
+ sizeof(k32_frame_info))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+
+ memset(&k64_frame_info, 0, sizeof(k64_frame_info));
+ k64_frame_info.identity = k32_frame_info.identity;
+ k64_frame_info.frame_id = k32_frame_info.frame_id;
+
+ kp_ioctl.ioctl_ptr = (void *)&k64_frame_info;
+
+ is_copytouser_req = false;
+ cmd = VIDIOC_MSM_CPP_POP_STREAM_BUFFER;
+ break;
+ }
+ case VIDIOC_MSM_CPP_IOMMU_ATTACH32:
+ cmd = VIDIOC_MSM_CPP_IOMMU_ATTACH;
+ break;
+ case VIDIOC_MSM_CPP_IOMMU_DETACH32:
+ cmd = VIDIOC_MSM_CPP_IOMMU_DETACH;
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ break;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ break;
+ case MSM_SD_SHUTDOWN:
+ cmd = MSM_SD_SHUTDOWN;
+ break;
+ default:
+ pr_err_ratelimited("%s: unsupported compat type :%x LOAD %lu\n",
+ __func__, cmd, VIDIOC_MSM_CPP_LOAD_FIRMWARE);
+ break;
+ }
+
+ mutex_unlock(&cpp_dev->mutex);
+ switch (cmd) {
+ case VIDIOC_MSM_CPP_LOAD_FIRMWARE:
+ case VIDIOC_MSM_CPP_FLUSH_QUEUE:
+ case VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO:
+ case VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO:
+ case VIDIOC_MSM_CPP_DELETE_STREAM_BUFF:
+ case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO:
+ case VIDIOC_MSM_CPP_SET_CLOCK:
+ case VIDIOC_MSM_CPP_QUEUE_BUF:
+ case VIDIOC_MSM_CPP_POP_STREAM_BUFFER:
+ case VIDIOC_MSM_CPP_IOMMU_ATTACH:
+ case VIDIOC_MSM_CPP_IOMMU_DETACH:
+ case MSM_SD_SHUTDOWN:
+ rc = v4l2_subdev_call(sd, core, ioctl, cmd, &kp_ioctl);
+ break;
+ case VIDIOC_MSM_CPP_GET_HW_INFO:
+ case VIDIOC_MSM_CPP_CFG:
+ case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD:
+ case VIDIOC_MSM_CPP_GET_INST_INFO:
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ break;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ break;
+ default:
+ pr_err_ratelimited("%s: unsupported compat type :%d\n",
+ __func__, cmd);
+ break;
+ }
+
+ if (is_copytouser_req) {
+ up32_ioctl.id = kp_ioctl.id;
+ up32_ioctl.len = kp_ioctl.len;
+ up32_ioctl.trans_code = kp_ioctl.trans_code;
+ up32_ioctl.ioctl_ptr = ptr_to_compat(kp_ioctl.ioctl_ptr);
+
+ if (copy_to_user((void __user *)up, &up32_ioctl,
+ sizeof(up32_ioctl)))
+ return -EFAULT;
+ }
+
+ return rc;
+}
+#endif
+
+struct v4l2_file_operations msm_cpp_v4l2_subdev_fops = {
+ .unlocked_ioctl = msm_cpp_subdev_fops_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = msm_cpp_subdev_fops_compat_ioctl,
+#endif
+};
+static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev,
+ bool status)
+{
+ int rc = 0;
+ int value = 0;
+
+ if (!cpp_dev) {
+ pr_err("%s: cpp device invalid\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (cpp_dev->camss_cpp_base) {
+ value = msm_camera_io_r(cpp_dev->camss_cpp_base);
+ pr_debug("value from camss cpp %x, status %d\n", value, status);
+ if (status) {
+ value &= CPP_GDSCR_SW_COLLAPSE_ENABLE;
+ value |= CPP_GDSCR_HW_CONTROL_ENABLE;
+ } else {
+ value |= CPP_GDSCR_HW_CONTROL_DISABLE;
+ value &= CPP_GDSCR_SW_COLLAPSE_DISABLE;
+ }
+ pr_debug("value %x after camss cpp mask\n", value);
+ msm_camera_io_w(value, cpp_dev->camss_cpp_base);
+ }
+end:
+ return rc;
+}
+static void msm_cpp_set_vbif_reg_values(struct cpp_device *cpp_dev)
+{
+ int i, reg, val;
+ const u32 *vbif_qos_arr = NULL;
+ int vbif_qos_len = 0;
+ struct platform_device *pdev;
+
+ pr_debug("%s\n", __func__);
+ if (cpp_dev != NULL) {
+ pdev = cpp_dev->pdev;
+ vbif_qos_arr = of_get_property(pdev->dev.of_node,
+ "qcom,vbif-qos-setting",
+ &vbif_qos_len);
+ if (!vbif_qos_arr || (vbif_qos_len & 1)) {
+ pr_debug("%s: vbif qos setting not found\n",
+ __func__);
+ vbif_qos_len = 0;
+ }
+ vbif_qos_len /= sizeof(u32);
+ pr_debug("%s: vbif_qos_len %d\n", __func__, vbif_qos_len);
+ if (cpp_dev->vbif_base) {
+ for (i = 0; i < vbif_qos_len; i = i+2) {
+ reg = be32_to_cpu(vbif_qos_arr[i]);
+ val = be32_to_cpu(vbif_qos_arr[i+1]);
+ pr_debug("%s: DT: offset %x, val %x\n",
+ __func__, reg, val);
+ pr_debug("%s: before write to register 0x%x\n",
+ __func__, msm_camera_io_r(
+ cpp_dev->vbif_base + reg));
+ msm_camera_io_w(val, cpp_dev->vbif_base + reg);
+ pr_debug("%s: after write to register 0x%x\n",
+ __func__, msm_camera_io_r(
+ cpp_dev->vbif_base + reg));
+ }
+ }
+ }
+}
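+
+/*
+ * For reference, the DT property consumed above is a flat list of
+ * <register-offset value> pairs applied to the CPP VBIF; a hypothetical
+ * fragment (offsets and values purely illustrative) would look like:
+ *
+ *	qcom,vbif-qos-setting = <0x20 0x10000000>,
+ *				<0x24 0x10000000>;
+ */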
+
+static int msm_cpp_buffer_private_ops(struct cpp_device *cpp_dev,
+ uint32_t buff_mgr_ops, uint32_t id, void *arg) {
+
+ int32_t rc = 0;
+
+ switch (id) {
+ case MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX: {
+ struct msm_camera_private_ioctl_arg ioctl_arg;
+ struct msm_buf_mngr_info *buff_mgr_info =
+ (struct msm_buf_mngr_info *)arg;
+
+ ioctl_arg.id = MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX;
+ ioctl_arg.size = sizeof(struct msm_buf_mngr_info);
+ ioctl_arg.result = 0;
+ ioctl_arg.reserved = 0x0;
+ ioctl_arg.ioctl_ptr = 0x0;
+ MSM_CAM_GET_IOCTL_ARG_PTR(&ioctl_arg.ioctl_ptr, &buff_mgr_info,
+ sizeof(void *));
+ rc = cpp_dev->buf_mgr_ops.msm_cam_buf_mgr_ops(buff_mgr_ops,
+ &ioctl_arg);
+		/* Use VIDIOC_MSM_BUF_MNGR_GET_BUF if get-buf by index fails */
+ if (rc < 0) {
+			pr_err_ratelimited("get_buf_by_idx for %d err %d, use get_buf\n",
+				buff_mgr_info->index, rc);
+ rc = cpp_dev->buf_mgr_ops.msm_cam_buf_mgr_ops(
+ VIDIOC_MSM_BUF_MNGR_GET_BUF, buff_mgr_info);
+ }
+ break;
+ }
+ default: {
+ pr_err("unsupported buffer manager ioctl\n");
+ break;
+ }
+ }
+ return rc;
+}
+
+static int cpp_probe(struct platform_device *pdev)
+{
+ struct cpp_device *cpp_dev;
+ int rc = 0;
+
+ CPP_DBG("E");
+
+ cpp_dev = kzalloc(sizeof(struct cpp_device), GFP_KERNEL);
+ if (!cpp_dev)
+ return -ENOMEM;
+
+ v4l2_subdev_init(&cpp_dev->msm_sd.sd, &msm_cpp_subdev_ops);
+ cpp_dev->msm_sd.sd.internal_ops = &msm_cpp_internal_ops;
+ snprintf(cpp_dev->msm_sd.sd.name, ARRAY_SIZE(cpp_dev->msm_sd.sd.name),
+ "cpp");
+ cpp_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ cpp_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+ v4l2_set_subdevdata(&cpp_dev->msm_sd.sd, cpp_dev);
+ platform_set_drvdata(pdev, &cpp_dev->msm_sd.sd);
+ mutex_init(&cpp_dev->mutex);
+ spin_lock_init(&cpp_dev->tasklet_lock);
+ spin_lock_init(&cpp_timer.data.processed_frame_lock);
+
+ cpp_dev->pdev = pdev;
+ memset(&cpp_vbif, 0, sizeof(struct msm_cpp_vbif_data));
+ cpp_dev->vbif_data = &cpp_vbif;
+
+ cpp_dev->camss_cpp_base =
+ msm_camera_get_reg_base(pdev, "camss_cpp", true);
+ if (!cpp_dev->camss_cpp_base) {
+ rc = -ENOMEM;
+ pr_err("failed to get camss_cpp_base\n");
+ goto camss_cpp_base_failed;
+ }
+
+ cpp_dev->base =
+ msm_camera_get_reg_base(pdev, "cpp", true);
+ if (!cpp_dev->base) {
+ rc = -ENOMEM;
+ pr_err("failed to get cpp_base\n");
+ goto cpp_base_failed;
+ }
+
+ cpp_dev->vbif_base =
+ msm_camera_get_reg_base(pdev, "cpp_vbif", false);
+ if (!cpp_dev->vbif_base) {
+ rc = -ENOMEM;
+ pr_err("failed to get vbif_base\n");
+ goto vbif_base_failed;
+ }
+
+ cpp_dev->cpp_hw_base =
+ msm_camera_get_reg_base(pdev, "cpp_hw", true);
+ if (!cpp_dev->cpp_hw_base) {
+ rc = -ENOMEM;
+ pr_err("failed to get cpp_hw_base\n");
+ goto cpp_hw_base_failed;
+ }
+
+ cpp_dev->irq = msm_camera_get_irq(pdev, "cpp");
+ if (!cpp_dev->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto mem_err;
+ }
+
+ rc = msm_camera_get_clk_info(pdev, &cpp_dev->clk_info,
+ &cpp_dev->cpp_clk, &cpp_dev->num_clks);
+ if (rc < 0) {
+ pr_err("%s: failed to get the clocks\n", __func__);
+ goto mem_err;
+ }
+
+ rc = msm_camera_get_regulator_info(pdev, &cpp_dev->cpp_vdd,
+ &cpp_dev->num_reg);
+ if (rc < 0) {
+ pr_err("%s: failed to get the regulators\n", __func__);
+ goto get_reg_err;
+ }
+
+ msm_cpp_fetch_dt_params(cpp_dev);
+
+ rc = msm_cpp_read_payload_params_from_dt(cpp_dev);
+ if (rc)
+ goto cpp_probe_init_error;
+
+ if (cpp_dev->bus_master_flag)
+ rc = msm_cpp_init_bandwidth_mgr(cpp_dev);
+ else
+ rc = msm_isp_init_bandwidth_mgr(NULL, ISP_CPP);
+ if (rc < 0) {
+ pr_err("%s: Bandwidth registration Failed!\n", __func__);
+ goto cpp_probe_init_error;
+ }
+
+ cpp_dev->state = CPP_STATE_BOOT;
+ rc = cpp_init_hardware(cpp_dev);
+ if (rc < 0)
+ goto bus_de_init;
+
+ media_entity_init(&cpp_dev->msm_sd.sd.entity, 0, NULL, 0);
+ cpp_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ cpp_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_CPP;
+ cpp_dev->msm_sd.sd.entity.name = pdev->name;
+ cpp_dev->msm_sd.close_seq = MSM_SD_CLOSE_3RD_CATEGORY;
+ msm_sd_register(&cpp_dev->msm_sd);
+ msm_cam_copy_v4l2_subdev_fops(&msm_cpp_v4l2_subdev_fops);
+ msm_cpp_v4l2_subdev_fops.unlocked_ioctl = msm_cpp_subdev_fops_ioctl;
+#ifdef CONFIG_COMPAT
+ msm_cpp_v4l2_subdev_fops.compat_ioctl32 =
+ msm_cpp_subdev_fops_compat_ioctl;
+#endif
+
+ cpp_dev->msm_sd.sd.devnode->fops = &msm_cpp_v4l2_subdev_fops;
+ cpp_dev->msm_sd.sd.entity.revision = cpp_dev->msm_sd.sd.devnode->num;
+
+ msm_camera_io_w(0x0, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_MASK);
+ msm_camera_io_w(0xFFFF, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_CLR);
+ msm_camera_io_w(0x80000000, cpp_dev->base + 0xF0);
+ cpp_release_hardware(cpp_dev);
+ cpp_dev->state = CPP_STATE_OFF;
+ msm_cpp_enable_debugfs(cpp_dev);
+
+ msm_queue_init(&cpp_dev->eventData_q, "eventdata");
+ msm_queue_init(&cpp_dev->processing_q, "frame");
+ INIT_LIST_HEAD(&cpp_dev->tasklet_q);
+ tasklet_init(&cpp_dev->cpp_tasklet, msm_cpp_do_tasklet,
+ (unsigned long)cpp_dev);
+ cpp_dev->timer_wq = create_workqueue("msm_cpp_workqueue");
+ cpp_dev->work = kmalloc(sizeof(struct msm_cpp_work_t),
+ GFP_KERNEL);
+
+ if (!cpp_dev->work) {
+		pr_err("not enough memory\n");
+ rc = -ENOMEM;
+ goto bus_de_init;
+ }
+
+ INIT_WORK((struct work_struct *)cpp_dev->work, msm_cpp_do_timeout_work);
+ cpp_dev->cpp_open_cnt = 0;
+ cpp_dev->is_firmware_loaded = 0;
+ cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
+ cpp_timer.data.cpp_dev = cpp_dev;
+ atomic_set(&cpp_timer.used, 0);
+ /* install timer for cpp timeout */
+ CPP_DBG("Installing cpp_timer\n");
+ setup_timer(&cpp_timer.cpp_timer,
+ cpp_timer_callback, (unsigned long)&cpp_timer);
+ cpp_dev->fw_name_bin = NULL;
+ cpp_dev->max_timeout_trial_cnt = MSM_CPP_MAX_TIMEOUT_TRIAL;
+ if (rc == 0)
+ CPP_DBG("SUCCESS.");
+ else
+ CPP_DBG("FAILED.");
+ return rc;
+
+bus_de_init:
+ if (cpp_dev->bus_master_flag)
+ msm_cpp_deinit_bandwidth_mgr(cpp_dev);
+ else
+ msm_isp_deinit_bandwidth_mgr(ISP_CPP);
+cpp_probe_init_error:
+ media_entity_cleanup(&cpp_dev->msm_sd.sd.entity);
+ msm_sd_unregister(&cpp_dev->msm_sd);
+get_reg_err:
+ msm_camera_put_clk_info(pdev, &cpp_dev->clk_info, &cpp_dev->cpp_clk,
+ cpp_dev->num_clks);
+mem_err:
+ msm_camera_put_reg_base(pdev, cpp_dev->cpp_hw_base, "cpp_hw", true);
+cpp_hw_base_failed:
+ msm_camera_put_reg_base(pdev, cpp_dev->vbif_base, "cpp_vbif", false);
+vbif_base_failed:
+ msm_camera_put_reg_base(pdev, cpp_dev->base, "cpp", true);
+cpp_base_failed:
+ msm_camera_put_reg_base(pdev, cpp_dev->camss_cpp_base,
+ "camss_cpp", true);
+camss_cpp_base_failed:
+ kfree(cpp_dev);
+ return rc;
+}
+
+static const struct of_device_id msm_cpp_dt_match[] = {
+ {.compatible = "qcom,cpp"},
+ {}
+};
+
+static int cpp_device_remove(struct platform_device *dev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(dev);
+ struct cpp_device *cpp_dev;
+
+ if (!sd) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return 0;
+ }
+
+ cpp_dev = (struct cpp_device *)v4l2_get_subdevdata(sd);
+ if (!cpp_dev) {
+ pr_err("%s: cpp device is NULL\n", __func__);
+ return 0;
+ }
+ if (cpp_dev->fw) {
+ release_firmware(cpp_dev->fw);
+ cpp_dev->fw = NULL;
+ }
+ if (cpp_dev->bus_master_flag)
+ msm_cpp_deinit_bandwidth_mgr(cpp_dev);
+ else
+ msm_isp_deinit_bandwidth_mgr(ISP_CPP);
+ msm_sd_unregister(&cpp_dev->msm_sd);
+ msm_camera_put_reg_base(dev, cpp_dev->camss_cpp_base,
+ "camss_cpp", true);
+ msm_camera_put_reg_base(dev, cpp_dev->base, "cpp", true);
+ msm_camera_put_reg_base(dev, cpp_dev->vbif_base, "cpp_vbif", false);
+ msm_camera_put_reg_base(dev, cpp_dev->cpp_hw_base, "cpp_hw", true);
+ msm_camera_put_regulators(dev, &cpp_dev->cpp_vdd,
+ cpp_dev->num_reg);
+ msm_camera_put_clk_info(dev, &cpp_dev->clk_info,
+ &cpp_dev->cpp_clk, cpp_dev->num_clks);
+ msm_camera_unregister_bus_client(CAM_BUS_CLIENT_CPP);
+ mutex_destroy(&cpp_dev->mutex);
+ kfree(cpp_dev->work);
+ destroy_workqueue(cpp_dev->timer_wq);
+ kfree(cpp_dev->cpp_clk);
+ kfree(cpp_dev);
+ return 0;
+}
+
+static struct platform_driver cpp_driver = {
+ .probe = cpp_probe,
+ .remove = cpp_device_remove,
+ .driver = {
+ .name = MSM_CPP_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cpp_dt_match,
+ },
+};
+
+static int __init msm_cpp_init_module(void)
+{
+ return platform_driver_register(&cpp_driver);
+}
+
+static void __exit msm_cpp_exit_module(void)
+{
+ platform_driver_unregister(&cpp_driver);
+}
+
+static int msm_cpp_debugfs_error_s(void *data, u64 val)
+{
+	pr_err("setting error inducement\n");
+ induce_error = val;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cpp_debugfs_error, NULL,
+ msm_cpp_debugfs_error_s, "%llu\n");
+
+static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev)
+{
+ struct dentry *debugfs_base;
+
+ debugfs_base = debugfs_create_dir("msm_cpp", NULL);
+ if (!debugfs_base)
+ return -ENOMEM;
+
+ if (!debugfs_create_file("error", S_IRUGO | S_IWUSR, debugfs_base,
+ (void *)cpp_dev, &cpp_debugfs_error))
+ return -ENOMEM;
+
+ return 0;
+}
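+
+/*
+ * The node created above lets user space inject an error code into the CPP
+ * driver; with debugfs mounted in the usual place, an illustrative use is:
+ *
+ *	echo 1 > /sys/kernel/debug/msm_cpp/error
+ *
+ * which lands in induce_error via msm_cpp_debugfs_error_s().
+ */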
+
+module_init(msm_cpp_init_module);
+module_exit(msm_cpp_exit_module);
+MODULE_DESCRIPTION("MSM CPP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.h b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.h
new file mode 100644
index 000000000000..95ba9a0d6b78
--- /dev/null
+++ b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.h
@@ -0,0 +1,294 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CPP_H__
+#define __MSM_CPP_H__
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <media/v4l2-subdev.h>
+#include "msm_generic_buf_mgr.h"
+#include "msm_sd.h"
+#include "cam_soc_api.h"
+#include "cam_hw_ops.h"
+#include <media/msmb_pproc.h>
+
+/* hw version info:
+ * 31:28 Major version
+ * 27:16 Minor version
+ * 15:0 Revision bits
+ */
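+/*
+ * Illustrative decode of that layout (helpers shown for reference only and
+ * not used by the driver):
+ *	major    = (hw_version >> 28) & 0xF;
+ *	minor    = (hw_version >> 16) & 0xFFF;
+ *	revision = hw_version & 0xFFFF;
+ * e.g. CPP_HW_VERSION_5_1_0 (0x50010000) decodes to major 5, minor 1.
+ */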
+#define CPP_HW_VERSION_1_1_0 0x10010000
+#define CPP_HW_VERSION_1_1_1 0x10010001
+#define CPP_HW_VERSION_2_0_0 0x20000000
+#define CPP_HW_VERSION_4_0_0 0x40000000
+#define CPP_HW_VERSION_4_1_0 0x40010000
+#define CPP_HW_VERSION_5_0_0 0x50000000
+#define CPP_HW_VERSION_5_1_0 0x50010000
+
+#define VBIF_VERSION_2_3_0 0x20030000
+
+#define MAX_ACTIVE_CPP_INSTANCE 8
+#define MAX_CPP_PROCESSING_FRAME 2
+#define MAX_CPP_V4l2_EVENTS 30
+
+#define MSM_CPP_MICRO_BASE 0x4000
+#define MSM_CPP_MICRO_HW_VERSION 0x0000
+#define MSM_CPP_MICRO_IRQGEN_STAT 0x0004
+#define MSM_CPP_MICRO_IRQGEN_CLR 0x0008
+#define MSM_CPP_MICRO_IRQGEN_MASK 0x000C
+#define MSM_CPP_MICRO_FIFO_TX_DATA 0x0010
+#define MSM_CPP_MICRO_FIFO_TX_STAT 0x0014
+#define MSM_CPP_MICRO_FIFO_RX_DATA 0x0018
+#define MSM_CPP_MICRO_FIFO_RX_STAT 0x001C
+#define MSM_CPP_MICRO_BOOT_START 0x0020
+#define MSM_CPP_MICRO_BOOT_LDORG 0x0024
+#define MSM_CPP_MICRO_CLKEN_CTL 0x0030
+
+#define MSM_CPP_CMD_GET_BOOTLOADER_VER 0x1
+#define MSM_CPP_CMD_FW_LOAD 0x2
+#define MSM_CPP_CMD_EXEC_JUMP 0x3
+#define MSM_CPP_CMD_RESET_HW 0x5
+#define MSM_CPP_CMD_PROCESS_FRAME 0x6
+#define MSM_CPP_CMD_FLUSH_STREAM 0x7
+#define MSM_CPP_CMD_CFG_MEM_PARAM 0x8
+#define MSM_CPP_CMD_ERROR_REQUEST 0x9
+#define MSM_CPP_CMD_GET_STATUS 0xA
+#define MSM_CPP_CMD_GET_FW_VER 0xB
+#define MSM_CPP_CMD_GROUP_BUFFER_DUP 0x12
+#define MSM_CPP_CMD_GROUP_BUFFER 0xF
+
+#define MSM_CPP_MSG_ID_CMD 0x3E646D63
+#define MSM_CPP_MSG_ID_OK 0x0A0A4B4F
+#define MSM_CPP_MSG_ID_TRAILER 0xABCDEFAA
+
+#define MSM_CPP_MSG_ID_JUMP_ACK 0x00000001
+#define MSM_CPP_MSG_ID_FRAME_ACK 0x00000002
+#define MSM_CPP_MSG_ID_FRAME_NACK 0x00000003
+#define MSM_CPP_MSG_ID_FLUSH_ACK 0x00000004
+#define MSM_CPP_MSG_ID_FLUSH_NACK 0x00000005
+#define MSM_CPP_MSG_ID_CFG_MEM_ACK 0x00000006
+#define MSM_CPP_MSG_ID_CFG_MEM_INV 0x00000007
+#define MSM_CPP_MSG_ID_ERROR_STATUS 0x00000008
+#define MSM_CPP_MSG_ID_INVALID_CMD 0x00000009
+#define MSM_CPP_MSG_ID_GEN_STATUS 0x0000000A
+#define MSM_CPP_MSG_ID_FLUSHED 0x0000000B
+#define MSM_CPP_MSG_ID_FW_VER 0x0000000C
+
+#define MSM_CPP_JUMP_ADDRESS 0x20
+#define MSM_CPP_START_ADDRESS 0x0
+#define MSM_CPP_END_ADDRESS 0x3F00
+
+#define MSM_CPP_POLL_RETRIES 200
+#define MSM_CPP_TASKLETQ_SIZE 16
+#define MSM_CPP_TX_FIFO_LEVEL 16
+#define MSM_CPP_RX_FIFO_LEVEL 512
+
+enum cpp_vbif_error {
+ CPP_VBIF_ERROR_HANG,
+ CPP_VBIF_ERROR_MAX,
+};
+
+enum cpp_vbif_client {
+ VBIF_CLIENT_CPP,
+ VBIF_CLIENT_FD,
+ VBIF_CLIENT_MAX,
+};
+
+struct msm_cpp_vbif_data {
+ int (*err_handler[VBIF_CLIENT_MAX])(void *, uint32_t);
+ void *dev[VBIF_CLIENT_MAX];
+};
+
+struct cpp_subscribe_info {
+ struct v4l2_fh *vfh;
+ uint32_t active;
+};
+
+enum cpp_state {
+ CPP_STATE_BOOT,
+ CPP_STATE_IDLE,
+ CPP_STATE_ACTIVE,
+ CPP_STATE_OFF,
+};
+
+enum cpp_iommu_state {
+ CPP_IOMMU_STATE_DETACHED,
+ CPP_IOMMU_STATE_ATTACHED,
+};
+
+enum msm_queue {
+ MSM_CAM_Q_CTRL, /* control command or control command status */
+ MSM_CAM_Q_VFE_EVT, /* adsp event */
+ MSM_CAM_Q_VFE_MSG, /* adsp message */
+ MSM_CAM_Q_V4L2_REQ, /* v4l2 request */
+ MSM_CAM_Q_VPE_MSG, /* vpe message */
+ MSM_CAM_Q_PP_MSG, /* pp message */
+};
+
+struct msm_queue_cmd {
+ struct list_head list_config;
+ struct list_head list_control;
+ struct list_head list_frame;
+ struct list_head list_pict;
+ struct list_head list_vpe_frame;
+ struct list_head list_eventdata;
+ enum msm_queue type;
+ void *command;
+ atomic_t on_heap;
+ struct timespec ts;
+ uint32_t error_code;
+ uint32_t trans_code;
+};
+
+struct msm_device_queue {
+ struct list_head list;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ int max;
+ int len;
+ const char *name;
+};
+
+struct msm_cpp_tasklet_queue_cmd {
+ struct list_head list;
+ uint32_t irq_status;
+ uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
+ uint32_t tx_level;
+ uint8_t cmd_used;
+};
+
+struct msm_cpp_buffer_map_info_t {
+ unsigned long len;
+ dma_addr_t phy_addr;
+ int buf_fd;
+ struct msm_cpp_buffer_info_t buff_info;
+};
+
+struct msm_cpp_buffer_map_list_t {
+ struct msm_cpp_buffer_map_info_t map_info;
+ struct list_head entry;
+};
+
+struct msm_cpp_buff_queue_info_t {
+ uint32_t used;
+ uint16_t session_id;
+ uint16_t stream_id;
+ struct list_head vb2_buff_head;
+ struct list_head native_buff_head;
+};
+
+struct msm_cpp_work_t {
+ struct work_struct my_work;
+ struct cpp_device *cpp_dev;
+};
+
+struct msm_cpp_payload_params {
+ uint32_t stripe_base;
+ uint32_t stripe_size;
+ uint32_t plane_base;
+ uint32_t plane_size;
+
+ /* offsets for stripe/plane pointers in payload */
+ uint32_t rd_pntr_off;
+ uint32_t wr_0_pntr_off;
+ uint32_t rd_ref_pntr_off;
+ uint32_t wr_ref_pntr_off;
+ uint32_t wr_0_meta_data_wr_pntr_off;
+ uint32_t fe_mmu_pf_ptr_off;
+ uint32_t ref_fe_mmu_pf_ptr_off;
+ uint32_t we_mmu_pf_ptr_off;
+ uint32_t dup_we_mmu_pf_ptr_off;
+ uint32_t ref_we_mmu_pf_ptr_off;
+ uint32_t set_group_buffer_len;
+ uint32_t dup_frame_indicator_off;
+};
+
+struct cpp_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct v4l2_subdev subdev;
+ struct resource *irq;
+ void __iomem *vbif_base;
+ void __iomem *base;
+ void __iomem *cpp_hw_base;
+ void __iomem *camss_cpp_base;
+ struct clk **cpp_clk;
+ struct msm_cam_clk_info *clk_info;
+ size_t num_clks;
+ struct msm_cam_regulator *cpp_vdd;
+ int num_reg;
+ struct mutex mutex;
+ enum cpp_state state;
+ enum cpp_iommu_state iommu_state;
+ uint8_t is_firmware_loaded;
+ char *fw_name_bin;
+ const struct firmware *fw;
+ struct workqueue_struct *timer_wq;
+ struct msm_cpp_work_t *work;
+ uint32_t fw_version;
+ uint8_t stream_cnt;
+ uint8_t timeout_trial_cnt;
+ uint8_t max_timeout_trial_cnt;
+
+ int domain_num;
+ struct iommu_domain *domain;
+ struct device *iommu_ctx;
+ uint32_t num_clk;
+ uint32_t min_clk_rate;
+
+ int iommu_hdl;
+ /* Reusing proven tasklet from msm isp */
+ atomic_t irq_cnt;
+ uint8_t taskletq_idx;
+ spinlock_t tasklet_lock;
+ struct list_head tasklet_q;
+ struct tasklet_struct cpp_tasklet;
+ struct msm_cpp_tasklet_queue_cmd
+ tasklet_queue_cmd[MSM_CPP_TASKLETQ_SIZE];
+
+ struct cpp_subscribe_info cpp_subscribe_list[MAX_ACTIVE_CPP_INSTANCE];
+ uint32_t cpp_open_cnt;
+ struct cpp_hw_info hw_info;
+
+ struct msm_device_queue eventData_q; /* V4L2 Event Payload Queue */
+
+ /* Processing Queue
+ * store frame info for frames sent to microcontroller
+ */
+ struct msm_device_queue processing_q;
+
+ struct msm_cpp_buff_queue_info_t *buff_queue;
+ uint32_t num_buffq;
+ struct msm_cam_buf_mgr_req_ops buf_mgr_ops;
+
+ uint32_t bus_client;
+ uint32_t bus_idx;
+ uint32_t bus_master_flag;
+ struct msm_cpp_payload_params payload_params;
+ struct msm_cpp_vbif_data *vbif_data;
+};
+
+int msm_cpp_set_micro_clk(struct cpp_device *cpp_dev);
+int msm_update_freq_tbl(struct cpp_device *cpp_dev);
+int msm_cpp_get_clock_index(struct cpp_device *cpp_dev, const char *clk_name);
+long msm_cpp_set_core_clk(struct cpp_device *cpp_dev, long rate, int idx);
+void msm_cpp_fetch_dt_params(struct cpp_device *cpp_dev);
+int msm_cpp_read_payload_params_from_dt(struct cpp_device *cpp_dev);
+void msm_cpp_vbif_register_error_handler(void *dev,
+ enum cpp_vbif_client client,
+ int (*client_vbif_error_handler)(void *, uint32_t));
+
+#endif /* __MSM_CPP_H__ */
diff --git a/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp_soc.c b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp_soc.c
new file mode 100644
index 000000000000..ac85340bcdcf
--- /dev/null
+++ b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp_soc.c
@@ -0,0 +1,251 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "MSM-CPP-SOC %s:%d " fmt, __func__, __LINE__
+
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/delay.h>
+#include <media/msmb_pproc.h>
+#include "msm_cpp.h"
+
+
+#define CPP_DT_READ_U32_ERR(_dev, _key, _str, _ret, _out) { \
+ _key = _str; \
+ _ret = of_property_read_u32(_dev, _key, &_out); \
+ if (_ret) \
+ break; \
+ }
+
+#define CPP_DT_READ_U32(_dev, _str, _out) { \
+ of_property_read_u32(_dev, _str, &_out); \
+ }
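+
+/*
+ * Both helpers are meant to be invoked from inside a do { ... } while (0)
+ * block so that CPP_DT_READ_U32_ERR can break out on the first missing
+ * mandatory key; a minimal usage sketch (key name hypothetical):
+ *
+ *	do {
+ *		CPP_DT_READ_U32_ERR(node, key, "qcom,example-key", rc, out);
+ *	} while (0);
+ *	if (rc)
+ *		pr_err("missing %s\n", key);
+ */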
+
+void msm_cpp_fetch_dt_params(struct cpp_device *cpp_dev)
+{
+ int rc = 0;
+ struct device_node *of_node = cpp_dev->pdev->dev.of_node;
+
+ if (!of_node) {
+ pr_err("%s: invalid params\n", __func__);
+ return;
+ }
+
+ of_property_read_u32(of_node, "cell-index", &cpp_dev->pdev->id);
+
+ rc = of_property_read_u32(of_node, "qcom,min-clock-rate",
+ &cpp_dev->min_clk_rate);
+ if (rc < 0) {
+ pr_debug("min-clk-rate not defined, setting it to 0\n");
+ cpp_dev->min_clk_rate = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,bus-master",
+ &cpp_dev->bus_master_flag);
+ if (rc)
+ cpp_dev->bus_master_flag = 0;
+}
+
+int msm_cpp_get_clock_index(struct cpp_device *cpp_dev, const char *clk_name)
+{
+ uint32_t i = 0;
+
+ for (i = 0; i < cpp_dev->num_clks; i++) {
+ if (!strcmp(clk_name, cpp_dev->clk_info[i].clk_name))
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int cpp_get_clk_freq_tbl(struct clk *clk, struct cpp_hw_info *hw_info,
+ uint32_t min_clk_rate)
+{
+ uint32_t i;
+ uint32_t idx = 0;
+ signed long freq_tbl_entry = 0;
+
+ if ((clk == NULL) || (hw_info == NULL) || (clk->ops == NULL) ||
+ (clk->ops->list_rate == NULL)) {
+ pr_err("Bad parameter\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_FREQ_TBL; i++) {
+ freq_tbl_entry = clk->ops->list_rate(clk, i);
+ pr_debug("entry=%ld\n", freq_tbl_entry);
+ if (freq_tbl_entry >= 0) {
+ if (freq_tbl_entry >= min_clk_rate) {
+ hw_info->freq_tbl[idx++] = freq_tbl_entry;
+ pr_debug("tbl[%d]=%ld\n", idx-1,
+ freq_tbl_entry);
+ }
+ } else {
+ pr_debug("freq table returned invalid entry/end %ld\n",
+ freq_tbl_entry);
+ break;
+ }
+ }
+
+	pr_debug("%s: idx %d\n", __func__, idx);
+ hw_info->freq_tbl_count = idx;
+
+ return 0;
+}
+
+int msm_cpp_set_micro_clk(struct cpp_device *cpp_dev)
+{
+#ifdef ENABLE_CPP_MICRO
+	int msm_micro_iface_idx;
+ int rc;
+
+ msm_micro_iface_idx = msm_cpp_get_clock_index(cpp_dev,
+ "micro_iface_clk");
+ if (msm_micro_iface_idx < 0) {
+ pr_err("Fail to get clock index\n");
+ return -EINVAL;
+ }
+
+
+ rc = msm_clk_reset(cpp_dev->cpp_clk[msm_micro_iface_idx],
+ CLK_RESET_ASSERT);
+ if (rc) {
+ pr_err("%s:micro_iface_clk assert failed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+	/*
+	 * The usleep range below was chosen based on experiments and is
+	 * the smallest value that works. The sleep is needed to leave
+	 * enough time for the microcontroller to reset all its registers.
+	 */
+ usleep_range(1000, 1200);
+
+ rc = msm_clk_reset(cpp_dev->cpp_clk[msm_micro_iface_idx],
+ CLK_RESET_DEASSERT);
+ if (rc) {
+ pr_err("%s:micro_iface_clk de-assert failed\n", __func__);
+ return -EINVAL;
+ }
+	/*
+	 * The usleep range below was chosen based on experiments and is
+	 * the smallest value that works. The sleep is needed to leave
+	 * enough time for the microcontroller to reset all its registers.
+	 */
+ usleep_range(1000, 1200);
+#endif
+ return 0;
+}
+
+int msm_update_freq_tbl(struct cpp_device *cpp_dev)
+{
+	int msm_cpp_core_clk_idx;
+ int rc = 0;
+
+ msm_cpp_core_clk_idx = msm_cpp_get_clock_index(cpp_dev, "cpp_core_clk");
+ if (msm_cpp_core_clk_idx < 0) {
+ pr_err("%s: fail to get clock index\n", __func__);
+ rc = msm_cpp_core_clk_idx;
+ return rc;
+ }
+ rc = cpp_get_clk_freq_tbl(cpp_dev->cpp_clk[msm_cpp_core_clk_idx],
+ &cpp_dev->hw_info, cpp_dev->min_clk_rate);
+ if (rc < 0) {
+ pr_err("%s: fail to get frequency table\n", __func__);
+ return rc;
+ }
+
+ return rc;
+}
+
+long msm_cpp_set_core_clk(struct cpp_device *cpp_dev, long rate, int idx)
+{
+ long rc = 0;
+
+ rc = msm_camera_clk_set_rate(&cpp_dev->pdev->dev,
+ cpp_dev->cpp_clk[idx], rate);
+ if (rc < 0) {
+		pr_err("%s: fail to set core clock rate\n", __func__);
+ return rc;
+ }
+
+ return rc;
+}
+
+int msm_cpp_read_payload_params_from_dt(struct cpp_device *cpp_dev)
+{
+ struct platform_device *pdev = cpp_dev->pdev;
+ struct device_node *fw_info_node = NULL, *dev_node = NULL;
+ char *key = "qcom,cpp-fw-payload-info";
+ struct msm_cpp_payload_params *payload_params;
+ int ret = 0;
+
+ if (!pdev || !pdev->dev.of_node) {
+ pr_err("%s: Invalid platform device/node\n", __func__);
+ ret = -ENODEV;
+ goto no_cpp_node;
+ }
+
+ dev_node = pdev->dev.of_node;
+ fw_info_node = of_find_node_by_name(dev_node, key);
+ if (!fw_info_node) {
+ ret = -ENODEV;
+ goto no_binding;
+ }
+ payload_params = &cpp_dev->payload_params;
+ memset(payload_params, 0x0, sizeof(struct msm_cpp_payload_params));
+
+ do {
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,stripe-base", ret,
+ payload_params->stripe_base);
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,plane-base", ret,
+ payload_params->plane_base);
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,stripe-size", ret,
+ payload_params->stripe_size);
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,plane-size", ret,
+ payload_params->plane_size);
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,fe-ptr-off", ret,
+ payload_params->rd_pntr_off);
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,we-ptr-off", ret,
+ payload_params->wr_0_pntr_off);
+
+ CPP_DT_READ_U32(fw_info_node, "qcom,ref-fe-ptr-off",
+ payload_params->rd_ref_pntr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,ref-we-ptr-off",
+ payload_params->wr_ref_pntr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,we-meta-ptr-off",
+ payload_params->wr_0_meta_data_wr_pntr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,fe-mmu-pf-ptr-off",
+ payload_params->fe_mmu_pf_ptr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,ref-fe-mmu-pf-ptr-off",
+ payload_params->ref_fe_mmu_pf_ptr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,we-mmu-pf-ptr-off",
+ payload_params->we_mmu_pf_ptr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,dup-we-mmu-pf-ptr-off",
+ payload_params->dup_we_mmu_pf_ptr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,ref-we-mmu-pf-ptr-off",
+ payload_params->ref_we_mmu_pf_ptr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,set-group-buffer-len",
+ payload_params->set_group_buffer_len);
+ CPP_DT_READ_U32(fw_info_node, "qcom,dup-frame-indicator-off",
+ payload_params->dup_frame_indicator_off);
+ } while (0);
+
+no_binding:
+ if (ret)
+ pr_err("%s: Error reading binding %s, ret %d\n",
+ __func__, key, ret);
+no_cpp_node:
+ return ret;
+}
diff --git a/drivers/media/platform/msm/ais/pproc/vpe/Makefile b/drivers/media/platform/msm/ais/pproc/vpe/Makefile
new file mode 100644
index 000000000000..9bdaa7899812
--- /dev/null
+++ b/drivers/media/platform/msm/ais/pproc/vpe/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_vpe.o
diff --git a/drivers/media/platform/msm/ais/pproc/vpe/msm_vpe.c b/drivers/media/platform/msm/ais/pproc/vpe/msm_vpe.c
new file mode 100644
index 000000000000..1551a1750ac0
--- /dev/null
+++ b/drivers/media/platform/msm/ais/pproc/vpe/msm_vpe.c
@@ -0,0 +1,1691 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "MSM-VPE %s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <linux/msm_iommu_domains.h>
+#include <linux/qcom_iommu.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/media-entity.h>
+#include <media/msmb_generic_buf_mgr.h>
+#include <media/msmb_pproc.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-buf.h>
+#include "msm_vpe.h"
+#include "msm_camera_io_util.h"
+
+#define MSM_VPE_IDENT_TO_SESSION_ID(identity) ((identity >> 16) & 0xFFFF)
+#define MSM_VPE_IDENT_TO_STREAM_ID(identity) (identity & 0xFFFF)
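+/*
+ * The 32-bit identity packs the session id in the upper 16 bits and the
+ * stream id in the lower 16 bits; e.g. (value illustrative) an identity of
+ * 0x00050002 decodes to session 5, stream 2.
+ */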
+
+#define MSM_VPE_DRV_NAME "msm_vpe"
+
+#define MSM_VPE_MAX_BUFF_QUEUE 16
+
+#define CONFIG_MSM_VPE_DBG 0
+
+#if CONFIG_MSM_VPE_DBG
+#define VPE_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define VPE_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+static void vpe_mem_dump(const char * const name, const void * const addr,
+ int size)
+{
+ char line_str[128], *p_str;
+ int i;
+ u32 *p = (u32 *) addr;
+ u32 data;
+
+ VPE_DBG("%s: (%s) %pK %d\n", __func__, name, addr, size);
+ line_str[0] = '\0';
+ p_str = line_str;
+ for (i = 0; i < size/4; i++) {
+ if (i % 4 == 0) {
+ snprintf(p_str, 12, "%pK: ", p);
+ p_str += 10;
+ }
+ data = *p++;
+ snprintf(p_str, 12, "%08x ", data);
+ p_str += 9;
+ if ((i + 1) % 4 == 0) {
+ VPE_DBG("%s\n", line_str);
+ line_str[0] = '\0';
+ p_str = line_str;
+ }
+ }
+ if (line_str[0] != '\0')
+ VPE_DBG("%s\n", line_str);
+}
+
+static inline long long vpe_do_div(long long num, long long den)
+{
+ do_div(num, den);
+ return num;
+}
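+
+/*
+ * do_div() divides in place and evaluates to the remainder; this wrapper
+ * exists so callers get the 64-bit quotient back directly, e.g.
+ * vpe_do_div(10, 4) returns 2 with the remainder discarded.
+ */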
+
+#define msm_dequeue(queue, member) ({ \
+ unsigned long flags; \
+ struct msm_device_queue *__q = (queue); \
+ struct msm_queue_cmd *qcmd = 0; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ __q->len--; \
+ qcmd = list_first_entry(&__q->list, \
+ struct msm_queue_cmd, \
+ member); \
+ list_del_init(&qcmd->member); \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+ qcmd; \
+ })
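+
+/*
+ * msm_dequeue() is a statement-expression that pops the first entry off the
+ * given queue, or evaluates to NULL when the queue is empty; typical use
+ * (mirroring the frame-done path below):
+ *
+ *	frame_qcmd = msm_dequeue(&vpe_dev->processing_q, list_frame);
+ *	if (!frame_qcmd)
+ *		return -EINVAL;
+ */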
+
+static void msm_queue_init(struct msm_device_queue *queue, const char *name)
+{
+ spin_lock_init(&queue->lock);
+ queue->len = 0;
+ queue->max = 0;
+ queue->name = name;
+ INIT_LIST_HEAD(&queue->list);
+ init_waitqueue_head(&queue->wait);
+}
+
+static struct msm_cam_clk_info vpe_clk_info[] = {
+ {"vpe_clk", 160000000},
+ {"vpe_pclk", -1},
+};
+
+static int msm_vpe_notify_frame_done(struct vpe_device *vpe_dev);
+
+static void msm_enqueue(struct msm_device_queue *queue,
+ struct list_head *entry)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ queue->len++;
+ if (queue->len > queue->max) {
+ queue->max = queue->len;
+ pr_debug("queue %s new max is %d\n", queue->name, queue->max);
+ }
+ list_add_tail(entry, &queue->list);
+ wake_up(&queue->wait);
+ VPE_DBG("woke up %s\n", queue->name);
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static struct msm_vpe_buff_queue_info_t *msm_vpe_get_buff_queue_entry(
+ struct vpe_device *vpe_dev, uint32_t session_id, uint32_t stream_id)
+{
+ uint32_t i = 0;
+ struct msm_vpe_buff_queue_info_t *buff_queue_info = NULL;
+
+ for (i = 0; i < vpe_dev->num_buffq; i++) {
+ if ((vpe_dev->buff_queue[i].used == 1) &&
+ (vpe_dev->buff_queue[i].session_id == session_id) &&
+ (vpe_dev->buff_queue[i].stream_id == stream_id)) {
+ buff_queue_info = &vpe_dev->buff_queue[i];
+ break;
+ }
+ }
+
+ if (buff_queue_info == NULL) {
+ pr_err("error buffer queue entry for sess:%d strm:%d not found\n",
+ session_id, stream_id);
+ }
+ return buff_queue_info;
+}
+
+static unsigned long msm_vpe_get_phy_addr(struct vpe_device *vpe_dev,
+ struct msm_vpe_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
+ uint8_t native_buff)
+{
+ unsigned long phy_add = 0;
+ struct list_head *buff_head;
+ struct msm_vpe_buffer_map_list_t *buff, *save;
+
+ if (native_buff)
+ buff_head = &buff_queue_info->native_buff_head;
+ else
+ buff_head = &buff_queue_info->vb2_buff_head;
+
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ if (buff->map_info.buff_info.index == buff_index) {
+ phy_add = buff->map_info.phy_addr;
+ break;
+ }
+ }
+
+ return phy_add;
+}
+
+static unsigned long msm_vpe_queue_buffer_info(struct vpe_device *vpe_dev,
+ struct msm_vpe_buff_queue_info_t *buff_queue,
+ struct msm_vpe_buffer_info_t *buffer_info)
+{
+ struct list_head *buff_head;
+ struct msm_vpe_buffer_map_list_t *buff, *save;
+ int rc = 0;
+
+ if (buffer_info->native_buff)
+ buff_head = &buff_queue->native_buff_head;
+ else
+ buff_head = &buff_queue->vb2_buff_head;
+
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ if (buff->map_info.buff_info.index == buffer_info->index) {
+ pr_err("error buffer index already queued\n");
+ return -EINVAL;
+ }
+ }
+
+ buff = kzalloc(
+ sizeof(struct msm_vpe_buffer_map_list_t), GFP_KERNEL);
+ if (!buff)
+ return -EINVAL;
+
+ buff->map_info.buff_info = *buffer_info;
+ buff->map_info.dbuf = dma_buf_get(buffer_info->fd);
+ if (IS_ERR_OR_NULL(buff->map_info.dbuf)) {
+ pr_err("Ion dma get buf failed\n");
+ rc = PTR_ERR(buff->map_info.dbuf);
+ goto err_get;
+ }
+
+ buff->map_info.attachment = dma_buf_attach(buff->map_info.dbuf,
+ &vpe_dev->pdev->dev);
+ if (IS_ERR_OR_NULL(buff->map_info.attachment)) {
+ pr_err("Ion dma buf attach failed\n");
+ rc = PTR_ERR(buff->map_info.attachment);
+ goto err_put;
+ }
+
+ buff->map_info.table =
+ dma_buf_map_attachment(buff->map_info.attachment,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(buff->map_info.table)) {
+ pr_err("DMA buf map attachment failed\n");
+ rc = PTR_ERR(buff->map_info.table);
+ goto err_detach;
+ }
+ if (msm_map_dma_buf(buff->map_info.dbuf, buff->map_info.table,
+ vpe_dev->domain_num, 0, SZ_4K, 0,
+ &buff->map_info.phy_addr,
+ &buff->map_info.len, 0, 0)) {
+		pr_err("%s: cannot map address\n", __func__);
+ goto err_detachment;
+ }
+
+ INIT_LIST_HEAD(&buff->entry);
+ list_add_tail(&buff->entry, buff_head);
+
+ return buff->map_info.phy_addr;
+
+err_detachment:
+ dma_buf_unmap_attachment(buff->map_info.attachment,
+ buff->map_info.table, DMA_BIDIRECTIONAL);
+err_detach:
+ dma_buf_detach(buff->map_info.dbuf, buff->map_info.attachment);
+err_put:
+ dma_buf_put(buff->map_info.dbuf);
+err_get:
+ kzfree(buff);
+ return 0;
+}
+
+static void msm_vpe_dequeue_buffer_info(struct vpe_device *vpe_dev,
+ struct msm_vpe_buffer_map_list_t *buff)
+{
+ msm_unmap_dma_buf(buff->map_info.table, vpe_dev->domain_num, 0);
+ dma_buf_unmap_attachment(buff->map_info.attachment,
+ buff->map_info.table, DMA_BIDIRECTIONAL);
+ dma_buf_detach(buff->map_info.dbuf, buff->map_info.attachment);
+ dma_buf_put(buff->map_info.dbuf);
+ list_del_init(&buff->entry);
+ kzfree(buff);
+}
+
+static unsigned long msm_vpe_fetch_buffer_info(struct vpe_device *vpe_dev,
+ struct msm_vpe_buffer_info_t *buffer_info, uint32_t session_id,
+ uint32_t stream_id)
+{
+ unsigned long phy_addr = 0;
+ struct msm_vpe_buff_queue_info_t *buff_queue_info;
+ uint8_t native_buff = buffer_info->native_buff;
+
+ buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev, session_id,
+ stream_id);
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
+ session_id, stream_id);
+ return phy_addr;
+ }
+
+ phy_addr = msm_vpe_get_phy_addr(vpe_dev, buff_queue_info,
+ buffer_info->index, native_buff);
+ if ((phy_addr == 0) && (native_buff)) {
+ phy_addr = msm_vpe_queue_buffer_info(vpe_dev, buff_queue_info,
+ buffer_info);
+ }
+ return phy_addr;
+}
+
+static int32_t msm_vpe_enqueue_buff_info_list(struct vpe_device *vpe_dev,
+ struct msm_vpe_stream_buff_info_t *stream_buff_info)
+{
+ uint32_t j;
+ struct msm_vpe_buff_queue_info_t *buff_queue_info;
+
+ buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev,
+ (stream_buff_info->identity >> 16) & 0xFFFF,
+ stream_buff_info->identity & 0xFFFF);
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
+ (stream_buff_info->identity >> 16) & 0xFFFF,
+ stream_buff_info->identity & 0xFFFF);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < stream_buff_info->num_buffs; j++) {
+ msm_vpe_queue_buffer_info(vpe_dev, buff_queue_info,
+ &stream_buff_info->buffer_info[j]);
+ }
+ return 0;
+}
+
+static int32_t msm_vpe_dequeue_buff_info_list(struct vpe_device *vpe_dev,
+ struct msm_vpe_buff_queue_info_t *buff_queue_info)
+{
+ struct msm_vpe_buffer_map_list_t *buff, *save;
+ struct list_head *buff_head;
+
+ buff_head = &buff_queue_info->native_buff_head;
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ msm_vpe_dequeue_buffer_info(vpe_dev, buff);
+ }
+
+ buff_head = &buff_queue_info->vb2_buff_head;
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ msm_vpe_dequeue_buffer_info(vpe_dev, buff);
+ }
+
+ return 0;
+}
+
+static int32_t msm_vpe_add_buff_queue_entry(struct vpe_device *vpe_dev,
+ uint16_t session_id, uint16_t stream_id)
+{
+ uint32_t i;
+ struct msm_vpe_buff_queue_info_t *buff_queue_info;
+
+ for (i = 0; i < vpe_dev->num_buffq; i++) {
+ if (vpe_dev->buff_queue[i].used == 0) {
+ buff_queue_info = &vpe_dev->buff_queue[i];
+ buff_queue_info->used = 1;
+ buff_queue_info->session_id = session_id;
+ buff_queue_info->stream_id = stream_id;
+ INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
+ INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
+ return 0;
+ }
+ }
+ pr_err("buffer queue full. error for sessionid: %d streamid: %d\n",
+ session_id, stream_id);
+ return -EINVAL;
+}
+
+static int32_t msm_vpe_free_buff_queue_entry(struct vpe_device *vpe_dev,
+ uint32_t session_id, uint32_t stream_id)
+{
+ struct msm_vpe_buff_queue_info_t *buff_queue_info;
+
+ buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev, session_id,
+ stream_id);
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
+ session_id, stream_id);
+ return -EINVAL;
+ }
+
+ buff_queue_info->used = 0;
+ buff_queue_info->session_id = 0;
+ buff_queue_info->stream_id = 0;
+ INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
+ INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
+ return 0;
+}
+
+static int32_t msm_vpe_create_buff_queue(struct vpe_device *vpe_dev,
+ uint32_t num_buffq)
+{
+ struct msm_vpe_buff_queue_info_t *buff_queue;
+
+ buff_queue = kzalloc(
+ sizeof(struct msm_vpe_buff_queue_info_t) * num_buffq,
+ GFP_KERNEL);
+ if (!buff_queue) {
+ pr_err("Buff queue allocation failure\n");
+ return -ENOMEM;
+ }
+
+ if (vpe_dev->buff_queue) {
+ pr_err("Buff queue not empty\n");
+ kzfree(buff_queue);
+ return -EINVAL;
+ }
+ vpe_dev->buff_queue = buff_queue;
+ vpe_dev->num_buffq = num_buffq;
+ return 0;
+}
+
+static void msm_vpe_delete_buff_queue(struct vpe_device *vpe_dev)
+{
+ uint32_t i;
+
+ for (i = 0; i < vpe_dev->num_buffq; i++) {
+ if (vpe_dev->buff_queue[i].used == 1) {
+ pr_err("Queue not free sessionid: %d, streamid: %d\n",
+ vpe_dev->buff_queue[i].session_id,
+ vpe_dev->buff_queue[i].stream_id);
+ msm_vpe_free_buff_queue_entry(vpe_dev,
+ vpe_dev->buff_queue[i].session_id,
+ vpe_dev->buff_queue[i].stream_id);
+ }
+ }
+ kzfree(vpe_dev->buff_queue);
+ vpe_dev->buff_queue = NULL;
+ vpe_dev->num_buffq = 0;
+}
+
+void vpe_release_ion_client(struct kref *ref)
+{
+ struct vpe_device *vpe_dev = container_of(ref,
+ struct vpe_device, refcount);
+ ion_client_destroy(vpe_dev->client);
+}
+
+static int vpe_init_mem(struct vpe_device *vpe_dev)
+{
+ kref_init(&vpe_dev->refcount);
+ kref_get(&vpe_dev->refcount);
+ vpe_dev->client = msm_ion_client_create("vpe");
+
+ if (!vpe_dev->client) {
+ pr_err("couldn't create ion client\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void vpe_deinit_mem(struct vpe_device *vpe_dev)
+{
+ kref_put(&vpe_dev->refcount, vpe_release_ion_client);
+}
+
+static irqreturn_t msm_vpe_irq(int irq_num, void *data)
+{
+ unsigned long flags;
+ uint32_t irq_status;
+ struct msm_vpe_tasklet_queue_cmd *queue_cmd;
+ struct vpe_device *vpe_dev = (struct vpe_device *) data;
+
+ irq_status = msm_camera_io_r_mb(vpe_dev->base +
+ VPE_INTR_STATUS_OFFSET);
+
+ spin_lock_irqsave(&vpe_dev->tasklet_lock, flags);
+ queue_cmd = &vpe_dev->tasklet_queue_cmd[vpe_dev->taskletq_idx];
+ if (queue_cmd->cmd_used) {
+ VPE_DBG("%s: vpe tasklet queue overflow\n", __func__);
+ list_del(&queue_cmd->list);
+ } else {
+ atomic_add(1, &vpe_dev->irq_cnt);
+ }
+ queue_cmd->irq_status = irq_status;
+
+ queue_cmd->cmd_used = 1;
+ vpe_dev->taskletq_idx =
+ (vpe_dev->taskletq_idx + 1) % MSM_VPE_TASKLETQ_SIZE;
+ list_add_tail(&queue_cmd->list, &vpe_dev->tasklet_q);
+ spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
+
+ tasklet_schedule(&vpe_dev->vpe_tasklet);
+
+ msm_camera_io_w_mb(irq_status, vpe_dev->base + VPE_INTR_CLEAR_OFFSET);
+ msm_camera_io_w(0, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
+ VPE_DBG("%s: irq_status=0x%x.\n", __func__, irq_status);
+
+ return IRQ_HANDLED;
+}
+
+static void msm_vpe_do_tasklet(unsigned long data)
+{
+ unsigned long flags;
+ struct vpe_device *vpe_dev = (struct vpe_device *)data;
+ struct msm_vpe_tasklet_queue_cmd *queue_cmd;
+
+ while (atomic_read(&vpe_dev->irq_cnt)) {
+ spin_lock_irqsave(&vpe_dev->tasklet_lock, flags);
+ queue_cmd = list_first_entry(&vpe_dev->tasklet_q,
+ struct msm_vpe_tasklet_queue_cmd, list);
+ if (!queue_cmd) {
+ atomic_set(&vpe_dev->irq_cnt, 0);
+ spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
+ return;
+ }
+ atomic_sub(1, &vpe_dev->irq_cnt);
+ list_del(&queue_cmd->list);
+ queue_cmd->cmd_used = 0;
+
+ spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
+
+ VPE_DBG("Frame done!!\n");
+ msm_vpe_notify_frame_done(vpe_dev);
+ }
+}
+
+static int vpe_init_hardware(struct vpe_device *vpe_dev)
+{
+ int rc = 0;
+
+ if (vpe_dev->fs_vpe == NULL) {
+ vpe_dev->fs_vpe =
+ regulator_get(&vpe_dev->pdev->dev, "vdd");
+ if (IS_ERR(vpe_dev->fs_vpe)) {
+ pr_err("Regulator vpe vdd get failed %ld\n",
+ PTR_ERR(vpe_dev->fs_vpe));
+ vpe_dev->fs_vpe = NULL;
+ rc = -ENODEV;
+ goto fail;
+ } else if (regulator_enable(vpe_dev->fs_vpe)) {
+ pr_err("Regulator vpe vdd enable failed\n");
+ regulator_put(vpe_dev->fs_vpe);
+ vpe_dev->fs_vpe = NULL;
+ rc = -ENODEV;
+ goto fail;
+ }
+ }
+
+ rc = msm_cam_clk_enable(&vpe_dev->pdev->dev, vpe_clk_info,
+ vpe_dev->vpe_clk, ARRAY_SIZE(vpe_clk_info), 1);
+ if (rc < 0) {
+ pr_err("clk enable failed\n");
+ goto disable_and_put_regulator;
+ }
+
+ vpe_dev->base = ioremap(vpe_dev->mem->start,
+ resource_size(vpe_dev->mem));
+ if (!vpe_dev->base) {
+ rc = -ENOMEM;
+ pr_err("ioremap failed\n");
+ goto disable_and_put_regulator;
+ }
+
+ if (vpe_dev->state != VPE_STATE_BOOT) {
+ rc = request_irq(vpe_dev->irq->start, msm_vpe_irq,
+ IRQF_TRIGGER_RISING,
+ "vpe", vpe_dev);
+ if (rc < 0) {
+ pr_err("irq request fail! start=%u\n",
+ (uint32_t) vpe_dev->irq->start);
+ rc = -EBUSY;
+ goto unmap_base;
+ } else {
+ VPE_DBG("Got irq! %d\n", (int)vpe_dev->irq->start);
+ }
+ } else {
+ VPE_DBG("Skip requesting the irq since device is booting\n");
+ }
+ vpe_dev->buf_mgr_subdev = msm_buf_mngr_get_subdev();
+
+ msm_vpe_create_buff_queue(vpe_dev, MSM_VPE_MAX_BUFF_QUEUE);
+ return rc;
+
+unmap_base:
+ iounmap(vpe_dev->base);
+disable_and_put_regulator:
+ regulator_disable(vpe_dev->fs_vpe);
+ regulator_put(vpe_dev->fs_vpe);
+fail:
+ return rc;
+}
+
+static int vpe_release_hardware(struct vpe_device *vpe_dev)
+{
+ if (vpe_dev->state != VPE_STATE_BOOT) {
+ free_irq(vpe_dev->irq->start, vpe_dev);
+ tasklet_kill(&vpe_dev->vpe_tasklet);
+ atomic_set(&vpe_dev->irq_cnt, 0);
+ }
+
+ msm_vpe_delete_buff_queue(vpe_dev);
+ iounmap(vpe_dev->base);
+ msm_cam_clk_enable(&vpe_dev->pdev->dev, vpe_clk_info,
+ vpe_dev->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
+ return 0;
+}
+
+static int vpe_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ uint32_t i;
+ struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
+
+ mutex_lock(&vpe_dev->mutex);
+ if (vpe_dev->vpe_open_cnt == MAX_ACTIVE_VPE_INSTANCE) {
+ pr_err("No free VPE instance\n");
+ rc = -ENODEV;
+ goto err_mutex_unlock;
+ }
+
+ for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
+ if (vpe_dev->vpe_subscribe_list[i].active == 0) {
+ vpe_dev->vpe_subscribe_list[i].active = 1;
+ vpe_dev->vpe_subscribe_list[i].vfh = &fh->vfh;
+ break;
+ }
+ }
+ if (i == MAX_ACTIVE_VPE_INSTANCE) {
+ pr_err("No free instance\n");
+ rc = -ENODEV;
+ goto err_mutex_unlock;
+ }
+
+ VPE_DBG("open %d %pK\n", i, &fh->vfh);
+ vpe_dev->vpe_open_cnt++;
+ if (vpe_dev->vpe_open_cnt == 1) {
+ rc = vpe_init_hardware(vpe_dev);
+ if (rc < 0) {
+ pr_err("%s: Couldn't init vpe hardware\n", __func__);
+ vpe_dev->vpe_open_cnt--;
+ goto err_fixup_sub_list;
+ }
+ rc = vpe_init_mem(vpe_dev);
+ if (rc < 0) {
+ pr_err("%s: Couldn't init mem\n", __func__);
+ vpe_dev->vpe_open_cnt--;
+ rc = -ENODEV;
+ goto err_release_hardware;
+ }
+ vpe_dev->state = VPE_STATE_IDLE;
+ }
+ mutex_unlock(&vpe_dev->mutex);
+
+ return rc;
+
+err_release_hardware:
+ vpe_release_hardware(vpe_dev);
+err_fixup_sub_list:
+ for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
+ if (vpe_dev->vpe_subscribe_list[i].vfh == &fh->vfh) {
+ vpe_dev->vpe_subscribe_list[i].active = 0;
+ vpe_dev->vpe_subscribe_list[i].vfh = NULL;
+ break;
+ }
+ }
+err_mutex_unlock:
+ mutex_unlock(&vpe_dev->mutex);
+ return rc;
+}
+
+static int vpe_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ uint32_t i;
+ struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
+
+ mutex_lock(&vpe_dev->mutex);
+ for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
+ if (vpe_dev->vpe_subscribe_list[i].vfh == &fh->vfh) {
+ vpe_dev->vpe_subscribe_list[i].active = 0;
+ vpe_dev->vpe_subscribe_list[i].vfh = NULL;
+ break;
+ }
+ }
+ if (i == MAX_ACTIVE_VPE_INSTANCE) {
+ pr_err("Invalid close\n");
+ mutex_unlock(&vpe_dev->mutex);
+ return -ENODEV;
+ }
+
+ VPE_DBG("close %d %pK\n", i, &fh->vfh);
+ vpe_dev->vpe_open_cnt--;
+ if (vpe_dev->vpe_open_cnt == 0) {
+ vpe_deinit_mem(vpe_dev);
+ vpe_release_hardware(vpe_dev);
+ vpe_dev->state = VPE_STATE_OFF;
+ }
+ mutex_unlock(&vpe_dev->mutex);
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops msm_vpe_internal_ops = {
+ .open = vpe_open_node,
+ .close = vpe_close_node,
+};
+
+static int msm_vpe_buffer_ops(struct vpe_device *vpe_dev,
+ uint32_t buff_mgr_ops, struct msm_buf_mngr_info *buff_mgr_info)
+{
+ int rc = -EINVAL;
+
+ rc = v4l2_subdev_call(vpe_dev->buf_mgr_subdev, core, ioctl,
+ buff_mgr_ops, buff_mgr_info);
+ if (rc < 0)
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+}
+
+static int msm_vpe_notify_frame_done(struct vpe_device *vpe_dev)
+{
+ struct v4l2_event v4l2_evt;
+ struct msm_queue_cmd *frame_qcmd;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_vpe_frame_info_t *processed_frame;
+ struct msm_device_queue *queue = &vpe_dev->processing_q;
+ struct msm_buf_mngr_info buff_mgr_info;
+ int rc = 0;
+
+ if (queue->len > 0) {
+ frame_qcmd = msm_dequeue(queue, list_frame);
+ if (!frame_qcmd) {
+ pr_err("%s: %d frame_qcmd is NULL\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ processed_frame = frame_qcmd->command;
+ do_gettimeofday(&(processed_frame->out_time));
+ kfree(frame_qcmd);
+ event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_ATOMIC);
+ if (!event_qcmd) {
+ pr_err("%s: Insufficient memory\n", __func__);
+ return -ENOMEM;
+ }
+ atomic_set(&event_qcmd->on_heap, 1);
+ event_qcmd->command = processed_frame;
+ VPE_DBG("fid %d\n", processed_frame->frame_id);
+ msm_enqueue(&vpe_dev->eventData_q, &event_qcmd->list_eventdata);
+
+ if (!processed_frame->output_buffer_info.processed_divert) {
+ memset(&buff_mgr_info, 0,
+ sizeof(buff_mgr_info));
+ buff_mgr_info.session_id =
+ ((processed_frame->identity >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id =
+ (processed_frame->identity & 0xFFFF);
+ buff_mgr_info.frame_id = processed_frame->frame_id;
+ buff_mgr_info.timestamp = processed_frame->timestamp;
+ buff_mgr_info.index =
+ processed_frame->output_buffer_info.index;
+ rc = msm_vpe_buffer_ops(vpe_dev,
+ VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("%s: error doing VIDIOC_MSM_BUF_MNGR_BUF_DONE\n",
+ __func__);
+ rc = -EINVAL;
+ }
+ }
+
+ v4l2_evt.id = processed_frame->inst_id;
+ v4l2_evt.type = V4L2_EVENT_VPE_FRAME_DONE;
+ v4l2_event_queue(vpe_dev->msm_sd.sd.devnode, &v4l2_evt);
+ }
+ return rc;
+}
+
+static void vpe_update_scaler_params(struct vpe_device *vpe_dev,
+ struct msm_vpe_frame_strip_info strip_info)
+{
+ uint32_t out_ROI_width, out_ROI_height;
+ uint32_t src_ROI_width, src_ROI_height;
+
+ /*
+ * phase_step_x, phase_step_y, phase_init_x and phase_init_y
+ * are represented in fixed-point, unsigned 3.29 format
+ */
+ uint32_t phase_step_x = 0;
+ uint32_t phase_step_y = 0;
+ uint32_t phase_init_x = 0;
+ uint32_t phase_init_y = 0;
+
+ uint32_t src_roi, src_x, src_y, src_xy, temp;
+ uint32_t yscale_filter_sel, xscale_filter_sel;
+ uint32_t scale_unit_sel_x, scale_unit_sel_y;
+ uint64_t numerator, denominator;
+
+	/*
+	 * The assumption is that both directions need zoom; this can be
+	 * improved.
+	 */
+ temp = msm_camera_io_r(vpe_dev->base + VPE_OP_MODE_OFFSET) | 0x3;
+ msm_camera_io_w(temp, vpe_dev->base + VPE_OP_MODE_OFFSET);
+
+ src_ROI_width = strip_info.src_w;
+ src_ROI_height = strip_info.src_h;
+ out_ROI_width = strip_info.dst_w;
+ out_ROI_height = strip_info.dst_h;
+
+ VPE_DBG("src w = %u, h=%u, dst w = %u, h =%u.\n",
+ src_ROI_width, src_ROI_height, out_ROI_width,
+ out_ROI_height);
+ src_roi = (src_ROI_height << 16) + src_ROI_width;
+
+ msm_camera_io_w(src_roi, vpe_dev->base + VPE_SRC_SIZE_OFFSET);
+
+ src_x = strip_info.src_x;
+ src_y = strip_info.src_y;
+
+ VPE_DBG("src_x = %d, src_y=%d.\n", src_x, src_y);
+
+ src_xy = src_y*(1<<16) + src_x;
+ msm_camera_io_w(src_xy, vpe_dev->base +
+ VPE_SRC_XY_OFFSET);
+ VPE_DBG("src_xy = 0x%x, src_roi=0x%x.\n", src_xy, src_roi);
+
+ /* decide whether to use FIR or M/N for scaling */
+ if ((out_ROI_width == 1 && src_ROI_width < 4) ||
+ (src_ROI_width < 4 * out_ROI_width - 3))
+ scale_unit_sel_x = 0;/* use FIR scalar */
+ else
+ scale_unit_sel_x = 1;/* use M/N scalar */
+
+ if ((out_ROI_height == 1 && src_ROI_height < 4) ||
+ (src_ROI_height < 4 * out_ROI_height - 3))
+ scale_unit_sel_y = 0;/* use FIR scalar */
+ else
+ scale_unit_sel_y = 1;/* use M/N scalar */
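+
+	/*
+	 * For example (numbers illustrative): scaling 100 source pixels down
+	 * to 30 output pixels gives 100 < 4 * 30 - 3 = 117, so the FIR
+	 * scalar is chosen; scaling 500 down to 30 gives 500 >= 117, so the
+	 * M/N scalar is used instead.
+	 */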
+
+ /* calculate phase step for the x direction */
+
+ /*
+ * if destination is only 1 pixel wide, the value of
+ * phase_step_x is unimportant. Assigning phase_step_x to src
+ * ROI width as an arbitrary value.
+ */
+ if (out_ROI_width == 1)
+ phase_step_x = (uint32_t) ((src_ROI_width) <<
+ SCALER_PHASE_BITS);
+
+ /* if using FIR scalar */
+ else if (scale_unit_sel_x == 0) {
+
+		/*
+		 * Calculate the quotient ( src_ROI_width - 1 ) /
+		 * ( out_ROI_width - 1 ) with u3.29 precision. The
+		 * quotient is rounded up at the 29th fractional bit.
+		 */
+ numerator = (uint64_t)(src_ROI_width - 1) <<
+ SCALER_PHASE_BITS;
+		/*
+		 * never equal to 0 because of the (out_ROI_width == 1)
+		 * case above
+		 */
+ denominator = (uint64_t)(out_ROI_width - 1);
+ /*
+ * divide and round up to the larger 29th decimal
+ * point.
+ */
+ phase_step_x = (uint32_t) vpe_do_div((numerator +
+ denominator - 1), denominator);
+ } else if (scale_unit_sel_x == 1) { /* if M/N scalar */
+ /*
+ * Calculate the quotient ( src_ROI_width ) / (
+ * out_ROI_width) with u3.29 precision. Quotient is
+ * rounded down to the smaller 29th decimal point.
+ */
+ numerator = (uint64_t)(src_ROI_width) <<
+ SCALER_PHASE_BITS;
+ denominator = (uint64_t)(out_ROI_width);
+ phase_step_x =
+ (uint32_t) vpe_do_div(numerator, denominator);
+ }
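+
+	/*
+	 * Worked example for the M/N case (numbers illustrative, assuming
+	 * SCALER_PHASE_BITS is 29 per the u3.29 format noted above): with
+	 * src_ROI_width = 640 and out_ROI_width = 320 the quotient is
+	 * (640 << 29) / 320 = 0x40000000, i.e. a phase step of exactly 2.0.
+	 */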
+ /* calculate phase step for the y direction */
+
+	/*
+	 * if the destination is only 1 pixel tall, the value of
+	 * phase_step_y is unimportant. Assign phase_step_y to the src
+	 * ROI height as an arbitrary value.
+	 */
+ if (out_ROI_height == 1)
+ phase_step_y =
+ (uint32_t) ((src_ROI_height) << SCALER_PHASE_BITS);
+
+ /* if FIR scalar */
+ else if (scale_unit_sel_y == 0) {
+ /*
+ * Calculate the quotient ( src_ROI_height - 1 ) / (
+ * out_ROI_height - 1) with u3.29 precision. Quotient
+ * is rounded up to the larger 29th decimal point.
+ */
+ numerator = (uint64_t)(src_ROI_height - 1) <<
+ SCALER_PHASE_BITS;
+		/*
+		 * never equal to 0 because of the (out_ROI_height == 1)
+		 * case above
+		 */
+ denominator = (uint64_t)(out_ROI_height - 1);
+ /*
+ * Quotient is rounded up to the larger 29th decimal
+ * point.
+ */
+ phase_step_y =
+ (uint32_t) vpe_do_div(
+ (numerator + denominator - 1), denominator);
+ } else if (scale_unit_sel_y == 1) { /* if M/N scalar */
+		/*
+		 * Calculate the quotient ( src_ROI_height ) /
+		 * ( out_ROI_height ) with u3.29 precision. The quotient
+		 * is rounded down at the 29th fractional bit.
+		 */
+ numerator = (uint64_t)(src_ROI_height) <<
+ SCALER_PHASE_BITS;
+ denominator = (uint64_t)(out_ROI_height);
+ phase_step_y = (uint32_t) vpe_do_div(
+ numerator, denominator);
+ }
+
+ /* decide which set of FIR coefficients to use */
+ if (phase_step_x > HAL_MDP_PHASE_STEP_2P50)
+ xscale_filter_sel = 0;
+ else if (phase_step_x > HAL_MDP_PHASE_STEP_1P66)
+ xscale_filter_sel = 1;
+ else if (phase_step_x > HAL_MDP_PHASE_STEP_1P25)
+ xscale_filter_sel = 2;
+ else
+ xscale_filter_sel = 3;
+
+ if (phase_step_y > HAL_MDP_PHASE_STEP_2P50)
+ yscale_filter_sel = 0;
+ else if (phase_step_y > HAL_MDP_PHASE_STEP_1P66)
+ yscale_filter_sel = 1;
+ else if (phase_step_y > HAL_MDP_PHASE_STEP_1P25)
+ yscale_filter_sel = 2;
+ else
+ yscale_filter_sel = 3;
+
+ /* calculate phase init for the x direction */
+
+ /* if using FIR scalar */
+ if (scale_unit_sel_x == 0) {
+ if (out_ROI_width == 1)
+ phase_init_x =
+ (uint32_t) ((src_ROI_width - 1) <<
+ SCALER_PHASE_BITS);
+ else
+ phase_init_x = 0;
+ } else if (scale_unit_sel_x == 1) /* M/N scaler */
+ phase_init_x = 0;
+
+ /*
+ * calculate phase init for the y direction, if using the
+ * FIR scaler
+ */
+ if (scale_unit_sel_y == 0) {
+ if (out_ROI_height == 1)
+ phase_init_y =
+ (uint32_t) ((src_ROI_height -
+ 1) << SCALER_PHASE_BITS);
+ else
+ phase_init_y = 0;
+ } else if (scale_unit_sel_y == 1) /* M/N scaler */
+ phase_init_y = 0;
+
+ strip_info.phase_step_x = phase_step_x;
+ strip_info.phase_step_y = phase_step_y;
+ strip_info.phase_init_x = phase_init_x;
+ strip_info.phase_init_y = phase_init_y;
+ VPE_DBG("phase step x = %d, step y = %d.\n",
+ strip_info.phase_step_x, strip_info.phase_step_y);
+ VPE_DBG("phase init x = %d, init y = %d.\n",
+ strip_info.phase_init_x, strip_info.phase_init_y);
+
+ msm_camera_io_w(strip_info.phase_step_x, vpe_dev->base +
+ VPE_SCALE_PHASEX_STEP_OFFSET);
+ msm_camera_io_w(strip_info.phase_step_y, vpe_dev->base +
+ VPE_SCALE_PHASEY_STEP_OFFSET);
+
+ msm_camera_io_w(strip_info.phase_init_x, vpe_dev->base +
+ VPE_SCALE_PHASEX_INIT_OFFSET);
+ msm_camera_io_w(strip_info.phase_init_y, vpe_dev->base +
+ VPE_SCALE_PHASEY_INIT_OFFSET);
+}
+
+static void vpe_program_buffer_addresses(
+ struct vpe_device *vpe_dev,
+ unsigned long srcP0,
+ unsigned long srcP1,
+ unsigned long outP0,
+ unsigned long outP1)
+{
+ VPE_DBG("%s VPE Configured with:\n"
+ "Src %x, %x Dest %x, %x",
+ __func__, (uint32_t)srcP0, (uint32_t)srcP1,
+ (uint32_t)outP0, (uint32_t)outP1);
+
+ msm_camera_io_w(srcP0, vpe_dev->base + VPE_SRCP0_ADDR_OFFSET);
+ msm_camera_io_w(srcP1, vpe_dev->base + VPE_SRCP1_ADDR_OFFSET);
+ msm_camera_io_w(outP0, vpe_dev->base + VPE_OUTP0_ADDR_OFFSET);
+ msm_camera_io_w(outP1, vpe_dev->base + VPE_OUTP1_ADDR_OFFSET);
+}
+
+static int vpe_start(struct vpe_device *vpe_dev)
+{
+ /* enable the frame irq, bit 0 = Display list 0 ROI done */
+ msm_camera_io_w_mb(1, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
+ msm_camera_io_dump(vpe_dev->base, 0x120, CONFIG_MSM_VPE_DBG);
+ msm_camera_io_dump(vpe_dev->base + 0x00400, 0x18, CONFIG_MSM_VPE_DBG);
+ msm_camera_io_dump(vpe_dev->base + 0x10000, 0x250, CONFIG_MSM_VPE_DBG);
+ msm_camera_io_dump(vpe_dev->base + 0x30000, 0x20, CONFIG_MSM_VPE_DBG);
+ msm_camera_io_dump(vpe_dev->base + 0x50000, 0x30, CONFIG_MSM_VPE_DBG);
+ msm_camera_io_dump(vpe_dev->base + 0x50400, 0x10, CONFIG_MSM_VPE_DBG);
+
+ /*
+ * This triggers the operation. When the VPE is done,
+ * msm_vpe_irq will fire.
+ */
+ msm_camera_io_w_mb(1, vpe_dev->base + VPE_DL0_START_OFFSET);
+ return 0;
+}
+
+static void vpe_config_axi_default(struct vpe_device *vpe_dev)
+{
+ msm_camera_io_w(0x25, vpe_dev->base + VPE_AXI_ARB_2_OFFSET);
+}
+
+static int vpe_reset(struct vpe_device *vpe_dev)
+{
+ uint32_t vpe_version;
+ uint32_t rc = 0;
+
+ vpe_version = msm_camera_io_r(
+ vpe_dev->base + VPE_HW_VERSION_OFFSET);
+ VPE_DBG("vpe_version = 0x%x\n", vpe_version);
+ /* disable all interrupts */
+ msm_camera_io_w(0, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
+ /* clear all pending interrupts */
+ msm_camera_io_w(0x1fffff, vpe_dev->base + VPE_INTR_CLEAR_OFFSET);
+ /* write sw_reset to reset the core. */
+ msm_camera_io_w(0x10, vpe_dev->base + VPE_SW_RESET_OFFSET);
+ /* then poll the reset bit; it should self-clear. */
+ while (1) {
+ rc = msm_camera_io_r(
+ vpe_dev->base + VPE_SW_RESET_OFFSET) & 0x10;
+ if (rc == 0)
+ break;
+ cpu_relax();
+ }
+ /*
+ * At this point, the hardware is reset. Then program the
+ * default values.
+ */
+ msm_camera_io_w(VPE_AXI_RD_ARB_CONFIG_VALUE,
+ vpe_dev->base + VPE_AXI_RD_ARB_CONFIG_OFFSET);
+
+ msm_camera_io_w(VPE_CGC_ENABLE_VALUE,
+ vpe_dev->base + VPE_CGC_EN_OFFSET);
+ msm_camera_io_w(1, vpe_dev->base + VPE_CMD_MODE_OFFSET);
+ msm_camera_io_w(VPE_DEFAULT_OP_MODE_VALUE,
+ vpe_dev->base + VPE_OP_MODE_OFFSET);
+ msm_camera_io_w(VPE_DEFAULT_SCALE_CONFIG,
+ vpe_dev->base + VPE_SCALE_CONFIG_OFFSET);
+ vpe_config_axi_default(vpe_dev);
+ return rc;
+}
+
+static int vpe_update_scale_coef(struct vpe_device *vpe_dev, uint32_t *p)
+{
+ uint32_t i, offset;
+
+ offset = *p;
+
+ if (offset > VPE_SCALE_COEFF_MAX_N-VPE_SCALE_COEFF_NUM) {
+ pr_err("%s: invalid offset %d passed in\n", __func__, offset);
+ return -EINVAL;
+ }
+
+ for (i = offset; i < (VPE_SCALE_COEFF_NUM + offset); i++) {
+ VPE_DBG("Setting scale table %d\n", i);
+ msm_camera_io_w(*(++p),
+ vpe_dev->base + VPE_SCALE_COEFF_LSBn(i));
+ msm_camera_io_w(*(++p),
+ vpe_dev->base + VPE_SCALE_COEFF_MSBn(i));
+ }
+
+ return 0;
+}
+
+static void vpe_input_plane_config(struct vpe_device *vpe_dev, uint32_t *p)
+{
+ msm_camera_io_w(*p, vpe_dev->base + VPE_SRC_FORMAT_OFFSET);
+ msm_camera_io_w(*(++p),
+ vpe_dev->base + VPE_SRC_UNPACK_PATTERN1_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_IMAGE_SIZE_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_YSTRIDE1_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_SIZE_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_XY_OFFSET);
+}
+
+static void vpe_output_plane_config(struct vpe_device *vpe_dev, uint32_t *p)
+{
+ msm_camera_io_w(*p, vpe_dev->base + VPE_OUT_FORMAT_OFFSET);
+ msm_camera_io_w(*(++p),
+ vpe_dev->base + VPE_OUT_PACK_PATTERN1_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_OUT_YSTRIDE1_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_OUT_SIZE_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_OUT_XY_OFFSET);
+}
+
+static void vpe_operation_config(struct vpe_device *vpe_dev, uint32_t *p)
+{
+ msm_camera_io_w(*p, vpe_dev->base + VPE_OP_MODE_OFFSET);
+}
+
+/**
+ * msm_vpe_transaction_setup() - send setup for one frame to VPE
+ * @vpe_dev: vpe device
+ * @data: packed setup commands
+ *
+ * See msm_vpe.h for the expected format of `data'
+ */
+static void msm_vpe_transaction_setup(struct vpe_device *vpe_dev, void *data)
+{
+ int i, rc = 0;
+ void *iter = data;
+
+ vpe_mem_dump("vpe_transaction", data, VPE_TRANSACTION_SETUP_CONFIG_LEN);
+
+ for (i = 0; i < VPE_NUM_SCALER_TABLES; ++i) {
+ rc = vpe_update_scale_coef(vpe_dev, (uint32_t *)iter);
+ if (rc != 0)
+ return;
+
+ iter += VPE_SCALER_CONFIG_LEN;
+ }
+ vpe_input_plane_config(vpe_dev, (uint32_t *)iter);
+ iter += VPE_INPUT_PLANE_CFG_LEN;
+ vpe_output_plane_config(vpe_dev, (uint32_t *)iter);
+ iter += VPE_OUTPUT_PLANE_CFG_LEN;
+ vpe_operation_config(vpe_dev, (uint32_t *)iter);
+}
+
+static int msm_vpe_send_frame_to_hardware(struct vpe_device *vpe_dev,
+ struct msm_queue_cmd *frame_qcmd)
+{
+ struct msm_vpe_frame_info_t *process_frame;
+
+ if (vpe_dev->processing_q.len < MAX_VPE_PROCESSING_FRAME) {
+ process_frame = frame_qcmd->command;
+ msm_enqueue(&vpe_dev->processing_q,
+ &frame_qcmd->list_frame);
+
+ vpe_update_scaler_params(vpe_dev, process_frame->strip_info);
+ vpe_program_buffer_addresses(
+ vpe_dev,
+ process_frame->src_phyaddr,
+ process_frame->src_phyaddr
+ + process_frame->src_chroma_plane_offset,
+ process_frame->dest_phyaddr,
+ process_frame->dest_phyaddr
+ + process_frame->dest_chroma_plane_offset);
+ vpe_start(vpe_dev);
+ do_gettimeofday(&(process_frame->in_time));
+ }
+ return 0;
+}
+
+static int msm_vpe_cfg(struct vpe_device *vpe_dev,
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ int rc = 0;
+ struct msm_queue_cmd *frame_qcmd = NULL;
+ struct msm_vpe_frame_info_t *new_frame =
+ kzalloc(sizeof(struct msm_vpe_frame_info_t), GFP_KERNEL);
+ unsigned long in_phyaddr, out_phyaddr;
+ struct msm_buf_mngr_info buff_mgr_info;
+
+ if (!new_frame) {
+ pr_err("Insufficient memory, returning\n");
+ return -ENOMEM;
+ }
+
+ rc = copy_from_user(new_frame, (void __user *)ioctl_ptr->ioctl_ptr,
+ sizeof(struct msm_vpe_frame_info_t));
+ if (rc) {
+ pr_err("%s:%d copy from user\n", __func__, __LINE__);
+ rc = -EINVAL;
+ goto err_free_new_frame;
+ }
+
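+ /*
+ * new_frame->identity packs the session id in the upper 16 bits
+ * and the stream id in the lower 16 bits (see the buff_mgr_info
+ * setup below).
+ */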
+ in_phyaddr = msm_vpe_fetch_buffer_info(vpe_dev,
+ &new_frame->input_buffer_info,
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF));
+ if (!in_phyaddr) {
+ pr_err("error getting input physical address\n");
+ rc = -EINVAL;
+ goto err_free_new_frame;
+ }
+
+ memset(&new_frame->output_buffer_info, 0,
+ sizeof(struct msm_vpe_buffer_info_t));
+ memset(&buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
+ buff_mgr_info.session_id = ((new_frame->identity >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id = (new_frame->identity & 0xFFFF);
+ buff_mgr_info.type = MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
+ rc = msm_vpe_buffer_ops(vpe_dev, VIDIOC_MSM_BUF_MNGR_GET_BUF,
+ &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("error getting buffer\n");
+ rc = -EINVAL;
+ goto err_free_new_frame;
+ }
+
+ new_frame->output_buffer_info.index = buff_mgr_info.index;
+ out_phyaddr = msm_vpe_fetch_buffer_info(vpe_dev,
+ &new_frame->output_buffer_info,
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF));
+ if (!out_phyaddr) {
+ pr_err("error getting output physical address\n");
+ rc = -EINVAL;
+ goto err_put_buf;
+ }
+
+ new_frame->src_phyaddr = in_phyaddr;
+ new_frame->dest_phyaddr = out_phyaddr;
+
+ frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+ if (!frame_qcmd) {
+ rc = -ENOMEM;
+ goto err_put_buf;
+ }
+
+ atomic_set(&frame_qcmd->on_heap, 1);
+ frame_qcmd->command = new_frame;
+ rc = msm_vpe_send_frame_to_hardware(vpe_dev, frame_qcmd);
+ if (rc < 0) {
+ pr_err("error: cannot send frame to hardware\n");
+ rc = -EINVAL;
+ goto err_free_frame_qcmd;
+ }
+
+ return rc;
+
+err_free_frame_qcmd:
+ kfree(frame_qcmd);
+err_put_buf:
+ msm_vpe_buffer_ops(vpe_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ &buff_mgr_info);
+err_free_new_frame:
+ kfree(new_frame);
+ return rc;
+}
+
+static long msm_vpe_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+ int rc = 0;
+
+ mutex_lock(&vpe_dev->mutex);
+ switch (cmd) {
+ case VIDIOC_MSM_VPE_TRANSACTION_SETUP: {
+ struct msm_vpe_transaction_setup_cfg *cfg;
+
+ VPE_DBG("VIDIOC_MSM_VPE_TRANSACTION_SETUP\n");
+ if (sizeof(*cfg) != ioctl_ptr->len) {
+ pr_err("%s: size mismatch cmd=%d, len=%zu, expected=%zu\n",
+ __func__, cmd, ioctl_ptr->len,
+ sizeof(*cfg));
+ rc = -EINVAL;
+ break;
+ }
+
+ cfg = kzalloc(ioctl_ptr->len, GFP_KERNEL);
+ if (!cfg) {
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = copy_from_user(cfg, (void __user *)ioctl_ptr->ioctl_ptr,
+ ioctl_ptr->len);
+ if (rc) {
+ pr_err("%s:%d copy from user\n", __func__, __LINE__);
+ kfree(cfg);
+ break;
+ }
+
+ msm_vpe_transaction_setup(vpe_dev, (void *)cfg);
+ kfree(cfg);
+ break;
+ }
+ case VIDIOC_MSM_VPE_CFG: {
+ VPE_DBG("VIDIOC_MSM_VPE_CFG\n");
+ rc = msm_vpe_cfg(vpe_dev, ioctl_ptr);
+ break;
+ }
+ case VIDIOC_MSM_VPE_ENQUEUE_STREAM_BUFF_INFO: {
+ struct msm_vpe_stream_buff_info_t *u_stream_buff_info;
+ struct msm_vpe_stream_buff_info_t k_stream_buff_info;
+
+ VPE_DBG("VIDIOC_MSM_VPE_ENQUEUE_STREAM_BUFF_INFO\n");
+
+ if (sizeof(struct msm_vpe_stream_buff_info_t) !=
+ ioctl_ptr->len) {
+ pr_err("%s:%d: invalid length\n", __func__, __LINE__);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ u_stream_buff_info = kzalloc(ioctl_ptr->len, GFP_KERNEL);
+ if (!u_stream_buff_info) {
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = (copy_from_user(u_stream_buff_info,
+ (void __user *)ioctl_ptr->ioctl_ptr,
+ ioctl_ptr->len) ? -EFAULT : 0);
+ if (rc) {
+ pr_err("%s:%d copy from user\n", __func__, __LINE__);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ if ((u_stream_buff_info->num_buffs == 0) ||
+ (u_stream_buff_info->num_buffs >
+ MSM_CAMERA_MAX_STREAM_BUF)) {
+ pr_err("%s:%d: Invalid number of buffers\n", __func__,
+ __LINE__);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+ k_stream_buff_info.num_buffs = u_stream_buff_info->num_buffs;
+ k_stream_buff_info.identity = u_stream_buff_info->identity;
+ k_stream_buff_info.buffer_info =
+ kzalloc(k_stream_buff_info.num_buffs *
+ sizeof(struct msm_vpe_buffer_info_t), GFP_KERNEL);
+ if (!k_stream_buff_info.buffer_info) {
+ pr_err("%s:%d: malloc error\n", __func__, __LINE__);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = (copy_from_user(k_stream_buff_info.buffer_info,
+ (void __user *)u_stream_buff_info->buffer_info,
+ k_stream_buff_info.num_buffs *
+ sizeof(struct msm_vpe_buffer_info_t)) ?
+ -EFAULT : 0);
+ if (rc) {
+ pr_err("%s:%d copy from user\n", __func__, __LINE__);
+ kfree(k_stream_buff_info.buffer_info);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = msm_vpe_add_buff_queue_entry(vpe_dev,
+ ((k_stream_buff_info.identity >> 16) & 0xFFFF),
+ (k_stream_buff_info.identity & 0xFFFF));
+ if (!rc)
+ rc = msm_vpe_enqueue_buff_info_list(vpe_dev,
+ &k_stream_buff_info);
+
+ kfree(k_stream_buff_info.buffer_info);
+ kfree(u_stream_buff_info);
+ break;
+ }
+ case VIDIOC_MSM_VPE_DEQUEUE_STREAM_BUFF_INFO: {
+ uint32_t identity;
+ struct msm_vpe_buff_queue_info_t *buff_queue_info;
+
+ VPE_DBG("VIDIOC_MSM_VPE_DEQUEUE_STREAM_BUFF_INFO\n");
+ if (ioctl_ptr->len != sizeof(uint32_t)) {
+ pr_err("%s:%d Invalid len\n", __func__, __LINE__);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = (copy_from_user(&identity,
+ (void __user *)ioctl_ptr->ioctl_ptr,
+ ioctl_ptr->len) ? -EFAULT : 0);
+ if (rc) {
+ pr_err("%s:%d copy from user\n", __func__, __LINE__);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev,
+ ((identity >> 16) & 0xFFFF), (identity & 0xFFFF));
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for identity:%d\n",
+ identity);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ msm_vpe_dequeue_buff_info_list(vpe_dev, buff_queue_info);
+ rc = msm_vpe_free_buff_queue_entry(vpe_dev,
+ buff_queue_info->session_id,
+ buff_queue_info->stream_id);
+ break;
+ }
+ case VIDIOC_MSM_VPE_GET_EVENTPAYLOAD: {
+ struct msm_device_queue *queue = &vpe_dev->eventData_q;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_vpe_frame_info_t *process_frame;
+
+ VPE_DBG("VIDIOC_MSM_VPE_GET_EVENTPAYLOAD\n");
+ event_qcmd = msm_dequeue(queue, list_eventdata);
+ if (!event_qcmd) {
+ pr_err("%s: %d event_qcmd is NULL\n",
+ __func__, __LINE__);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+ process_frame = event_qcmd->command;
+ VPE_DBG("fid %d\n", process_frame->frame_id);
+ if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
+ process_frame,
+ sizeof(struct msm_vpe_frame_info_t))) {
+ mutex_unlock(&vpe_dev->mutex);
+ kfree(process_frame);
+ kfree(event_qcmd);
+ return -EINVAL;
+ }
+
+ kfree(process_frame);
+ kfree(event_qcmd);
+ break;
+ }
+ }
+ mutex_unlock(&vpe_dev->mutex);
+ return rc;
+}
+
+static int msm_vpe_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_subscribe(fh, sub, MAX_VPE_V4l2_EVENTS, NULL);
+}
+
+static int msm_vpe_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+static struct v4l2_subdev_core_ops msm_vpe_subdev_core_ops = {
+ .ioctl = msm_vpe_subdev_ioctl,
+ .subscribe_event = msm_vpe_subscribe_event,
+ .unsubscribe_event = msm_vpe_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_ops msm_vpe_subdev_ops = {
+ .core = &msm_vpe_subdev_core_ops,
+};
+
+static struct v4l2_file_operations msm_vpe_v4l2_subdev_fops;
+
+static long msm_vpe_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+
+ switch (cmd) {
+ case VIDIOC_DQEVENT:
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+
+ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
+
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+ case VIDIOC_MSM_VPE_GET_INST_INFO: {
+ uint32_t i;
+ struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+ struct msm_vpe_frame_info_t inst_info;
+
+ memset(&inst_info, 0, sizeof(struct msm_vpe_frame_info_t));
+ for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
+ if (vpe_dev->vpe_subscribe_list[i].vfh == vfh) {
+ inst_info.inst_id = i;
+ break;
+ }
+ }
+ if (copy_to_user(
+ (void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
+ sizeof(struct msm_vpe_frame_info_t))) {
+ return -EINVAL;
+ }
+ break;
+ }
+ default:
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ }
+
+ return 0;
+}
+
+static long msm_vpe_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_vpe_subdev_do_ioctl);
+}
+
+static int vpe_register_domain(void)
+{
+ struct msm_iova_partition vpe_iommu_partition = {
+ /* TODO: verify that these are correct? */
+ .start = SZ_128K,
+ .size = SZ_2G - SZ_128K,
+ };
+ struct msm_iova_layout vpe_iommu_layout = {
+ .partitions = &vpe_iommu_partition,
+ .npartitions = 1,
+ .client_name = "camera_vpe",
+ .domain_flags = 0,
+ };
+
+ return msm_register_domain(&vpe_iommu_layout);
+}
+
+static int vpe_probe(struct platform_device *pdev)
+{
+ struct vpe_device *vpe_dev;
+ int rc = 0;
+
+ vpe_dev = kzalloc(sizeof(struct vpe_device), GFP_KERNEL);
+ if (!vpe_dev)
+ return -ENOMEM;
+
+ vpe_dev->vpe_clk = kzalloc(sizeof(struct clk *) *
+ ARRAY_SIZE(vpe_clk_info), GFP_KERNEL);
+ if (!vpe_dev->vpe_clk) {
+ rc = -ENOMEM;
+ goto err_free_vpe_dev;
+ }
+
+ v4l2_subdev_init(&vpe_dev->msm_sd.sd, &msm_vpe_subdev_ops);
+ vpe_dev->msm_sd.sd.internal_ops = &msm_vpe_internal_ops;
+ snprintf(vpe_dev->msm_sd.sd.name, ARRAY_SIZE(vpe_dev->msm_sd.sd.name),
+ "vpe");
+ vpe_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ vpe_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+ v4l2_set_subdevdata(&vpe_dev->msm_sd.sd, vpe_dev);
+ platform_set_drvdata(pdev, &vpe_dev->msm_sd.sd);
+ mutex_init(&vpe_dev->mutex);
+ spin_lock_init(&vpe_dev->tasklet_lock);
+
+ vpe_dev->pdev = pdev;
+
+ vpe_dev->mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "vpe");
+ if (!vpe_dev->mem) {
+ pr_err("no mem resource?\n");
+ rc = -ENODEV;
+ goto err_free_vpe_clk;
+ }
+
+ vpe_dev->irq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "vpe");
+ if (!vpe_dev->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto err_release_mem;
+ }
+
+ vpe_dev->domain_num = vpe_register_domain();
+ if (vpe_dev->domain_num < 0) {
+ pr_err("%s: could not register domain\n", __func__);
+ rc = -ENODEV;
+ goto err_release_mem;
+ }
+
+ vpe_dev->domain =
+ msm_get_iommu_domain(vpe_dev->domain_num);
+ if (!vpe_dev->domain) {
+ pr_err("%s: cannot find domain\n", __func__);
+ rc = -ENODEV;
+ goto err_release_mem;
+ }
+
+ vpe_dev->iommu_ctx_src = msm_iommu_get_ctx("vpe_src");
+ vpe_dev->iommu_ctx_dst = msm_iommu_get_ctx("vpe_dst");
+ if (!vpe_dev->iommu_ctx_src || !vpe_dev->iommu_ctx_dst) {
+ pr_err("%s: cannot get iommu_ctx\n", __func__);
+ rc = -ENODEV;
+ goto err_release_mem;
+ }
+
+ media_entity_init(&vpe_dev->msm_sd.sd.entity, 0, NULL, 0);
+ vpe_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ vpe_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_VPE;
+ vpe_dev->msm_sd.sd.entity.name = pdev->name;
+ msm_sd_register(&vpe_dev->msm_sd);
+ msm_cam_copy_v4l2_subdev_fops(&msm_vpe_v4l2_subdev_fops);
+ vpe_dev->msm_sd.sd.devnode->fops = &msm_vpe_v4l2_subdev_fops;
+ vpe_dev->msm_sd.sd.entity.revision = vpe_dev->msm_sd.sd.devnode->num;
+ vpe_dev->state = VPE_STATE_BOOT;
+ rc = vpe_init_hardware(vpe_dev);
+ if (rc < 0) {
+ pr_err("%s: Couldn't init vpe hardware\n", __func__);
+ goto err_unregister_sd;
+ }
+ vpe_reset(vpe_dev);
+ vpe_release_hardware(vpe_dev);
+ vpe_dev->state = VPE_STATE_OFF;
+
+ rc = iommu_attach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
+ if (rc < 0) {
+ pr_err("Couldn't attach to vpe_src context bank\n");
+ rc = -ENODEV;
+ goto err_unregister_sd;
+ }
+ rc = iommu_attach_device(vpe_dev->domain, vpe_dev->iommu_ctx_dst);
+ if (rc < 0) {
+ pr_err("Couldn't attach to vpe_dst context bank\n");
+ rc = -ENODEV;
+ goto err_detach_src;
+ }
+
+ vpe_dev->state = VPE_STATE_OFF;
+
+ msm_queue_init(&vpe_dev->eventData_q, "vpe-eventdata");
+ msm_queue_init(&vpe_dev->processing_q, "vpe-frame");
+ INIT_LIST_HEAD(&vpe_dev->tasklet_q);
+ tasklet_init(&vpe_dev->vpe_tasklet, msm_vpe_do_tasklet,
+ (unsigned long)vpe_dev);
+ vpe_dev->vpe_open_cnt = 0;
+
+ return rc;
+
+err_detach_src:
+ iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
+err_unregister_sd:
+ msm_sd_unregister(&vpe_dev->msm_sd);
+err_release_mem:
+ release_mem_region(vpe_dev->mem->start, resource_size(vpe_dev->mem));
+err_free_vpe_clk:
+ kfree(vpe_dev->vpe_clk);
+err_free_vpe_dev:
+ kfree(vpe_dev);
+ return rc;
+}
+
+static int vpe_device_remove(struct platform_device *dev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(dev);
+ struct vpe_device *vpe_dev;
+
+ if (!sd) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return 0;
+ }
+
+ vpe_dev = (struct vpe_device *)v4l2_get_subdevdata(sd);
+ if (!vpe_dev) {
+ pr_err("%s: vpe device is NULL\n", __func__);
+ return 0;
+ }
+
+ iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_dst);
+ iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
+ msm_sd_unregister(&vpe_dev->msm_sd);
+ release_mem_region(vpe_dev->mem->start, resource_size(vpe_dev->mem));
+ mutex_destroy(&vpe_dev->mutex);
+ kfree(vpe_dev);
+ return 0;
+}
+
+static struct platform_driver vpe_driver = {
+ .probe = vpe_probe,
+ .remove = vpe_device_remove,
+ .driver = {
+ .name = MSM_VPE_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_vpe_init_module(void)
+{
+ return platform_driver_register(&vpe_driver);
+}
+
+static void __exit msm_vpe_exit_module(void)
+{
+ platform_driver_unregister(&vpe_driver);
+}
+
+module_init(msm_vpe_init_module);
+module_exit(msm_vpe_exit_module);
+MODULE_DESCRIPTION("MSM VPE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/pproc/vpe/msm_vpe.h b/drivers/media/platform/msm/ais/pproc/vpe/msm_vpe.h
new file mode 100644
index 000000000000..7ae9bc1be696
--- /dev/null
+++ b/drivers/media/platform/msm/ais/pproc/vpe/msm_vpe.h
@@ -0,0 +1,258 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VPE_H__
+#define __MSM_VPE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-subdev.h>
+#include "msm_sd.h"
+
+/*********** start of register offset *********************/
+#define VPE_INTR_ENABLE_OFFSET 0x0020
+#define VPE_INTR_STATUS_OFFSET 0x0024
+#define VPE_INTR_CLEAR_OFFSET 0x0028
+#define VPE_DL0_START_OFFSET 0x0030
+#define VPE_HW_VERSION_OFFSET 0x0070
+#define VPE_SW_RESET_OFFSET 0x0074
+#define VPE_AXI_RD_ARB_CONFIG_OFFSET 0x0078
+#define VPE_SEL_CLK_OR_HCLK_TEST_BUS_OFFSET 0x007C
+#define VPE_CGC_EN_OFFSET 0x0100
+#define VPE_CMD_STATUS_OFFSET 0x10008
+#define VPE_PROFILE_EN_OFFSET 0x10010
+#define VPE_PROFILE_COUNT_OFFSET 0x10014
+#define VPE_CMD_MODE_OFFSET 0x10060
+#define VPE_SRC_SIZE_OFFSET 0x10108
+#define VPE_SRCP0_ADDR_OFFSET 0x1010C
+#define VPE_SRCP1_ADDR_OFFSET 0x10110
+#define VPE_SRC_YSTRIDE1_OFFSET 0x1011C
+#define VPE_SRC_FORMAT_OFFSET 0x10124
+#define VPE_SRC_UNPACK_PATTERN1_OFFSET 0x10128
+#define VPE_OP_MODE_OFFSET 0x10138
+#define VPE_SCALE_PHASEX_INIT_OFFSET 0x1013C
+#define VPE_SCALE_PHASEY_INIT_OFFSET 0x10140
+#define VPE_SCALE_PHASEX_STEP_OFFSET 0x10144
+#define VPE_SCALE_PHASEY_STEP_OFFSET 0x10148
+#define VPE_OUT_FORMAT_OFFSET 0x10150
+#define VPE_OUT_PACK_PATTERN1_OFFSET 0x10154
+#define VPE_OUT_SIZE_OFFSET 0x10164
+#define VPE_OUTP0_ADDR_OFFSET 0x10168
+#define VPE_OUTP1_ADDR_OFFSET 0x1016C
+#define VPE_OUT_YSTRIDE1_OFFSET 0x10178
+#define VPE_OUT_XY_OFFSET 0x1019C
+#define VPE_SRC_XY_OFFSET 0x10200
+#define VPE_SRC_IMAGE_SIZE_OFFSET 0x10208
+#define VPE_SCALE_CONFIG_OFFSET 0x10230
+#define VPE_DEINT_STATUS_OFFSET 0x30000
+#define VPE_DEINT_DECISION_OFFSET 0x30004
+#define VPE_DEINT_COEFF0_OFFSET 0x30010
+#define VPE_SCALE_STATUS_OFFSET 0x50000
+#define VPE_SCALE_SVI_PARAM_OFFSET 0x50010
+#define VPE_SCALE_SHARPEN_CFG_OFFSET 0x50020
+#define VPE_SCALE_COEFF_LSP_0_OFFSET 0x50400
+#define VPE_SCALE_COEFF_MSP_0_OFFSET 0x50404
+
+#define VPE_AXI_ARB_1_OFFSET 0x00408
+#define VPE_AXI_ARB_2_OFFSET 0x0040C
+
+#define VPE_SCALE_COEFF_LSBn(n) (0x50400 + 8 * (n))
+#define VPE_SCALE_COEFF_MSBn(n) (0x50404 + 8 * (n))
+#define VPE_SCALE_COEFF_NUM 32
+#define VPE_SCALE_COEFF_MAX_N 127
+
+/*********** end of register offset ********************/
+
+
+#define VPE_HARDWARE_VERSION 0x00080308
+#define VPE_SW_RESET_VALUE 0x00000010 /* bit 4 for PPP */
+#define VPE_AXI_RD_ARB_CONFIG_VALUE 0x124924
+#define VPE_CMD_MODE_VALUE 0x1
+#define VPE_DEFAULT_OP_MODE_VALUE 0x40FC0004
+#define VPE_CGC_ENABLE_VALUE 0xffff
+#define VPE_DEFAULT_SCALE_CONFIG 0x3c
+
+#define VPE_NORMAL_MODE_CLOCK_RATE 150000000
+#define VPE_TURBO_MODE_CLOCK_RATE 200000000
+#define VPE_SUBDEV_MAX_EVENTS 30
+
+/**************************************************/
+/*********** End of command id ********************/
+/**************************************************/
+
+#define SCALER_PHASE_BITS 29
+#define HAL_MDP_PHASE_STEP_2P50 0x50000000
+#define HAL_MDP_PHASE_STEP_1P66 0x35555555
+#define HAL_MDP_PHASE_STEP_1P25 0x28000000
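+/*
+ * The phase steps are u3.29 fixed point: value / 2^29 is the scale ratio.
+ * For example, 0x50000000 / 2^29 = 2.5, 0x35555555 / 2^29 ~= 1.66 and
+ * 0x28000000 / 2^29 = 1.25, matching the threshold names above.
+ */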
+
+
+#define MAX_ACTIVE_VPE_INSTANCE 8
+#define MAX_VPE_PROCESSING_FRAME 2
+#define MAX_VPE_V4l2_EVENTS 30
+
+#define MSM_VPE_TASKLETQ_SIZE 16
+
+/**
+ * The format of the msm_vpe_transaction_setup_cfg is as follows:
+ *
+ * - vpe_update_scale_coef (65*4 uint32_t's)
+ * - Each table is 65 uint32_t's long
+ * - 1st uint32_t in each table indicates offset
+ * - Following 64 uint32_t's are the data
+ *
+ * - vpe_input_plane_config (6 uint32_t's)
+ * - VPE_SRC_FORMAT_OFFSET
+ * - VPE_SRC_UNPACK_PATTERN1_OFFSET
+ * - VPE_SRC_IMAGE_SIZE_OFFSET
+ * - VPE_SRC_YSTRIDE1_OFFSET
+ * - VPE_SRC_SIZE_OFFSET
+ * - VPE_SRC_XY_OFFSET
+ *
+ * - vpe_output_plane_config (5 uint32_t's)
+ * - VPE_OUT_FORMAT_OFFSET
+ * - VPE_OUT_PACK_PATTERN1_OFFSET
+ * - VPE_OUT_YSTRIDE1_OFFSET
+ * - VPE_OUT_SIZE_OFFSET
+ * - VPE_OUT_XY_OFFSET
+ *
+ * - vpe_operation_config (1 uint32_t)
+ * - VPE_OP_MODE_OFFSET
+ *
+ */
+
+#define VPE_SCALER_CONFIG_LEN 260
+#define VPE_INPUT_PLANE_CFG_LEN 24
+#define VPE_OUTPUT_PLANE_CFG_LEN 20
+#define VPE_OPERATION_MODE_CFG_LEN 4
+#define VPE_NUM_SCALER_TABLES 4
+
+#define VPE_TRANSACTION_SETUP_CONFIG_LEN ( \
+ (VPE_SCALER_CONFIG_LEN * VPE_NUM_SCALER_TABLES) \
+ + VPE_INPUT_PLANE_CFG_LEN \
+ + VPE_OUTPUT_PLANE_CFG_LEN \
+ + VPE_OPERATION_MODE_CFG_LEN)
+/* VPE_TRANSACTION_SETUP_CONFIG_LEN = 1088 */
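+/*
+ * i.e. (4 scaler tables * 65 uint32_t's + 6 + 5 + 1 uint32_t's) * 4 bytes
+ * = 272 * 4 = 1088 bytes, consistent with the per-block byte lengths above.
+ */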
+
+struct msm_vpe_transaction_setup_cfg {
+ uint8_t scaler_cfg[VPE_TRANSACTION_SETUP_CONFIG_LEN];
+};
+
+struct vpe_subscribe_info {
+ struct v4l2_fh *vfh;
+ uint32_t active;
+};
+
+enum vpe_state {
+ VPE_STATE_BOOT,
+ VPE_STATE_IDLE,
+ VPE_STATE_ACTIVE,
+ VPE_STATE_OFF,
+};
+
+struct msm_queue_cmd {
+ struct list_head list_config;
+ struct list_head list_control;
+ struct list_head list_frame;
+ struct list_head list_pict;
+ struct list_head list_vpe_frame;
+ struct list_head list_eventdata;
+ void *command;
+ atomic_t on_heap;
+ struct timespec ts;
+ uint32_t error_code;
+ uint32_t trans_code;
+};
+
+struct msm_device_queue {
+ struct list_head list;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ int max;
+ int len;
+ const char *name;
+};
+
+struct msm_vpe_tasklet_queue_cmd {
+ struct list_head list;
+ uint32_t irq_status;
+ uint8_t cmd_used;
+};
+
+struct msm_vpe_buffer_map_info_t {
+ unsigned long len;
+ dma_addr_t phy_addr;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *table;
+ struct msm_vpe_buffer_info_t buff_info;
+};
+
+struct msm_vpe_buffer_map_list_t {
+ struct msm_vpe_buffer_map_info_t map_info;
+ struct list_head entry;
+};
+
+struct msm_vpe_buff_queue_info_t {
+ uint32_t used;
+ uint16_t session_id;
+ uint16_t stream_id;
+ struct list_head vb2_buff_head;
+ struct list_head native_buff_head;
+};
+
+struct vpe_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct v4l2_subdev subdev;
+ struct resource *mem;
+ struct resource *irq;
+ void __iomem *base;
+ struct clk **vpe_clk;
+ struct regulator *fs_vpe;
+ struct mutex mutex;
+ enum vpe_state state;
+
+ int domain_num;
+ struct iommu_domain *domain;
+ struct device *iommu_ctx_src;
+ struct device *iommu_ctx_dst;
+ struct ion_client *client;
+ struct kref refcount;
+
+ /* Reusing proven tasklet from msm isp */
+ atomic_t irq_cnt;
+ uint8_t taskletq_idx;
+ spinlock_t tasklet_lock;
+ struct list_head tasklet_q;
+ struct tasklet_struct vpe_tasklet;
+ struct msm_vpe_tasklet_queue_cmd
+ tasklet_queue_cmd[MSM_VPE_TASKLETQ_SIZE];
+
+ struct vpe_subscribe_info vpe_subscribe_list[MAX_ACTIVE_VPE_INSTANCE];
+ uint32_t vpe_open_cnt;
+
+ struct msm_device_queue eventData_q; /* V4L2 Event Payload Queue */
+
+ /*
+ * Processing Queue: store frame info for frames sent to
+ * microcontroller
+ */
+ struct msm_device_queue processing_q;
+
+ struct msm_vpe_buff_queue_info_t *buff_queue;
+ uint32_t num_buffq;
+ struct v4l2_subdev *buf_mgr_subdev;
+};
+
+#endif /* __MSM_VPE_H__ */
diff --git a/drivers/media/platform/msm/ais/sensor/Makefile b/drivers/media/platform/msm/ais/sensor/Makefile
new file mode 100644
index 000000000000..6541a0816f8e
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_vb2
+ccflags-y += -Idrivers/media/platform/msm/ais/camera
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+obj-$(CONFIG_MSM_AIS) += cci/ io/ csiphy/ csid/ actuator/ eeprom/ ois/ flash/ ir_led/ ir_cut/
+obj-$(CONFIG_MSM_AIS_CAMERA_SENSOR) += msm_sensor_init.o msm_sensor_driver.o msm_sensor.o
diff --git a/drivers/media/platform/msm/ais/sensor/actuator/Makefile b/drivers/media/platform/msm/ais/sensor/actuator/Makefile
new file mode 100644
index 000000000000..bc2d5ccf0920
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/actuator/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+obj-$(CONFIG_MSM_AIS) += msm_actuator.o
diff --git a/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c
new file mode 100644
index 000000000000..8df56fe526fe
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c
@@ -0,0 +1,2120 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include "msm_sd.h"
+#include "msm_actuator.h"
+#include "msm_cci.h"
+
+DEFINE_MSM_MUTEX(msm_actuator_mutex);
+
+#undef CDBG
+#ifdef MSM_ACTUATOR_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define PARK_LENS_LONG_STEP 7
+#define PARK_LENS_MID_STEP 5
+#define PARK_LENS_SMALL_STEP 3
+#define MAX_QVALUE 4096
+
+static struct v4l2_file_operations msm_actuator_v4l2_subdev_fops;
+static int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl);
+static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl);
+
+static struct msm_actuator msm_vcm_actuator_table;
+static struct msm_actuator msm_piezo_actuator_table;
+static struct msm_actuator msm_hvcm_actuator_table;
+static struct msm_actuator msm_bivcm_actuator_table;
+
+static struct i2c_driver msm_actuator_i2c_driver;
+static struct msm_actuator *actuators[] = {
+ &msm_vcm_actuator_table,
+ &msm_piezo_actuator_table,
+ &msm_hvcm_actuator_table,
+ &msm_bivcm_actuator_table,
+};
+
+static int32_t msm_actuator_piezo_set_default_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_move_params_t *move_params)
+{
+ int32_t rc = 0;
+ struct msm_camera_i2c_reg_setting reg_setting;
+
+ CDBG("Enter\n");
+
+ if (a_ctrl->curr_step_pos != 0) {
+ a_ctrl->i2c_tbl_index = 0;
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ a_ctrl->initial_code, 0, 0);
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ a_ctrl->initial_code, 0, 0);
+ reg_setting.reg_setting = a_ctrl->i2c_reg_tbl;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+ reg_setting.size = a_ctrl->i2c_tbl_index;
+ rc = a_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("%s: i2c write error:%d\n",
+ __func__, rc);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+ a_ctrl->curr_step_pos = 0;
+ }
+ CDBG("Exit\n");
+ return rc;
+}
+
+static void msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl,
+ int16_t next_lens_position, uint32_t hw_params, uint16_t delay)
+{
+ struct msm_actuator_reg_params_t *write_arr = NULL;
+ uint32_t hw_dword = hw_params;
+ uint16_t i2c_byte1 = 0, i2c_byte2 = 0;
+ uint16_t value = 0;
+ uint32_t size = 0, i = 0;
+ struct msm_camera_i2c_reg_array *i2c_tbl = NULL;
+
+ CDBG("Enter\n");
+
+ if (a_ctrl == NULL) {
+ pr_err("failed: actuator ctrl is NULL\n");
+ return;
+ }
+
+ size = a_ctrl->reg_tbl_size;
+ write_arr = a_ctrl->reg_tbl;
+ i2c_tbl = a_ctrl->i2c_reg_tbl;
+
+ for (i = 0; i < size; i++) {
+ if (write_arr[i].reg_write_type == MSM_ACTUATOR_WRITE_DAC) {
+ value = (next_lens_position <<
+ write_arr[i].data_shift) |
+ ((hw_dword & write_arr[i].hw_mask) >>
+ write_arr[i].hw_shift);
+
+ if (write_arr[i].reg_addr != 0xFFFF) {
+ i2c_byte1 = write_arr[i].reg_addr;
+ i2c_byte2 = value;
+ if (size != (i+1)) {
+ i2c_byte2 = value & 0xFF;
+ CDBG("byte1:0x%x, byte2:0x%x\n",
+ i2c_byte1, i2c_byte2);
+ if (a_ctrl->i2c_tbl_index >
+ a_ctrl->total_steps) {
+ pr_err("failed: i2c table index out of bounds\n");
+ break;
+ }
+ i2c_tbl[a_ctrl->i2c_tbl_index].
+ reg_addr = i2c_byte1;
+ i2c_tbl[a_ctrl->i2c_tbl_index].
+ reg_data = i2c_byte2;
+ i2c_tbl[a_ctrl->i2c_tbl_index].
+ delay = 0;
+ a_ctrl->i2c_tbl_index++;
+ i++;
+ i2c_byte1 = write_arr[i].reg_addr;
+ i2c_byte2 = (value & 0xFF00) >> 8;
+ }
+ } else {
+ i2c_byte1 = (value & 0xFF00) >> 8;
+ i2c_byte2 = value & 0xFF;
+ }
+ } else {
+ i2c_byte1 = write_arr[i].reg_addr;
+ i2c_byte2 = (hw_dword & write_arr[i].hw_mask) >>
+ write_arr[i].hw_shift;
+ }
+ if (a_ctrl->i2c_tbl_index > a_ctrl->total_steps) {
+ pr_err("failed: i2c table index out of bounds\n");
+ break;
+ }
+ CDBG("i2c_byte1:0x%x, i2c_byte2:0x%x\n", i2c_byte1, i2c_byte2);
+ i2c_tbl[a_ctrl->i2c_tbl_index].reg_addr = i2c_byte1;
+ i2c_tbl[a_ctrl->i2c_tbl_index].reg_data = i2c_byte2;
+ i2c_tbl[a_ctrl->i2c_tbl_index].delay = delay;
+ a_ctrl->i2c_tbl_index++;
+ }
+ CDBG("Exit\n");
+}
+
+static int msm_actuator_bivcm_handle_i2c_ops(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ int16_t next_lens_position, uint32_t hw_params, uint16_t delay)
+{
+ struct msm_actuator_reg_params_t *write_arr = a_ctrl->reg_tbl;
+ uint32_t hw_dword = hw_params;
+ uint16_t i2c_byte1 = 0, i2c_byte2 = 0;
+ uint16_t value = 0, reg_data = 0;
+ uint32_t size = a_ctrl->reg_tbl_size, i = 0;
+ int32_t rc = 0;
+ struct msm_camera_i2c_reg_array i2c_tbl;
+ struct msm_camera_i2c_reg_setting reg_setting;
+ enum msm_camera_i2c_reg_addr_type save_addr_type =
+ a_ctrl->i2c_client.addr_type;
+
+ for (i = 0; i < size; i++) {
+ reg_setting.size = 1;
+ switch (write_arr[i].reg_write_type) {
+ case MSM_ACTUATOR_WRITE_DAC:
+ value = (next_lens_position <<
+ write_arr[i].data_shift) |
+ ((hw_dword & write_arr[i].hw_mask) >>
+ write_arr[i].hw_shift);
+ if (write_arr[i].reg_addr != 0xFFFF) {
+ i2c_byte1 = write_arr[i].reg_addr;
+ i2c_byte2 = value;
+ } else {
+ i2c_byte1 = (value & 0xFF00) >> 8;
+ i2c_byte2 = value & 0xFF;
+ }
+ i2c_tbl.reg_addr = i2c_byte1;
+ i2c_tbl.reg_data = i2c_byte2;
+ i2c_tbl.delay = delay;
+ a_ctrl->i2c_tbl_index++;
+
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+ rc = a_ctrl->i2c_client.
+ i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ break;
+ case MSM_ACTUATOR_WRITE:
+ i2c_tbl.reg_data = write_arr[i].reg_data;
+ i2c_tbl.reg_addr = write_arr[i].reg_addr;
+ i2c_tbl.delay = write_arr[i].delay;
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = write_arr[i].data_type;
+ switch (write_arr[i].addr_type) {
+ case MSM_ACTUATOR_BYTE_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_BYTE_ADDR;
+ break;
+ case MSM_ACTUATOR_WORD_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_WORD_ADDR;
+ break;
+ default:
+ pr_err("Unsupported addr type: %d\n",
+ write_arr[i].addr_type);
+ break;
+ }
+
+ rc = a_ctrl->i2c_client.
+ i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ break;
+ case MSM_ACTUATOR_WRITE_DIR_REG:
+ i2c_tbl.reg_data = hw_dword & 0xFFFF;
+ i2c_tbl.reg_addr = write_arr[i].reg_addr;
+ i2c_tbl.delay = write_arr[i].delay;
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = write_arr[i].data_type;
+ switch (write_arr[i].addr_type) {
+ case MSM_ACTUATOR_BYTE_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_BYTE_ADDR;
+ break;
+ case MSM_ACTUATOR_WORD_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_WORD_ADDR;
+ break;
+ default:
+ pr_err("Unsupported addr type: %d\n",
+ write_arr[i].addr_type);
+ break;
+ }
+
+ rc = a_ctrl->i2c_client.
+ i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ break;
+ case MSM_ACTUATOR_POLL:
+ switch (write_arr[i].addr_type) {
+ case MSM_ACTUATOR_BYTE_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_BYTE_ADDR;
+ break;
+ case MSM_ACTUATOR_WORD_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_WORD_ADDR;
+ break;
+ default:
+ pr_err("Unsupported addr type: %d\n",
+ write_arr[i].addr_type);
+ break;
+ }
+
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
+ &a_ctrl->i2c_client,
+ write_arr[i].reg_addr,
+ write_arr[i].reg_data,
+ write_arr[i].data_type,
+ write_arr[i].delay);
+ if (rc < 0) {
+ pr_err("i2c poll error:%d\n", rc);
+ return rc;
+ }
+ break;
+ case MSM_ACTUATOR_READ_WRITE:
+ i2c_tbl.reg_addr = write_arr[i].reg_addr;
+ i2c_tbl.delay = write_arr[i].delay;
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = write_arr[i].data_type;
+
+ switch (write_arr[i].addr_type) {
+ case MSM_ACTUATOR_BYTE_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_BYTE_ADDR;
+ break;
+ case MSM_ACTUATOR_WORD_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_WORD_ADDR;
+ break;
+ default:
+ pr_err("Unsupported addr type: %d\n",
+ write_arr[i].addr_type);
+ break;
+ }
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_read(
+ &a_ctrl->i2c_client,
+ write_arr[i].reg_addr,
+ &reg_data,
+ write_arr[i].data_type);
+ if (rc < 0) {
+ pr_err("i2c read error:%d\n", rc);
+ return rc;
+ }
+
+ i2c_tbl.reg_addr = write_arr[i].reg_data;
+ i2c_tbl.reg_data = reg_data;
+ i2c_tbl.delay = write_arr[i].delay;
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = write_arr[i].data_type;
+
+ rc = a_ctrl->i2c_client.
+ i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ break;
+ case MSM_ACTUATOR_WRITE_HW_DAMP:
+ i2c_tbl.reg_addr = write_arr[i].reg_addr;
+ i2c_tbl.reg_data = (hw_dword & write_arr[i].hw_mask) >>
+ write_arr[i].hw_shift;
+ i2c_tbl.delay = 0;
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+
+ rc = a_ctrl->i2c_client.
+ i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ break;
+ default:
+ pr_err("%s:%d Invalid selection\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ a_ctrl->i2c_client.addr_type = save_addr_type;
+ }
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_init_focus(struct msm_actuator_ctrl_t *a_ctrl,
+ uint16_t size, struct reg_settings_t *settings)
+{
+ int32_t rc = -EFAULT;
+ int32_t i = 0;
+ enum msm_camera_i2c_reg_addr_type save_addr_type;
+
+ CDBG("Enter\n");
+
+ save_addr_type = a_ctrl->i2c_client.addr_type;
+ for (i = 0; i < size; i++) {
+
+ switch (settings[i].addr_type) {
+ case MSM_ACTUATOR_BYTE_ADDR:
+ a_ctrl->i2c_client.addr_type = MSM_CAMERA_I2C_BYTE_ADDR;
+ break;
+ case MSM_ACTUATOR_WORD_ADDR:
+ a_ctrl->i2c_client.addr_type = MSM_CAMERA_I2C_WORD_ADDR;
+ break;
+ default:
+ pr_err("Unsupported addr type: %d\n",
+ settings[i].addr_type);
+ break;
+ }
+
+ switch (settings[i].i2c_operation) {
+ case MSM_ACT_WRITE:
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &a_ctrl->i2c_client,
+ settings[i].reg_addr,
+ settings[i].reg_data,
+ settings[i].data_type);
+ if (settings[i].delay > 20)
+ msleep(settings[i].delay);
+ else if (settings[i].delay != 0)
+ usleep_range(settings[i].delay * 1000,
+ (settings[i].delay * 1000) + 1000);
+ break;
+ case MSM_ACT_POLL:
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
+ &a_ctrl->i2c_client,
+ settings[i].reg_addr,
+ settings[i].reg_data,
+ settings[i].data_type,
+ settings[i].delay);
+ break;
+ default:
+ pr_err("Unsupported i2c_operation: %d\n",
+ settings[i].i2c_operation);
+ break;
+ }
+
+ if (rc < 0) {
+ pr_err("%s:%d fail addr = 0X%X, data = 0X%X, dt = %d",
+ __func__, __LINE__, settings[i].reg_addr,
+ settings[i].reg_data, settings[i].data_type);
+ break;
+ }
+ }
+
+ a_ctrl->curr_step_pos = 0;
+ /*
+ * Recover register addr_type after the init
+ * settings are written.
+ */
+ a_ctrl->i2c_client.addr_type = save_addr_type;
+ CDBG("Exit\n");
+ return rc;
+}
+
+static void msm_actuator_write_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ uint16_t curr_lens_pos,
+ struct damping_params_t *damping_params,
+ int8_t sign_direction,
+ int16_t code_boundary)
+{
+ int16_t next_lens_pos = 0;
+ uint16_t damping_code_step = 0;
+ uint16_t wait_time = 0;
+
+ CDBG("Enter\n");
+
+ damping_code_step = damping_params->damping_step;
+ wait_time = damping_params->damping_delay;
+
+ /* Write code based on damping_code_step in a loop */
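+ /*
+ * i.e. step from curr_lens_pos towards code_boundary in
+ * increments of damping_code_step (in the direction given by
+ * sign_direction); each queued write carries damping_delay.
+ */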
+ for (next_lens_pos =
+ curr_lens_pos + (sign_direction * damping_code_step);
+ (sign_direction * next_lens_pos) <=
+ (sign_direction * code_boundary);
+ next_lens_pos =
+ (next_lens_pos +
+ (sign_direction * damping_code_step))) {
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ next_lens_pos, damping_params->hw_params, wait_time);
+ curr_lens_pos = next_lens_pos;
+ }
+
+ if (curr_lens_pos != code_boundary) {
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ code_boundary, damping_params->hw_params, wait_time);
+ }
+ CDBG("Exit\n");
+}
+
+static int msm_actuator_bivcm_write_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ uint16_t curr_lens_pos,
+ struct damping_params_t *damping_params,
+ int8_t sign_direction,
+ int16_t code_boundary)
+{
+ int16_t next_lens_pos = 0;
+ uint16_t damping_code_step = 0;
+ uint16_t wait_time = 0;
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+
+ damping_code_step = damping_params->damping_step;
+ wait_time = damping_params->damping_delay;
+
+ /* Write code based on damping_code_step in a loop */
+ for (next_lens_pos =
+ curr_lens_pos + (sign_direction * damping_code_step);
+ (sign_direction * next_lens_pos) <=
+ (sign_direction * code_boundary);
+ next_lens_pos =
+ (next_lens_pos +
+ (sign_direction * damping_code_step))) {
+ rc = msm_actuator_bivcm_handle_i2c_ops(a_ctrl,
+ next_lens_pos, damping_params->hw_params, wait_time);
+ if (rc < 0) {
+ pr_err("%s:%d msm_actuator_bivcm_handle_i2c_ops failed\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ curr_lens_pos = next_lens_pos;
+ }
+
+ if (curr_lens_pos != code_boundary) {
+ rc = msm_actuator_bivcm_handle_i2c_ops(a_ctrl,
+ code_boundary, damping_params->hw_params, wait_time);
+ if (rc < 0) {
+ pr_err("%s:%d msm_actuator_bivcm_handle_i2c_ops failed\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ }
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_piezo_move_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_move_params_t *move_params)
+{
+ int32_t dest_step_position = move_params->dest_step_pos;
+ struct damping_params_t ringing_params_kernel;
+ int32_t rc = 0;
+ int32_t num_steps = move_params->num_steps;
+ struct msm_camera_i2c_reg_setting reg_setting;
+
+ CDBG("Enter\n");
+
+ if (copy_from_user(&ringing_params_kernel,
+ &(move_params->ringing_params[0]),
+ sizeof(struct damping_params_t))) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ if (num_steps <= 0 || num_steps > MAX_NUMBER_OF_STEPS) {
+ pr_err("num_steps out of range = %d\n",
+ num_steps);
+ return -EFAULT;
+ }
+
+ if (dest_step_position > a_ctrl->total_steps) {
+ pr_err("Step pos greater than total steps = %d\n",
+ dest_step_position);
+ return -EFAULT;
+ }
+
+ a_ctrl->i2c_tbl_index = 0;
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ (num_steps *
+ a_ctrl->region_params[0].code_per_step),
+ ringing_params_kernel.hw_params, 0);
+
+ reg_setting.reg_setting = a_ctrl->i2c_reg_tbl;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+ reg_setting.size = a_ctrl->i2c_tbl_index;
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+ a_ctrl->curr_step_pos = dest_step_position;
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_move_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_move_params_t *move_params)
+{
+ int32_t rc = 0;
+ struct damping_params_t *ringing_params_kernel = NULL;
+ int8_t sign_dir = move_params->sign_dir;
+ uint16_t step_boundary = 0;
+ uint16_t target_step_pos = 0;
+ uint16_t target_lens_pos = 0;
+ int16_t dest_step_pos = move_params->dest_step_pos;
+ uint16_t curr_lens_pos = 0;
+ int dir = move_params->dir;
+ int32_t num_steps = move_params->num_steps;
+ struct msm_camera_i2c_reg_setting reg_setting;
+
+ CDBG("called, dir %d, num_steps %d\n", dir, num_steps);
+
+ if (dest_step_pos == a_ctrl->curr_step_pos)
+ return rc;
+
+ if ((sign_dir > MSM_ACTUATOR_MOVE_SIGNED_NEAR) ||
+ (sign_dir < MSM_ACTUATOR_MOVE_SIGNED_FAR)) {
+ pr_err("Invalid sign_dir = %d\n", sign_dir);
+ return -EFAULT;
+ }
+ if ((dir > MOVE_FAR) || (dir < MOVE_NEAR)) {
+ pr_err("Invalid direction = %d\n", dir);
+ return -EFAULT;
+ }
+ if (dest_step_pos > a_ctrl->total_steps) {
+ pr_err("Step pos greater than total steps = %d\n",
+ dest_step_pos);
+ return -EFAULT;
+ }
+ if ((a_ctrl->region_size <= 0) ||
+ (a_ctrl->region_size > MAX_ACTUATOR_REGION) ||
+ (!move_params->ringing_params)) {
+ pr_err("Invalid region size = %d, ringing_params = %pK\n",
+ a_ctrl->region_size, move_params->ringing_params);
+ return -EFAULT;
+ }
+ /* Allocate memory for damping parameters of all regions */
+ ringing_params_kernel = kmalloc(
+ sizeof(struct damping_params_t)*(a_ctrl->region_size),
+ GFP_KERNEL);
+ if (!ringing_params_kernel) {
+ pr_err("kmalloc for damping parameters failed\n");
+ return -EFAULT;
+ }
+ if (copy_from_user(ringing_params_kernel,
+ &(move_params->ringing_params[0]),
+ (sizeof(struct damping_params_t))*(a_ctrl->region_size))) {
+ pr_err("copy_from_user failed\n");
+ /* Free the allocated memory for damping parameters */
+ kfree(ringing_params_kernel);
+ return -EFAULT;
+ }
+ curr_lens_pos = a_ctrl->step_position_table[a_ctrl->curr_step_pos];
+ a_ctrl->i2c_tbl_index = 0;
+ CDBG("curr_step_pos =%d dest_step_pos =%d curr_lens_pos=%d\n",
+ a_ctrl->curr_step_pos, dest_step_pos, curr_lens_pos);
+
+ while (a_ctrl->curr_step_pos != dest_step_pos) {
+ step_boundary =
+ a_ctrl->region_params[a_ctrl->curr_region_index].
+ step_bound[dir];
+ if ((dest_step_pos * sign_dir) <=
+ (step_boundary * sign_dir)) {
+
+ target_step_pos = dest_step_pos;
+ target_lens_pos =
+ a_ctrl->step_position_table[target_step_pos];
+ a_ctrl->func_tbl->actuator_write_focus(a_ctrl,
+ curr_lens_pos,
+ &ringing_params_kernel
+ [a_ctrl->curr_region_index],
+ sign_dir,
+ target_lens_pos);
+ curr_lens_pos = target_lens_pos;
+
+ } else {
+ target_step_pos = step_boundary;
+ target_lens_pos =
+ a_ctrl->step_position_table[target_step_pos];
+ a_ctrl->func_tbl->actuator_write_focus(a_ctrl,
+ curr_lens_pos,
+ &ringing_params_kernel
+ [a_ctrl->curr_region_index],
+ sign_dir,
+ target_lens_pos);
+ curr_lens_pos = target_lens_pos;
+
+ a_ctrl->curr_region_index += sign_dir;
+ }
+ a_ctrl->curr_step_pos = target_step_pos;
+ }
+ /*Free the memory allocated for damping parameters*/
+ kfree(ringing_params_kernel);
+
+ move_params->curr_lens_pos = curr_lens_pos;
+ reg_setting.reg_setting = a_ctrl->i2c_reg_tbl;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+ reg_setting.size = a_ctrl->i2c_tbl_index;
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+ CDBG("Exit\n");
+
+ return rc;
+}
+
+static int32_t msm_actuator_bivcm_move_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_move_params_t *move_params)
+{
+ int32_t rc = 0;
+ struct damping_params_t *ringing_params_kernel = NULL;
+ int8_t sign_dir = move_params->sign_dir;
+ uint16_t step_boundary = 0;
+ uint16_t target_step_pos = 0;
+ uint16_t target_lens_pos = 0;
+ int16_t dest_step_pos = move_params->dest_step_pos;
+ uint16_t curr_lens_pos = 0;
+ int dir = move_params->dir;
+ int32_t num_steps = move_params->num_steps;
+
+ if (a_ctrl->step_position_table == NULL) {
+ pr_err("Step Position Table is NULL");
+ return -EFAULT;
+ }
+
+ CDBG("called, dir %d, num_steps %d\n", dir, num_steps);
+
+ if (dest_step_pos == a_ctrl->curr_step_pos)
+ return rc;
+
+ if ((sign_dir > MSM_ACTUATOR_MOVE_SIGNED_NEAR) ||
+ (sign_dir < MSM_ACTUATOR_MOVE_SIGNED_FAR)) {
+ pr_err("Invalid sign_dir = %d\n", sign_dir);
+ return -EFAULT;
+ }
+ if ((dir > MOVE_FAR) || (dir < MOVE_NEAR)) {
+ pr_err("Invalid direction = %d\n", dir);
+ return -EFAULT;
+ }
+ if (dest_step_pos > a_ctrl->total_steps) {
+ pr_err("Step pos greater than total steps = %d\n",
+ dest_step_pos);
+ return -EFAULT;
+ }
+ if ((a_ctrl->region_size <= 0) ||
+ (a_ctrl->region_size > MAX_ACTUATOR_REGION) ||
+ (!move_params->ringing_params)) {
+ pr_err("Invalid region size = %d, ringing_params = %pK\n",
+ a_ctrl->region_size, move_params->ringing_params);
+ return -EFAULT;
+ }
+ /* Allocate memory for damping parameters of all regions */
+ ringing_params_kernel = kmalloc(
+ sizeof(struct damping_params_t)*(a_ctrl->region_size),
+ GFP_KERNEL);
+ if (!ringing_params_kernel) {
+ pr_err("kmalloc for damping parameters failed\n");
+ return -EFAULT;
+ }
+ if (copy_from_user(ringing_params_kernel,
+ &(move_params->ringing_params[0]),
+ (sizeof(struct damping_params_t))*(a_ctrl->region_size))) {
+ pr_err("copy_from_user failed\n");
+ /* Free the allocated memory for damping parameters */
+ kfree(ringing_params_kernel);
+ return -EFAULT;
+ }
+ curr_lens_pos = a_ctrl->step_position_table[a_ctrl->curr_step_pos];
+ a_ctrl->i2c_tbl_index = 0;
+ CDBG("curr_step_pos =%d dest_step_pos =%d curr_lens_pos=%d\n",
+ a_ctrl->curr_step_pos, dest_step_pos, curr_lens_pos);
+
+ while (a_ctrl->curr_step_pos != dest_step_pos) {
+ step_boundary =
+ a_ctrl->region_params[a_ctrl->curr_region_index].
+ step_bound[dir];
+ if ((dest_step_pos * sign_dir) <=
+ (step_boundary * sign_dir)) {
+
+ target_step_pos = dest_step_pos;
+ target_lens_pos =
+ a_ctrl->step_position_table[target_step_pos];
+ rc = msm_actuator_bivcm_write_focus(a_ctrl,
+ curr_lens_pos,
+ &ringing_params_kernel
+ [a_ctrl->curr_region_index],
+ sign_dir,
+ target_lens_pos);
+ if (rc < 0) {
+ kfree(ringing_params_kernel);
+ return rc;
+ }
+ curr_lens_pos = target_lens_pos;
+ } else {
+ target_step_pos = step_boundary;
+ target_lens_pos =
+ a_ctrl->step_position_table[target_step_pos];
+ rc = msm_actuator_bivcm_write_focus(a_ctrl,
+ curr_lens_pos,
+ &ringing_params_kernel
+ [a_ctrl->curr_region_index],
+ sign_dir,
+ target_lens_pos);
+ if (rc < 0) {
+ kfree(ringing_params_kernel);
+ return rc;
+ }
+ curr_lens_pos = target_lens_pos;
+
+ a_ctrl->curr_region_index += sign_dir;
+ }
+ a_ctrl->curr_step_pos = target_step_pos;
+ }
+ /*Free the memory allocated for damping parameters*/
+ kfree(ringing_params_kernel);
+
+ move_params->curr_lens_pos = curr_lens_pos;
+ a_ctrl->i2c_tbl_index = 0;
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_park_lens(struct msm_actuator_ctrl_t *a_ctrl)
+{
+ int32_t rc = 0;
+ uint16_t next_lens_pos = 0;
+ struct msm_camera_i2c_reg_setting reg_setting;
+
+ a_ctrl->i2c_tbl_index = 0;
+ if ((a_ctrl->curr_step_pos > a_ctrl->total_steps) ||
+ (!a_ctrl->park_lens.max_step) ||
+ (!a_ctrl->step_position_table) ||
+ (!a_ctrl->i2c_reg_tbl) ||
+ (!a_ctrl->func_tbl) ||
+ (!a_ctrl->func_tbl->actuator_parse_i2c_params)) {
+ pr_err("%s:%d Failed to park lens.\n",
+ __func__, __LINE__);
+ return 0;
+ }
+
+ if (a_ctrl->park_lens.max_step > a_ctrl->max_code_size)
+ a_ctrl->park_lens.max_step = a_ctrl->max_code_size;
+
+ next_lens_pos = a_ctrl->step_position_table[a_ctrl->curr_step_pos];
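+ /*
+ * Walk the lens back towards position 0 in progressively
+ * smaller jumps (7x, 5x, 3x max_step, then at most max_step)
+ * so parking completes quickly while the final steps stay small.
+ */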
+ while (next_lens_pos) {
+ /* conditions which help to reduce park lens time */
+ if (next_lens_pos > (a_ctrl->park_lens.max_step *
+ PARK_LENS_LONG_STEP)) {
+ next_lens_pos = next_lens_pos -
+ (a_ctrl->park_lens.max_step *
+ PARK_LENS_LONG_STEP);
+ } else if (next_lens_pos > (a_ctrl->park_lens.max_step *
+ PARK_LENS_MID_STEP)) {
+ next_lens_pos = next_lens_pos -
+ (a_ctrl->park_lens.max_step *
+ PARK_LENS_MID_STEP);
+ } else if (next_lens_pos > (a_ctrl->park_lens.max_step *
+ PARK_LENS_SMALL_STEP)) {
+ next_lens_pos = next_lens_pos -
+ (a_ctrl->park_lens.max_step *
+ PARK_LENS_SMALL_STEP);
+ } else {
+ next_lens_pos = (next_lens_pos >
+ a_ctrl->park_lens.max_step) ?
+ (next_lens_pos - a_ctrl->park_lens.
+ max_step) : 0;
+ }
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ next_lens_pos, a_ctrl->park_lens.hw_params,
+ a_ctrl->park_lens.damping_delay);
+
+ reg_setting.reg_setting = a_ctrl->i2c_reg_tbl;
+ reg_setting.size = a_ctrl->i2c_tbl_index;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+
+ rc = a_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("%s Failed I2C write Line %d\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+ /* Use typical damping time delay to avoid tick sound */
+ usleep_range(10000, 12000);
+ }
+
+ return 0;
+}
+
+static int32_t msm_actuator_bivcm_init_step_table(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_set_info_t *set_info)
+{
+ int16_t code_per_step = 0;
+ int16_t cur_code = 0;
+ uint16_t step_index = 0, region_index = 0;
+ uint16_t step_boundary = 0;
+ uint32_t max_code_size = 1;
+ uint16_t data_size = set_info->actuator_params.data_size;
+ uint16_t mask = 0, i = 0;
+ uint32_t qvalue = 0;
+
+ CDBG("Enter\n");
+
+ for (; data_size > 0; data_size--) {
+ max_code_size *= 2;
+ mask |= (1 << i++);
+ }
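+ /*
+ * e.g. for a hypothetical 10-bit DAC (data_size == 10) this
+ * yields max_code_size = 1024 and mask = 0x3FF.
+ */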
+
+ a_ctrl->max_code_size = max_code_size;
+ kfree(a_ctrl->step_position_table);
+ a_ctrl->step_position_table = NULL;
+
+ if (set_info->af_tuning_params.total_steps
+ > MAX_ACTUATOR_AF_TOTAL_STEPS) {
+ pr_err("Max actuator totalsteps exceeded = %d\n",
+ set_info->af_tuning_params.total_steps);
+ return -EFAULT;
+ }
+ /* Fill step position table */
+ a_ctrl->step_position_table =
+ kmalloc(sizeof(uint16_t) *
+ (set_info->af_tuning_params.total_steps + 1), GFP_KERNEL);
+
+ if (a_ctrl->step_position_table == NULL)
+ return -ENOMEM;
+
+ cur_code = set_info->af_tuning_params.initial_code;
+ a_ctrl->step_position_table[step_index++] = cur_code;
+ for (region_index = 0;
+ region_index < a_ctrl->region_size;
+ region_index++) {
+ code_per_step =
+ a_ctrl->region_params[region_index].code_per_step;
+ step_boundary =
+ a_ctrl->region_params[region_index].
+ step_bound[MOVE_NEAR];
+ if (step_boundary >
+ set_info->af_tuning_params.total_steps) {
+ pr_err("invalid step_boundary = %d, max_val = %d",
+ step_boundary,
+ set_info->af_tuning_params.total_steps);
+ kfree(a_ctrl->step_position_table);
+ a_ctrl->step_position_table = NULL;
+ return -EINVAL;
+ }
+ qvalue = a_ctrl->region_params[region_index].qvalue;
+ for (; step_index <= step_boundary;
+ step_index++) {
+ if (qvalue > 1 && qvalue <= MAX_QVALUE)
+ cur_code = step_index * code_per_step / qvalue;
+ else
+ cur_code = step_index * code_per_step;
+ cur_code = (set_info->af_tuning_params.initial_code +
+ cur_code) & mask;
+ if (cur_code < max_code_size)
+ a_ctrl->step_position_table[step_index] =
+ cur_code;
+ else {
+ for (; step_index <
+ set_info->af_tuning_params.total_steps;
+ step_index++)
+ a_ctrl->
+ step_position_table[
+ step_index] =
+ max_code_size;
+ }
+ CDBG("step_position_table[%d] = %d\n", step_index,
+ a_ctrl->step_position_table[step_index]);
+ }
+ }
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_actuator_init_step_table(struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_set_info_t *set_info)
+{
+ int16_t code_per_step = 0;
+ uint32_t qvalue = 0;
+ int16_t cur_code = 0;
+ uint16_t step_index = 0, region_index = 0;
+ uint16_t step_boundary = 0;
+ uint32_t max_code_size = 1;
+ uint16_t data_size = set_info->actuator_params.data_size;
+
+ CDBG("Enter\n");
+
+ /* validate the actuator state */
+ if (a_ctrl->actuator_state != ACT_OPS_ACTIVE) {
+ pr_err("%s:%d invalid actuator_state %d\n"
+ , __func__, __LINE__, a_ctrl->actuator_state);
+ return -EINVAL;
+ }
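+ /* max_code_size = 2^data_size is the upper bound used to clamp lens codes below */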
+ for (; data_size > 0; data_size--)
+ max_code_size *= 2;
+
+ a_ctrl->max_code_size = max_code_size;
+
+ /* free the step_position_table to allocate a new one */
+ kfree(a_ctrl->step_position_table);
+ a_ctrl->step_position_table = NULL;
+
+ if (set_info->af_tuning_params.total_steps
+ > MAX_ACTUATOR_AF_TOTAL_STEPS) {
+ pr_err("Max actuator totalsteps exceeded = %d\n",
+ set_info->af_tuning_params.total_steps);
+ return -EFAULT;
+ }
+ /* Fill step position table */
+ a_ctrl->step_position_table =
+ kmalloc(sizeof(uint16_t) *
+ (set_info->af_tuning_params.total_steps + 1), GFP_KERNEL);
+
+ if (a_ctrl->step_position_table == NULL)
+ return -ENOMEM;
+
+ cur_code = set_info->af_tuning_params.initial_code;
+ a_ctrl->step_position_table[step_index++] = cur_code;
+ for (region_index = 0;
+ region_index < a_ctrl->region_size;
+ region_index++) {
+ code_per_step =
+ a_ctrl->region_params[region_index].code_per_step;
+ qvalue =
+ a_ctrl->region_params[region_index].qvalue;
+ step_boundary =
+ a_ctrl->region_params[region_index].
+ step_bound[MOVE_NEAR];
+ if (step_boundary >
+ set_info->af_tuning_params.total_steps) {
+ pr_err("invalid step_boundary = %d, max_val = %d",
+ step_boundary,
+ set_info->af_tuning_params.total_steps);
+ kfree(a_ctrl->step_position_table);
+ a_ctrl->step_position_table = NULL;
+ return -EINVAL;
+ }
+ for (; step_index <= step_boundary;
+ step_index++) {
+ if (qvalue > 1 && qvalue <= MAX_QVALUE)
+ cur_code = step_index * code_per_step / qvalue;
+ else
+ cur_code = step_index * code_per_step;
+ cur_code += set_info->af_tuning_params.initial_code;
+ if (cur_code < max_code_size)
+ a_ctrl->step_position_table[step_index] =
+ cur_code;
+ else {
+ for (; step_index <
+ set_info->af_tuning_params.total_steps;
+ step_index++)
+ a_ctrl->
+ step_position_table[
+ step_index] =
+ max_code_size;
+ }
+ CDBG("step_position_table[%d] = %d\n", step_index,
+ a_ctrl->step_position_table[step_index]);
+ }
+ }
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_actuator_set_default_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_move_params_t *move_params)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+
+ if (a_ctrl->curr_step_pos != 0)
+ rc = a_ctrl->func_tbl->actuator_move_focus(a_ctrl, move_params);
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_vreg_control(struct msm_actuator_ctrl_t *a_ctrl,
+ int config)
+{
+ int rc = 0, i, cnt;
+ struct msm_actuator_vreg *vreg_cfg;
+
+ vreg_cfg = &a_ctrl->vreg_cfg;
+ cnt = vreg_cfg->num_vreg;
+ if (!cnt)
+ return 0;
+
+ if (cnt >= MSM_ACTUATOR_MAX_VREGS) {
+ pr_err("%s failed %d cnt %d\n", __func__, __LINE__, cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ rc = msm_camera_config_single_vreg(&(a_ctrl->pdev->dev),
+ &vreg_cfg->cam_vreg[i],
+ (struct regulator **)&vreg_cfg->data[i],
+ config);
+ } else if (a_ctrl->act_device_type ==
+ MSM_CAMERA_I2C_DEVICE) {
+ rc = msm_camera_config_single_vreg(
+ &(a_ctrl->i2c_client.client->dev),
+ &vreg_cfg->cam_vreg[i],
+ (struct regulator **)&vreg_cfg->data[i],
+ config);
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl)
+{
+ int32_t rc = 0;
+ enum msm_sensor_power_seq_gpio_t gpio;
+
+ CDBG("Enter\n");
+ if (a_ctrl->actuator_state != ACT_DISABLE_STATE) {
+
+ if (a_ctrl->func_tbl && a_ctrl->func_tbl->actuator_park_lens) {
+ rc = a_ctrl->func_tbl->actuator_park_lens(a_ctrl);
+ if (rc < 0)
+ pr_err("%s:%d Lens park failed.\n",
+ __func__, __LINE__);
+ }
+
+ rc = msm_actuator_vreg_control(a_ctrl, 0);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+
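+ /* Drive the AF GPIOs low, move pinctrl to its suspend state and release the GPIO table */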
+ for (gpio = SENSOR_GPIO_AF_PWDM;
+ gpio < SENSOR_GPIO_MAX; gpio++) {
+ if (a_ctrl->gconf &&
+ a_ctrl->gconf->gpio_num_info &&
+ a_ctrl->gconf->gpio_num_info->
+ valid[gpio] == 1) {
+
+ gpio_set_value_cansleep(
+ a_ctrl->gconf->gpio_num_info->
+ gpio_num[gpio],
+ GPIOF_OUT_INIT_LOW);
+
+ if (a_ctrl->cam_pinctrl_status) {
+ rc = pinctrl_select_state(
+ a_ctrl->pinctrl_info.pinctrl,
+ a_ctrl->pinctrl_info.
+ gpio_state_suspend);
+ if (rc < 0)
+ pr_err("ERR:%s:%d cannot set pin to suspend state: %d",
+ __func__, __LINE__, rc);
+
+ devm_pinctrl_put(
+ a_ctrl->pinctrl_info.pinctrl);
+ }
+ a_ctrl->cam_pinctrl_status = 0;
+ rc = msm_camera_request_gpio_table(
+ a_ctrl->gconf->cam_gpio_req_tbl,
+ a_ctrl->gconf->cam_gpio_req_tbl_size,
+ 0);
+ if (rc < 0)
+ pr_err("ERR:%s:Failed in selecting state in actuator power down: %d\n",
+ __func__, rc);
+ }
+ }
+
+ kfree(a_ctrl->step_position_table);
+ a_ctrl->step_position_table = NULL;
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ a_ctrl->i2c_tbl_index = 0;
+ a_ctrl->actuator_state = ACT_OPS_INACTIVE;
+ }
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_set_position(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_set_position_t *set_pos)
+{
+ int32_t rc = 0;
+ int32_t index;
+ uint16_t next_lens_position;
+ uint16_t delay;
+ uint32_t hw_params = 0;
+ struct msm_camera_i2c_reg_setting reg_setting;
+
+ CDBG("%s Enter %d\n", __func__, __LINE__);
+ if (set_pos->number_of_steps <= 0 ||
+ set_pos->number_of_steps > MAX_NUMBER_OF_STEPS) {
+ pr_err("num_steps out of range = %d\n",
+ set_pos->number_of_steps);
+ return -EFAULT;
+ }
+
+ if (!a_ctrl || !a_ctrl->func_tbl ||
+ !a_ctrl->func_tbl->actuator_parse_i2c_params) {
+ pr_err("failed. NULL actuator pointers.");
+ return -EFAULT;
+ }
+
+ if (a_ctrl->actuator_state != ACT_OPS_ACTIVE) {
+ pr_err("failed. Invalid actuator state.");
+ return -EFAULT;
+ }
+
+ a_ctrl->i2c_tbl_index = 0;
+ hw_params = set_pos->hw_params;
+ for (index = 0; index < set_pos->number_of_steps; index++) {
+ next_lens_position = set_pos->pos[index];
+ delay = set_pos->delay[index];
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ next_lens_position, hw_params, delay);
+
+ reg_setting.reg_setting = a_ctrl->i2c_reg_tbl;
+ reg_setting.size = a_ctrl->i2c_tbl_index;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+
+ rc = a_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("%s Failed I2C write Line %d\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+ }
+ CDBG("%s exit %d\n", __func__, __LINE__);
+ return rc;
+}
+
+static int32_t msm_actuator_bivcm_set_position(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_set_position_t *set_pos)
+{
+ int32_t rc = 0;
+ int32_t index;
+ uint16_t next_lens_position;
+ uint16_t delay;
+ uint32_t hw_params = 0;
+
+ CDBG("%s Enter %d\n", __func__, __LINE__);
+ if (set_pos->number_of_steps <= 0 ||
+ set_pos->number_of_steps > MAX_NUMBER_OF_STEPS) {
+ pr_err("num_steps out of range = %d\n",
+ set_pos->number_of_steps);
+ return -EFAULT;
+ }
+
+ if (!a_ctrl) {
+ pr_err("failed. NULL actuator pointers.");
+ return -EFAULT;
+ }
+
+ if (a_ctrl->actuator_state != ACT_OPS_ACTIVE) {
+ pr_err("failed. Invalid actuator state.");
+ return -EFAULT;
+ }
+
+ a_ctrl->i2c_tbl_index = 0;
+ hw_params = set_pos->hw_params;
+ for (index = 0; index < set_pos->number_of_steps; index++) {
+ next_lens_position = set_pos->pos[index];
+ delay = set_pos->delay[index];
+ rc = msm_actuator_bivcm_handle_i2c_ops(a_ctrl,
+ next_lens_position, hw_params, delay);
+ a_ctrl->i2c_tbl_index = 0;
+ }
+ CDBG("%s exit %d\n", __func__, __LINE__);
+ return rc;
+}
+
+static int32_t msm_actuator_set_param(struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_set_info_t *set_info) {
+ struct reg_settings_t *init_settings = NULL;
+ int32_t rc = -EFAULT;
+ uint16_t i = 0;
+ struct msm_camera_cci_client *cci_client = NULL;
+
+ CDBG("Enter\n");
+
+ for (i = 0; i < ARRAY_SIZE(actuators); i++) {
+ if (set_info->actuator_params.act_type ==
+ actuators[i]->act_type) {
+ a_ctrl->func_tbl = &actuators[i]->func_tbl;
+ rc = 0;
+ }
+ }
+
+ if (rc < 0) {
+ pr_err("Actuator function table not found\n");
+ return rc;
+ }
+ if (set_info->af_tuning_params.total_steps
+ > MAX_ACTUATOR_AF_TOTAL_STEPS) {
+ pr_err("Max actuator totalsteps exceeded = %d\n",
+ set_info->af_tuning_params.total_steps);
+ return -EFAULT;
+ }
+ if (set_info->af_tuning_params.region_size
+ > MAX_ACTUATOR_REGION) {
+ pr_err("MAX_ACTUATOR_REGION is exceeded.\n");
+ return -EFAULT;
+ }
+
+ a_ctrl->region_size = set_info->af_tuning_params.region_size;
+ a_ctrl->pwd_step = set_info->af_tuning_params.pwd_step;
+ a_ctrl->total_steps = set_info->af_tuning_params.total_steps;
+
+ if (copy_from_user(&a_ctrl->region_params,
+ (void *)set_info->af_tuning_params.region_params,
+ a_ctrl->region_size * sizeof(struct region_params_t)))
+ return -EFAULT;
+
+ if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ cci_client = a_ctrl->i2c_client.cci_client;
+ cci_client->sid =
+ set_info->actuator_params.i2c_addr >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->cci_i2c_master = a_ctrl->cci_master;
+ cci_client->i2c_freq_mode =
+ set_info->actuator_params.i2c_freq_mode;
+ } else {
+ a_ctrl->i2c_client.client->addr =
+ set_info->actuator_params.i2c_addr;
+ }
+
+ a_ctrl->i2c_data_type = set_info->actuator_params.i2c_data_type;
+ a_ctrl->i2c_client.addr_type = set_info->actuator_params.i2c_addr_type;
+ if (set_info->actuator_params.reg_tbl_size <=
+ MAX_ACTUATOR_REG_TBL_SIZE) {
+ a_ctrl->reg_tbl_size = set_info->actuator_params.reg_tbl_size;
+ } else {
+ a_ctrl->reg_tbl_size = 0;
+ pr_err("MAX_ACTUATOR_REG_TBL_SIZE is exceeded.\n");
+ return -EFAULT;
+ }
+
+ if ((a_ctrl->actuator_state == ACT_OPS_ACTIVE) &&
+ (a_ctrl->i2c_reg_tbl != NULL)) {
+ kfree(a_ctrl->i2c_reg_tbl);
+ }
+ a_ctrl->i2c_reg_tbl = NULL;
+ a_ctrl->i2c_reg_tbl =
+ kmalloc(sizeof(struct msm_camera_i2c_reg_array) *
+ (set_info->af_tuning_params.total_steps + 1), GFP_KERNEL);
+ if (!a_ctrl->i2c_reg_tbl) {
+ pr_err("kmalloc fail\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&a_ctrl->reg_tbl,
+ (void *)set_info->actuator_params.reg_tbl_params,
+ a_ctrl->reg_tbl_size *
+ sizeof(struct msm_actuator_reg_params_t))) {
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ return -EFAULT;
+ }
+
+ if (set_info->actuator_params.init_setting_size &&
+ set_info->actuator_params.init_setting_size
+ <= MAX_ACTUATOR_INIT_SET) {
+ if (a_ctrl->func_tbl->actuator_init_focus) {
+ init_settings = kmalloc(sizeof(struct reg_settings_t) *
+ (set_info->actuator_params.init_setting_size),
+ GFP_KERNEL);
+ if (init_settings == NULL) {
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ pr_err("Error allocating memory for init_settings\n");
+ return -EFAULT;
+ }
+ if (copy_from_user(init_settings,
+ (void *)set_info->actuator_params.init_settings,
+ set_info->actuator_params.init_setting_size *
+ sizeof(struct reg_settings_t))) {
+ kfree(init_settings);
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ pr_err("Error copying init_settings\n");
+ return -EFAULT;
+ }
+ rc = a_ctrl->func_tbl->actuator_init_focus(a_ctrl,
+ set_info->actuator_params.init_setting_size,
+ init_settings);
+ kfree(init_settings);
+ if (rc < 0) {
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ pr_err("Error actuator_init_focus\n");
+ return -EFAULT;
+ }
+ }
+ }
+
+ /* Park lens data */
+ a_ctrl->park_lens = set_info->actuator_params.park_lens;
+ a_ctrl->initial_code = set_info->af_tuning_params.initial_code;
+ if (a_ctrl->func_tbl->actuator_init_step_table)
+ rc = a_ctrl->func_tbl->
+ actuator_init_step_table(a_ctrl, set_info);
+
+ a_ctrl->curr_step_pos = 0;
+ a_ctrl->curr_region_index = 0;
+ CDBG("Exit\n");
+
+ return rc;
+}
+
+static int msm_actuator_init(struct msm_actuator_ctrl_t *a_ctrl)
+{
+ int rc = 0;
+
+ CDBG("Enter\n");
+ if (!a_ctrl) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &a_ctrl->i2c_client, MSM_CCI_INIT);
+ if (rc < 0)
+ pr_err("cci_init failed\n");
+ }
+ a_ctrl->actuator_state = ACT_OPS_ACTIVE;
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_config(struct msm_actuator_ctrl_t *a_ctrl,
+ void __user *argp)
+{
+ struct msm_actuator_cfg_data *cdata =
+ (struct msm_actuator_cfg_data *)argp;
+ int32_t rc = -EINVAL;
+
+ mutex_lock(a_ctrl->actuator_mutex);
+ CDBG("Enter\n");
+ CDBG("%s type %d\n", __func__, cdata->cfgtype);
+
+ if (cdata->cfgtype != CFG_ACTUATOR_INIT &&
+ cdata->cfgtype != CFG_ACTUATOR_POWERUP &&
+ a_ctrl->actuator_state == ACT_DISABLE_STATE) {
+ pr_err("actuator disabled %d\n", rc);
+ mutex_unlock(a_ctrl->actuator_mutex);
+ return rc;
+ }
+
+ switch (cdata->cfgtype) {
+ case CFG_ACTUATOR_INIT:
+ rc = msm_actuator_init(a_ctrl);
+ if (rc < 0)
+ pr_err("msm_actuator_init failed %d\n", rc);
+ break;
+ case CFG_GET_ACTUATOR_INFO:
+ cdata->is_af_supported = 1;
+ cdata->cfg.cam_name = a_ctrl->cam_name;
+ rc = 0;
+ break;
+
+ case CFG_SET_ACTUATOR_INFO:
+ rc = msm_actuator_set_param(a_ctrl, &cdata->cfg.set_info);
+ if (rc < 0)
+ pr_err("init table failed %d\n", rc);
+ break;
+
+ case CFG_SET_DEFAULT_FOCUS:
+ if (a_ctrl->func_tbl &&
+ a_ctrl->func_tbl->actuator_set_default_focus)
+ rc = a_ctrl->func_tbl->actuator_set_default_focus(
+ a_ctrl, &cdata->cfg.move);
+ if (rc < 0)
+ pr_err("move focus failed %d\n", rc);
+ break;
+
+ case CFG_MOVE_FOCUS:
+ if (a_ctrl->func_tbl &&
+ a_ctrl->func_tbl->actuator_move_focus)
+ rc = a_ctrl->func_tbl->actuator_move_focus(a_ctrl,
+ &cdata->cfg.move);
+ if (rc < 0)
+ pr_err("move focus failed %d\n", rc);
+ break;
+ case CFG_ACTUATOR_POWERDOWN:
+ rc = msm_actuator_power_down(a_ctrl);
+ if (rc < 0)
+ pr_err("msm_actuator_power_down failed %d\n", rc);
+ break;
+
+ case CFG_SET_POSITION:
+ if (a_ctrl->func_tbl &&
+ a_ctrl->func_tbl->actuator_set_position)
+ rc = a_ctrl->func_tbl->actuator_set_position(a_ctrl,
+ &cdata->cfg.setpos);
+ if (rc < 0)
+ pr_err("actuator_set_position failed %d\n", rc);
+ break;
+
+ case CFG_ACTUATOR_POWERUP:
+ rc = msm_actuator_power_up(a_ctrl);
+ if (rc < 0)
+ pr_err("Failed actuator power up%d\n", rc);
+ break;
+
+ default:
+ break;
+ }
+ mutex_unlock(a_ctrl->actuator_mutex);
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_get_subdev_id(struct msm_actuator_ctrl_t *a_ctrl,
+ void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ CDBG("Enter\n");
+ if (!subdev_id) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ *subdev_id = a_ctrl->pdev->id;
+ else
+ *subdev_id = a_ctrl->subdev_id;
+
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("Exit\n");
+ return 0;
+}
+
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_poll = msm_camera_cci_i2c_poll,
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_qup_func_tbl = {
+ .i2c_read = msm_camera_qup_i2c_read,
+ .i2c_read_seq = msm_camera_qup_i2c_read_seq,
+ .i2c_write = msm_camera_qup_i2c_write,
+ .i2c_write_table = msm_camera_qup_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_qup_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_qup_i2c_write_table_w_microdelay,
+ .i2c_poll = msm_camera_qup_i2c_poll,
+};
+
+static int msm_actuator_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh) {
+ int rc = 0;
+ struct msm_actuator_ctrl_t *a_ctrl = v4l2_get_subdevdata(sd);
+
+ CDBG("Enter\n");
+ if (!a_ctrl) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ mutex_lock(a_ctrl->actuator_mutex);
+ if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE &&
+ a_ctrl->actuator_state != ACT_DISABLE_STATE) {
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &a_ctrl->i2c_client, MSM_CCI_RELEASE);
+ if (rc < 0)
+ pr_err("cci_init failed\n");
+ }
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ a_ctrl->actuator_state = ACT_DISABLE_STATE;
+ mutex_unlock(a_ctrl->actuator_mutex);
+ CDBG("Exit\n");
+ return rc;
+}
+
+static const struct v4l2_subdev_internal_ops msm_actuator_internal_ops = {
+ .close = msm_actuator_close,
+};
+
+static long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc;
+ struct msm_actuator_ctrl_t *a_ctrl = v4l2_get_subdevdata(sd);
+ void __user *argp = (void __user *)arg;
+
+ CDBG("Enter\n");
+ CDBG("%s:%d a_ctrl %pK argp %pK\n", __func__, __LINE__, a_ctrl, argp);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_actuator_get_subdev_id(a_ctrl, argp);
+ case VIDIOC_MSM_ACTUATOR_CFG:
+ return msm_actuator_config(a_ctrl, argp);
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_SHUTDOWN:
+ if (!a_ctrl->i2c_client.i2c_func_tbl) {
+ pr_err("a_ctrl->i2c_client.i2c_func_tbl NULL\n");
+ return -EINVAL;
+ }
+ mutex_lock(a_ctrl->actuator_mutex);
+ rc = msm_actuator_power_down(a_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d Actuator Power down failed\n",
+ __func__, __LINE__);
+ }
+ mutex_unlock(a_ctrl->actuator_mutex);
+ return msm_actuator_close(sd, NULL);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_actuator_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct msm_actuator_cfg_data32 *u32 =
+ (struct msm_actuator_cfg_data32 *)arg;
+ struct msm_actuator_cfg_data actuator_data;
+ void *parg = arg;
+ long rc;
+
+ switch (cmd) {
+ case VIDIOC_MSM_ACTUATOR_CFG32:
+ cmd = VIDIOC_MSM_ACTUATOR_CFG;
+ switch (u32->cfgtype) {
+ case CFG_SET_ACTUATOR_INFO:
+ actuator_data.cfgtype = u32->cfgtype;
+ actuator_data.is_af_supported = u32->is_af_supported;
+ actuator_data.cfg.set_info.actuator_params.act_type =
+ u32->cfg.set_info.actuator_params.act_type;
+
+ actuator_data.cfg.set_info.actuator_params
+ .reg_tbl_size =
+ u32->cfg.set_info.actuator_params.reg_tbl_size;
+
+ actuator_data.cfg.set_info.actuator_params.data_size =
+ u32->cfg.set_info.actuator_params.data_size;
+
+ actuator_data.cfg.set_info.actuator_params
+ .init_setting_size =
+ u32->cfg.set_info.actuator_params
+ .init_setting_size;
+
+ actuator_data.cfg.set_info.actuator_params.i2c_addr =
+ u32->cfg.set_info.actuator_params.i2c_addr;
+
+ actuator_data.cfg.set_info.actuator_params.
+ i2c_freq_mode =
+ u32->cfg.set_info.actuator_params.i2c_freq_mode;
+
+ actuator_data.cfg.set_info.actuator_params
+ .i2c_addr_type =
+ u32->cfg.set_info.actuator_params.i2c_addr_type;
+
+ actuator_data.cfg.set_info.actuator_params
+ .i2c_data_type =
+ u32->cfg.set_info.actuator_params.i2c_data_type;
+
+ actuator_data.cfg.set_info.actuator_params
+ .reg_tbl_params =
+ compat_ptr(
+ u32->cfg.set_info.actuator_params
+ .reg_tbl_params);
+
+ actuator_data.cfg.set_info.actuator_params
+ .init_settings =
+ compat_ptr(
+ u32->cfg.set_info.actuator_params
+ .init_settings);
+
+ actuator_data.cfg.set_info.af_tuning_params
+ .initial_code =
+ u32->cfg.set_info.af_tuning_params.initial_code;
+
+ actuator_data.cfg.set_info.af_tuning_params.pwd_step =
+ u32->cfg.set_info.af_tuning_params.pwd_step;
+
+ actuator_data.cfg.set_info.af_tuning_params
+ .region_size =
+ u32->cfg.set_info.af_tuning_params.region_size;
+
+ actuator_data.cfg.set_info.af_tuning_params
+ .total_steps =
+ u32->cfg.set_info.af_tuning_params.total_steps;
+
+ actuator_data.cfg.set_info.af_tuning_params
+ .region_params = compat_ptr(
+ u32->cfg.set_info.af_tuning_params
+ .region_params);
+
+ actuator_data.cfg.set_info.actuator_params.park_lens =
+ u32->cfg.set_info.actuator_params.park_lens;
+
+ parg = &actuator_data;
+ break;
+ case CFG_SET_DEFAULT_FOCUS:
+ case CFG_MOVE_FOCUS:
+ actuator_data.cfgtype = u32->cfgtype;
+ actuator_data.is_af_supported = u32->is_af_supported;
+ actuator_data.cfg.move.dir = u32->cfg.move.dir;
+
+ actuator_data.cfg.move.sign_dir =
+ u32->cfg.move.sign_dir;
+
+ actuator_data.cfg.move.dest_step_pos =
+ u32->cfg.move.dest_step_pos;
+
+ actuator_data.cfg.move.num_steps =
+ u32->cfg.move.num_steps;
+
+ actuator_data.cfg.move.curr_lens_pos =
+ u32->cfg.move.curr_lens_pos;
+
+ actuator_data.cfg.move.ringing_params =
+ compat_ptr(u32->cfg.move.ringing_params);
+ parg = &actuator_data;
+ break;
+ case CFG_SET_POSITION:
+ actuator_data.cfgtype = u32->cfgtype;
+ actuator_data.is_af_supported = u32->is_af_supported;
+ memcpy(&actuator_data.cfg.setpos, &(u32->cfg.setpos),
+ sizeof(struct msm_actuator_set_position_t));
+ break;
+ default:
+ actuator_data.cfgtype = u32->cfgtype;
+ parg = &actuator_data;
+ break;
+ }
+ }
+
+ rc = msm_actuator_subdev_ioctl(sd, cmd, parg);
+
+ switch (cmd) {
+
+ case VIDIOC_MSM_ACTUATOR_CFG:
+
+ switch (u32->cfgtype) {
+
+ case CFG_SET_DEFAULT_FOCUS:
+ case CFG_MOVE_FOCUS:
+ u32->cfg.move.curr_lens_pos =
+ actuator_data.cfg.move.curr_lens_pos;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static long msm_actuator_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_actuator_subdev_do_ioctl);
+}
+#endif
+
+static int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl)
+{
+ int rc = 0;
+ enum msm_sensor_power_seq_gpio_t gpio;
+
+ CDBG("%s called\n", __func__);
+
+ rc = msm_actuator_vreg_control(a_ctrl, 1);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+
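+ /* Request the AF GPIOs, switch pinctrl to the active state and drive the GPIOs high */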
+ for (gpio = SENSOR_GPIO_AF_PWDM; gpio < SENSOR_GPIO_MAX; gpio++) {
+ if (a_ctrl->gconf &&
+ a_ctrl->gconf->gpio_num_info &&
+ a_ctrl->gconf->gpio_num_info->valid[gpio] == 1) {
+ rc = msm_camera_request_gpio_table(
+ a_ctrl->gconf->cam_gpio_req_tbl,
+ a_ctrl->gconf->cam_gpio_req_tbl_size, 1);
+ if (rc < 0) {
+ pr_err("ERR:%s:Failed in selecting state for actuator: %d\n",
+ __func__, rc);
+ return rc;
+ }
+ if (a_ctrl->cam_pinctrl_status) {
+ rc = pinctrl_select_state(
+ a_ctrl->pinctrl_info.pinctrl,
+ a_ctrl->pinctrl_info.gpio_state_active);
+ if (rc < 0)
+ pr_err("ERR:%s:%d cannot set pin to active state: %d",
+ __func__, __LINE__, rc);
+ }
+
+ gpio_set_value_cansleep(
+ a_ctrl->gconf->gpio_num_info->gpio_num[gpio],
+ 1);
+ }
+ }
+
+ /* VREG needs some delay to power up */
+ usleep_range(2000, 3000);
+ a_ctrl->actuator_state = ACT_ENABLE_STATE;
+
+ CDBG("Exit\n");
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_actuator_subdev_core_ops = {
+ .ioctl = msm_actuator_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_actuator_subdev_ops = {
+ .core = &msm_actuator_subdev_core_ops,
+};
+
+static const struct i2c_device_id msm_actuator_i2c_id[] = {
+ {"qcom,actuator", (kernel_ulong_t)NULL},
+ { }
+};
+
+static int32_t msm_actuator_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ struct msm_actuator_ctrl_t *act_ctrl_t = NULL;
+ struct msm_actuator_vreg *vreg_cfg = NULL;
+
+ CDBG("Enter\n");
+
+ if (client == NULL) {
+ pr_err("msm_actuator_i2c_probe: client is null\n");
+ return -EINVAL;
+ }
+
+ act_ctrl_t = kzalloc(sizeof(struct msm_actuator_ctrl_t),
+ GFP_KERNEL);
+ if (!act_ctrl_t)
+ return -ENOMEM;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("i2c_check_functionality failed\n");
+ goto probe_failure;
+ }
+
+ CDBG("client = 0x%pK\n", client);
+
+ rc = of_property_read_u32(client->dev.of_node, "cell-index",
+ &act_ctrl_t->subdev_id);
+ CDBG("cell-index %d, rc %d\n", act_ctrl_t->subdev_id, rc);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto probe_failure;
+ }
+
+ if (of_find_property(client->dev.of_node,
+ "qcom,cam-vreg-name", NULL)) {
+ vreg_cfg = &act_ctrl_t->vreg_cfg;
+ rc = msm_camera_get_dt_vreg_data(client->dev.of_node,
+ &vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto probe_failure;
+ }
+ }
+
+ act_ctrl_t->i2c_driver = &msm_actuator_i2c_driver;
+ act_ctrl_t->i2c_client.client = client;
+ act_ctrl_t->curr_step_pos = 0,
+ act_ctrl_t->curr_region_index = 0,
+ /* Set device type as I2C */
+ act_ctrl_t->act_device_type = MSM_CAMERA_I2C_DEVICE;
+ act_ctrl_t->i2c_client.i2c_func_tbl = &msm_sensor_qup_func_tbl;
+ act_ctrl_t->act_v4l2_subdev_ops = &msm_actuator_subdev_ops;
+ act_ctrl_t->actuator_mutex = &msm_actuator_mutex;
+ act_ctrl_t->cam_name = act_ctrl_t->subdev_id;
+ CDBG("act_ctrl_t->cam_name: %d", act_ctrl_t->cam_name);
+ /* Assign name for sub device */
+ snprintf(act_ctrl_t->msm_sd.sd.name, sizeof(act_ctrl_t->msm_sd.sd.name),
+ "%s", act_ctrl_t->i2c_driver->driver.name);
+
+ /* Initialize sub device */
+ v4l2_i2c_subdev_init(&act_ctrl_t->msm_sd.sd,
+ act_ctrl_t->i2c_client.client,
+ act_ctrl_t->act_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&act_ctrl_t->msm_sd.sd, act_ctrl_t);
+ act_ctrl_t->msm_sd.sd.internal_ops = &msm_actuator_internal_ops;
+ act_ctrl_t->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&act_ctrl_t->msm_sd.sd.entity, 0, NULL, 0);
+ act_ctrl_t->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ act_ctrl_t->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_ACTUATOR;
+ act_ctrl_t->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2;
+ msm_sd_register(&act_ctrl_t->msm_sd);
+ msm_cam_copy_v4l2_subdev_fops(&msm_actuator_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_actuator_v4l2_subdev_fops.compat_ioctl32 =
+ msm_actuator_subdev_fops_ioctl;
+#endif
+ act_ctrl_t->msm_sd.sd.devnode->fops =
+ &msm_actuator_v4l2_subdev_fops;
+ act_ctrl_t->actuator_state = ACT_DISABLE_STATE;
+ pr_info("msm_actuator_i2c_probe: succeeded\n");
+ CDBG("Exit\n");
+
+ return 0;
+
+probe_failure:
+ kfree(act_ctrl_t);
+ return rc;
+}
+
+static int32_t msm_actuator_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_camera_cci_client *cci_client = NULL;
+ struct msm_actuator_ctrl_t *msm_actuator_t = NULL;
+ struct msm_actuator_vreg *vreg_cfg;
+
+ CDBG("Enter\n");
+
+ if (!pdev->dev.of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ msm_actuator_t = kzalloc(sizeof(struct msm_actuator_ctrl_t),
+ GFP_KERNEL);
+ if (!msm_actuator_t)
+ return -ENOMEM;
+ rc = of_property_read_u32((&pdev->dev)->of_node, "cell-index",
+ &pdev->id);
+ CDBG("cell-index %d, rc %d\n", pdev->id, rc);
+ if (rc < 0) {
+ kfree(msm_actuator_t);
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32((&pdev->dev)->of_node, "qcom,cci-master",
+ &msm_actuator_t->cci_master);
+ CDBG("qcom,cci-master %d, rc %d\n", msm_actuator_t->cci_master, rc);
+ if (rc < 0 || msm_actuator_t->cci_master >= MASTER_MAX) {
+ kfree(msm_actuator_t);
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+
+ if (of_find_property((&pdev->dev)->of_node,
+ "qcom,cam-vreg-name", NULL)) {
+ vreg_cfg = &msm_actuator_t->vreg_cfg;
+ rc = msm_camera_get_dt_vreg_data((&pdev->dev)->of_node,
+ &vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
+ if (rc < 0) {
+ kfree(msm_actuator_t);
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+ }
+ rc = msm_sensor_driver_get_gpio_data(&(msm_actuator_t->gconf),
+ (&pdev->dev)->of_node);
+ if (-ENODEV == rc) {
+ pr_notice("No valid actuator GPIOs data\n");
+ } else if (rc < 0) {
+ pr_err("Error Actuator GPIOs\n");
+ } else {
+ msm_actuator_t->cam_pinctrl_status = 1;
+ rc = msm_camera_pinctrl_init(
+ &(msm_actuator_t->pinctrl_info), &(pdev->dev));
+ if (rc < 0) {
+ pr_err("ERR: Error in reading actuator pinctrl\n");
+ msm_actuator_t->cam_pinctrl_status = 0;
+ }
+ }
+
+ msm_actuator_t->act_v4l2_subdev_ops = &msm_actuator_subdev_ops;
+ msm_actuator_t->actuator_mutex = &msm_actuator_mutex;
+ msm_actuator_t->cam_name = pdev->id;
+
+ /* Set platform device handle */
+ msm_actuator_t->pdev = pdev;
+ /* Set device type as platform device */
+ msm_actuator_t->act_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ msm_actuator_t->i2c_client.i2c_func_tbl = &msm_sensor_cci_func_tbl;
+ msm_actuator_t->i2c_client.cci_client = kzalloc(sizeof(
+ struct msm_camera_cci_client), GFP_KERNEL);
+ if (!msm_actuator_t->i2c_client.cci_client) {
+ kfree(msm_actuator_t->vreg_cfg.cam_vreg);
+ kfree(msm_actuator_t->gconf);
+ kfree(msm_actuator_t);
+ pr_err("failed no memory\n");
+ return -ENOMEM;
+ }
+
+ cci_client = msm_actuator_t->i2c_client.cci_client;
+ cci_client->cci_subdev = msm_cci_get_subdev();
+ cci_client->cci_i2c_master = msm_actuator_t->cci_master;
+ v4l2_subdev_init(&msm_actuator_t->msm_sd.sd,
+ msm_actuator_t->act_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&msm_actuator_t->msm_sd.sd, msm_actuator_t);
+ msm_actuator_t->msm_sd.sd.internal_ops = &msm_actuator_internal_ops;
+ msm_actuator_t->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(msm_actuator_t->msm_sd.sd.name,
+ ARRAY_SIZE(msm_actuator_t->msm_sd.sd.name), "msm_actuator");
+ media_entity_init(&msm_actuator_t->msm_sd.sd.entity, 0, NULL, 0);
+ msm_actuator_t->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ msm_actuator_t->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_ACTUATOR;
+ msm_actuator_t->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2;
+ msm_sd_register(&msm_actuator_t->msm_sd);
+ msm_actuator_t->actuator_state = ACT_DISABLE_STATE;
+ msm_cam_copy_v4l2_subdev_fops(&msm_actuator_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_actuator_v4l2_subdev_fops.compat_ioctl32 =
+ msm_actuator_subdev_fops_ioctl;
+#endif
+ msm_actuator_t->msm_sd.sd.devnode->fops =
+ &msm_actuator_v4l2_subdev_fops;
+
+ CDBG("Exit\n");
+ return rc;
+}
+
+static const struct of_device_id msm_actuator_i2c_dt_match[] = {
+ {.compatible = "qcom,actuator"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_actuator_i2c_dt_match);
+
+static struct i2c_driver msm_actuator_i2c_driver = {
+ .id_table = msm_actuator_i2c_id,
+ .probe = msm_actuator_i2c_probe,
+ .remove = __exit_p(msm_actuator_i2c_remove),
+ .driver = {
+ .name = "qcom,actuator",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_actuator_i2c_dt_match,
+ },
+};
+
+static const struct of_device_id msm_actuator_dt_match[] = {
+ {.compatible = "qcom,actuator", .data = NULL},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_actuator_dt_match);
+
+static struct platform_driver msm_actuator_platform_driver = {
+ .probe = msm_actuator_platform_probe,
+ .driver = {
+ .name = "qcom,actuator",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_actuator_dt_match,
+ },
+};
+
+static int __init msm_actuator_init_module(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_actuator_platform_driver);
+ if (!rc)
+ return rc;
+
+ CDBG("%s:%d rc %d\n", __func__, __LINE__, rc);
+ return i2c_add_driver(&msm_actuator_i2c_driver);
+}
+
+static struct msm_actuator msm_vcm_actuator_table = {
+ .act_type = ACTUATOR_VCM,
+ .func_tbl = {
+ .actuator_init_step_table = msm_actuator_init_step_table,
+ .actuator_move_focus = msm_actuator_move_focus,
+ .actuator_write_focus = msm_actuator_write_focus,
+ .actuator_set_default_focus = msm_actuator_set_default_focus,
+ .actuator_init_focus = msm_actuator_init_focus,
+ .actuator_parse_i2c_params = msm_actuator_parse_i2c_params,
+ .actuator_set_position = msm_actuator_set_position,
+ .actuator_park_lens = msm_actuator_park_lens,
+ },
+};
+
+static struct msm_actuator msm_piezo_actuator_table = {
+ .act_type = ACTUATOR_PIEZO,
+ .func_tbl = {
+ .actuator_init_step_table = NULL,
+ .actuator_move_focus = msm_actuator_piezo_move_focus,
+ .actuator_write_focus = NULL,
+ .actuator_set_default_focus =
+ msm_actuator_piezo_set_default_focus,
+ .actuator_init_focus = msm_actuator_init_focus,
+ .actuator_parse_i2c_params = msm_actuator_parse_i2c_params,
+ .actuator_park_lens = NULL,
+ },
+};
+
+static struct msm_actuator msm_hvcm_actuator_table = {
+ .act_type = ACTUATOR_HVCM,
+ .func_tbl = {
+ .actuator_init_step_table = msm_actuator_init_step_table,
+ .actuator_move_focus = msm_actuator_move_focus,
+ .actuator_write_focus = msm_actuator_write_focus,
+ .actuator_set_default_focus = msm_actuator_set_default_focus,
+ .actuator_init_focus = msm_actuator_init_focus,
+ .actuator_parse_i2c_params = msm_actuator_parse_i2c_params,
+ .actuator_set_position = msm_actuator_set_position,
+ .actuator_park_lens = msm_actuator_park_lens,
+ },
+};
+
+static struct msm_actuator msm_bivcm_actuator_table = {
+ .act_type = ACTUATOR_BIVCM,
+ .func_tbl = {
+ .actuator_init_step_table = msm_actuator_bivcm_init_step_table,
+ .actuator_move_focus = msm_actuator_bivcm_move_focus,
+ .actuator_write_focus = NULL,
+ .actuator_set_default_focus = msm_actuator_set_default_focus,
+ .actuator_init_focus = msm_actuator_init_focus,
+ .actuator_parse_i2c_params = NULL,
+ .actuator_set_position = msm_actuator_bivcm_set_position,
+ .actuator_park_lens = NULL,
+ },
+};
+
+module_init(msm_actuator_init_module);
+MODULE_DESCRIPTION("MSM ACTUATOR");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.h b/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.h
new file mode 100644
index 000000000000..78fddb2b0563
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.h
@@ -0,0 +1,114 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ACTUATOR_H
+#define MSM_ACTUATOR_H
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <soc/qcom/ais.h>
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais.h>
+#include "msm_camera_i2c.h"
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+#define MSM_ACTUATOR_MAX_VREGS (10)
+#define ACTUATOR_MAX_POLL_COUNT 10
+
+struct msm_actuator_ctrl_t;
+
+enum msm_actuator_state_t {
+ ACT_ENABLE_STATE,
+ ACT_OPS_ACTIVE,
+ ACT_OPS_INACTIVE,
+ ACT_DISABLE_STATE,
+};
+
+struct msm_actuator_func_tbl {
+ int32_t (*actuator_i2c_write_b_af)(struct msm_actuator_ctrl_t *,
+ uint8_t,
+ uint8_t);
+ int32_t (*actuator_init_step_table)(struct msm_actuator_ctrl_t *,
+ struct msm_actuator_set_info_t *);
+ int32_t (*actuator_init_focus)(struct msm_actuator_ctrl_t *,
+ uint16_t, struct reg_settings_t *);
+ int32_t (*actuator_set_default_focus)(struct msm_actuator_ctrl_t *,
+ struct msm_actuator_move_params_t *);
+ int32_t (*actuator_move_focus)(struct msm_actuator_ctrl_t *,
+ struct msm_actuator_move_params_t *);
+ void (*actuator_parse_i2c_params)(struct msm_actuator_ctrl_t *,
+ int16_t, uint32_t, uint16_t);
+ void (*actuator_write_focus)(struct msm_actuator_ctrl_t *,
+ uint16_t,
+ struct damping_params_t *,
+ int8_t,
+ int16_t);
+ int32_t (*actuator_set_position)(struct msm_actuator_ctrl_t *,
+ struct msm_actuator_set_position_t *);
+ int32_t (*actuator_park_lens)(struct msm_actuator_ctrl_t *);
+};
+
+struct msm_actuator {
+ enum actuator_type act_type;
+ struct msm_actuator_func_tbl func_tbl;
+};
+
+struct msm_actuator_vreg {
+ struct camera_vreg_t *cam_vreg;
+ void *data[MSM_ACTUATOR_MAX_VREGS];
+ int num_vreg;
+};
+
+struct msm_actuator_ctrl_t {
+ struct i2c_driver *i2c_driver;
+ struct platform_driver *pdriver;
+ struct platform_device *pdev;
+ struct msm_camera_i2c_client i2c_client;
+ enum msm_camera_device_type_t act_device_type;
+ struct msm_sd_subdev msm_sd;
+ enum af_camera_name cam_name;
+ struct mutex *actuator_mutex;
+ struct msm_actuator_func_tbl *func_tbl;
+ enum msm_camera_i2c_data_type i2c_data_type;
+ struct v4l2_subdev sdev;
+ struct v4l2_subdev_ops *act_v4l2_subdev_ops;
+
+ int16_t curr_step_pos;
+ uint16_t curr_region_index;
+ uint16_t *step_position_table;
+ struct region_params_t region_params[MAX_ACTUATOR_REGION];
+ uint16_t reg_tbl_size;
+ struct msm_actuator_reg_params_t reg_tbl[MAX_ACTUATOR_REG_TBL_SIZE];
+ uint16_t region_size;
+ void *user_data;
+ uint32_t total_steps;
+ uint16_t pwd_step;
+ uint16_t initial_code;
+ struct msm_camera_i2c_reg_array *i2c_reg_tbl;
+ uint16_t i2c_tbl_index;
+ enum cci_i2c_master_t cci_master;
+ uint32_t subdev_id;
+ enum msm_actuator_state_t actuator_state;
+ struct msm_actuator_vreg vreg_cfg;
+ struct park_lens_data_t park_lens;
+ uint32_t max_code_size;
+ struct msm_camera_gpio_conf *gconf;
+ struct msm_pinctrl_info pinctrl_info;
+ uint8_t cam_pinctrl_status;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/cci/Makefile b/drivers/media/platform/msm/ais/sensor/cci/Makefile
new file mode 100644
index 000000000000..3942508c0d66
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/cci/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_cci.o
diff --git a/drivers/media/platform/msm/ais/sensor/cci/msm_cam_cci_hwreg.h b/drivers/media/platform/msm/ais/sensor/cci/msm_cam_cci_hwreg.h
new file mode 100644
index 000000000000..d131ec583bf2
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/cci/msm_cam_cci_hwreg.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CAM_CCI_HWREG__
+#define __MSM_CAM_CCI_HWREG__
+
+#define CCI_HW_VERSION_ADDR 0x00000000
+#define CCI_RESET_CMD_ADDR 0x00000004
+#define CCI_RESET_CMD_RMSK 0x0f73f3f7
+#define CCI_M0_RESET_RMSK 0x3F1
+#define CCI_M1_RESET_RMSK 0x3F001
+#define CCI_QUEUE_START_ADDR 0x00000008
+#define CCI_SET_CID_SYNC_TIMER_ADDR 0x00000010
+#define CCI_SET_CID_SYNC_TIMER_OFFSET 0x00000004
+#define CCI_I2C_M0_SCL_CTL_ADDR 0x00000100
+#define CCI_I2C_M0_SDA_CTL_0_ADDR 0x00000104
+#define CCI_I2C_M0_SDA_CTL_1_ADDR 0x00000108
+#define CCI_I2C_M0_SDA_CTL_2_ADDR 0x0000010c
+#define CCI_I2C_M0_READ_DATA_ADDR 0x00000118
+#define CCI_I2C_M0_MISC_CTL_ADDR 0x00000110
+#define CCI_I2C_M0_READ_BUF_LEVEL_ADDR 0x0000011C
+#define CCI_HALT_REQ_ADDR 0x00000034
+#define CCI_M0_HALT_REQ_RMSK 0x1
+#define CCI_M1_HALT_REQ_RMSK 0x2
+#define CCI_I2C_M1_SCL_CTL_ADDR 0x00000200
+#define CCI_I2C_M1_SDA_CTL_0_ADDR 0x00000204
+#define CCI_I2C_M1_SDA_CTL_1_ADDR 0x00000208
+#define CCI_I2C_M1_SDA_CTL_2_ADDR 0x0000020c
+#define CCI_I2C_M1_MISC_CTL_ADDR 0x00000210
+#define CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR 0x00000304
+#define CCI_I2C_M0_Q0_CUR_CMD_ADDR 0x00000308
+#define CCI_I2C_M0_Q0_REPORT_STATUS_ADDR 0x0000030c
+#define CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR 0x00000300
+#define CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x00000310
+#define CCI_IRQ_MASK_0_ADDR 0x00000c04
+#define CCI_IRQ_MASK_0_RMSK 0x7fff7ff7
+#define CCI_IRQ_CLEAR_0_ADDR 0x00000c08
+#define CCI_IRQ_STATUS_0_ADDR 0x00000c0c
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK 0x4000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK 0x2000000
+#define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK 0x1000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK 0x100000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK 0x10000
+#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK 0x1000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK 0x100
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK 0x10
+#define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK 0x18000EE6
+#define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK 0x60EE6000
+#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK 0x1
+#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x00000c00
+
+#define DEBUG_TOP_REG_START 0x0
+#define DEBUG_TOP_REG_COUNT 14
+#define DEBUG_MASTER_REG_START 0x100
+#define DEBUG_MASTER_REG_COUNT 8
+#define DEBUG_MASTER_QUEUE_REG_START 0x300
+#define DEBUG_MASTER_QUEUE_REG_COUNT 6
+#define DEBUG_INTR_REG_START 0xC00
+#define DEBUG_INTR_REG_COUNT 7
+#endif /* __MSM_CAM_CCI_HWREG__ */
diff --git a/drivers/media/platform/msm/ais/sensor/cci/msm_cci.c b/drivers/media/platform/msm/ais/sensor/cci/msm_cci.c
new file mode 100644
index 000000000000..42f8c4dcfaa0
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/cci/msm_cci.c
@@ -0,0 +1,2167 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include "msm_sd.h"
+#include "msm_cci.h"
+#include "msm_cam_cci_hwreg.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_dt_util.h"
+#include "cam_hw_ops.h"
+
+#define V4L2_IDENT_CCI 50005
+#define CCI_I2C_QUEUE_0_SIZE 64
+#define CCI_I2C_Q0_SIZE_128W 128
+#define CCI_I2C_QUEUE_1_SIZE 16
+#define CCI_I2C_Q1_SIZE_32W 32
+#define CYCLES_PER_MICRO_SEC_DEFAULT 4915
+#define CCI_MAX_DELAY 1000000
+
+#define CCI_TIMEOUT msecs_to_jiffies(600)
+
+/* TODO move this somewhere else */
+#define MSM_CCI_DRV_NAME "msm_cci"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#undef CCI_DBG
+#ifdef MSM_CCI_DEBUG
+#define CCI_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CCI_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+/* Max bytes that can be read per CCI read transaction */
+#define CCI_READ_MAX 12
+#define CCI_I2C_READ_MAX_RETRIES 3
+#define CCI_I2C_MAX_READ 8192
+#define CCI_I2C_MAX_WRITE 8192
+
+#define PRIORITY_QUEUE (QUEUE_0)
+#define SYNC_QUEUE (QUEUE_1)
+
+static struct v4l2_subdev *g_cci_subdev;
+
+static void msm_cci_dump_registers(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master, enum cci_i2c_queue_t queue)
+{
+ uint32_t read_val = 0;
+ uint32_t i = 0;
+ uint32_t reg_offset = 0;
+
+ /* CCI Top Registers */
+ CCI_DBG(" **** %s : %d CCI TOP Registers ****\n", __func__, __LINE__);
+ for (i = 0; i < DEBUG_TOP_REG_COUNT; i++) {
+ reg_offset = DEBUG_TOP_REG_START + i * 4;
+ read_val = msm_camera_io_r_mb(cci_dev->base + reg_offset);
+ CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+ __func__, __LINE__, reg_offset, read_val);
+ }
+
+ /* CCI Master registers */
+ CCI_DBG(" **** %s : %d CCI MASTER%d Registers ****\n",
+ __func__, __LINE__, master);
+ for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
+ if (i == 6)
+ continue;
+ reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
+ read_val = msm_camera_io_r_mb(cci_dev->base + reg_offset);
+ CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+ __func__, __LINE__, reg_offset, read_val);
+ }
+
+ /* CCI Master Queue registers */
+ CCI_DBG(" **** %s : %d CCI MASTER%d QUEUE%d Registers ****\n",
+ __func__, __LINE__, master, queue);
+ for (i = 0; i < DEBUG_MASTER_QUEUE_REG_COUNT; i++) {
+ reg_offset = DEBUG_MASTER_QUEUE_REG_START + master*0x200 +
+ queue*0x100 + i * 4;
+ read_val = msm_camera_io_r_mb(cci_dev->base + reg_offset);
+ CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+ __func__, __LINE__, reg_offset, read_val);
+ }
+
+ /* CCI Interrupt registers */
+ CCI_DBG(" **** %s : %d CCI Interrupt Registers ****\n",
+ __func__, __LINE__);
+ for (i = 0; i < DEBUG_INTR_REG_COUNT; i++) {
+ reg_offset = DEBUG_INTR_REG_START + i * 4;
+ read_val = msm_camera_io_r_mb(cci_dev->base + reg_offset);
+ CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+ __func__, __LINE__, reg_offset, read_val);
+ }
+}
+
+static int32_t msm_cci_set_clk_param(struct cci_device *cci_dev,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ struct msm_cci_clk_params_t *clk_params = NULL;
+ enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+ enum i2c_freq_mode_t i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+
+ if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
+ pr_err("%s:%d invalid i2c_freq_mode = %d",
+ __func__, __LINE__, i2c_freq_mode);
+ return -EINVAL;
+ }
+
+ if (cci_dev->i2c_freq_mode[master] == i2c_freq_mode)
+ return 0;
+
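+ /* Program the SCL/SDA timing registers of the selected master for this I2C frequency mode */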
+ clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
+ if (master == MASTER_0) {
+ msm_camera_io_w_mb(clk_params->hw_thigh << 16 |
+ clk_params->hw_tlow,
+ cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_tsu_sto << 16 |
+ clk_params->hw_tsu_sta,
+ cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_thd_dat << 16 |
+ clk_params->hw_thd_sta,
+ cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_tbuf,
+ cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
+ clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+ cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
+ } else if (master == MASTER_1) {
+ msm_camera_io_w_mb(clk_params->hw_thigh << 16 |
+ clk_params->hw_tlow,
+ cci_dev->base + CCI_I2C_M1_SCL_CTL_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_tsu_sto << 16 |
+ clk_params->hw_tsu_sta,
+ cci_dev->base + CCI_I2C_M1_SDA_CTL_0_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_thd_dat << 16 |
+ clk_params->hw_thd_sta,
+ cci_dev->base + CCI_I2C_M1_SDA_CTL_1_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_tbuf,
+ cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
+ clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+ cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
+ }
+ cci_dev->i2c_freq_mode[master] = i2c_freq_mode;
+ return 0;
+}
+
+static void msm_cci_flush_queue(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master)
+{
+ int32_t rc = 0;
+
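+ /* Request a halt of this master and wait for the ack; on timeout fall back to a master reset */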
+ msm_camera_io_w_mb(1 << master, cci_dev->base + CCI_HALT_REQ_ADDR);
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
+ if (rc < 0) {
+ pr_err("%s:%d wait failed\n", __func__, __LINE__);
+ } else if (rc == 0) {
+ pr_err("%s:%d wait timeout\n", __func__, __LINE__);
+
+ /* Set reset pending flag to TRUE */
+ cci_dev->cci_master_info[master].reset_pending = TRUE;
+
+ /* Set proper mask to RESET CMD address based on MASTER */
+ if (master == MASTER_0)
+ msm_camera_io_w_mb(CCI_M0_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+ else
+ msm_camera_io_w_mb(CCI_M1_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+
+ /* wait for reset done irq */
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].reset_complete,
+ CCI_TIMEOUT);
+ if (rc <= 0)
+ pr_err("%s:%d wait failed %d\n", __func__, __LINE__,
+ rc);
+ }
+}
+
+static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
+ uint32_t len,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+ uint32_t read_val = 0;
+ uint32_t reg_offset = master * 0x200 + queue * 0x100;
+
+ read_val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
+ __func__, __LINE__, read_val, len,
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+ if ((read_val + len + 1) > cci_dev->
+ cci_i2c_queue_info[master][queue].max_queue_size) {
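+ /* Queue would overflow: append a REPORT command, start the queue and wait for it to drain */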
+ uint32_t reg_val = 0;
+ uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+
+ CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+ msm_camera_io_w_mb(report_val,
+ cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+ read_val++;
+ CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d\n",
+ __func__, __LINE__, read_val, queue);
+ msm_camera_io_w_mb(read_val, cci_dev->base +
+ CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+ reg_val = 1 << ((master * 2) + queue);
+ CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+ atomic_set(&cci_dev->cci_master_info[master].
+ done_pending[queue], 1);
+ msm_camera_io_w_mb(reg_val, cci_dev->base +
+ CCI_QUEUE_START_ADDR);
+ CDBG("%s line %d wait_for_completion_timeout\n",
+ __func__, __LINE__);
+ atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+ rc = wait_for_completion_timeout(&cci_dev->
+ cci_master_info[master].report_q[queue], CCI_TIMEOUT);
+ if (rc <= 0) {
+ pr_err("%s: wait_for_completion_timeout %d\n",
+ __func__, __LINE__);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ msm_cci_flush_queue(cci_dev, master);
+ return rc;
+ }
+ rc = cci_dev->cci_master_info[master].status;
+ if (rc < 0)
+ pr_err("%s failed rc %d\n", __func__, rc);
+ }
+ return rc;
+}
+
+static int32_t msm_cci_write_i2c_queue(struct cci_device *cci_dev,
+ uint32_t val,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+ uint32_t reg_offset = master * 0x200 + queue * 0x100;
+
+ if (!cci_dev) {
+ pr_err("%s: failed %d", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ rc = msm_cci_validate_queue(cci_dev, 1, master, queue);
+ if (rc < 0) {
+ pr_err("%s: failed %d", __func__, __LINE__);
+ return rc;
+ }
+ CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
+ __func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset, val);
+ msm_camera_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+ return rc;
+}
+
+static uint32_t msm_cci_wait(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+
+ if (!cci_dev) {
+ pr_err("%s: failed %d", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ rc = wait_for_completion_timeout(&cci_dev->
+ cci_master_info[master].report_q[queue], CCI_TIMEOUT);
+ CDBG("%s line %d wait DONE_for_completion_timeout\n",
+ __func__, __LINE__);
+
+ if (rc <= 0) {
+ msm_cci_dump_registers(cci_dev, master, queue);
+ pr_err("%s: %d wait for queue: %d\n",
+ __func__, __LINE__, queue);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ msm_cci_flush_queue(cci_dev, master);
+ return rc;
+ }
+ rc = cci_dev->cci_master_info[master].status;
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int32_t msm_cci_addr_to_num_bytes(
+ enum msm_camera_i2c_reg_addr_type addr_type)
+{
+ int32_t retVal;
+
+ switch (addr_type) {
+ case MSM_CAMERA_I2C_BYTE_ADDR:
+ retVal = 1;
+ break;
+ case MSM_CAMERA_I2C_WORD_ADDR:
+ retVal = 2;
+ break;
+ case MSM_CAMERA_I2C_3B_ADDR:
+ retVal = 3;
+ break;
+ default:
+ pr_err("%s: %d failed: %d\n", __func__, __LINE__, addr_type);
+ retVal = 1;
+ break;
+ }
+ return retVal;
+}
+
+static int32_t msm_cci_data_to_num_bytes(
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t retVal;
+
+ switch (data_type) {
+ case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ retVal = 1;
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ retVal = 2;
+ break;
+ case MSM_CAMERA_I2C_DWORD_DATA:
+ retVal = 4;
+ break;
+ default:
+ pr_err("%s: %d failed: %d\n", __func__, __LINE__, data_type);
+ retVal = 1;
+ break;
+ }
+ return retVal;
+}
+
+static int32_t msm_cci_calc_cmd_len(struct cci_device *cci_dev,
+ struct msm_camera_cci_ctrl *c_ctrl, uint32_t cmd_size,
+ struct msm_camera_i2c_reg_array *i2c_cmd, uint32_t *pack)
+{
+ uint8_t i;
+ uint32_t len = 0;
+ uint8_t data_len = 0, addr_len = 0;
+ uint8_t pack_max_len;
+ struct msm_camera_i2c_reg_setting *msg;
+ struct msm_camera_i2c_reg_array *cmd = i2c_cmd;
+ uint32_t size = cmd_size;
+
+ if (!cci_dev || !c_ctrl) {
+ pr_err("%s: failed %d", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ msg = &c_ctrl->cfg.cci_i2c_write_cfg;
+ *pack = 0;
+
+ if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) {
+ addr_len = msm_cci_addr_to_num_bytes(msg->addr_type);
+ len = (size + addr_len) <= (cci_dev->payload_size) ?
+ (size + addr_len):cci_dev->payload_size;
+ } else {
+ addr_len = msm_cci_addr_to_num_bytes(msg->addr_type);
+ data_len = msm_cci_data_to_num_bytes(msg->data_type);
+ len = data_len + addr_len;
+ pack_max_len = size < (cci_dev->payload_size-len) ?
+ size : (cci_dev->payload_size-len);
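+ /* Pack writes to consecutive register addresses into one command until a delay or the payload limit is hit */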
+ for (i = 0; i < pack_max_len;) {
+ if (cmd->delay || ((cmd - i2c_cmd) >= (cmd_size - 1)))
+ break;
+ if (cmd->reg_addr + 1 ==
+ (cmd+1)->reg_addr) {
+ len += data_len;
+ *pack += data_len;
+ } else
+ break;
+ i += data_len;
+ cmd++;
+ }
+ }
+
+ if (len > cci_dev->payload_size) {
+ pr_err("Len error: %d", len);
+ return -EINVAL;
+ }
+
+ len += 1; /*add i2c WR command*/
+ len = len/4 + 1;
+
+ return len;
+}
+
+static void msm_cci_load_report_cmd(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ uint32_t read_val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+
+ CDBG("%s:%d CCI_I2C_REPORT_CMD curr_w_cnt: %d\n",
+ __func__, __LINE__, read_val);
+ msm_camera_io_w_mb(report_val,
+ cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+ read_val++;
+
+ CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
+ __func__, __LINE__, read_val);
+ msm_camera_io_w_mb(read_val, cci_dev->base +
+ CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+}
+
+static int32_t msm_cci_wait_report_cmd(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ uint32_t reg_val = 1 << ((master * 2) + queue);
+
+ msm_cci_load_report_cmd(cci_dev, master, queue);
+ atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+ atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1);
+ msm_camera_io_w_mb(reg_val, cci_dev->base +
+ CCI_QUEUE_START_ADDR);
+ return msm_cci_wait(cci_dev, master, queue);
+}
+
+static void msm_cci_process_half_q(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ uint32_t reg_val = 1 << ((master * 2) + queue);
+
+ if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
+ msm_cci_load_report_cmd(cci_dev, master, queue);
+ atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+ msm_camera_io_w_mb(reg_val, cci_dev->base +
+ CCI_QUEUE_START_ADDR);
+ }
+}
+
+static int32_t msm_cci_process_full_q(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+
+ if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 1) {
+ atomic_set(&cci_dev->cci_master_info[master].
+ done_pending[queue], 1);
+ rc = msm_cci_wait(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ } else {
+ rc = msm_cci_wait_report_cmd(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_cci_lock_queue(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue, uint32_t en)
+{
+ uint32_t val;
+
+ if (queue != PRIORITY_QUEUE)
+ return 0;
+
+ val = en ? CCI_I2C_LOCK_CMD : CCI_I2C_UNLOCK_CMD;
+ return msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+}
+
+static int32_t msm_cci_transfer_end(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+
+ if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
+ rc = msm_cci_lock_queue(cci_dev, master, queue, 0);
+ if (rc < 0) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return rc;
+ }
+ rc = msm_cci_wait_report_cmd(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ } else {
+ atomic_set(&cci_dev->cci_master_info[master].
+ done_pending[queue], 1);
+ rc = msm_cci_wait(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ rc = msm_cci_lock_queue(cci_dev, master, queue, 0);
+ if (rc < 0) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return rc;
+ }
+ rc = msm_cci_wait_report_cmd(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_cci_get_queue_free_size(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ uint32_t read_val = 0;
+ uint32_t reg_offset = master * 0x200 + queue * 0x100;
+
+ read_val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d\n",
+ __func__, __LINE__, read_val,
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+ return (cci_dev->
+ cci_i2c_queue_info[master][queue].max_queue_size) -
+ read_val;
+}
+
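+/*
+ * Build and enqueue the I2C write sequence for c_ctrl: SET_PARAM, optional
+ * WAIT_SYNC and LOCK commands, then packed WRITE commands (with WAIT
+ * commands for per-register delays), flushing the queue at the half/full
+ * thresholds before ending the transfer.
+ */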
+static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
+ struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+ enum cci_i2c_sync sync_en)
+{
+ uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
+ int32_t rc = 0, free_size = 0, en_seq_write = 0;
+ uint32_t cmd = 0, delay = 0;
+ uint8_t data[12];
+ uint16_t reg_addr = 0;
+ struct msm_camera_i2c_reg_setting *i2c_msg =
+ &c_ctrl->cfg.cci_i2c_write_cfg;
+ uint16_t cmd_size = i2c_msg->size;
+ struct msm_camera_i2c_reg_array *i2c_cmd = i2c_msg->reg_setting;
+ enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+
+ uint32_t read_val = 0;
+ uint32_t reg_offset;
+ uint32_t val = 0;
+ uint32_t max_queue_size, queue_size = 0;
+
+ if (i2c_cmd == NULL) {
+ pr_err("%s:%d failed: i2c_cmd is NULL\n", __func__,
+ __LINE__);
+ return -EINVAL;
+ }
+
+ if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) {
+ pr_err("%s:%d failed: invalid cmd_size %d\n",
+ __func__, __LINE__, cmd_size);
+ return -EINVAL;
+ }
+
+ CDBG("%s addr type %d data type %d cmd_size %d\n", __func__,
+ i2c_msg->addr_type, i2c_msg->data_type, cmd_size);
+
+ if (i2c_msg->addr_type >= MSM_CAMERA_I2C_ADDR_TYPE_MAX) {
+ pr_err("%s:%d failed: invalid addr_type 0x%X\n",
+ __func__, __LINE__, i2c_msg->addr_type);
+ return -EINVAL;
+ }
+ if (i2c_msg->data_type >= MSM_CAMERA_I2C_DATA_TYPE_MAX) {
+ pr_err("%s:%d failed: invalid data_type 0x%X\n",
+ __func__, __LINE__, i2c_msg->data_type);
+ return -EINVAL;
+ }
+ reg_offset = master * 0x200 + queue * 0x100;
+
+ msm_camera_io_w_mb(cci_dev->cci_wait_sync_cfg.cid,
+ cci_dev->base + CCI_SET_CID_SYNC_TIMER_ADDR +
+ cci_dev->cci_wait_sync_cfg.csid *
+ CCI_SET_CID_SYNC_TIMER_OFFSET);
+
+ val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+ c_ctrl->cci_info->retries << 16 |
+ c_ctrl->cci_info->id_map << 18;
+
+ CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
+ __func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset, val);
+ msm_camera_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+
+ atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0);
+
+ max_queue_size = cci_dev->cci_i2c_queue_info[master][queue].
+ max_queue_size;
+
+ if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ)
+ queue_size = max_queue_size;
+ else
+ queue_size = max_queue_size/2;
+ reg_addr = i2c_cmd->reg_addr;
+
+ if (sync_en == MSM_SYNC_ENABLE && cci_dev->valid_sync &&
+ cmd_size < max_queue_size) {
+ val = CCI_I2C_WAIT_SYNC_CMD |
+ ((cci_dev->cci_wait_sync_cfg.line) << 4);
+ msm_camera_io_w_mb(val,
+ cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+ }
+
+ rc = msm_cci_lock_queue(cci_dev, master, queue, 1);
+ if (rc < 0) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return rc;
+ }
+
+ while (cmd_size) {
+ uint32_t pack = 0;
+
+ len = msm_cci_calc_cmd_len(cci_dev, c_ctrl, cmd_size,
+ i2c_cmd, &pack);
+ if (len <= 0) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ read_val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ CDBG("%s line %d CUR_WORD_CNT_ADDR %d len %d max %d\n",
+ __func__, __LINE__, read_val, len, max_queue_size);
+ /* + 1 - space allocation for Report CMD */
+ if ((read_val + len + 1) > queue_size) {
+ if ((read_val + len + 1) > max_queue_size) {
+ rc = msm_cci_process_full_q(cci_dev,
+ master, queue);
+ if (rc < 0) {
+ pr_err("%s failed line %d\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ continue;
+ }
+ msm_cci_process_half_q(cci_dev, master, queue);
+ }
+
+ CDBG("%s cmd_size %d addr 0x%x data 0x%x\n", __func__,
+ cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
+ delay = i2c_cmd->delay;
+ i = 0;
+ data[i++] = CCI_I2C_WRITE_CMD;
+
+ /* in case of multiple command
+ * MSM_CCI_I2C_WRITE : address is not continuous, so update
+ * address for a new packet.
+ * MSM_CCI_I2C_WRITE_SEQ : address is continuous, need to keep
+ * the incremented address for a
+ * new packet
+ */
+ if (c_ctrl->cmd == MSM_CCI_I2C_WRITE ||
+ c_ctrl->cmd == MSM_CCI_I2C_WRITE_ASYNC ||
+ c_ctrl->cmd == MSM_CCI_I2C_WRITE_SYNC ||
+ c_ctrl->cmd == MSM_CCI_I2C_WRITE_SYNC_BLOCK)
+ reg_addr = i2c_cmd->reg_addr;
+
+ if (en_seq_write == 0) {
+ /* either byte or word addr */
+ if (i2c_msg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
+ data[i++] = reg_addr;
+ else {
+ data[i++] = (reg_addr & 0xFF00) >> 8;
+ data[i++] = reg_addr & 0x00FF;
+ }
+ }
+ /* max of 10 data bytes */
+ do {
+ if (i2c_msg->data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+ data[i++] = i2c_cmd->reg_data;
+ reg_addr++;
+ } else {
+ if ((i + 1) <= cci_dev->payload_size) {
+ data[i++] = (i2c_cmd->reg_data &
+ 0xFF00) >> 8; /* MSB */
+ data[i++] = i2c_cmd->reg_data &
+ 0x00FF; /* LSB */
+ reg_addr++;
+ } else
+ break;
+ }
+ i2c_cmd++;
+ --cmd_size;
+ } while (((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) || pack--) &&
+ (cmd_size > 0) && (i <= cci_dev->payload_size));
+ free_size = msm_cci_get_queue_free_size(cci_dev, master,
+ queue);
+ if ((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) &&
+ ((i-1) == MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11) &&
+ cci_dev->support_seq_write && cmd_size > 0 &&
+ free_size > BURST_MIN_FREE_SIZE) {
+ data[0] |= 0xF0;
+ en_seq_write = 1;
+ } else {
+ data[0] |= ((i-1) << 4);
+ en_seq_write = 0;
+ }
+ len = ((i-1)/4) + 1;
+
+ read_val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ for (h = 0, k = 0; h < len; h++) {
+ cmd = 0;
+ for (j = 0; (j < 4 && k < i); j++)
+ cmd |= (data[k++] << (j * 8));
+ CDBG("%s LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d\n",
+ __func__, cmd, queue, len, read_val);
+ msm_camera_io_w_mb(cmd, cci_dev->base +
+ CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ master * 0x200 + queue * 0x100);
+
+ read_val += 1;
+ msm_camera_io_w_mb(read_val, cci_dev->base +
+ CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+ }
+
+ if ((delay > 0) && (delay < CCI_MAX_DELAY) &&
+ en_seq_write == 0) {
+ cmd = (uint32_t)((delay * cci_dev->cycles_per_us) /
+ 0x100);
+ cmd <<= 4;
+ cmd |= CCI_I2C_WAIT_CMD;
+ CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
+ __func__, cmd);
+ msm_camera_io_w_mb(cmd, cci_dev->base +
+ CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ master * 0x200 + queue * 0x100);
+ read_val += 1;
+ msm_camera_io_w_mb(read_val, cci_dev->base +
+ CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+ }
+ }
+
+ rc = msm_cci_transfer_end(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ return rc;
+}
+
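+/*
+ * Perform a single read transaction on QUEUE_1: queue SET_PARAM, LOCK, the
+ * register address, READ and UNLOCK commands, start the queue, wait for the
+ * read-done interrupt, then drain the read FIFO (the first byte returned is
+ * the slave id and is discarded).
+ */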
+static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ int32_t rc = 0;
+ uint32_t val = 0;
+ int32_t read_words = 0, exp_words = 0;
+ int32_t index = 0, first_byte = 0;
+ uint32_t i = 0;
+ enum cci_i2c_master_t master;
+ enum cci_i2c_queue_t queue = QUEUE_1;
+ struct cci_device *cci_dev = NULL;
+ struct msm_camera_cci_i2c_read_cfg *read_cfg = NULL;
+
+ CDBG("%s line %d\n", __func__, __LINE__);
+ cci_dev = v4l2_get_subdevdata(sd);
+ master = c_ctrl->cci_info->cci_i2c_master;
+ read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+
+ if (master >= MASTER_MAX || master < 0) {
+ pr_err("%s:%d Invalid I2C master %d\n",
+ __func__, __LINE__, master);
+ return -EINVAL;
+ }
+
+ mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+ /* Set the I2C Frequency */
+ rc = msm_cci_set_clk_param(cci_dev, c_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_cci_set_clk_param failed rc = %d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ /*
+ * Call validate queue to make sure queue is empty before starting.
+ * If this call fails, don't proceed with i2c_read call. This is to
+ * avoid overflow / underflow of queue
+ */
+ rc = msm_cci_validate_queue(cci_dev,
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
+ master, queue);
+ if (rc < 0) {
+ pr_err("%s:%d Initial validation failed rc %d\n", __func__,
+ __LINE__, rc);
+ goto ERROR;
+ }
+
+ if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+ pr_err("%s:%d More than max retries\n", __func__,
+ __LINE__);
+ goto ERROR;
+ }
+
+ if (read_cfg->data == NULL) {
+ pr_err("%s:%d Data ptr is NULL\n", __func__,
+ __LINE__);
+ goto ERROR;
+ }
+
+ CDBG("%s master %d, queue %d\n", __func__, master, queue);
+ CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+ c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+ c_ctrl->cci_info->id_map);
+ val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+ c_ctrl->cci_info->retries << 16 |
+ c_ctrl->cci_info->id_map << 18;
+ rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ val = CCI_I2C_LOCK_CMD;
+ rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ if (read_cfg->addr_type >= MSM_CAMERA_I2C_ADDR_TYPE_MAX) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
+ for (i = 0; i < read_cfg->addr_type; i++) {
+ val |= ((read_cfg->addr >> (i << 3)) & 0xFF) <<
+ ((read_cfg->addr_type - i) << 3);
+ }
+
+ rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+ rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ val = CCI_I2C_UNLOCK_CMD;
+ rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ val = msm_camera_io_r_mb(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+ + master * 0x200 + queue * 0x100);
+ CDBG("%s cur word cnt 0x%x\n", __func__, val);
+ msm_camera_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+ + master * 0x200 + queue * 0x100);
+
+ val = 1 << ((master * 2) + queue);
+ msm_camera_io_w_mb(val, cci_dev->base + CCI_QUEUE_START_ADDR);
+ CDBG("%s:%d E wait_for_completion_timeout\n", __func__,
+ __LINE__);
+
+ rc = wait_for_completion_timeout(&cci_dev->
+ cci_master_info[master].reset_complete, CCI_TIMEOUT);
+ if (rc <= 0) {
+ msm_cci_dump_registers(cci_dev, master, queue);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ pr_err("%s: %d wait_for_completion_timeout rc = %d\n",
+ __func__, __LINE__, rc);
+ msm_cci_flush_queue(cci_dev, master);
+ goto ERROR;
+ } else {
+ rc = 0;
+ }
+
+ read_words = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+ exp_words = ((read_cfg->num_byte / 4) + 1);
+ if (read_words != exp_words) {
+ pr_err("%s:%d read_words = %d, exp words = %d\n", __func__,
+ __LINE__, read_words, exp_words);
+ memset(read_cfg->data, 0, read_cfg->num_byte);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ index = 0;
+ CDBG("%s index %d num_byte %d\n", __func__, index,
+ read_cfg->num_byte);
+ first_byte = 0;
+ do {
+ val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+ CDBG("%s read val 0x%x\n", __func__, val);
+ for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
+ CDBG("%s i %d index %d\n", __func__, i, index);
+ if (!first_byte) {
+ CDBG("%s sid 0x%x\n", __func__, val & 0xFF);
+ first_byte++;
+ } else {
+ read_cfg->data[index] =
+ (val >> (i * 8)) & 0xFF;
+ CDBG("%s data[%d] 0x%x\n", __func__, index,
+ read_cfg->data[index]);
+ index++;
+ }
+ }
+ } while (--read_words > 0);
+ERROR:
+ mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+ return rc;
+}
+
+static int32_t msm_cci_i2c_read_bytes(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ int32_t rc = 0;
+ struct cci_device *cci_dev = NULL;
+ enum cci_i2c_master_t master;
+ struct msm_camera_cci_i2c_read_cfg *read_cfg = NULL;
+ uint16_t read_bytes = 0;
+
+ if (!sd || !c_ctrl) {
+ pr_err("%s:%d sd %pK c_ctrl %pK\n", __func__,
+ __LINE__, sd, c_ctrl);
+ return -EINVAL;
+ }
+ if (!c_ctrl->cci_info) {
+ pr_err("%s:%d cci_info NULL\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev) {
+ pr_err("%s:%d cci_dev NULL\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (cci_dev->cci_state != CCI_STATE_ENABLED) {
+ pr_err("%s invalid cci state %d\n",
+ __func__, cci_dev->cci_state);
+ return -EINVAL;
+ }
+
+ if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+ || c_ctrl->cci_info->cci_i2c_master < 0) {
+ pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ master = c_ctrl->cci_info->cci_i2c_master;
+ read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+ if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) {
+ pr_err("%s:%d failed: invalid num_byte %u\n",
+ __func__, __LINE__, read_cfg->num_byte);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+
+ read_bytes = read_cfg->num_byte;
+ do {
+ if (read_bytes > CCI_READ_MAX)
+ read_cfg->num_byte = CCI_READ_MAX;
+ else
+ read_cfg->num_byte = read_bytes;
+ rc = msm_cci_i2c_read(sd, c_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ if (read_bytes > CCI_READ_MAX) {
+ read_cfg->addr += CCI_READ_MAX;
+ read_cfg->data += CCI_READ_MAX;
+ read_bytes -= CCI_READ_MAX;
+ } else {
+ read_bytes = 0;
+ }
+ } while (read_bytes);
+ERROR:
+ return rc;
+}
+
+static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+ enum cci_i2c_sync sync_en)
+{
+ int32_t rc = 0;
+ struct cci_device *cci_dev;
+ enum cci_i2c_master_t master;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (cci_dev->cci_state != CCI_STATE_ENABLED) {
+ pr_err("%s invalid cci state %d\n",
+ __func__, cci_dev->cci_state);
+ return -EINVAL;
+ }
+ master = c_ctrl->cci_info->cci_i2c_master;
+ CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+ c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+ c_ctrl->cci_info->id_map);
+
+ /* Set the I2C Frequency */
+ rc = msm_cci_set_clk_param(cci_dev, c_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_cci_set_clk_param failed rc = %d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ /*
+ * Call validate queue to make sure queue is empty before starting.
+ * If this call fails, don't proceed with i2c_write call. This is to
+ * avoid overflow / underflow of queue
+ */
+ rc = msm_cci_validate_queue(cci_dev,
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size-1,
+ master, queue);
+ if (rc < 0) {
+ pr_err("%s:%d Initial validation failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+ pr_err("%s:%d More than max retries\n", __func__,
+ __LINE__);
+ goto ERROR;
+ }
+ rc = msm_cci_data_queue(cci_dev, c_ctrl, queue, sync_en);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ERROR:
+ return rc;
+}
+
+static void msm_cci_write_async_helper(struct work_struct *work)
+{
+ int rc;
+ struct cci_device *cci_dev;
+ struct cci_write_async *write_async =
+ container_of(work, struct cci_write_async, work);
+ struct msm_camera_i2c_reg_setting *i2c_msg;
+ enum cci_i2c_master_t master;
+ struct msm_camera_cci_master_info *cci_master_info;
+
+ cci_dev = write_async->cci_dev;
+ i2c_msg = &write_async->c_ctrl.cfg.cci_i2c_write_cfg;
+ master = write_async->c_ctrl.cci_info->cci_i2c_master;
+ cci_master_info = &cci_dev->cci_master_info[master];
+
+ mutex_lock(&cci_master_info->mutex_q[write_async->queue]);
+ rc = msm_cci_i2c_write(&cci_dev->msm_sd.sd,
+ &write_async->c_ctrl, write_async->queue, write_async->sync_en);
+ mutex_unlock(&cci_master_info->mutex_q[write_async->queue]);
+ if (rc < 0)
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+
+ kfree(write_async->c_ctrl.cfg.cci_i2c_write_cfg.reg_setting);
+ kfree(write_async);
+
+ CDBG("%s: %d Exit\n", __func__, __LINE__);
+}
+
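+/*
+ * Queue an asynchronous write: duplicate the control structure and its
+ * register table, then hand the copy to the per-queue workqueue. The copies
+ * are freed by the worker once the write completes.
+ */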
+static int32_t msm_cci_i2c_write_async(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+ enum cci_i2c_sync sync_en)
+{
+ struct cci_write_async *write_async;
+ struct cci_device *cci_dev;
+ struct msm_camera_i2c_reg_setting *cci_i2c_write_cfg;
+ struct msm_camera_i2c_reg_setting *cci_i2c_write_cfg_w;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+
+ CDBG("%s: %d Enter\n", __func__, __LINE__);
+
+ write_async = kzalloc(sizeof(*write_async), GFP_KERNEL);
+ if (!write_async)
+ return -ENOMEM;
+
+ INIT_WORK(&write_async->work, msm_cci_write_async_helper);
+ write_async->cci_dev = cci_dev;
+ write_async->c_ctrl = *c_ctrl;
+ write_async->queue = queue;
+ write_async->sync_en = sync_en;
+
+ cci_i2c_write_cfg = &c_ctrl->cfg.cci_i2c_write_cfg;
+ cci_i2c_write_cfg_w = &write_async->c_ctrl.cfg.cci_i2c_write_cfg;
+
+ if (cci_i2c_write_cfg->size == 0) {
+ pr_err("%s: %d Size = 0\n", __func__, __LINE__);
+ kfree(write_async);
+ return -EINVAL;
+ }
+
+ cci_i2c_write_cfg_w->reg_setting =
+ kzalloc(sizeof(struct msm_camera_i2c_reg_array)*
+ cci_i2c_write_cfg->size, GFP_KERNEL);
+ if (!cci_i2c_write_cfg_w->reg_setting) {
+ pr_err("%s: %d Couldn't allocate memory\n", __func__, __LINE__);
+ kfree(write_async);
+ return -ENOMEM;
+ }
+ memcpy(cci_i2c_write_cfg_w->reg_setting,
+ cci_i2c_write_cfg->reg_setting,
+ (sizeof(struct msm_camera_i2c_reg_array)*
+ cci_i2c_write_cfg->size));
+
+ cci_i2c_write_cfg_w->addr_type = cci_i2c_write_cfg->addr_type;
+ cci_i2c_write_cfg_w->data_type = cci_i2c_write_cfg->data_type;
+ cci_i2c_write_cfg_w->size = cci_i2c_write_cfg->size;
+ cci_i2c_write_cfg_w->delay = cci_i2c_write_cfg->delay;
+
+ queue_work(cci_dev->write_wq[write_async->queue], &write_async->work);
+
+ CDBG("%s: %d Exit\n", __func__, __LINE__);
+
+ return 0;
+}
+
+static int32_t msm_cci_pinctrl_init(struct cci_device *cci_dev)
+{
+ struct msm_pinctrl_info *cci_pctrl = NULL;
+
+ cci_pctrl = &cci_dev->cci_pinctrl;
+ cci_pctrl->pinctrl = devm_pinctrl_get(&cci_dev->pdev->dev);
+ if (IS_ERR_OR_NULL(cci_pctrl->pinctrl)) {
+ pr_err("%s:%d devm_pinctrl_get cci_pinctrl failed\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ cci_pctrl->gpio_state_active = pinctrl_lookup_state(
+ cci_pctrl->pinctrl,
+ CCI_PINCTRL_STATE_DEFAULT);
+ if (IS_ERR_OR_NULL(cci_pctrl->gpio_state_active)) {
+ pr_err("%s:%d look up state for active state failed\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ cci_pctrl->gpio_state_suspend = pinctrl_lookup_state(
+ cci_pctrl->pinctrl,
+ CCI_PINCTRL_STATE_SLEEP);
+ if (IS_ERR_OR_NULL(cci_pctrl->gpio_state_suspend)) {
+ pr_err("%s:%d look up state for suspend state failed\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static uint32_t msm_cci_cycles_per_ms(unsigned long clk)
+{
+ uint32_t cycles_per_us;
+
+ if (clk)
+ cycles_per_us = ((clk/1000)*256)/1000;
+ else {
+ pr_err("%s:%d failed: clk is 0, using default: %d",
+ __func__, __LINE__, CYCLES_PER_MICRO_SEC_DEFAULT);
+ cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
+ }
+ return cycles_per_us;
+}
+
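+/*
+ * Select the clock rate table matching the requested I2C frequency mode's
+ * source clock and derive cycles_per_us from it. Falls back to the first
+ * table (and the default cycle count) when no cci_src_clk entry exists.
+ */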
+static uint32_t *msm_cci_get_clk_rates(struct cci_device *cci_dev,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ uint32_t j;
+ int32_t idx;
+ uint32_t cci_clk_src;
+ unsigned long clk;
+
+ struct msm_cci_clk_params_t *clk_params = NULL;
+ enum i2c_freq_mode_t i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+ struct device_node *of_node = cci_dev->pdev->dev.of_node;
+
+ if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
+ pr_err("%s:%d invalid i2c_freq_mode %d\n",
+ __func__, __LINE__, i2c_freq_mode);
+ return NULL;
+ }
+
+ clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
+ cci_clk_src = clk_params->cci_clk_src;
+
+ idx = of_property_match_string(of_node,
+ "clock-names", CCI_CLK_SRC_NAME);
+ if (idx < 0) {
+ cci_dev->cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
+ return cci_dev->cci_clk_rates[0];
+ }
+
+ if (cci_clk_src == 0) {
+ clk = cci_dev->cci_clk_rates[0][idx];
+ cci_dev->cycles_per_us = msm_cci_cycles_per_ms(clk);
+ return cci_dev->cci_clk_rates[0];
+ }
+
+ for (j = 0; j < cci_dev->num_clk_cases; j++) {
+ clk = cci_dev->cci_clk_rates[j][idx];
+ if (clk == cci_clk_src) {
+ cci_dev->cycles_per_us = msm_cci_cycles_per_ms(clk);
+ cci_dev->cci_clk_src = cci_clk_src;
+ return cci_dev->cci_clk_rates[j];
+ }
+ }
+
+ return NULL;
+}
+
+static int32_t msm_cci_i2c_set_sync_prms(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ int32_t rc = 0;
+ struct cci_device *cci_dev;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev || !c_ctrl) {
+ pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
+ __LINE__, cci_dev, c_ctrl);
+ rc = -EINVAL;
+ return rc;
+ }
+ cci_dev->cci_wait_sync_cfg = c_ctrl->cfg.cci_wait_sync_cfg;
+ cci_dev->valid_sync = cci_dev->cci_wait_sync_cfg.csid < 0 ? 0 : 1;
+
+ return rc;
+}
+
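+/*
+ * Power up the CCI block. Every caller votes for the AHB clock; the first
+ * user additionally enables pinctrl/GPIOs, regulators and clocks, resets the
+ * controller and sizes the queues from the HW version, while later users
+ * only reset their own master.
+ */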
+static int32_t msm_cci_init(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ uint8_t i = 0, j = 0;
+ int32_t rc = 0, ret = 0;
+ struct cci_device *cci_dev;
+ enum cci_i2c_master_t master = MASTER_0;
+ uint32_t *clk_rates = NULL;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev || !c_ctrl) {
+ pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
+ __LINE__, cci_dev, c_ctrl);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CCI,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ if (cci_dev->ref_count++) {
+ CDBG("%s ref_count %d\n", __func__, cci_dev->ref_count);
+ master = c_ctrl->cci_info->cci_i2c_master;
+ CDBG("%s:%d master %d\n", __func__, __LINE__, master);
+ if (master < MASTER_MAX && master >= 0) {
+ mutex_lock(&cci_dev->cci_master_info[master].mutex);
+ mutex_lock(&cci_dev->cci_master_info[master].
+ mutex_q[PRIORITY_QUEUE]);
+ mutex_lock(&cci_dev->cci_master_info[master].
+ mutex_q[SYNC_QUEUE]);
+ flush_workqueue(cci_dev->write_wq[master]);
+ /* Re-initialize the completion */
+ reinit_completion(&cci_dev->
+ cci_master_info[master].reset_complete);
+ for (i = 0; i < NUM_QUEUES; i++)
+ reinit_completion(&cci_dev->
+ cci_master_info[master].report_q[i]);
+ /* Set reset pending flag to TRUE */
+ cci_dev->cci_master_info[master].reset_pending = TRUE;
+ /* Set proper mask to RESET CMD address */
+ if (master == MASTER_0)
+ msm_camera_io_w_mb(CCI_M0_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+ else
+ msm_camera_io_w_mb(CCI_M1_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+ /* wait for reset done irq */
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].
+ reset_complete,
+ CCI_TIMEOUT);
+ if (rc <= 0)
+ pr_err("%s:%d wait failed %d\n", __func__,
+ __LINE__, rc);
+ mutex_unlock(&cci_dev->cci_master_info[master].
+ mutex_q[SYNC_QUEUE]);
+ mutex_unlock(&cci_dev->cci_master_info[master].
+ mutex_q[PRIORITY_QUEUE]);
+ mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+ }
+ return 0;
+ }
+ ret = msm_cci_pinctrl_init(cci_dev);
+ if (ret < 0) {
+ pr_err("%s:%d Initialization of pinctrl failed\n",
+ __func__, __LINE__);
+ cci_dev->cci_pinctrl_status = 0;
+ } else {
+ cci_dev->cci_pinctrl_status = 1;
+ }
+ rc = msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
+ cci_dev->cci_gpio_tbl_size, 1);
+ if (cci_dev->cci_pinctrl_status) {
+ ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
+ cci_dev->cci_pinctrl.gpio_state_active);
+ if (ret)
+ pr_err("%s:%d cannot set pin to active state\n",
+ __func__, __LINE__);
+ }
+ if (rc < 0) {
+ CDBG("%s: request gpio failed\n", __func__);
+ goto request_gpio_failed;
+ }
+
+ rc = msm_camera_config_vreg(&cci_dev->pdev->dev, cci_dev->cci_vreg,
+ cci_dev->regulator_count, NULL, 0, &cci_dev->cci_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d cci config_vreg failed\n", __func__, __LINE__);
+ goto clk_enable_failed;
+ }
+
+ rc = msm_camera_enable_vreg(&cci_dev->pdev->dev, cci_dev->cci_vreg,
+ cci_dev->regulator_count, NULL, 0, &cci_dev->cci_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d cci enable_vreg failed\n", __func__, __LINE__);
+ goto reg_enable_failed;
+ }
+
+ clk_rates = msm_cci_get_clk_rates(cci_dev, c_ctrl);
+ if (!clk_rates) {
+ pr_err("%s: failed to get clk rates\n", __func__);
+ goto reg_enable_failed;
+ }
+
+ for (i = 0; i < cci_dev->num_clk; i++) {
+ cci_dev->cci_clk_info[i].clk_rate =
+ clk_rates[i];
+ }
+ rc = msm_camera_clk_enable(&cci_dev->pdev->dev,
+ cci_dev->cci_clk_info, cci_dev->cci_clk,
+ cci_dev->num_clk, true);
+ if (rc < 0) {
+ pr_err("%s: clk enable failed\n", __func__);
+ goto reg_enable_failed;
+ }
+
+ /* Re-initialize the completion */
+ reinit_completion(&cci_dev->cci_master_info[master].reset_complete);
+ for (i = 0; i < NUM_QUEUES; i++)
+ reinit_completion(&cci_dev->cci_master_info[master].
+ report_q[i]);
+ rc = msm_camera_enable_irq(cci_dev->irq, true);
+ if (rc < 0)
+ pr_err("%s: irq enable failed\n", __func__);
+ cci_dev->hw_version = msm_camera_io_r_mb(cci_dev->base +
+ CCI_HW_VERSION_ADDR);
+ pr_info("%s:%d: hw_version = 0x%x\n", __func__, __LINE__,
+ cci_dev->hw_version);
+ cci_dev->payload_size =
+ MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_10;
+ cci_dev->support_seq_write = 0;
+ if (cci_dev->hw_version >= 0x10020000) {
+ cci_dev->payload_size =
+ MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11;
+ cci_dev->support_seq_write = 1;
+ }
+ for (i = 0; i < NUM_MASTERS; i++) {
+ for (j = 0; j < NUM_QUEUES; j++) {
+ if (j == QUEUE_0) {
+ if (cci_dev->hw_version >= 0x10060000)
+ cci_dev->cci_i2c_queue_info[i][j].
+ max_queue_size =
+ CCI_I2C_Q0_SIZE_128W;
+ else
+ cci_dev->cci_i2c_queue_info[i][j].
+ max_queue_size =
+ CCI_I2C_QUEUE_0_SIZE;
+ } else {
+ if (cci_dev->hw_version >= 0x10060000)
+ cci_dev->cci_i2c_queue_info[i][j].
+ max_queue_size =
+ CCI_I2C_Q1_SIZE_32W;
+ else
+ cci_dev->cci_i2c_queue_info[i][j].
+ max_queue_size =
+ CCI_I2C_QUEUE_1_SIZE;
+ }
+ CDBG("CCI Master[%d] :: Q%d size: %d\n", i, j,
+ cci_dev->cci_i2c_queue_info[i][j].
+ max_queue_size);
+ }
+ }
+
+ cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+ msm_camera_io_w_mb(CCI_RESET_CMD_RMSK, cci_dev->base +
+ CCI_RESET_CMD_ADDR);
+ msm_camera_io_w_mb(0x1, cci_dev->base + CCI_RESET_CMD_ADDR);
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[MASTER_0].reset_complete,
+ CCI_TIMEOUT);
+ if (rc <= 0) {
+ pr_err("%s: wait_for_completion_timeout %d\n",
+ __func__, __LINE__);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ goto reset_complete_failed;
+ }
+ for (i = 0; i < MASTER_MAX; i++)
+ cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+ msm_camera_io_w_mb(CCI_IRQ_MASK_0_RMSK,
+ cci_dev->base + CCI_IRQ_MASK_0_ADDR);
+ msm_camera_io_w_mb(CCI_IRQ_MASK_0_RMSK,
+ cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+ msm_camera_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ for (i = 0; i < MASTER_MAX; i++) {
+ if (!cci_dev->write_wq[i]) {
+ pr_err("%s: write wq not created\n", __func__);
+ rc = -ENOMEM;
+ goto reset_complete_failed;
+ } else {
+ flush_workqueue(cci_dev->write_wq[i]);
+ }
+ }
+ cci_dev->cci_state = CCI_STATE_ENABLED;
+
+ return 0;
+
+reset_complete_failed:
+ msm_camera_enable_irq(cci_dev->irq, false);
+ msm_camera_clk_enable(&cci_dev->pdev->dev, cci_dev->cci_clk_info,
+ cci_dev->cci_clk, cci_dev->num_clk, false);
+reg_enable_failed:
+ msm_camera_config_vreg(&cci_dev->pdev->dev, cci_dev->cci_vreg,
+ cci_dev->regulator_count, NULL, 0, &cci_dev->cci_reg_ptr[0], 0);
+clk_enable_failed:
+ if (cci_dev->cci_pinctrl_status) {
+ ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
+ cci_dev->cci_pinctrl.gpio_state_suspend);
+ if (ret)
+ pr_err("%s:%d cannot set pin to suspend state\n",
+ __func__, __LINE__);
+ }
+ msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
+ cci_dev->cci_gpio_tbl_size, 0);
+request_gpio_failed:
+ cci_dev->ref_count--;
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CCI,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return rc;
+}
+
+static int32_t msm_cci_release(struct v4l2_subdev *sd)
+{
+ uint8_t i = 0;
+ int32_t rc = 0;
+ struct cci_device *cci_dev;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
+ pr_err("%s invalid ref count %d / cci state %d\n",
+ __func__, cci_dev->ref_count, cci_dev->cci_state);
+ rc = -EINVAL;
+ goto ahb_vote_suspend;
+ }
+ if (--cci_dev->ref_count) {
+ CDBG("%s ref_count Exit %d\n", __func__, cci_dev->ref_count);
+ rc = 0;
+ goto ahb_vote_suspend;
+ }
+ for (i = 0; i < MASTER_MAX; i++)
+ if (cci_dev->write_wq[i])
+ flush_workqueue(cci_dev->write_wq[i]);
+
+ msm_camera_enable_irq(cci_dev->irq, false);
+ msm_camera_clk_enable(&cci_dev->pdev->dev, cci_dev->cci_clk_info,
+ cci_dev->cci_clk, cci_dev->num_clk, false);
+
+ rc = msm_camera_enable_vreg(&cci_dev->pdev->dev, cci_dev->cci_vreg,
+ cci_dev->regulator_count, NULL, 0, &cci_dev->cci_reg_ptr[0], 0);
+ if (rc < 0)
+ pr_err("%s:%d cci disable_vreg failed\n", __func__, __LINE__);
+
+ rc = msm_camera_config_vreg(&cci_dev->pdev->dev, cci_dev->cci_vreg,
+ cci_dev->regulator_count, NULL, 0, &cci_dev->cci_reg_ptr[0], 0);
+ if (rc < 0)
+ pr_err("%s:%d cci unconfig_vreg failed\n", __func__, __LINE__);
+
+ if (cci_dev->cci_pinctrl_status) {
+ rc = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
+ cci_dev->cci_pinctrl.gpio_state_suspend);
+ if (rc)
+ pr_err("%s:%d cannot set pin to suspend state\n",
+ __func__, __LINE__);
+ }
+ cci_dev->cci_pinctrl_status = 0;
+ msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
+ cci_dev->cci_gpio_tbl_size, 0);
+ for (i = 0; i < MASTER_MAX; i++)
+ cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+ cci_dev->cci_state = CCI_STATE_DISABLED;
+ cci_dev->cycles_per_us = 0;
+ cci_dev->cci_clk_src = 0;
+
+ahb_vote_suspend:
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CCI,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return rc;
+}
+
+static int32_t msm_cci_write(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ int32_t rc = 0;
+ struct cci_device *cci_dev;
+ enum cci_i2c_master_t master;
+ struct msm_camera_cci_master_info *cci_master_info;
+ uint32_t i;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev || !c_ctrl) {
+ pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
+ __LINE__, cci_dev, c_ctrl);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+ || c_ctrl->cci_info->cci_i2c_master < 0) {
+ pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ master = c_ctrl->cci_info->cci_i2c_master;
+ cci_master_info = &cci_dev->cci_master_info[master];
+
+ switch (c_ctrl->cmd) {
+ case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
+ mutex_lock(&cci_master_info->mutex_q[SYNC_QUEUE]);
+ rc = msm_cci_i2c_write(sd, c_ctrl,
+ SYNC_QUEUE, MSM_SYNC_ENABLE);
+ mutex_unlock(&cci_master_info->mutex_q[SYNC_QUEUE]);
+ break;
+ case MSM_CCI_I2C_WRITE_SYNC:
+ rc = msm_cci_i2c_write_async(sd, c_ctrl,
+ SYNC_QUEUE, MSM_SYNC_ENABLE);
+ break;
+ case MSM_CCI_I2C_WRITE:
+ case MSM_CCI_I2C_WRITE_SEQ:
+ for (i = 0; i < NUM_QUEUES; i++) {
+ if (mutex_trylock(&cci_master_info->mutex_q[i])) {
+ rc = msm_cci_i2c_write(sd, c_ctrl, i,
+ MSM_SYNC_DISABLE);
+ mutex_unlock(&cci_master_info->mutex_q[i]);
+ return rc;
+ }
+ }
+ mutex_lock(&cci_master_info->mutex_q[PRIORITY_QUEUE]);
+ rc = msm_cci_i2c_write(sd, c_ctrl,
+ PRIORITY_QUEUE, MSM_SYNC_DISABLE);
+ mutex_unlock(&cci_master_info->mutex_q[PRIORITY_QUEUE]);
+ break;
+ case MSM_CCI_I2C_WRITE_ASYNC:
+ rc = msm_cci_i2c_write_async(sd, c_ctrl,
+ PRIORITY_QUEUE, MSM_SYNC_DISABLE);
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ }
+ return rc;
+}
+
+static int32_t msm_cci_config(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *cci_ctrl)
+{
+ int32_t rc = 0;
+
+ CDBG("%s line %d cmd %d\n", __func__, __LINE__,
+ cci_ctrl->cmd);
+ switch (cci_ctrl->cmd) {
+ case MSM_CCI_INIT:
+ rc = msm_cci_init(sd, cci_ctrl);
+ break;
+ case MSM_CCI_RELEASE:
+ rc = msm_cci_release(sd);
+ break;
+ case MSM_CCI_I2C_READ:
+ rc = msm_cci_i2c_read_bytes(sd, cci_ctrl);
+ break;
+ case MSM_CCI_I2C_WRITE:
+ case MSM_CCI_I2C_WRITE_SEQ:
+ case MSM_CCI_I2C_WRITE_SYNC:
+ case MSM_CCI_I2C_WRITE_ASYNC:
+ case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
+ rc = msm_cci_write(sd, cci_ctrl);
+ break;
+ case MSM_CCI_GPIO_WRITE:
+ break;
+ case MSM_CCI_SET_SYNC_CID:
+ rc = msm_cci_i2c_set_sync_prms(sd, cci_ctrl);
+ break;
+
+ default:
+ rc = -ENOIOCTLCMD;
+ }
+ CDBG("%s line %d rc %d\n", __func__, __LINE__, rc);
+ cci_ctrl->status = rc;
+ return rc;
+}
+
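+/*
+ * CCI interrupt handler: acknowledges the status bits and dispatches
+ * reset-done, read-done, queue-report, halt-ack and error conditions to the
+ * per-master bookkeeping (completions, q_free/done_pending flags).
+ */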
+static irqreturn_t msm_cci_irq(int irq_num, void *data)
+{
+ uint32_t irq;
+ struct cci_device *cci_dev = data;
+
+ irq = msm_camera_io_r_mb(cci_dev->base + CCI_IRQ_STATUS_0_ADDR);
+ msm_camera_io_w_mb(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+ msm_camera_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+ CDBG("%s CCI_I2C_M0_STATUS_ADDR = 0x%x\n", __func__, irq);
+ if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+ if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
+ cci_dev->cci_master_info[MASTER_0].reset_pending =
+ FALSE;
+ complete(&cci_dev->cci_master_info[MASTER_0].
+ reset_complete);
+ }
+ if (cci_dev->cci_master_info[MASTER_1].reset_pending == TRUE) {
+ cci_dev->cci_master_info[MASTER_1].reset_pending =
+ FALSE;
+ complete(&cci_dev->cci_master_info[MASTER_1].
+ reset_complete);
+ }
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) {
+ cci_dev->cci_master_info[MASTER_0].status = 0;
+ complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
+ struct msm_camera_cci_master_info *cci_master_info;
+
+ cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+ atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
+ cci_master_info->status = 0;
+ if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
+ complete(&cci_master_info->report_q[QUEUE_0]);
+ atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
+ }
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
+ struct msm_camera_cci_master_info *cci_master_info;
+
+ cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+ atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
+ cci_master_info->status = 0;
+ if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
+ complete(&cci_master_info->report_q[QUEUE_1]);
+ atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
+ }
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+ cci_dev->cci_master_info[MASTER_1].status = 0;
+ complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
+ struct msm_camera_cci_master_info *cci_master_info;
+
+ cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+ atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
+ cci_master_info->status = 0;
+ if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
+ complete(&cci_master_info->report_q[QUEUE_0]);
+ atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
+ }
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
+ struct msm_camera_cci_master_info *cci_master_info;
+
+ cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+ atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
+ cci_master_info->status = 0;
+ if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
+ complete(&cci_master_info->report_q[QUEUE_1]);
+ atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
+ }
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+ cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+ msm_camera_io_w_mb(CCI_M0_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+ cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
+ msm_camera_io_w_mb(CCI_M1_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
+ pr_err("%s:%d MASTER_0 error 0x%x\n", __func__, __LINE__, irq);
+ cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
+ msm_camera_io_w_mb(CCI_M0_HALT_REQ_RMSK,
+ cci_dev->base + CCI_HALT_REQ_ADDR);
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
+ pr_err("%s:%d MASTER_1 error 0x%x\n", __func__, __LINE__, irq);
+ cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
+ msm_camera_io_w_mb(CCI_M1_HALT_REQ_RMSK,
+ cci_dev->base + CCI_HALT_REQ_ADDR);
+ }
+ return IRQ_HANDLED;
+}
+
+static int msm_cci_irq_routine(struct v4l2_subdev *sd, u32 status,
+ bool *handled)
+{
+ struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
+ irqreturn_t ret;
+
+ CDBG("%s line %d\n", __func__, __LINE__);
+ ret = msm_cci_irq(cci_dev->irq->start, cci_dev);
+ CDBG("%s: msm_cci_irq return %d\n", __func__, ret);
+ *handled = TRUE;
+ return 0;
+}
+
+static long msm_cci_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int32_t rc = 0;
+
+ CDBG("%s line %d\n", __func__, __LINE__);
+ switch (cmd) {
+ case VIDIOC_MSM_CCI_CFG:
+ rc = msm_cci_config(sd, arg);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ break;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ break;
+ case MSM_SD_SHUTDOWN: {
+ struct msm_camera_cci_ctrl ctrl_cmd;
+
+ ctrl_cmd.cmd = MSM_CCI_RELEASE;
+ rc = msm_cci_config(sd, &ctrl_cmd);
+ break;
+ }
+ default:
+ rc = -ENOIOCTLCMD;
+ }
+ CDBG("%s line %d rc %d\n", __func__, __LINE__, rc);
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_cci_subdev_core_ops = {
+ .ioctl = &msm_cci_subdev_ioctl,
+ .interrupt_service_routine = msm_cci_irq_routine,
+};
+
+static const struct v4l2_subdev_ops msm_cci_subdev_ops = {
+ .core = &msm_cci_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_cci_internal_ops;
+
+static void msm_cci_init_cci_params(struct cci_device *new_cci_dev)
+{
+ uint8_t i = 0, j = 0;
+
+ for (i = 0; i < NUM_MASTERS; i++) {
+ new_cci_dev->cci_master_info[i].status = 0;
+ mutex_init(&new_cci_dev->cci_master_info[i].mutex);
+ init_completion(&new_cci_dev->
+ cci_master_info[i].reset_complete);
+
+ for (j = 0; j < NUM_QUEUES; j++) {
+ mutex_init(&new_cci_dev->cci_master_info[i].mutex_q[j]);
+ init_completion(&new_cci_dev->
+ cci_master_info[i].report_q[j]);
+ }
+ }
+}
+
+static int32_t msm_cci_init_gpio_params(struct cci_device *cci_dev)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t *val_array = NULL;
+ uint8_t tbl_size = 0;
+ struct device_node *of_node = cci_dev->pdev->dev.of_node;
+ struct gpio *gpio_tbl = NULL;
+
+ cci_dev->cci_gpio_tbl_size = tbl_size = of_gpio_count(of_node);
+ CDBG("%s gpio count %d\n", __func__, tbl_size);
+ if (!tbl_size) {
+ pr_err("%s:%d gpio count 0\n", __func__, __LINE__);
+ return 0;
+ }
+
+ gpio_tbl = cci_dev->cci_gpio_tbl =
+ kzalloc(sizeof(struct gpio) * tbl_size, GFP_KERNEL);
+ if (!gpio_tbl) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return 0;
+ }
+
+ for (i = 0; i < tbl_size; i++) {
+ gpio_tbl[i].gpio = of_get_gpio(of_node, i);
+ CDBG("%s gpio_tbl[%d].gpio = %d\n", __func__, i,
+ gpio_tbl[i].gpio);
+ }
+
+ val_array = kcalloc(tbl_size, sizeof(uint32_t), GFP_KERNEL);
+ if (!val_array) {
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,gpio-tbl-flags",
+ val_array, tbl_size);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < tbl_size; i++) {
+ gpio_tbl[i].flags = val_array[i];
+ CDBG("%s gpio_tbl[%d].flags = %ld\n", __func__, i,
+ gpio_tbl[i].flags);
+ }
+
+ for (i = 0; i < tbl_size; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,gpio-tbl-label", i, &gpio_tbl[i].label);
+ CDBG("%s gpio_tbl[%d].label = %s\n", __func__, i,
+ gpio_tbl[i].label);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ }
+
+ kfree(val_array);
+ return rc;
+
+ERROR2:
+ kfree(val_array);
+ERROR1:
+ kfree(cci_dev->cci_gpio_tbl);
+ cci_dev->cci_gpio_tbl = NULL;
+ cci_dev->cci_gpio_tbl_size = 0;
+ return rc;
+}
+
+static void msm_cci_init_default_clk_params(struct cci_device *cci_dev,
+ uint8_t index)
+{
+ /* default clock params are for 100 kHz */
+ cci_dev->cci_clk_params[index].hw_thigh = 201;
+ cci_dev->cci_clk_params[index].hw_tlow = 174;
+ cci_dev->cci_clk_params[index].hw_tsu_sto = 204;
+ cci_dev->cci_clk_params[index].hw_tsu_sta = 231;
+ cci_dev->cci_clk_params[index].hw_thd_dat = 22;
+ cci_dev->cci_clk_params[index].hw_thd_sta = 162;
+ cci_dev->cci_clk_params[index].hw_tbuf = 227;
+ cci_dev->cci_clk_params[index].hw_scl_stretch_en = 0;
+ cci_dev->cci_clk_params[index].hw_trdhld = 6;
+ cci_dev->cci_clk_params[index].hw_tsp = 3;
+ cci_dev->cci_clk_params[index].cci_clk_src = 37500000;
+}
+
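+/*
+ * Read the per-mode I2C timing parameters from the device tree nodes
+ * (standard/fast/fast-plus/custom); any mode with missing properties falls
+ * back to the 100 kHz defaults.
+ */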
+static void msm_cci_init_clk_params(struct cci_device *cci_dev)
+{
+ int32_t rc = 0;
+ uint32_t val = 0;
+ uint8_t count = 0;
+ struct device_node *of_node = cci_dev->pdev->dev.of_node;
+ struct device_node *src_node = NULL;
+
+ for (count = 0; count < I2C_MAX_MODES; count++) {
+
+ if (count == I2C_STANDARD_MODE)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_standard_mode");
+ else if (count == I2C_FAST_MODE)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_fast_mode");
+ else if (count == I2C_FAST_PLUS_MODE)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_fast_plus_mode");
+ else
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_custom_mode");
+
+ rc = of_property_read_u32(src_node, "qcom,hw-thigh", &val);
+ CDBG("%s qcom,hw-thigh %d, rc %d\n", __func__, val, rc);
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_thigh = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tlow",
+ &val);
+ CDBG("%s qcom,hw-tlow %d, rc %d\n", __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tlow = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tsu-sto",
+ &val);
+ CDBG("%s qcom,hw-tsu-sto %d, rc %d\n",
+ __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tsu_sto = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tsu-sta",
+ &val);
+ CDBG("%s qcom,hw-tsu-sta %d, rc %d\n",
+ __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tsu_sta = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-thd-dat",
+ &val);
+ CDBG("%s qcom,hw-thd-dat %d, rc %d\n",
+ __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_thd_dat = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-thd-sta",
+ &val);
+ CDBG("%s qcom,hw-thd-sta %d, rc %d\n", __func__,
+ val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_thd_sta = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tbuf",
+ &val);
+ CDBG("%s qcom,hw-tbuf %d, rc %d\n", __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tbuf = val;
+ rc = of_property_read_u32(src_node,
+ "qcom,hw-scl-stretch-en", &val);
+ CDBG("%s qcom,hw-scl-stretch-en %d, rc %d\n",
+ __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-trdhld",
+ &val);
+ CDBG("%s qcom,hw-trdhld %d, rc %d\n",
+ __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_trdhld = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tsp",
+ &val);
+ CDBG("%s qcom,hw-tsp %d, rc %d\n", __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tsp = val;
+ val = 0;
+ rc = of_property_read_u32(src_node, "qcom,cci-clk-src",
+ &val);
+ CDBG("%s qcom,cci-clk-src %d, rc %d\n",
+ __func__, val, rc);
+ cci_dev->cci_clk_params[count].cci_clk_src = val;
+ } else
+ msm_cci_init_default_clk_params(cci_dev, count);
+
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+}
+
+struct v4l2_subdev *msm_cci_get_subdev(void)
+{
+ return g_cci_subdev;
+}
+
+static int msm_cci_probe(struct platform_device *pdev)
+{
+ struct cci_device *new_cci_dev;
+ int rc = 0, i = 0;
+
+ CDBG("%s: pdev %pK device id = %d\n", __func__, pdev, pdev->id);
+ new_cci_dev = kzalloc(sizeof(struct cci_device), GFP_KERNEL);
+ if (!new_cci_dev) {
+ pr_err("%s: not enough memory\n", __func__);
+ return -ENOMEM;
+ }
+ v4l2_subdev_init(&new_cci_dev->msm_sd.sd, &msm_cci_subdev_ops);
+ new_cci_dev->msm_sd.sd.internal_ops = &msm_cci_internal_ops;
+ snprintf(new_cci_dev->msm_sd.sd.name,
+ ARRAY_SIZE(new_cci_dev->msm_sd.sd.name), "msm_cci");
+ new_cci_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_CCI;
+ v4l2_set_subdevdata(&new_cci_dev->msm_sd.sd, new_cci_dev);
+ platform_set_drvdata(pdev, &new_cci_dev->msm_sd.sd);
+ CDBG("%s sd %pK\n", __func__, &new_cci_dev->msm_sd.sd);
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
+ rc = msm_camera_get_clk_info_and_rates(pdev,
+ &new_cci_dev->cci_clk_info, &new_cci_dev->cci_clk,
+ &new_cci_dev->cci_clk_rates, &new_cci_dev->num_clk_cases,
+ &new_cci_dev->num_clk);
+ if (rc < 0) {
+ pr_err("%s: msm_camera_get_clk_info_and_rates() failed\n",
+ __func__);
+ kfree(new_cci_dev);
+ return -EFAULT;
+ }
+
+ new_cci_dev->ref_count = 0;
+ new_cci_dev->base = msm_camera_get_reg_base(pdev, "cci", true);
+ if (!new_cci_dev->base) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto cci_no_resource;
+ }
+ new_cci_dev->irq = msm_camera_get_irq(pdev, "cci");
+ if (!new_cci_dev->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto cci_no_resource;
+ }
+ CDBG("%s line %d cci irq start %d end %d\n", __func__,
+ __LINE__,
+ (int) new_cci_dev->irq->start,
+ (int) new_cci_dev->irq->end);
+ rc = msm_camera_register_irq(pdev, new_cci_dev->irq,
+ msm_cci_irq, IRQF_TRIGGER_RISING, "cci", new_cci_dev);
+ if (rc < 0) {
+ pr_err("%s: irq request fail\n", __func__);
+ rc = -EBUSY;
+ goto cci_release_mem;
+ }
+
+ msm_camera_enable_irq(new_cci_dev->irq, false);
+ new_cci_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x6;
+ msm_sd_register(&new_cci_dev->msm_sd);
+ new_cci_dev->pdev = pdev;
+ msm_cci_init_cci_params(new_cci_dev);
+ msm_cci_init_clk_params(new_cci_dev);
+ msm_cci_init_gpio_params(new_cci_dev);
+
+ rc = msm_camera_get_dt_vreg_data(new_cci_dev->pdev->dev.of_node,
+ &(new_cci_dev->cci_vreg), &(new_cci_dev->regulator_count));
+ if (rc < 0) {
+ pr_err("%s: msm_camera_get_dt_vreg_data fail\n", __func__);
+ rc = -EFAULT;
+ goto cci_release_mem;
+ }
+
+ if ((new_cci_dev->regulator_count < 0) ||
+ (new_cci_dev->regulator_count > MAX_REGULATOR)) {
+ pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
+ new_cci_dev->regulator_count, MAX_REGULATOR);
+ rc = -EFAULT;
+ goto cci_invalid_vreg_data;
+ }
+
+ rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (rc)
+ pr_err("%s: failed to add child nodes, rc=%d\n", __func__, rc);
+ new_cci_dev->cci_state = CCI_STATE_DISABLED;
+ g_cci_subdev = &new_cci_dev->msm_sd.sd;
+ for (i = 0; i < MASTER_MAX; i++) {
+ new_cci_dev->write_wq[i] = create_singlethread_workqueue(
+ "msm_cci_wq");
+ if (!new_cci_dev->write_wq[i])
+ pr_err("Failed to create write wq\n");
+ }
+ CDBG("%s cci subdev %pK\n", __func__, &new_cci_dev->msm_sd.sd);
+ CDBG("%s line %d\n", __func__, __LINE__);
+ return 0;
+
+cci_invalid_vreg_data:
+ kfree(new_cci_dev->cci_vreg);
+cci_release_mem:
+ msm_camera_put_reg_base(pdev, new_cci_dev->base, "cci", true);
+cci_no_resource:
+ kfree(new_cci_dev);
+ return rc;
+}
+
+static int msm_cci_exit(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+ struct cci_device *cci_dev =
+ v4l2_get_subdevdata(subdev);
+
+ msm_camera_put_clk_info_and_rates(pdev,
+ &cci_dev->cci_clk_info, &cci_dev->cci_clk,
+ &cci_dev->cci_clk_rates, cci_dev->num_clk_cases,
+ cci_dev->num_clk);
+
+ msm_camera_put_reg_base(pdev, cci_dev->base, "cci", true);
+ kfree(cci_dev);
+ return 0;
+}
+
+static const struct of_device_id msm_cci_dt_match[] = {
+ {.compatible = "qcom,cci"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_cci_dt_match);
+
+static struct platform_driver cci_driver = {
+ .probe = msm_cci_probe,
+ .remove = msm_cci_exit,
+ .driver = {
+ .name = MSM_CCI_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cci_dt_match,
+ },
+};
+
+static int __init msm_cci_init_module(void)
+{
+ return platform_driver_register(&cci_driver);
+}
+
+static void __exit msm_cci_exit_module(void)
+{
+ platform_driver_unregister(&cci_driver);
+}
+
+module_init(msm_cci_init_module);
+module_exit(msm_cci_exit_module);
+MODULE_DESCRIPTION("MSM CCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/cci/msm_cci.h b/drivers/media/platform/msm/ais/sensor/cci/msm_cci.h
new file mode 100644
index 000000000000..9a2bb6ff6b69
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/cci/msm_cci.h
@@ -0,0 +1,231 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CCI_H
+#define MSM_CCI_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <linux/workqueue.h>
+#include <media/ais/msm_ais_sensor.h>
+#include <soc/qcom/ais.h>
+#include "msm_sd.h"
+#include "cam_soc_api.h"
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE 1
+#define FALSE 0
+
+#define CCI_PINCTRL_STATE_DEFAULT "cci_default"
+#define CCI_PINCTRL_STATE_SLEEP "cci_suspend"
+
+#define CCI_NUM_CLK_MAX 16
+#define CCI_NUM_CLK_CASES 5
+#define CCI_CLK_SRC_NAME "cci_src_clk"
+#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_10 10
+#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11 11
+#define BURST_MIN_FREE_SIZE 8
+
+enum cci_i2c_sync {
+ MSM_SYNC_DISABLE,
+ MSM_SYNC_ENABLE,
+};
+
+enum cci_i2c_queue_t {
+ QUEUE_0,
+ QUEUE_1,
+ QUEUE_INVALID,
+};
+
+struct msm_camera_cci_client {
+ struct v4l2_subdev *cci_subdev;
+ uint32_t freq;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ enum cci_i2c_master_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint32_t timeout;
+ uint16_t retries;
+ uint16_t id_map;
+};
+
+enum msm_cci_cmd_type {
+ MSM_CCI_INIT,
+ MSM_CCI_RELEASE,
+ MSM_CCI_SET_SID,
+ MSM_CCI_SET_FREQ,
+ MSM_CCI_SET_SYNC_CID,
+ MSM_CCI_I2C_READ,
+ MSM_CCI_I2C_WRITE,
+ MSM_CCI_I2C_WRITE_SEQ,
+ MSM_CCI_I2C_WRITE_ASYNC,
+ MSM_CCI_GPIO_WRITE,
+ MSM_CCI_I2C_WRITE_SYNC,
+ MSM_CCI_I2C_WRITE_SYNC_BLOCK,
+};
+
+struct msm_camera_cci_wait_sync_cfg {
+ uint16_t cid;
+ int16_t csid;
+ uint16_t line;
+ uint16_t delay;
+};
+
+struct msm_camera_cci_gpio_cfg {
+ uint16_t gpio_queue;
+ uint16_t i2c_queue;
+};
+
+struct msm_camera_cci_i2c_read_cfg {
+ uint32_t addr;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ uint8_t *data;
+ uint16_t num_byte;
+};
+
+struct msm_camera_cci_i2c_queue_info {
+ uint32_t max_queue_size;
+ uint32_t report_id;
+ uint32_t irq_en;
+ uint32_t capture_rep_data;
+};
+
+struct msm_camera_cci_ctrl {
+ int32_t status;
+ struct msm_camera_cci_client *cci_info;
+ enum msm_cci_cmd_type cmd;
+ union {
+ struct msm_camera_i2c_reg_setting cci_i2c_write_cfg;
+ struct msm_camera_cci_i2c_read_cfg cci_i2c_read_cfg;
+ struct msm_camera_cci_wait_sync_cfg cci_wait_sync_cfg;
+ struct msm_camera_cci_gpio_cfg gpio_cfg;
+ } cfg;
+};
+
+struct msm_camera_cci_master_info {
+ uint32_t status;
+ atomic_t q_free[NUM_QUEUES];
+ uint8_t q_lock[NUM_QUEUES];
+ uint8_t reset_pending;
+ struct mutex mutex;
+ struct completion reset_complete;
+ struct mutex mutex_q[NUM_QUEUES];
+ struct completion report_q[NUM_QUEUES];
+ atomic_t done_pending[NUM_QUEUES];
+};
+
+struct msm_cci_clk_params_t {
+ uint16_t hw_thigh;
+ uint16_t hw_tlow;
+ uint16_t hw_tsu_sto;
+ uint16_t hw_tsu_sta;
+ uint16_t hw_thd_dat;
+ uint16_t hw_thd_sta;
+ uint16_t hw_tbuf;
+ uint8_t hw_scl_stretch_en;
+ uint8_t hw_trdhld;
+ uint8_t hw_tsp;
+ uint32_t cci_clk_src;
+};
+
+enum msm_cci_state_t {
+ CCI_STATE_ENABLED,
+ CCI_STATE_DISABLED,
+};
+
+struct cci_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct v4l2_subdev subdev;
+ struct resource *irq;
+ void __iomem *base;
+
+ uint32_t hw_version;
+ uint8_t ref_count;
+ enum msm_cci_state_t cci_state;
+ size_t num_clk;
+ size_t num_clk_cases;
+ struct clk **cci_clk;
+ uint32_t **cci_clk_rates;
+ struct msm_cam_clk_info *cci_clk_info;
+ struct msm_camera_cci_i2c_queue_info
+ cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
+ struct msm_camera_cci_master_info cci_master_info[NUM_MASTERS];
+ enum i2c_freq_mode_t i2c_freq_mode[NUM_MASTERS];
+ struct msm_cci_clk_params_t cci_clk_params[I2C_MAX_MODES];
+ struct gpio *cci_gpio_tbl;
+ uint8_t cci_gpio_tbl_size;
+ struct msm_pinctrl_info cci_pinctrl;
+ uint8_t cci_pinctrl_status;
+ uint32_t cycles_per_us;
+ uint32_t cci_clk_src;
+ struct camera_vreg_t *cci_vreg;
+ struct regulator *cci_reg_ptr[MAX_REGULATOR];
+ int32_t regulator_count;
+ uint8_t payload_size;
+ uint8_t support_seq_write;
+ struct workqueue_struct *write_wq[MASTER_MAX];
+ struct msm_camera_cci_wait_sync_cfg cci_wait_sync_cfg;
+ uint8_t valid_sync;
+};
+
+enum msm_cci_i2c_cmd_type {
+ CCI_I2C_SET_PARAM_CMD = 1,
+ CCI_I2C_WAIT_CMD,
+ CCI_I2C_WAIT_SYNC_CMD,
+ CCI_I2C_WAIT_GPIO_EVENT_CMD,
+ CCI_I2C_TRIG_I2C_EVENT_CMD,
+ CCI_I2C_LOCK_CMD,
+ CCI_I2C_UNLOCK_CMD,
+ CCI_I2C_REPORT_CMD,
+ CCI_I2C_WRITE_CMD,
+ CCI_I2C_READ_CMD,
+ CCI_I2C_WRITE_DISABLE_P_CMD,
+ CCI_I2C_READ_DISABLE_P_CMD,
+ CCI_I2C_WRITE_CMD2,
+ CCI_I2C_WRITE_CMD3,
+ CCI_I2C_REPEAT_CMD,
+ CCI_I2C_INVALID_CMD,
+};
+
+enum msm_cci_gpio_cmd_type {
+ CCI_GPIO_SET_PARAM_CMD = 1,
+ CCI_GPIO_WAIT_CMD,
+ CCI_GPIO_WAIT_SYNC_CMD,
+ CCI_GPIO_WAIT_GPIO_IN_EVENT_CMD,
+ CCI_GPIO_WAIT_I2C_Q_TRIG_EVENT_CMD,
+ CCI_GPIO_OUT_CMD,
+ CCI_GPIO_TRIG_EVENT_CMD,
+ CCI_GPIO_REPORT_CMD,
+ CCI_GPIO_REPEAT_CMD,
+ CCI_GPIO_CONTINUE_CMD,
+ CCI_GPIO_INVALID_CMD,
+};
+
+struct cci_write_async {
+ struct cci_device *cci_dev;
+ struct msm_camera_cci_ctrl c_ctrl;
+ enum cci_i2c_queue_t queue;
+ struct work_struct work;
+ enum cci_i2c_sync sync_en;
+};
+
+struct v4l2_subdev *msm_cci_get_subdev(void);
+
+#define VIDIOC_MSM_CCI_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct msm_camera_cci_ctrl *)
+
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/Makefile b/drivers/media/platform/msm/ais/sensor/csid/Makefile
new file mode 100644
index 000000000000..5a21052f7347
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_csid.o
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h
new file mode 100644
index 000000000000..f88c0ef82499
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_2_0_HWREG_H
+#define MSM_CSID_2_0_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
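+/*
+ * Lane remap table for this CSID version, indexed by the logical lane number
+ * taken from the 4-bit fields of csid_params->lane_assign and yielding the
+ * lane select value programmed into CORE_CTRL (see msm_csid_config()).
+ */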
+uint8_t csid_lane_assign_v2_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
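+/*
+ * Positional initializer for struct csid_reg_parms_t (defined in msm_csid.h):
+ * register offsets for this CSID core revision, followed by the reset/IRQ
+ * shift values, the expected CSID version word and the IRQ mask constants.
+ * Entries of 0xFFFFFFFF appear to mark registers absent on this revision.
+ * The other csid_v* tables in this directory follow the same field order.
+ */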
+struct csid_reg_parms_t csid_v2_0 = {
+
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x4,
+ 0x8,
+ 0xc,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x5c,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6c,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x9C,
+ 0xA0,
+ 0xA8,
+ 0xAC,
+ 0xB0,
+ 11,
+ 0x7FFF,
+ 0x2,
+ 17,
+ 0x02000011,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h
new file mode 100644
index 000000000000..e2bb6cd499ff
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_2_2_HWREG_H
+#define MSM_CSID_2_2_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v2_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
+struct csid_reg_parms_t csid_v2_2 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x4,
+ 0x8,
+ 0xc,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x5c,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6c,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x9C,
+ 0xA0,
+ 0xA8,
+ 0xAC,
+ 0xB0,
+ 11,
+ 0x7FFF,
+ 0x2,
+ 17,
+ 0x02001000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h
new file mode 100644
index 000000000000..440f869692f7
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_0_HWREG_H
+#define MSM_CSID_3_0_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
+struct csid_reg_parms_t csid_v3_0 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30000000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h
new file mode 100644
index 000000000000..dde47046b679
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_1_HWREG_H
+#define MSM_CSID_3_1_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
+struct csid_reg_parms_t csid_v3_1 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30010000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h
new file mode 100644
index 000000000000..5241a90fbc86
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_2_HWREG_H
+#define MSM_CSID_3_2_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
+struct csid_reg_parms_t csid_v3_2 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30020000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h
new file mode 100644
index 000000000000..0e8ff6c0986d
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_4_1_HWREG_H
+#define MSM_CSID_3_4_1_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+uint8_t csid_lane_assign_v3_4_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
+struct csid_reg_parms_t csid_v3_4_1 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30040001,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h
new file mode 100644
index 000000000000..651526cb3db8
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_4_2_HWREG_H
+#define MSM_CSID_3_4_2_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_4_2[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+struct csid_reg_parms_t csid_v3_4_2 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30040002,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h
new file mode 100644
index 000000000000..fff29fc9d4c4
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_4_3_HWREG_H
+#define MSM_CSID_3_4_3_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_4_3[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+struct csid_reg_parms_t csid_v3_4_3 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30040003,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h
new file mode 100644
index 000000000000..f7d7d3548c4b
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_5_1_HWREG_H
+#define MSM_CSID_3_5_1_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_5_1[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+
+struct csid_reg_parms_t csid_v3_5_1 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x24,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0x9C,
+ 0xA0,
+ 0xA8,
+ 0xAC,
+ 0xB4,
+ 0xB8,
+ 0xBC,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30050001,
+ 0xC,
+ 0x84,
+ 0xA4,
+ 0x7f010800,
+ 20,
+ 17,
+ 16,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h
new file mode 100644
index 000000000000..b423b6e510a0
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_5_HWREG_H
+#define MSM_CSID_3_5_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_5[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+
+struct csid_reg_parms_t csid_v3_5 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x24,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0x9C,
+ 0xA0,
+ 0xA8,
+ 0xAC,
+ 0xB4,
+ 0xB8,
+ 0xBC,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30050000,
+ 0xC,
+ 0x84,
+ 0xA4,
+ 0x7f010800,
+ 20,
+ 17,
+ 16,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h
new file mode 100644
index 000000000000..b95a774ca737
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_6_0_HWREG_H
+#define MSM_CSID_3_6_0_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_6_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+struct csid_reg_parms_t csid_v3_6_0 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30060000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
new file mode 100644
index 000000000000..331ba939adfa
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
@@ -0,0 +1,1341 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqreturn.h>
+#include "msm_csid.h"
+#include "msm_sd.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_dt_util.h"
+#include "include/msm_csid_2_0_hwreg.h"
+#include "include/msm_csid_2_2_hwreg.h"
+#include "include/msm_csid_3_0_hwreg.h"
+#include "include/msm_csid_3_1_hwreg.h"
+#include "include/msm_csid_3_2_hwreg.h"
+#include "include/msm_csid_3_5_hwreg.h"
+#include "include/msm_csid_3_4_1_hwreg.h"
+#include "include/msm_csid_3_4_2_hwreg.h"
+#include "include/msm_csid_3_4_3_hwreg.h"
+#include "include/msm_csid_3_6_0_hwreg.h"
+#include "include/msm_csid_3_5_1_hwreg.h"
+#include "cam_hw_ops.h"
+
+#define V4L2_IDENT_CSID 50002
+#define CSID_VERSION_V20 0x02000011
+#define CSID_VERSION_V22 0x02001000
+#define CSID_VERSION_V30 0x30000000
+#define CSID_VERSION_V31 0x30010000
+#define CSID_VERSION_V31_1 0x30010001
+#define CSID_VERSION_V31_3 0x30010003
+#define CSID_VERSION_V32 0x30020000
+#define CSID_VERSION_V33 0x30030000
+#define CSID_VERSION_V34 0x30040000
+#define CSID_VERSION_V34_1 0x30040001
+#define CSID_VERSION_V34_2 0x30040002
+#define CSID_VERSION_V34_3 0x30040003
+#define CSID_VERSION_V36 0x30060000
+#define CSID_VERSION_V37 0x30070000
+#define CSID_VERSION_V35 0x30050000
+#define CSID_VERSION_V35_1 0x30050001
+#define CSID_VERSION_V40 0x40000000
+#define MSM_CSID_DRV_NAME "msm_csid"
+
+#define DBG_CSID 0
+#define SHORT_PKT_CAPTURE 0
+#define SHORT_PKT_OFFSET 0x200
+#define ENABLE_3P_BIT 1
+#define SOF_DEBUG_ENABLE 1
+#define SOF_DEBUG_DISABLE 0
+
+#define TRUE 1
+#define FALSE 0
+
+#define MAX_LANE_COUNT 4
+#define CSID_TIMEOUT msecs_to_jiffies(100)
+
+#undef CDBG
+#ifdef CONFIG_MSM_AIS_DEBUG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define CDBG(fmt, args...)
+#endif
+
+static struct camera_vreg_t csid_vreg_info[] = {
+ {"qcom,mipi-csi-vdd", 0, 0, 12000},
+};
+
+#ifdef CONFIG_COMPAT
+static struct v4l2_file_operations msm_csid_v4l2_subdev_fops;
+#endif
+
+static int msm_csid_cid_lut(struct csid_device *csid_dev)
+{
+ int rc = 0, i = 0;
+ uint32_t val = 0;
+
+ struct msm_camera_csid_lut_params *csid_lut_params =
+ &csid_dev->current_csid_params.lut_params;
+
+ for (i = 0; i < MAX_CID; i++) {
+ if (i != csid_lut_params->vc_cfg_a[i].cid)
+ continue;
+
+ CDBG("%s lut params num_cid = %d, cid = %d\n",
+ __func__,
+ csid_lut_params->num_cid,
+ csid_lut_params->vc_cfg_a[i].cid);
+ CDBG("%s lut params dt = 0x%x, df = %d\n", __func__,
+ csid_lut_params->vc_cfg_a[i].dt,
+ csid_lut_params->vc_cfg_a[i].decode_format);
+ if (csid_lut_params->vc_cfg_a[i].dt < 0x12 ||
+ csid_lut_params->vc_cfg_a[i].dt > 0x37) {
+ CDBG("%s: unsupported data type 0x%x\n",
+ __func__, csid_lut_params->vc_cfg_a[i].dt);
+ continue;
+ }
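+ /*
+ * Each 32-bit CID_LUT_VC_n register packs the data type for four CIDs,
+ * 8 bits per CID: register index = cid / 4, byte lane = cid % 4.
+ */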
+ val = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_cid_lut_vc_0_addr +
+ (csid_lut_params->vc_cfg_a[i].cid >> 2) * 4)
+ & ~(0xFF << ((csid_lut_params->vc_cfg_a[i].cid % 4) *
+ 8));
+ val |= (csid_lut_params->vc_cfg_a[i].dt <<
+ ((csid_lut_params->vc_cfg_a[i].cid % 4) * 8));
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_cid_lut_vc_0_addr +
+ (csid_lut_params->vc_cfg_a[i].cid >> 2) * 4);
+ }
+ return rc;
+}
+
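+/*
+ * Enable each CID selected in cid_mask by writing its decode format and
+ * enable value into the corresponding CID_n_CFG register; msm_csid_stop()
+ * below clears the same registers to stop streaming on those CIDs.
+ */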
+static int msm_csid_start(struct csid_device *csid_dev, uint32_t cid_mask)
+{
+ uint32_t val = 0, i = 0;
+ struct msm_camera_csid_lut_params *csid_lut_params =
+ &csid_dev->current_csid_params.lut_params;
+
+ for (i = 0; i < MAX_CID; i++) {
+ if (!(cid_mask & (1 << i)))
+ continue;
+
+ val = (csid_lut_params->vc_cfg_a[i].decode_format << 4) | 0x3;
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_cid_n_cfg_addr +
+ (i * 4));
+ }
+
+ return 0;
+}
+
+static int msm_csid_stop(struct csid_device *csid_dev, uint32_t cid_mask)
+{
+ uint32_t val = 0, i = 0;
+ struct msm_camera_csid_lut_params *csid_lut_params;
+
+ csid_lut_params = &csid_dev->current_csid_params.lut_params;
+ for (i = 0; i < MAX_CID; i++) {
+ if (!(cid_mask & (1 << i)))
+ continue;
+
+ val = 0;
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_cid_n_cfg_addr +
+ (i * 4));
+ }
+
+ return 0;
+}
+
+#if (DBG_CSID)
+static void msm_csid_set_debug_reg(struct csid_device *csid_dev,
+ struct msm_camera_csid_params *csid_params)
+{
+ uint32_t val = 0;
+
+ if ((csid_dev->hw_dts_version == CSID_VERSION_V34_1) ||
+ (csid_dev->hw_dts_version == CSID_VERSION_V36)) {
+ val = ((1 << csid_params->lane_cnt) - 1) << 20;
+ msm_camera_io_w(0x7f010800 | val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+ msm_camera_io_w(0x7f010800 | val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ } else {
+ if (csid_dev->csid_3p_enabled == 1) {
+ val = ((1 << csid_params->lane_cnt) - 1) <<
+ csid_dev->ctrl_reg->
+ csid_reg.csid_err_lane_overflow_offset_3p;
+ } else {
+ val = ((1 << csid_params->lane_cnt) - 1) <<
+ csid_dev->ctrl_reg->
+ csid_reg.csid_err_lane_overflow_offset_2p;
+ }
+ val |= csid_dev->ctrl_reg->csid_reg.csid_irq_mask_val;
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ }
+}
+#elif (SHORT_PKT_CAPTURE)
+static void msm_csid_set_debug_reg(struct csid_device *csid_dev,
+ struct msm_camera_csid_params *csid_params)
+{
+ uint32_t val = 0;
+
+ if ((csid_dev->hw_dts_version == CSID_VERSION_V34_1) ||
+ (csid_dev->hw_dts_version == CSID_VERSION_V36)) {
+ val = ((1 << csid_params->lane_cnt) - 1) << 20;
+ msm_camera_io_w(0x7f010a00 | val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+ msm_camera_io_w(0x7f010a00 | val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ } else {
+ if (csid_dev->csid_3p_enabled == 1) {
+ val = ((1 << csid_params->lane_cnt) - 1) <<
+ csid_dev->ctrl_reg->
+ csid_reg.csid_err_lane_overflow_offset_3p;
+ } else {
+ val = ((1 << csid_params->lane_cnt) - 1) <<
+ csid_dev->ctrl_reg->
+ csid_reg.csid_err_lane_overflow_offset_2p;
+ }
+ val |= csid_dev->ctrl_reg->csid_reg.csid_irq_mask_val;
+ val |= SHORT_PKT_OFFSET;
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ }
+}
+#else
+static void msm_csid_set_debug_reg(struct csid_device *csid_dev,
+ struct msm_camera_csid_params *csid_params) {}
+#endif
+
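+/*
+ * Arm or disarm the extended IRQ mask (including the short packet IRQ) used
+ * for SOF freeze debugging; with irq_enable false the pending status is
+ * cleared and the IRQ mask is zeroed.
+ */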
+static void msm_csid_set_sof_freeze_debug_reg(
+ struct csid_device *csid_dev, uint8_t irq_enable)
+{
+ uint32_t val = 0;
+
+ if (!irq_enable) {
+ val = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ msm_camera_io_w(0, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+ return;
+ }
+
+ if (csid_dev->csid_3p_enabled == 1) {
+ val = ((1 << csid_dev->current_csid_params.lane_cnt) - 1) <<
+ csid_dev->ctrl_reg->
+ csid_reg.csid_err_lane_overflow_offset_3p;
+ } else {
+ val = ((1 << csid_dev->current_csid_params.lane_cnt) - 1) <<
+ csid_dev->ctrl_reg->
+ csid_reg.csid_err_lane_overflow_offset_2p;
+ }
+ val |= csid_dev->ctrl_reg->csid_reg.csid_irq_mask_val;
+ val |= SHORT_PKT_OFFSET;
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+}
+
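+/*
+ * Issue a software reset of the whole CSID core and wait up to CSID_TIMEOUT
+ * for the reset-done interrupt to signal reset_complete.
+ */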
+static int msm_csid_reset(struct csid_device *csid_dev)
+{
+ int32_t rc = 0;
+
+ msm_camera_io_w(csid_dev->ctrl_reg->csid_reg.csid_rst_stb_all,
+ csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_rst_cmd_addr);
+ rc = wait_for_completion_timeout(&csid_dev->reset_complete,
+ CSID_TIMEOUT);
+ if (rc <= 0) {
+ pr_err("wait_for_completion in msm_csid_reset fail rc = %d\n",
+ rc);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ }
+ return rc;
+}
+
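+/*
+ * Locate the "csi_src_clk" entry in the clock table and cache its rate and
+ * index so msm_csid_config() can fall back to it when no csi_clk is supplied.
+ */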
+static bool msm_csid_find_max_clk_rate(struct csid_device *csid_dev)
+{
+ int i;
+ bool ret = FALSE;
+
+ for (i = 0; i < csid_dev->num_clk; i++) {
+ if (!strcmp(csid_dev->csid_clk_info[i].clk_name,
+ "csi_src_clk")) {
+ CDBG("%s:%d, copy csi_src_clk, clk_rate[%d] = %ld",
+ __func__, __LINE__, i,
+ csid_dev->csid_clk_info[i].clk_rate);
+ csid_dev->csid_max_clk =
+ csid_dev->csid_clk_info[i].clk_rate;
+ csid_dev->csid_clk_index = i;
+ ret = TRUE;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int msm_csid_config(struct csid_device *csid_dev)
+{
+ int rc = 0;
+ uint32_t val = 0;
+ long clk_rate = 0;
+ uint32_t input_sel;
+ uint32_t lane_assign = 0;
+ uint8_t lane_num = 0;
+ uint8_t i, j;
+ void __iomem *csidbase;
+ struct msm_camera_csid_params *csid_params = &csid_dev->
+ current_csid_params;
+
+ csidbase = csid_dev->base;
+ if (!csidbase) {
+ pr_err("%s:%d csidbase %pK\n", __func__,
+ __LINE__, csidbase);
+ return -EINVAL;
+ }
+
+ CDBG("%s csid_params, lane_cnt = %d, lane_assign = 0x%x\n",
+ __func__,
+ csid_params->lane_cnt,
+ csid_params->lane_assign);
+ CDBG("%s csid_params phy_sel = %d\n", __func__,
+ csid_params->phy_sel);
+ if ((csid_params->lane_cnt == 0) ||
+ (csid_params->lane_cnt > MAX_LANE_COUNT)) {
+ pr_err("%s:%d invalid lane count = %d\n",
+ __func__, __LINE__, csid_params->lane_cnt);
+ return -EINVAL;
+ }
+
+#ifdef RESET_CSID_CFG
+ rc = msm_csid_reset(csid_dev);
+ if (rc < 0) {
+ pr_err("%s:%d msm_csid_reset failed\n", __func__, __LINE__);
+ return rc;
+ }
+#endif
+
+ if (!msm_csid_find_max_clk_rate(csid_dev))
+ pr_err("msm_csid_find_max_clk_rate failed\n");
+
+ clk_rate = (csid_params->csi_clk > 0) ?
+ (csid_params->csi_clk) : csid_dev->csid_max_clk;
+
+ clk_rate = msm_camera_clk_set_rate(&csid_dev->pdev->dev,
+ csid_dev->csid_clk[csid_dev->csid_clk_index], clk_rate);
+ if (clk_rate < 0) {
+ pr_err("csi_src_clk set failed\n");
+ return -EINVAL;
+ }
+
+ if (csid_dev->is_testmode == 1) {
+ struct msm_camera_csid_testmode_parms *tm;
+
+ tm = &csid_dev->testmode_params;
+
+ /* 31:24 V blank, 23:13 H blank, 3:2 num of active DT, 1:0 VC */
+ val = ((tm->v_blanking_count & 0xFF) << 24) |
+ ((tm->h_blanking_count & 0x7FF) << 13);
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_tg_vc_cfg_addr);
+ CDBG("[TG] CSID_TG_VC_CFG_ADDR 0x%08x\n", val);
+
+ /* 28:16 bytes per lines, 12:0 num of lines */
+ val = ((tm->num_bytes_per_line & 0x1FFF) << 16) |
+ (tm->num_lines & 0x1FFF);
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_tg_dt_n_cfg_0_addr);
+ CDBG("[TG] CSID_TG_DT_n_CFG_0_ADDR 0x%08x\n", val);
+
+ /* 5:0 data type */
+ val = csid_params->lut_params.vc_cfg_a[0].dt;
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_tg_dt_n_cfg_1_addr);
+ CDBG("[TG] CSID_TG_DT_n_CFG_1_ADDR 0x%08x\n", val);
+
+ /* 2:0 output random */
+ msm_camera_io_w(csid_dev->testmode_params.payload_mode,
+ csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_tg_dt_n_cfg_2_addr);
+ } else {
+ val = csid_params->lane_cnt - 1;
+
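+ /*
+ * Walk the 4-bit logical lane fields in lane_assign, skip the clock
+ * lane position and remap each entry through this CSID version's
+ * csid_lane_assign table before programming CORE_CTRL.
+ */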
+ for (i = 0, j = 0; i < PHY_LANE_MAX; i++) {
+ if (i == PHY_LANE_CLK)
+ continue;
+ lane_num = (csid_params->lane_assign >> j) & 0xF;
+ if (lane_num >= PHY_LANE_MAX) {
+ pr_err("%s:%d invalid lane number %d\n",
+ __func__, __LINE__, lane_num);
+ return -EINVAL;
+ }
+ if (csid_dev->ctrl_reg->csid_lane_assign[lane_num] >=
+ PHY_LANE_MAX){
+ pr_err("%s:%d invalid lane map %d\n",
+ __func__, __LINE__,
+ csid_dev->ctrl_reg->
+ csid_lane_assign[lane_num]);
+ return -EINVAL;
+ }
+ lane_assign |=
+ csid_dev->ctrl_reg->csid_lane_assign[lane_num]
+ << j;
+ j += 4;
+ }
+
+ CDBG("%s csid_params calculated lane_assign = 0x%X\n",
+ __func__, lane_assign);
+
+ val |= lane_assign <<
+ csid_dev->ctrl_reg->csid_reg.csid_dl_input_sel_shift;
+ if (csid_dev->hw_version < CSID_VERSION_V30) {
+ val |= (0xF << 10);
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_core_ctrl_0_addr);
+ } else {
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_core_ctrl_0_addr);
+ val = csid_params->phy_sel <<
+ csid_dev->ctrl_reg->csid_reg.csid_phy_sel_shift;
+ val |= 0xF;
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_core_ctrl_1_addr);
+ }
+ if ((csid_dev->hw_version == CSID_VERSION_V35) &&
+ (csid_params->csi_3p_sel == 1)) {
+ csid_dev->csid_3p_enabled = 1;
+ val = (csid_params->lane_cnt - 1) << ENABLE_3P_BIT;
+
+ for (i = 0; i < csid_params->lane_cnt; i++) {
+ input_sel =
+ (csid_params->lane_assign >> (4*i))
+ & 0xF;
+ val |= input_sel << (4*(i+1));
+ }
+ val |= csid_params->phy_sel <<
+ csid_dev->ctrl_reg->csid_reg.csid_phy_sel_shift_3p;
+ val |= ENABLE_3P_BIT;
+ msm_camera_io_w(val, csidbase + csid_dev->ctrl_reg
+ ->csid_reg.csid_3p_ctrl_0_addr);
+ }
+ }
+
+ rc = msm_csid_cid_lut(csid_dev);
+ if (rc < 0) {
+ pr_err("%s:%d config cid lut failed\n", __func__, __LINE__);
+ return rc;
+ }
+ msm_csid_set_debug_reg(csid_dev, csid_params);
+
+ if (csid_dev->is_testmode == 1)
+ msm_camera_io_w(0x00A06437, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_tg_ctrl_addr);
+
+ return rc;
+}
+
+#if SHORT_PKT_CAPTURE
+static irqreturn_t msm_csid_irq(int irq_num, void *data)
+{
+ uint32_t irq;
+ uint32_t short_dt = 0;
+ uint32_t count = 0, dt = 0;
+ struct csid_device *csid_dev = data;
+
+ if (!csid_dev) {
+ pr_err("%s:%d csid_dev NULL\n", __func__, __LINE__);
+ return IRQ_HANDLED;
+ }
+ irq = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+ CDBG("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
+ __func__, csid_dev->pdev->id, irq);
+ if (irq & (0x1 <<
+ csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift))
+ complete(&csid_dev->reset_complete);
+ if (irq & SHORT_PKT_OFFSET) {
+ short_dt = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->
+ csid_reg.csid_captured_short_pkt_addr);
+ count = (short_dt >> 8) & 0xffff;
+ dt = short_dt >> 24;
+ CDBG("CSID:: %s:%d core %d dt: 0x%x, count: %d\n",
+ __func__, __LINE__, csid_dev->pdev->id, dt, count);
+ msm_camera_io_w(0x101, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_rst_cmd_addr);
+ }
+ msm_camera_io_w(irq, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ return IRQ_HANDLED;
+}
+#else
+static irqreturn_t msm_csid_irq(int irq_num, void *data)
+{
+ uint32_t irq;
+ struct csid_device *csid_dev = data;
+
+ if (!csid_dev) {
+ pr_err("%s:%d csid_dev NULL\n", __func__, __LINE__);
+ return IRQ_HANDLED;
+ }
+
+ if (csid_dev->csid_sof_debug == SOF_DEBUG_ENABLE) {
+ if (csid_dev->csid_sof_debug_count < CSID_SOF_DEBUG_COUNT) {
+ csid_dev->csid_sof_debug_count++;
+ } else {
+ msm_csid_set_sof_freeze_debug_reg(csid_dev, false);
+ return IRQ_HANDLED;
+ }
+ }
+
+ irq = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+ pr_err_ratelimited("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
+ __func__, csid_dev->pdev->id, irq);
+ if (irq & (0x1 <<
+ csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift))
+ complete(&csid_dev->reset_complete);
+ msm_camera_io_w(irq, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ return IRQ_HANDLED;
+}
+#endif
+
+static int msm_csid_irq_routine(struct v4l2_subdev *sd, u32 status,
+ bool *handled)
+{
+ struct csid_device *csid_dev = v4l2_get_subdevdata(sd);
+ irqreturn_t ret;
+
+ CDBG("%s E\n", __func__);
+ ret = msm_csid_irq(csid_dev->irq->start, csid_dev);
+ *handled = TRUE;
+ return 0;
+}
+
+static int msm_csid_init(struct csid_device *csid_dev, uint32_t *csid_version)
+{
+ int rc = 0;
+
+ if (!csid_version) {
+ pr_err("%s:%d csid_version NULL\n", __func__, __LINE__);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ csid_dev->csid_sof_debug_count = 0;
+ csid_dev->reg_ptr = NULL;
+
+ if (csid_dev->csid_state == CSID_POWER_UP) {
+ pr_err("%s: csid invalid state %d\n", __func__,
+ csid_dev->csid_state);
+ return -EINVAL;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSID,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ pr_info("%s: CSID_VERSION = 0x%x\n", __func__,
+ csid_dev->ctrl_reg->csid_reg.csid_version);
+ /* power up */
+ rc = msm_camera_config_vreg(&csid_dev->pdev->dev, csid_dev->csid_vreg,
+ csid_dev->regulator_count, NULL, 0,
+ &csid_dev->csid_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d csid config_vreg failed\n", __func__, __LINE__);
+ goto top_vreg_config_failed;
+ }
+
+ rc = msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 1);
+ if (rc < 0) {
+ pr_err("%s: regulator on failed\n", __func__);
+ goto csid_vreg_config_failed;
+ }
+
+ rc = msm_camera_enable_vreg(&csid_dev->pdev->dev, csid_dev->csid_vreg,
+ csid_dev->regulator_count, NULL, 0,
+ &csid_dev->csid_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d csid enable_vreg failed\n", __func__, __LINE__);
+ goto top_vreg_enable_failed;
+ }
+
+ rc = msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 1);
+ if (rc < 0) {
+ pr_err("%s: regulator enable failed\n", __func__);
+ goto csid_vreg_enable_failed;
+ }
+ rc = msm_camera_clk_enable(&csid_dev->pdev->dev,
+ csid_dev->csid_clk_info, csid_dev->csid_clk,
+ csid_dev->num_clk, true);
+ if (rc < 0) {
+ pr_err("%s:%d clock enable failed\n",
+ __func__, __LINE__);
+ goto clk_enable_failed;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ csid_dev->hw_version =
+ msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_hw_version_addr);
+ CDBG("%s:%d called csid_dev->hw_version %x\n", __func__, __LINE__,
+ csid_dev->hw_version);
+ *csid_version = csid_dev->hw_version;
+ csid_dev->csid_sof_debug = SOF_DEBUG_DISABLE;
+
+ csid_dev->is_testmode = 0;
+
+ init_completion(&csid_dev->reset_complete);
+
+ rc = msm_camera_enable_irq(csid_dev->irq, true);
+ if (rc < 0)
+ pr_err("%s: irq enable failed\n", __func__);
+ rc = msm_csid_reset(csid_dev);
+ if (rc < 0) {
+ pr_err("%s:%d msm_csid_reset failed\n", __func__, __LINE__);
+ goto msm_csid_reset_fail;
+ }
+
+ csid_dev->csid_state = CSID_POWER_UP;
+ return rc;
+
+msm_csid_reset_fail:
+ msm_camera_enable_irq(csid_dev->irq, false);
+ msm_camera_clk_enable(&csid_dev->pdev->dev, csid_dev->csid_clk_info,
+ csid_dev->csid_clk, csid_dev->num_clk, false);
+clk_enable_failed:
+ msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+csid_vreg_enable_failed:
+ msm_camera_enable_vreg(&csid_dev->pdev->dev, csid_dev->csid_vreg,
+ csid_dev->regulator_count, NULL, 0,
+ &csid_dev->csid_reg_ptr[0], 0);
+top_vreg_enable_failed:
+ msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+csid_vreg_config_failed:
+ msm_camera_config_vreg(&csid_dev->pdev->dev, csid_dev->csid_vreg,
+ csid_dev->regulator_count, NULL, 0,
+ &csid_dev->csid_reg_ptr[0], 0);
+top_vreg_config_failed:
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSID,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote from AHB\n", __func__);
+ return rc;
+}
+
+static int msm_csid_release(struct csid_device *csid_dev)
+{
+ uint32_t irq;
+
+ if (csid_dev->csid_state != CSID_POWER_UP) {
+ pr_err("%s: csid invalid state %d\n", __func__,
+ csid_dev->csid_state);
+ return -EINVAL;
+ }
+
+ CDBG("%s:%d, hw_version = 0x%x\n", __func__, __LINE__,
+ csid_dev->hw_version);
+
+ irq = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+ msm_camera_io_w(irq, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ msm_camera_io_w(0, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+
+ msm_camera_enable_irq(csid_dev->irq, false);
+
+ msm_camera_clk_enable(&csid_dev->pdev->dev,
+ csid_dev->csid_clk_info,
+ csid_dev->csid_clk,
+ csid_dev->num_clk, false);
+
+ msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+
+ msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_dev->csid_vreg, csid_dev->regulator_count, NULL,
+ 0, &csid_dev->csid_reg_ptr[0], 0);
+
+ msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+
+ msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_dev->csid_vreg, csid_dev->regulator_count, NULL,
+ 0, &csid_dev->csid_reg_ptr[0], 0);
+
+ if (!IS_ERR_OR_NULL(csid_dev->reg_ptr)) {
+ regulator_disable(csid_dev->reg_ptr);
+ regulator_put(csid_dev->reg_ptr);
+ }
+
+ csid_dev->csid_state = CSID_POWER_DOWN;
+
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSID,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote from AHB\n", __func__);
+ return 0;
+}
+
+static int32_t msm_csid_cmd(struct csid_device *csid_dev, void __user *arg)
+{
+ int rc = 0;
+ struct csid_cfg_data *cdata = (struct csid_cfg_data *)arg;
+
+ if (!csid_dev || !cdata) {
+ pr_err("%s:%d csid_dev %pK, cdata %pK\n", __func__, __LINE__,
+ csid_dev, cdata);
+ return -EINVAL;
+ }
+ CDBG("%s cfgtype = %d\n", __func__, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CSID_INIT:
+ rc = msm_csid_init(csid_dev, &cdata->cfg.csid_version);
+ CDBG("%s csid version 0x%x\n", __func__,
+ cdata->cfg.csid_version);
+ break;
+ case CSID_TESTMODE_CFG: {
+ csid_dev->is_testmode = 1;
+ if (copy_from_user(&csid_dev->testmode_params,
+ (void *)cdata->cfg.csid_testmode_params,
+ sizeof(struct msm_camera_csid_testmode_parms))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+ case CSID_CFG: {
+ struct msm_camera_csid_params csid_params;
+ int i = 0;
+
+ if (copy_from_user(&csid_params,
+ (void *)cdata->cfg.csid_params,
+ sizeof(struct msm_camera_csid_params))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ if (csid_params.lut_params.num_cid < 1 ||
+ csid_params.lut_params.num_cid > MAX_CID) {
+ pr_err("%s: %d num_cid outside range\n",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+
+ memset(&csid_dev->current_csid_params, 0x0,
+ sizeof(csid_dev->current_csid_params));
+ csid_dev->current_csid_params.lane_cnt = csid_params.lane_cnt;
+ csid_dev->current_csid_params.lane_assign = csid_params.
+ lane_assign;
+ csid_dev->current_csid_params.phy_sel = csid_params.phy_sel;
+ csid_dev->current_csid_params.csi_clk = csid_params.csi_clk;
+ csid_dev->current_csid_params.csi_3p_sel = csid_params.
+ csi_3p_sel;
+
+ for (i = 0; i < csid_params.lut_params.num_cid; i++) {
+ unsigned char cid = csid_params.lut_params.vc_cfg_a[i].
+ cid;
+
+ csid_dev->current_csid_params.lut_params.vc_cfg_a[cid] =
+ csid_params.lut_params.vc_cfg_a[i];
+
+ CDBG("vc_cfg_a[%d] : dt=%d, decode_fmt=%d",
+ csid_params.lut_params.vc_cfg_a[i].cid,
+ csid_params.lut_params.vc_cfg_a[i].dt,
+ csid_params.lut_params.vc_cfg_a[i].
+ decode_format);
+ }
+
+ rc = msm_csid_config(csid_dev);
+ break;
+ }
+ case CSID_RELEASE:
+ rc = msm_csid_release(csid_dev);
+ break;
+ case CSID_UPDATE_CFG: {
+ struct msm_camera_csid_params csid_params;
+ int i = 0;
+
+ if (copy_from_user(&csid_params,
+ (void *)cdata->cfg.csid_params,
+ sizeof(struct msm_camera_csid_params))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ if (csid_params.lut_params.num_cid < 1 ||
+ csid_params.lut_params.num_cid > MAX_CID) {
+ pr_err("%s: %d num_cid outside range\n",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+
+ for (i = 0; i < csid_params.lut_params.num_cid; i++) {
+ unsigned char cid = csid_params.lut_params.vc_cfg_a[i].
+ cid;
+
+ csid_dev->current_csid_params.lut_params.vc_cfg_a[cid] =
+ csid_params.lut_params.vc_cfg_a[i];
+
+ CDBG("vc_cfg_a[%d] : dt=%d, decode_fmt=%d",
+ csid_params.lut_params.vc_cfg_a[i].cid,
+ csid_params.lut_params.vc_cfg_a[i].dt,
+ csid_params.lut_params.vc_cfg_a[i].
+ decode_format);
+ }
+
+ rc = msm_csid_cid_lut(csid_dev);
+
+ break;
+ }
+ case CSID_START:
+ rc = msm_csid_start(csid_dev, cdata->cfg.csid_cidmask);
+ break;
+ case CSID_STOP:
+ rc = msm_csid_stop(csid_dev, cdata->cfg.csid_cidmask);
+ break;
+ default:
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+ return rc;
+}
+
+static int32_t msm_csid_get_subdev_id(struct csid_device *csid_dev, void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ if (!subdev_id) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ *subdev_id = csid_dev->pdev->id;
+ CDBG("%s:%d subdev_id %d\n", __func__, __LINE__, *subdev_id);
+ return 0;
+}
+
+static long msm_csid_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = -ENOIOCTLCMD;
+ struct csid_device *csid_dev = v4l2_get_subdevdata(sd);
+
+ mutex_lock(&csid_dev->mutex);
+ CDBG("%s:%d id %d\n", __func__, __LINE__, csid_dev->pdev->id);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ rc = msm_csid_get_subdev_id(csid_dev, arg);
+ break;
+ case VIDIOC_MSM_CSID_IO_CFG:
+ rc = msm_csid_cmd(csid_dev, arg);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ if (csid_dev->csid_state != CSID_POWER_UP)
+ break;
+ if (csid_dev->csid_sof_debug == SOF_DEBUG_DISABLE) {
+ csid_dev->csid_sof_debug = SOF_DEBUG_ENABLE;
+ msm_csid_set_sof_freeze_debug_reg(csid_dev, true);
+ }
+ break;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ if (csid_dev->csid_state != CSID_POWER_UP)
+ break;
+ csid_dev->csid_sof_debug = SOF_DEBUG_DISABLE;
+ msm_csid_set_sof_freeze_debug_reg(csid_dev, false);
+ break;
+ case VIDIOC_MSM_CSID_RELEASE:
+ case MSM_SD_SHUTDOWN:
+ rc = msm_csid_release(csid_dev);
+ break;
+ default:
+ pr_err_ratelimited("%s: command not found\n", __func__);
+ }
+ CDBG("%s:%d\n", __func__, __LINE__);
+ mutex_unlock(&csid_dev->mutex);
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void __user *arg)
+{
+ int rc = 0;
+ struct csid_cfg_data32 *arg32 = (struct csid_cfg_data32 *) (arg);
+
+ if (!csid_dev || !arg32) {
+ pr_err("%s:%d csid_dev %pK, arg32 %pK\n", __func__, __LINE__,
+ csid_dev, arg32);
+ return -EINVAL;
+ }
+
+ CDBG("%s cfgtype = %d\n", __func__, arg32->cfgtype);
+ switch (arg32->cfgtype) {
+ case CSID_INIT:
+ rc = msm_csid_init(csid_dev, &arg32->cfg.csid_version);
+ CDBG("%s csid version 0x%x\n", __func__,
+ arg32->cfg.csid_version);
+ break;
+ case CSID_TESTMODE_CFG: {
+ csid_dev->is_testmode = 1;
+ if (copy_from_user(&csid_dev->testmode_params,
+ (void *)compat_ptr(arg32->cfg.csid_testmode_params),
+ sizeof(struct msm_camera_csid_testmode_parms))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+ case CSID_CFG: {
+ int i = 0;
+ struct msm_camera_csid_params32 csid_params32;
+
+ if (copy_from_user(&csid_params32,
+ (void *)compat_ptr(arg32->cfg.csid_params),
+ sizeof(struct msm_camera_csid_params32))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (csid_params32.lut_params.num_cid < 1 ||
+ csid_params32.lut_params.num_cid > MAX_CID) {
+ pr_err("%s: %d num_cid outside range\n", __func__,
+ __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+
+ memset(&csid_dev->current_csid_params, 0x0,
+ sizeof(csid_dev->current_csid_params));
+ csid_dev->current_csid_params.lane_cnt = csid_params32.lane_cnt;
+ csid_dev->current_csid_params.lane_assign = csid_params32.
+ lane_assign;
+ csid_dev->current_csid_params.phy_sel = csid_params32.phy_sel;
+ csid_dev->current_csid_params.csi_clk = csid_params32.csi_clk;
+ csid_dev->current_csid_params.csi_3p_sel = csid_params32.
+ csi_3p_sel;
+
+ for (i = 0; i < csid_params32.lut_params.num_cid; i++) {
+ unsigned char cid =
+ csid_params32.lut_params.vc_cfg_a[i].cid;
+ csid_dev->current_csid_params.lut_params.vc_cfg_a[cid] =
+ csid_params32.lut_params.vc_cfg_a[i];
+
+ CDBG("vc_cfg_a[%d] : dt=%d, decode_fmt=%d",
+ csid_params32.lut_params.vc_cfg_a[i].cid,
+ csid_params32.lut_params.vc_cfg_a[i].dt,
+ csid_params32.lut_params.vc_cfg_a[i].
+ decode_format);
+ }
+
+ rc = msm_csid_config(csid_dev);
+ break;
+ }
+ case CSID_RELEASE:
+ rc = msm_csid_release(csid_dev);
+ break;
+ case CSID_UPDATE_CFG: {
+ int i = 0;
+ struct msm_camera_csid_params32 csid_params32;
+
+ if (copy_from_user(&csid_params32,
+ (void *)compat_ptr(arg32->cfg.csid_params),
+ sizeof(struct msm_camera_csid_params32))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (csid_params32.lut_params.num_cid < 1 ||
+ csid_params32.lut_params.num_cid > MAX_CID) {
+ pr_err("%s: %d num_cid outside range\n", __func__,
+ __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+
+ for (i = 0; i < csid_params32.lut_params.num_cid; i++) {
+ unsigned char cid =
+ csid_params32.lut_params.vc_cfg_a[i].cid;
+ csid_dev->current_csid_params.lut_params.vc_cfg_a[cid] =
+ csid_params32.lut_params.vc_cfg_a[i];
+
+ CDBG("vc_cfg_a[%d] : dt=%d, decode_fmt=%d",
+ csid_params32.lut_params.vc_cfg_a[i].cid,
+ csid_params32.lut_params.vc_cfg_a[i].dt,
+ csid_params32.lut_params.vc_cfg_a[i].
+ decode_format);
+ }
+
+ rc = msm_csid_cid_lut(csid_dev);
+ break;
+ }
+ case CSID_START:
+ rc = msm_csid_start(csid_dev, arg32->cfg.csid_cidmask);
+ break;
+ case CSID_STOP:
+ rc = msm_csid_stop(csid_dev, arg32->cfg.csid_cidmask);
+ break;
+ default:
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+ return rc;
+}
+
+static long msm_csid_subdev_ioctl32(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = -ENOIOCTLCMD;
+ struct csid_device *csid_dev = v4l2_get_subdevdata(sd);
+
+ mutex_lock(&csid_dev->mutex);
+ CDBG("%s:%d id %d\n", __func__, __LINE__, csid_dev->pdev->id);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ rc = msm_csid_get_subdev_id(csid_dev, arg);
+ break;
+ case VIDIOC_MSM_CSID_IO_CFG32:
+ rc = msm_csid_cmd32(csid_dev, arg);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ if (csid_dev->csid_state != CSID_POWER_UP)
+ break;
+ if (csid_dev->csid_sof_debug == SOF_DEBUG_DISABLE) {
+ csid_dev->csid_sof_debug = SOF_DEBUG_ENABLE;
+ msm_csid_set_sof_freeze_debug_reg(csid_dev, true);
+ }
+ break;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ if (csid_dev->csid_state != CSID_POWER_UP)
+ break;
+ csid_dev->csid_sof_debug = SOF_DEBUG_DISABLE;
+ msm_csid_set_sof_freeze_debug_reg(csid_dev, false);
+ break;
+ case VIDIOC_MSM_CSID_RELEASE:
+ case MSM_SD_SHUTDOWN:
+ rc = msm_csid_release(csid_dev);
+ break;
+ default:
+ pr_err_ratelimited("%s: command not found\n", __func__);
+ }
+ CDBG("%s:%d\n", __func__, __LINE__);
+ mutex_unlock(&csid_dev->mutex);
+ return rc;
+}
+
+static long msm_csid_subdev_do_ioctl32(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return msm_csid_subdev_ioctl32(sd, cmd, arg);
+}
+
+static long msm_csid_subdev_fops_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_csid_subdev_do_ioctl32);
+}
+#endif
+
+static const struct v4l2_subdev_internal_ops msm_csid_internal_ops;
+
+static struct v4l2_subdev_core_ops msm_csid_subdev_core_ops = {
+ .ioctl = &msm_csid_subdev_ioctl,
+ .interrupt_service_routine = msm_csid_irq_routine,
+};
+
+static const struct v4l2_subdev_ops msm_csid_subdev_ops = {
+ .core = &msm_csid_subdev_core_ops,
+};
+
+static int csid_probe(struct platform_device *pdev)
+{
+ struct csid_device *new_csid_dev;
+ uint32_t csi_vdd_voltage = 0;
+ int rc = 0;
+
+ new_csid_dev = kzalloc(sizeof(struct csid_device), GFP_KERNEL);
+ if (!new_csid_dev)
+ return -ENOMEM;
+
+ new_csid_dev->csid_3p_enabled = 0;
+ new_csid_dev->ctrl_reg = NULL;
+ new_csid_dev->ctrl_reg = kzalloc(sizeof(struct csid_ctrl_t),
+ GFP_KERNEL);
+ if (!new_csid_dev->ctrl_reg) {
+ kfree(new_csid_dev);
+ return -ENOMEM;
+ }
+
+ v4l2_subdev_init(&new_csid_dev->msm_sd.sd, &msm_csid_subdev_ops);
+ v4l2_set_subdevdata(&new_csid_dev->msm_sd.sd, new_csid_dev);
+ platform_set_drvdata(pdev, &new_csid_dev->msm_sd.sd);
+ mutex_init(&new_csid_dev->mutex);
+
+ if (pdev->dev.of_node) {
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+ if (rc < 0) {
+ pr_err("%s:%d failed to read cell-index\n", __func__,
+ __LINE__);
+ goto csid_no_resource;
+ }
+ CDBG("%s device id %d\n", __func__, pdev->id);
+
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,csi-vdd-voltage", &csi_vdd_voltage);
+ if (rc < 0) {
+ pr_err("%s:%d failed to read qcom,csi-vdd-voltage\n",
+ __func__, __LINE__);
+ goto csid_no_resource;
+ }
+ CDBG("%s:%d reading mipi_csi_vdd is %d\n", __func__, __LINE__,
+ csi_vdd_voltage);
+
+ csid_vreg_info[0].min_voltage = csi_vdd_voltage;
+ csid_vreg_info[0].max_voltage = csi_vdd_voltage;
+ }
+
+ rc = msm_camera_get_clk_info(pdev, &new_csid_dev->csid_clk_info,
+ &new_csid_dev->csid_clk, &new_csid_dev->num_clk);
+ if (rc < 0) {
+ pr_err("%s: msm_camera_get_clk_info failed", __func__);
+ rc = -EFAULT;
+ goto csid_no_resource;
+ }
+
+ rc = msm_camera_get_dt_vreg_data(pdev->dev.of_node,
+ &(new_csid_dev->csid_vreg), &(new_csid_dev->regulator_count));
+ if (rc < 0) {
+ pr_err("%s: get vreg data from dtsi fail\n", __func__);
+ rc = -EFAULT;
+ goto csid_no_resource;
+ }
+
+ if ((new_csid_dev->regulator_count < 0) ||
+ (new_csid_dev->regulator_count > MAX_REGULATOR)) {
+ pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
+ new_csid_dev->regulator_count, MAX_REGULATOR);
+ rc = -EFAULT;
+ goto csid_no_resource;
+ }
+
+ new_csid_dev->base = msm_camera_get_reg_base(pdev, "csid", true);
+ if (!new_csid_dev->base) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto csid_invalid_vreg_data;
+ }
+ new_csid_dev->irq = msm_camera_get_irq(pdev, "csid");
+ if (!new_csid_dev->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto csid_invalid_irq;
+ }
+ new_csid_dev->pdev = pdev;
+ new_csid_dev->msm_sd.sd.internal_ops = &msm_csid_internal_ops;
+ new_csid_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(new_csid_dev->msm_sd.sd.name,
+ ARRAY_SIZE(new_csid_dev->msm_sd.sd.name), "msm_csid");
+ media_entity_init(&new_csid_dev->msm_sd.sd.entity, 0, NULL, 0);
+ new_csid_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ new_csid_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_CSID;
+ new_csid_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x5;
+ msm_sd_register(&new_csid_dev->msm_sd);
+
+#ifdef CONFIG_COMPAT
+ msm_cam_copy_v4l2_subdev_fops(&msm_csid_v4l2_subdev_fops);
+ msm_csid_v4l2_subdev_fops.compat_ioctl32 = msm_csid_subdev_fops_ioctl32;
+ new_csid_dev->msm_sd.sd.devnode->fops = &msm_csid_v4l2_subdev_fops;
+#endif
+
+ rc = msm_camera_register_irq(pdev, new_csid_dev->irq,
+ msm_csid_irq, IRQF_TRIGGER_RISING, "csid", new_csid_dev);
+ if (rc < 0) {
+ pr_err("%s: irq request fail\n", __func__);
+ rc = -EBUSY;
+ goto csid_invalid_irq;
+ }
+ rc = msm_camera_enable_irq(new_csid_dev->irq, false);
+ if (rc < 0) {
+ pr_err("%s Error registering irq ", __func__);
+ rc = -EBUSY;
+ goto csid_invalid_irq;
+ }
+
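+ /*
+ * Select the register layout and lane remap table matching the CSID IP
+ * revision named by the DT compatible string.
+ */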
+ if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v2.0")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v2_0;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v2_0;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V20;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v2.2")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v2_2;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v2_2;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V22;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.0")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_0;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_0;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V30;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v4.0")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_0;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_0;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V40;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.1")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_1;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_1;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V31;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.2")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_2;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_2;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V32;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.4.1")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_4_1;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V34_1;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_4_1;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.4.2")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_4_2;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V34_2;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_4_2;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.4.3")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_4_3;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V34_3;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_4_3;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.6.0")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_6_0;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V36;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_6_0;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.5")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_5;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_5;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V35;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.5.1")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_5_1;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_5_1;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V35_1;
+ } else {
+ pr_err("%s:%d, invalid hw version : 0x%x", __func__, __LINE__,
+ new_csid_dev->hw_dts_version);
+ rc = -EINVAL;
+ goto csid_invalid_irq;
+ }
+
+ new_csid_dev->csid_state = CSID_POWER_DOWN;
+ return 0;
+
+csid_invalid_irq:
+ msm_camera_put_reg_base(pdev, new_csid_dev->base, "csid", true);
+csid_invalid_vreg_data:
+ kfree(new_csid_dev->csid_vreg);
+csid_no_resource:
+ mutex_destroy(&new_csid_dev->mutex);
+ kfree(new_csid_dev->ctrl_reg);
+ kfree(new_csid_dev);
+ return rc;
+}
+
+static int msm_csid_exit(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+ struct csid_device *csid_dev =
+ v4l2_get_subdevdata(subdev);
+
+ msm_camera_put_clk_info(pdev, &csid_dev->csid_clk_info,
+ &csid_dev->csid_clk, csid_dev->num_clk);
+ msm_camera_put_reg_base(pdev, csid_dev->base, "csid", true);
+ kfree(csid_dev->csid_vreg);
+ kfree(csid_dev->ctrl_reg);
+ kfree(csid_dev);
+ return 0;
+}
+
+static const struct of_device_id msm_csid_dt_match[] = {
+ {.compatible = "qcom,csid"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_csid_dt_match);
+
+static struct platform_driver csid_driver = {
+ .probe = csid_probe,
+ .remove = msm_csid_exit,
+ .driver = {
+ .name = MSM_CSID_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_csid_dt_match,
+ },
+};
+
+static int __init msm_csid_init_module(void)
+{
+ return platform_driver_register(&csid_driver);
+}
+
+static void __exit msm_csid_exit_module(void)
+{
+ platform_driver_unregister(&csid_driver);
+}
+
+module_init(msm_csid_init_module);
+module_exit(msm_csid_exit_module);
+MODULE_DESCRIPTION("MSM CSID driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.h b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.h
new file mode 100644
index 000000000000..58fc96124487
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_H
+#define MSM_CSID_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais_sensor.h>
+#include "msm_sd.h"
+#include "cam_soc_api.h"
+
+#define CSID_SOF_DEBUG_COUNT 3
+
+enum csiphy_lane_assign {
+ PHY_LANE_D0,
+ PHY_LANE_CLK,
+ PHY_LANE_D1,
+ PHY_LANE_D2,
+ PHY_LANE_D3,
+ PHY_LANE_MAX,
+};
+
+struct csid_reg_parms_t {
+/* MIPI CSID registers */
+ uint32_t csid_hw_version_addr;
+ uint32_t csid_core_ctrl_0_addr;
+ uint32_t csid_core_ctrl_1_addr;
+ uint32_t csid_rst_cmd_addr;
+ uint32_t csid_cid_lut_vc_0_addr;
+ uint32_t csid_cid_lut_vc_1_addr;
+ uint32_t csid_cid_lut_vc_2_addr;
+ uint32_t csid_cid_lut_vc_3_addr;
+ uint32_t csid_cid_n_cfg_addr;
+ uint32_t csid_irq_clear_cmd_addr;
+ uint32_t csid_irq_mask_addr;
+ uint32_t csid_irq_status_addr;
+ uint32_t csid_captured_unmapped_long_pkt_hdr_addr;
+ uint32_t csid_captured_mmaped_long_pkt_hdr_addr;
+ uint32_t csid_captured_short_pkt_addr;
+ uint32_t csid_captured_long_pkt_hdr_addr;
+ uint32_t csid_captured_long_pkt_ftr_addr;
+ uint32_t csid_pif_misr_dl0_addr;
+ uint32_t csid_pif_misr_dl1_addr;
+ uint32_t csid_pif_misr_dl2_addr;
+ uint32_t csid_pif_misr_dl3_addr;
+ uint32_t csid_stats_total_pkts_rcvd_addr;
+ uint32_t csid_stats_ecc_addr;
+ uint32_t csid_stats_crc_addr;
+ uint32_t csid_tg_ctrl_addr;
+ uint32_t csid_tg_vc_cfg_addr;
+ uint32_t csid_tg_dt_n_cfg_0_addr;
+ uint32_t csid_tg_dt_n_cfg_1_addr;
+ uint32_t csid_tg_dt_n_cfg_2_addr;
+ uint32_t csid_rst_done_irq_bitshift;
+ uint32_t csid_rst_stb_all;
+ uint32_t csid_dl_input_sel_shift;
+ uint32_t csid_phy_sel_shift;
+ uint32_t csid_version;
+ uint32_t csid_3p_ctrl_0_addr;
+ uint32_t csid_3p_pkt_hdr_addr;
+ uint32_t csid_test_bus_ctrl;
+ uint32_t csid_irq_mask_val;
+ uint32_t csid_err_lane_overflow_offset_2p;
+ uint32_t csid_err_lane_overflow_offset_3p;
+ uint32_t csid_phy_sel_shift_3p;
+};
+
+struct csid_ctrl_t {
+ struct csid_reg_parms_t csid_reg;
+ uint8_t *csid_lane_assign;
+};
+
+enum msm_csid_state_t {
+ CSID_POWER_UP,
+ CSID_POWER_DOWN,
+};
+
+struct csid_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct resource *irq;
+ struct regulator *csi_vdd;
+ void __iomem *base;
+ struct mutex mutex;
+ struct completion reset_complete;
+ uint32_t hw_version;
+ uint32_t hw_dts_version;
+ enum msm_csid_state_t csid_state;
+ struct csid_ctrl_t *ctrl_reg;
+ struct regulator *reg_ptr;
+ size_t num_clk;
+ struct clk **csid_clk;
+ struct msm_cam_clk_info *csid_clk_info;
+ uint32_t csid_clk_index;
+ uint32_t csid_max_clk;
+ uint32_t csid_3p_enabled;
+ struct camera_vreg_t *csid_vreg;
+ struct regulator *csid_reg_ptr[MAX_REGULATOR];
+ int32_t regulator_count;
+ uint8_t is_testmode;
+ struct msm_camera_csid_testmode_parms testmode_params;
+ struct msm_camera_csid_params current_csid_params;
+ uint32_t csid_sof_debug;
+ uint32_t csid_lane_cnt;
+ uint32_t csid_sof_debug_count;
+
+ void *csiphy_dev;
+};
+
+#define VIDIOC_MSM_CSID_RELEASE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 12, struct v4l2_subdev*)
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/Makefile b/drivers/media/platform/msm/ais/sensor/csiphy/Makefile
new file mode 100644
index 000000000000..d3fd33252f8e
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_csiphy.o
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h
new file mode 100644
index 000000000000..618926fa8341
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_2_0_HWREG_H
+#define MSM_CSIPHY_2_0_HWREG_H
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v2_0 = {
+ /* MIPI CSI PHY registers */
+ 0x17C,
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x100,
+ 0x104,
+ 0x108,
+ 0x10C,
+ 0x110,
+ 0x128,
+ 0x140,
+ 0x144,
+ 0x164,
+ 0x180,
+ 0x1A0,
+ 0x6F,
+ 0x1A4,
+ 0x1C0,
+ 0x1C4,
+ 0x4,
+ 0x1E0,
+ 0x1E8,
+ 0x0,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h
new file mode 100644
index 000000000000..867aec2e0103
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_2_2_HWREG_H
+#define MSM_CSIPHY_2_2_HWREG_H
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v2_2 = {
+ /* MIPI CSI PHY registers */
+ 0x17C,
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x100,
+ 0x104,
+ 0x108,
+ 0x10C,
+ 0x110,
+ 0x128,
+ 0x140,
+ 0x144,
+ 0x164,
+ 0x180,
+ 0x1A0,
+ 0x6F,
+ 0x1A4,
+ 0x1C0,
+ 0x1C4,
+ 0x4,
+ 0x1E0,
+ 0x1E8,
+ 0x1,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h
new file mode 100644
index 000000000000..69efdcc71499
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_0_HWREG_H
+#define MSM_CSIPHY_3_0_HWREG_H
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_0 = {
+ /* MIPI CSI PHY registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x100,
+ 0x104,
+ 0x108,
+ 0x10C,
+ 0x110,
+ 0x128,
+ 0x140,
+ 0x144,
+ 0x164,
+ 0x188,
+ 0x18C,
+ 0x1AC,
+ 0x3F,
+ 0x1AC,
+ 0x1CC,
+ 0x1CC,
+ 0x4,
+ 0x1EC,
+ 0x1F4,
+ 0x10,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h
new file mode 100644
index 000000000000..7fc74a366a6c
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_1_HWREG_H
+#define MSM_CSIPHY_3_1_HWREG_H
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_1 = {
+ /* MIPI CSI PHY registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x100,
+ 0x104,
+ 0x108,
+ 0x10C,
+ 0x1C,
+ 0x28,
+ 0x140,
+ 0x144,
+ 0x164,
+ 0x188,
+ 0x18C,
+ 0x1AC,
+ 0x3F,
+ 0x1AC,
+ 0x1CC,
+ 0x1CC,
+ 0x4,
+ 0x1EC,
+ 0x1F4,
+ 0x31,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h
new file mode 100644
index 000000000000..cdf62d46ee7d
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_2_HWREG_H
+#define MSM_CSIPHY_3_2_HWREG_H
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_2 = {
+ /* MIPI CSI PHY registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x100,
+ 0x104,
+ 0x108,
+ 0x10C,
+ 0x110,
+ 0x128,
+ 0x140,
+ 0x144,
+ 0x164,
+ 0x188,
+ 0x18C,
+ 0x1AC,
+ 0x3F,
+ 0x1AC,
+ 0x1CC,
+ 0x1CC,
+ 0x4,
+ 0x1EC,
+ 0x1F4,
+ 0x32,
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h
new file mode 100644
index 000000000000..5af1ded189a6
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_4_2_1_HWREG_H
+#define MSM_CSIPHY_3_4_2_1_HWREG_H
+
+#define ULPM_WAKE_UP_TIMER_MODE 2
+#define GLITCH_ELIMINATION_NUM 0x12 /* bit [6:4] */
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_4_2_1 = {
+ .mipi_csiphy_interrupt_status0_addr = 0x8B0,
+ .mipi_csiphy_interrupt_clear0_addr = 0x858,
+ .mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+ .combo_clk_mask = 0x10,
+};
+
+struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_1_3ph = {
+ /* MIPI CSI PHY registers */
+ {0x814, 0x0},
+ {0x818, 0x1},
+ {0x188, 0x7F},
+ {0x18C, 0x7F},
+ {0x190, 0x0},
+ {0x104, 0x6},
+ {0x108, 0x0},
+ {0x10c, 0x0},
+ {0x114, 0x20},
+ {0x118, 0x3E},
+ {0x11c, 0x41},
+ {0x120, 0x41},
+ {0x124, 0x7F},
+ {0x128, 0x0},
+ {0x12c, 0x0},
+ {0x130, 0x1},
+ {0x134, 0x0},
+ {0x138, 0x0},
+ {0x13C, 0x10},
+ {0x140, 0x1},
+ {0x144, GLITCH_ELIMINATION_NUM},
+ {0x148, 0xFE},
+ {0x14C, 0x1},
+ {0x154, 0x0},
+ {0x15C, 0x33},
+ {0x160, ULPM_WAKE_UP_TIMER_MODE},
+ {0x164, 0x48},
+ {0x168, 0xA0},
+ {0x16C, 0x17},
+ {0x170, 0x41},
+ {0x174, 0x41},
+ {0x178, 0x3E},
+ {0x17C, 0x0},
+ {0x180, 0x0},
+ {0x184, 0x7F},
+ {0x1cc, 0x10},
+ {0x81c, 0x6},
+ {0x82c, 0xFF},
+ {0x830, 0xFF},
+ {0x834, 0xFB},
+ {0x838, 0xFF},
+ {0x83c, 0x7F},
+ {0x840, 0xFF},
+ {0x844, 0xFF},
+ {0x848, 0xEF},
+ {0x84c, 0xFF},
+ {0x850, 0xFF},
+ {0x854, 0xFF},
+ {0x28, 0x0},
+ {0x800, 0x2},
+ {0x0, 0x88},
+ {0x4, 0x8},
+ {0x8, 0x0},
+ {0xC, 0xFF},
+ {0x10, 0x56},
+ {0x2C, 0x1},
+ {0x30, 0x0},
+ {0x34, 0x3},
+ {0x38, 0xfe},
+ {0x3C, 0xB8},
+ {0x1C, 0xE7},
+ {0x14, 0x0},
+ {0x14, 0x60},
+ {0x700, 0x80}
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h
new file mode 100644
index 000000000000..d85dd1ec3a48
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_4_2_HWREG_H
+#define MSM_CSIPHY_3_4_2_HWREG_H
+
+#define ULPM_WAKE_UP_TIMER_MODE 2
+#define GLITCH_ELIMINATION_NUM 0x12 /* bit [6:4] */
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_4_2 = {
+ .mipi_csiphy_interrupt_status0_addr = 0x8B0,
+ .mipi_csiphy_interrupt_clear0_addr = 0x858,
+ .mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+ .combo_clk_mask = 0x10,
+};
+
+struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_3ph = {
+ /* MIPI CSI PHY registers */
+ {0x814, 0x0},
+ {0x818, 0x1},
+ {0x188, 0x7F},
+ {0x18C, 0x7F},
+ {0x190, 0x0},
+ {0x104, 0x6},
+ {0x108, 0x0},
+ {0x10c, 0x0},
+ {0x114, 0x20},
+ {0x118, 0x3E},
+ {0x11c, 0x41},
+ {0x120, 0x41},
+ {0x124, 0x7F},
+ {0x128, 0x0},
+ {0x12c, 0x0},
+ {0x130, 0x1},
+ {0x134, 0x0},
+ {0x138, 0x0},
+ {0x13C, 0x10},
+ {0x140, 0x1},
+ {0x144, GLITCH_ELIMINATION_NUM},
+ {0x148, 0xFE},
+ {0x14C, 0x1},
+ {0x154, 0x0},
+ {0x15C, 0x33},
+ {0x160, ULPM_WAKE_UP_TIMER_MODE},
+ {0x164, 0x48},
+ {0x168, 0xA0},
+ {0x16C, 0x17},
+ {0x170, 0x41},
+ {0x174, 0x41},
+ {0x178, 0x3E},
+ {0x17C, 0x0},
+ {0x180, 0x0},
+ {0x184, 0x7F},
+ {0x1cc, 0x10},
+ {0x81c, 0x6},
+ {0x82c, 0xFF},
+ {0x830, 0xFF},
+ {0x834, 0xFB},
+ {0x838, 0xFF},
+ {0x83c, 0x7F},
+ {0x840, 0xFF},
+ {0x844, 0xFF},
+ {0x848, 0xEF},
+ {0x84c, 0xFF},
+ {0x850, 0xFF},
+ {0x854, 0xFF},
+ {0x28, 0x0},
+ {0x800, 0x2},
+ {0x0, 0x8E},
+ {0x4, 0x8},
+ {0x8, 0x0},
+ {0xC, 0xFF},
+ {0x10, 0x56},
+ {0x2C, 0x1},
+ {0x30, 0x0},
+ {0x34, 0x3},
+ {0x38, 0xfe},
+ {0x3C, 0xB8},
+ {0x1C, 0xE7},
+ {0x14, 0x0},
+ {0x14, 0x60},
+ {0x700, 0x80}
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
new file mode 100644
index 000000000000..99b725a75c8f
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_5_HWREG_H
+#define MSM_CSIPHY_3_5_HWREG_H
+
+#define ULPM_WAKE_UP_TIMER_MODE 2
+#define GLITCH_ELIMINATION_NUM 0x12 /* bit [6:4] */
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_5 = {
+ .mipi_csiphy_interrupt_status0_addr = 0x8B0,
+ .mipi_csiphy_interrupt_clear0_addr = 0x858,
+ .mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+ .combo_clk_mask = 0x10,
+};
+
+struct csiphy_reg_3ph_parms_t csiphy_v3_5_3ph = {
+ /* MIPI CSI PHY registers */
+ {0x814, 0x0},
+ {0x818, 0x1},
+ {0x188, 0x7F},
+ {0x18C, 0x7F},
+ {0x190, 0x0},
+ {0x104, 0x6},
+ {0x108, 0x0},
+ {0x10c, 0x0},
+ {0x114, 0x20},
+ {0x118, 0x3E},
+ {0x11c, 0x41},
+ {0x120, 0x41},
+ {0x124, 0x7F},
+ {0x128, 0x0},
+ {0x12c, 0x0},
+ {0x130, 0x1},
+ {0x134, 0x0},
+ {0x138, 0x0},
+ {0x13C, 0x10},
+ {0x140, 0x1},
+ {0x144, GLITCH_ELIMINATION_NUM},
+ {0x148, 0xFE},
+ {0x14C, 0x1},
+ {0x154, 0x0},
+ {0x15C, 0x33},
+ {0x160, ULPM_WAKE_UP_TIMER_MODE},
+ {0x164, 0x48},
+ {0x168, 0xA0},
+ {0x16C, 0x17},
+ {0x170, 0x41},
+ {0x174, 0x41},
+ {0x178, 0x3E},
+ {0x17C, 0x0},
+ {0x180, 0x0},
+ {0x184, 0x7F},
+ {0x1cc, 0x10},
+ {0x81c, 0x6},
+ {0x82c, 0xFF},
+ {0x830, 0xFF},
+ {0x834, 0xFB},
+ {0x838, 0xFF},
+ {0x83c, 0x7F},
+ {0x840, 0xFF},
+ {0x844, 0xFF},
+ {0x848, 0xEF},
+ {0x84c, 0xFF},
+ {0x850, 0xFF},
+ {0x854, 0xFF},
+ {0x28, 0x0},
+ {0x800, 0x0},
+ {0x0, 0xD7},
+ {0x4, 0x8},
+ {0x8, 0x0},
+ {0xC, 0xA5},
+ {0x10, 0x50},
+ {0x2C, 0x1},
+ {0x30, 0x2},
+ {0x34, 0x3},
+ {0x38, 0x1},
+ {0x3C, 0xB8},
+ {0x1C, 0xA},
+ {0x14, 0x0},
+ {0x0, 0x0},
+ {0x700, 0xC0},
+};
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c
new file mode 100644
index 000000000000..345fac905074
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c
@@ -0,0 +1,1580 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include "msm_csiphy.h"
+#include "msm_sd.h"
+#include "include/msm_csiphy_2_0_hwreg.h"
+#include "include/msm_csiphy_2_2_hwreg.h"
+#include "include/msm_csiphy_3_0_hwreg.h"
+#include "include/msm_csiphy_3_1_hwreg.h"
+#include "include/msm_csiphy_3_2_hwreg.h"
+#include "include/msm_csiphy_3_4_2_hwreg.h"
+#include "include/msm_csiphy_3_4_2_1_hwreg.h"
+#include "include/msm_csiphy_3_5_hwreg.h"
+#include "cam_hw_ops.h"
+
+#define DBG_CSIPHY 0
+#define SOF_DEBUG_ENABLE 1
+#define SOF_DEBUG_DISABLE 0
+
+#define V4L2_IDENT_CSIPHY 50003
+#define CSIPHY_VERSION_V22 0x01
+#define CSIPHY_VERSION_V20 0x00
+#define CSIPHY_VERSION_V30 0x10
+#define CSIPHY_VERSION_V31 0x31
+#define CSIPHY_VERSION_V32 0x32
+#define CSIPHY_VERSION_V342 0x342
+#define CSIPHY_VERSION_V342_1 0x3421
+#define CSIPHY_VERSION_V35 0x35
+#define MSM_CSIPHY_DRV_NAME "msm_csiphy"
+#define CLK_LANE_OFFSET 1
+#define NUM_LANES_OFFSET 4
+
+#define CSI_3PHASE_HW 1
+#define MAX_LANES 4
+#define CLOCK_OFFSET 0x700
+#define CSIPHY_SOF_DEBUG_COUNT 2
+
+#undef CDBG
+#ifdef CONFIG_MSM_AIS_DEBUG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define CDBG(fmt, args...)
+#endif
+
+static struct v4l2_file_operations msm_csiphy_v4l2_subdev_fops;
+
+static void msm_csiphy_cphy_irq_config(
+ struct csiphy_device *csiphy_dev,
+ struct msm_camera_csiphy_params *csiphy_params)
+{
+ void __iomem *csiphybase;
+
+ csiphybase = csiphy_dev->base;
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl11.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl11.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl12.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl12.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl13.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl13.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl14.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl14.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl15.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl15.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl16.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl16.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl17.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl17.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl18.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl18.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl19.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl19.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl20.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl20.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl21.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl21.addr);
+}
+
+static int msm_csiphy_3phase_lane_config(
+ struct csiphy_device *csiphy_dev,
+ struct msm_camera_csiphy_params *csiphy_params)
+{
+ uint8_t i = 0;
+ uint16_t lane_mask = 0, lane_enable = 0, temp;
+ void __iomem *csiphybase;
+
+ csiphybase = csiphy_dev->base;
+ lane_mask = csiphy_params->lane_mask & 0x7;
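+ /* Each enabled 3-phase data lane (bit i of the lower three
+ * lane_mask bits) maps to bit (2*i + 1) of the lane enable value.
+ */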
+ while (lane_mask != 0) {
+ temp = (i << 1)+1;
+ lane_enable |= ((lane_mask & 0x1) << temp);
+ lane_mask >>= 1;
+ i++;
+ }
+ msm_camera_io_w(lane_enable,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl5.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.addr);
+ lane_mask = csiphy_params->lane_mask & 0x7;
+ i = 0;
+ while (lane_mask & 0x7) {
+ if (!(lane_mask & 0x1)) {
+ i++;
+ lane_mask >>= 1;
+ continue;
+ }
+
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl21.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl21.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl23.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl23.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl26.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl26.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl27.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl27.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl1.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl1.addr + 0x200*i);
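+ /* Program the settle count: the upper byte goes into
+ * lnn_ctrl2 and the lower byte into lnn_ctrl3.
+ */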
+ msm_camera_io_w(((csiphy_params->settle_cnt >> 8) & 0xff),
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl2.addr + 0x200*i);
+ msm_camera_io_w((csiphy_params->settle_cnt & 0xff),
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl3.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl5.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl5.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl6.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl6.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl7.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl7.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl8.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl8.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl9.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl9.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl10.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl10.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl11.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl11.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl12.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl12.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl15.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl15.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl16.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl16.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl17.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl17.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl18.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl18.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl19.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl19.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl23.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl23.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl24.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl24.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl28.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl28.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl29.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl29.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl30.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl30.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl33.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl33.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl34.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl34.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl35.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl35.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl36.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl36.addr + 0x200*i);
+
+ if (ULPM_WAKE_UP_TIMER_MODE == 0x22) {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl51.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.mipi_csiphy_3ph_lnn_ctrl51.addr +
+ 0x200*i);
+ }
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl25.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i);
+
+ lane_mask >>= 1;
+ i++;
+ }
+ if (csiphy_params->combo_mode == 1) {
+ msm_camera_io_w(0x2,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl7.addr);
+ } else {
+ msm_camera_io_w(0x6,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl7.addr);
+ }
+ /* Delay to let the regulator stabilize */
+ usleep_range(10, 15);
+ msm_csiphy_cphy_irq_config(csiphy_dev, csiphy_params);
+ return 0;
+}
+
+static int msm_csiphy_2phase_lane_config(
+ struct csiphy_device *csiphy_dev,
+ struct msm_camera_csiphy_params *csiphy_params)
+{
+ uint32_t val = 0, lane_enable = 0, clk_lane, mask = 1;
+ uint16_t lane_mask = 0, i = 0, offset;
+ void __iomem *csiphybase;
+
+ csiphybase = csiphy_dev->base;
+ lane_mask = csiphy_params->lane_mask & 0x1f;
+
+ lane_enable = msm_camera_io_r(csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl5.addr);
+
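+ /* lane_mask bit 1 is the clock lane and maps to bit 7 of
+ * lane_enable; each data lane maps to an even bit position.
+ */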
+ for (i = 0; i < MAX_LANES; i++) {
+ if (mask == 0x2) {
+ if (lane_mask & mask)
+ lane_enable |= 0x80;
+ i--;
+ } else if (lane_mask & mask)
+ lane_enable |= 0x1 << (i<<1);
+ mask <<= 1;
+ }
+ CDBG("%s:%d lane_enable: %d\n", __func__, __LINE__, lane_enable);
+
+ msm_camera_io_w(lane_enable,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl5.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.addr);
+
+ for (i = 0, mask = 0x1; i < MAX_LANES; i++) {
+ if (!(lane_mask & mask)) {
+ if (mask == 0x2)
+ i--;
+ mask <<= 0x1;
+ continue;
+ }
+ if (mask == 0x2) {
+ val = 4;
+ offset = CLOCK_OFFSET;
+ clk_lane = 1;
+ i--;
+ } else {
+ offset = 0x200*i;
+ val = 0;
+ clk_lane = 0;
+ }
+
+ /* In combo mode the 4th lane is used as the clock lane
+ * for a one-lane sensor; lane_mask == 0x18 identifies
+ * that case.
+ */
+ if ((csiphy_params->combo_mode == 1) && (lane_mask == 0x18)) {
+ val |= 0xA;
+ if (mask == csiphy_dev->ctrl_reg->
+ csiphy_reg.combo_clk_mask) {
+ val |= 0x4;
+ clk_lane = 1;
+ }
+ }
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg7.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg7.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg6.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg6.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg8.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg8.addr + offset);
+ msm_camera_io_w(val, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_misc1.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl15.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl15.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg2.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg2.addr + offset);
+
+ msm_camera_io_w((csiphy_params->settle_cnt & 0xFF),
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg3.addr + offset);
+
+ if (clk_lane == 1) {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnck_cfg1.data, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnck_cfg1.addr);
+
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg4.data, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg4.addr + offset);
+ if (lane_mask == 0x18)
+ msm_camera_io_w(0x80,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg1.addr + offset);
+
+ } else {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg1.data,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg1.addr + offset);
+ }
+ if ((csiphy_dev->hw_version == CSIPHY_VERSION_V342 ||
+ csiphy_dev->hw_version == CSIPHY_VERSION_V342_1) &&
+ csiphy_params->combo_mode == 1) {
+ msm_camera_io_w(0x52,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg5.addr + offset);
+ } else {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg5.data,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg5.addr + offset);
+ }
+ if (clk_lane == 1 && lane_mask != 0x18 &&
+ (csiphy_dev->hw_version == CSIPHY_VERSION_V342 ||
+ csiphy_dev->hw_version == CSIPHY_VERSION_V342_1)) {
+ msm_camera_io_w(0x1f,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg9.addr + offset);
+ } else {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg9.data,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg9.addr + offset);
+ }
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_test_imp.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_test_imp.addr + offset);
+ if ((csiphy_dev->hw_version == CSIPHY_VERSION_V342 ||
+ csiphy_dev->hw_version == CSIPHY_VERSION_V342_1)) {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl5.data,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl5.addr + offset);
+ }
+ mask <<= 1;
+ }
+ if ((csiphy_dev->hw_version == CSIPHY_VERSION_V342 ||
+ csiphy_dev->hw_version == CSIPHY_VERSION_V342_1) &&
+ csiphy_params->combo_mode != 1) {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl0.data,
+ csiphy_dev->base + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl0.addr);
+ }
+ msm_csiphy_cphy_irq_config(csiphy_dev, csiphy_params);
+ return 0;
+}
+
+static int msm_csiphy_lane_config(struct csiphy_device *csiphy_dev,
+ struct msm_camera_csiphy_params *csiphy_params)
+{
+ int rc = 0;
+ int j = 0, curr_lane = 0;
+ uint32_t val = 0;
+ long clk_rate = 0;
+ uint8_t lane_cnt = 0;
+ uint16_t lane_mask = 0;
+ void __iomem *csiphybase;
+ uint8_t csiphy_id = csiphy_dev->pdev->id;
+ int32_t lane_val = 0, lane_right = 0, num_lanes = 0;
+ int ratio = 1;
+
+ csiphybase = csiphy_dev->base;
+ if (!csiphybase) {
+ pr_err("%s: csiphybase NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ csiphy_dev->lane_mask[csiphy_id] |= csiphy_params->lane_mask;
+ lane_mask = csiphy_dev->lane_mask[csiphy_id];
+ lane_cnt = csiphy_params->lane_cnt;
+ if (csiphy_params->lane_cnt < 1 || csiphy_params->lane_cnt > 4) {
+ pr_err("%s: unsupported lane cnt %d\n",
+ __func__, csiphy_params->lane_cnt);
+ return rc;
+ }
+
+ clk_rate = (csiphy_params->csiphy_clk > 0)
+ ? csiphy_params->csiphy_clk :
+ csiphy_dev->csiphy_max_clk;
+ clk_rate = msm_camera_clk_set_rate(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk[csiphy_dev->csiphy_clk_index],
+ clk_rate);
+ if (clk_rate < 0) {
+ pr_err("csiphy_clk_set_rate failed\n");
+ return -EINVAL;
+ }
+
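+ /* If the timer clock runs below its maximum rate, scale the
+ * settle count down by the integer ratio between the two.
+ */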
+ if (clk_rate < csiphy_dev->csiphy_max_clk &&
+ clk_rate > 0) {
+ ratio = csiphy_dev->csiphy_max_clk/clk_rate;
+ csiphy_params->settle_cnt = csiphy_params->settle_cnt/ratio;
+ }
+ CDBG("%s csiphy_params, mask = 0x%x cnt = %d\n",
+ __func__,
+ csiphy_params->lane_mask,
+ csiphy_params->lane_cnt);
+ CDBG("%s csiphy_params, settle cnt = 0x%x csid %d\n",
+ __func__, csiphy_params->settle_cnt,
+ csiphy_params->csid_core);
+
+ if (csiphy_dev->hw_version >= CSIPHY_VERSION_V30) {
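+ /* The clock mux register selects the CSID core paired with
+ * this PHY: combo mode programs the upper nibble, otherwise
+ * the lower nibble is used.
+ */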
+ val = msm_camera_io_r(csiphy_dev->clk_mux_base);
+ if (csiphy_params->combo_mode &&
+ (csiphy_params->lane_mask & 0x18) == 0x18) {
+ val &= ~0xf0;
+ val |= csiphy_params->csid_core << 4;
+ } else {
+ val &= ~0xf;
+ val |= csiphy_params->csid_core;
+ }
+ msm_camera_io_w(val, csiphy_dev->clk_mux_base);
+ CDBG("%s clk mux addr %pK val 0x%x\n", __func__,
+ csiphy_dev->clk_mux_base, val);
+ /* ensure write is done */
+ mb();
+ }
+
+ csiphy_dev->csi_3phase = csiphy_params->csi_3phase;
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW) {
+ if (csiphy_dev->csi_3phase == 1) {
+ rc = msm_camera_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_3p_clk_info,
+ csiphy_dev->csiphy_3p_clk, 2, true);
+ rc = msm_csiphy_3phase_lane_config(csiphy_dev,
+ csiphy_params);
+ csiphy_dev->num_irq_registers = 20;
+ } else {
+ rc = msm_csiphy_2phase_lane_config(csiphy_dev,
+ csiphy_params);
+ csiphy_dev->num_irq_registers = 11;
+ }
+ if (rc < 0) {
+ pr_err("%s:%d: Error in setting lane configuration\n",
+ __func__, __LINE__);
+ }
+ return rc;
+ }
+
+ msm_camera_io_w(0x1, csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_glbl_t_init_cfg0_addr);
+ msm_camera_io_w(0x1, csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_t_wakeup_cfg0_addr);
+
+ if (csiphy_dev->hw_version < CSIPHY_VERSION_V30) {
+ val = 0x3;
+ msm_camera_io_w((lane_mask << 2) | val,
+ csiphybase +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_glbl_pwr_cfg_addr);
+ msm_camera_io_w(0x10, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg2_addr);
+ msm_camera_io_w(csiphy_params->settle_cnt,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg3_addr);
+ msm_camera_io_w(0x24,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_interrupt_mask0_addr);
+ msm_camera_io_w(0x24,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_interrupt_clear0_addr);
+ } else {
+ val = 0x1;
+ msm_camera_io_w((lane_mask << 1) | val,
+ csiphybase +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_glbl_pwr_cfg_addr);
+ msm_camera_io_w(csiphy_params->combo_mode <<
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_mode_config_shift,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_glbl_reset_addr);
+ }
+
+ lane_mask &= 0x1f;
+ while (lane_mask & 0x1f) {
+ if (!(lane_mask & 0x1)) {
+ j++;
+ lane_mask >>= 1;
+ continue;
+ }
+ msm_camera_io_w(0x10,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg2_addr + 0x40*j);
+ msm_camera_io_w(csiphy_params->settle_cnt,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg3_addr + 0x40*j);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_mask_val, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_mask_addr + 0x4*j);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_mask_val, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_clear_addr + 0x4*j);
+ if (csiphy_dev->is_3_1_20nm_hw == 1) {
+ if (j > CLK_LANE_OFFSET) {
+ lane_right = 0x8;
+ num_lanes = (lane_cnt - curr_lane)
+ << NUM_LANES_OFFSET;
+ if (lane_cnt < curr_lane) {
+ pr_err("%s: Lane_cnt is less than curr_lane number\n",
+ __func__);
+ return -EINVAL;
+ }
+ lane_val = lane_right|num_lanes;
+ } else if (j == 1) {
+ lane_val = 0x4;
+ }
+ if (csiphy_params->combo_mode == 1) {
+ /*
+ * In combo mode the clock for the second sensor is
+ * always on the 4th lane, so check for a one-lane
+ * sensor (lane_mask 0x18) while curr_lane is still 0.
+ */
+ if (curr_lane == 0 &&
+ ((csiphy_params->lane_mask &
+ 0x18) == 0x18))
+ lane_val = 0x4;
+ }
+ msm_camera_io_w(lane_val, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_misc1_addr + 0x40*j);
+ msm_camera_io_w(0x17, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_test_imp + 0x40*j);
+ curr_lane++;
+ }
+ j++;
+ lane_mask >>= 1;
+ }
+ return rc;
+}
+
+void msm_csiphy_disable_irq(
+ struct csiphy_device *csiphy_dev)
+{
+ void __iomem *csiphybase;
+
+ csiphybase = csiphy_dev->base;
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl11.addr);
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl12.addr);
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl13.addr);
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl14.addr);
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl15.addr);
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl16.addr);
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl17.addr);
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl18.addr);
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl19.addr);
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl20.addr);
+ msm_camera_io_w(0,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl21.addr);
+}
+
+static irqreturn_t msm_csiphy_irq(int irq_num, void *data)
+{
+ uint32_t irq;
+ int i;
+ struct csiphy_device *csiphy_dev = data;
+
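+ /* In SOF debug mode only CSIPHY_SOF_DEBUG_COUNT interrupts are
+ * logged before the PHY IRQs are masked again.
+ */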
+ if (csiphy_dev->csiphy_sof_debug == SOF_DEBUG_ENABLE) {
+ if (csiphy_dev->csiphy_sof_debug_count < CSIPHY_SOF_DEBUG_COUNT)
+ csiphy_dev->csiphy_sof_debug_count++;
+ else {
+ msm_csiphy_disable_irq(csiphy_dev);
+ return IRQ_HANDLED;
+ }
+ }
+
+ for (i = 0; i < csiphy_dev->num_irq_registers; i++) {
+ irq = msm_camera_io_r(
+ csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_status0_addr + 0x4*i);
+ msm_camera_io_w(irq,
+ csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_clear0_addr + 0x4*i);
+ pr_err_ratelimited(
+ "%s CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x\n",
+ __func__, csiphy_dev->pdev->id, i, irq);
+ msm_camera_io_w(0x0,
+ csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_clear0_addr + 0x4*i);
+ }
+ msm_camera_io_w(0x1, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
+ return IRQ_HANDLED;
+}
+
+static void msm_csiphy_reset(struct csiphy_device *csiphy_dev)
+{
+ msm_camera_io_w(0x1, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.mipi_csiphy_glbl_reset_addr);
+ usleep_range(5000, 8000);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.mipi_csiphy_glbl_reset_addr);
+}
+
+static void msm_csiphy_3ph_reset(struct csiphy_device *csiphy_dev)
+{
+ msm_camera_io_w(0x1, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl0.addr);
+ usleep_range(5000, 8000);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl0.addr);
+}
+
+#if DBG_CSIPHY
+static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
+{
+ int rc = 0;
+
+ if (csiphy_dev == NULL) {
+ pr_err("%s: csiphy_dev NULL\n", __func__);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ if (csiphy_dev->ref_count++) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return rc;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ if (csiphy_dev->csiphy_state == CSIPHY_POWER_UP) {
+ pr_err("%s: csiphy invalid state %d\n", __func__,
+ csiphy_dev->csiphy_state);
+ rc = -EINVAL;
+ return rc;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ csiphy_dev->ref_count--;
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ rc = msm_camera_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, true);
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ if (rc < 0) {
+ pr_err("%s: csiphy clk enable failed\n", __func__);
+ csiphy_dev->ref_count--;
+ goto csiphy_resource_fail;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ rc = msm_camera_enable_irq(csiphy_dev->irq, true);
+ if (rc < 0)
+ pr_err("%s: irq enable failed\n", __func__);
+
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW)
+ msm_csiphy_3ph_reset(csiphy_dev);
+ else
+ msm_csiphy_reset(csiphy_dev);
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ if (csiphy_dev->hw_dts_version == CSIPHY_VERSION_V30)
+ csiphy_dev->hw_version =
+ msm_camera_io_r(csiphy_dev->base +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_hw_version_addr);
+ else
+ csiphy_dev->hw_version = csiphy_dev->hw_dts_version;
+
+ CDBG("%s:%d called csiphy_dev->hw_version 0x%x\n", __func__, __LINE__,
+ csiphy_dev->hw_version);
+ csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
+ return 0;
+
+csiphy_resource_fail:
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+}
+#else
+static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
+{
+ int rc = 0;
+
+ CDBG("CSI_PHY: msm_csiphy_init\n");
+ if (csiphy_dev == NULL) {
+ pr_err("%s: csiphy_dev NULL\n", __func__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ csiphy_dev->csiphy_sof_debug_count = 0;
+ if (csiphy_dev->ref_count++) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return rc;
+ }
+
+ if (csiphy_dev->csiphy_state == CSIPHY_POWER_UP) {
+ pr_err("%s: csiphy invalid state %d\n", __func__,
+ csiphy_dev->csiphy_state);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ csiphy_dev->ref_count--;
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ rc = msm_camera_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, true);
+ if (rc < 0) {
+ pr_err("%s: csiphy clk enable failed\n", __func__);
+ csiphy_dev->ref_count--;
+ goto csiphy_resource_fail;
+ }
+
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW)
+ msm_csiphy_3ph_reset(csiphy_dev);
+ else
+ msm_csiphy_reset(csiphy_dev);
+
+ if (csiphy_dev->hw_dts_version == CSIPHY_VERSION_V30)
+ csiphy_dev->hw_version =
+ msm_camera_io_r(csiphy_dev->base +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_hw_version_addr);
+ else
+ csiphy_dev->hw_version = csiphy_dev->hw_dts_version;
+
+ csiphy_dev->csiphy_sof_debug = SOF_DEBUG_DISABLE;
+ CDBG("%s:%d called csiphy_dev->hw_version 0x%x\n", __func__, __LINE__,
+ csiphy_dev->hw_version);
+ csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
+ return 0;
+
+csiphy_resource_fail:
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+}
+#endif
+
+#if DBG_CSIPHY
+static int msm_csiphy_release(struct csiphy_device *csiphy_dev, void *arg)
+{
+ int i = 0;
+ int rc = 0;
+ struct msm_camera_csi_lane_params *csi_lane_params;
+ uint16_t csi_lane_mask;
+
+ csi_lane_params = (struct msm_camera_csi_lane_params *)arg;
+
+ if (!csiphy_dev || !csiphy_dev->ref_count) {
+ pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
+ return 0;
+ }
+
+ if (csiphy_dev->csiphy_state != CSIPHY_POWER_UP) {
+ pr_err("%s: csiphy invalid state %d\n", __func__,
+ csiphy_dev->csiphy_state);
+ return -EINVAL;
+ }
+
+ if (--csiphy_dev->ref_count) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return 0;
+ }
+
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW) {
+ msm_camera_io_w(0x0,
+ csiphy_dev->base + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl5.addr);
+ msm_camera_io_w(0x0,
+ csiphy_dev->base + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.addr);
+ } else if (csiphy_dev->hw_version < CSIPHY_VERSION_V30) {
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] = 0;
+ for (i = 0; i < 4; i++)
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg2_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg2_addr);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_glbl_pwr_cfg_addr);
+ } else {
+ if (!csi_lane_params) {
+ pr_err("%s:%d failed: csi_lane_params %pK\n", __func__,
+ __LINE__, csi_lane_params);
+ return -EINVAL;
+ }
+ csi_lane_mask = (csi_lane_params->csi_lane_mask & 0x1F);
+
+ CDBG("%s csiphy_params, lane assign 0x%x mask = 0x%x\n",
+ __func__,
+ csi_lane_params->csi_lane_assign,
+ csi_lane_params->csi_lane_mask);
+
+ if (!csi_lane_mask)
+ csi_lane_mask = 0x1f;
+
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] &=
+ ~(csi_lane_mask);
+ i = 0;
+ while (csi_lane_mask) {
+ if (csi_lane_mask & 0x1) {
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg2_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_misc1_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_test_imp + 0x40*i);
+ }
+ csi_lane_mask >>= 1;
+ i++;
+ }
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg2_addr);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_glbl_pwr_cfg_addr);
+ }
+
+ rc = msm_camera_enable_irq(csiphy_dev->irq, false);
+
+ msm_camera_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, false);
+
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW &&
+ csiphy_dev->csi_3phase == 1) {
+ msm_camera_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_3p_clk_info,
+ csiphy_dev->csiphy_3p_clk, 2, false);
+ }
+
+ csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
+
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return 0;
+}
+#else
+static int msm_csiphy_release(struct csiphy_device *csiphy_dev, void *arg)
+{
+ int i = 0, rc = 0;
+ struct msm_camera_csi_lane_params *csi_lane_params;
+ uint16_t csi_lane_mask;
+
+ csi_lane_params = (struct msm_camera_csi_lane_params *)arg;
+
+ if (!csiphy_dev || !csiphy_dev->ref_count) {
+ pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
+ return 0;
+ }
+
+ if (csiphy_dev->csiphy_state != CSIPHY_POWER_UP) {
+ pr_err("%s: csiphy invalid state %d\n", __func__,
+ csiphy_dev->csiphy_state);
+ return -EINVAL;
+ }
+
+ if (--csiphy_dev->ref_count) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return 0;
+ }
+
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW) {
+ msm_camera_io_w(0x0,
+ csiphy_dev->base + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl5.addr);
+ msm_camera_io_w(0x0,
+ csiphy_dev->base + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.addr);
+ } else if (csiphy_dev->hw_version < CSIPHY_VERSION_V30) {
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] = 0;
+ for (i = 0; i < 4; i++)
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg2_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg2_addr);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_glbl_pwr_cfg_addr);
+ } else {
+ if (!csi_lane_params) {
+ pr_err("%s:%d failed: csi_lane_params %pK\n", __func__,
+ __LINE__, csi_lane_params);
+ return -EINVAL;
+ }
+ csi_lane_mask = (csi_lane_params->csi_lane_mask & 0x1F);
+
+ CDBG("%s csiphy_params, lane assign 0x%x mask = 0x%x\n",
+ __func__,
+ csi_lane_params->csi_lane_assign,
+ csi_lane_params->csi_lane_mask);
+
+ if (!csi_lane_mask)
+ csi_lane_mask = 0x1f;
+
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] &=
+ ~(csi_lane_mask);
+ i = 0;
+ while (csi_lane_mask) {
+ if (csi_lane_mask & 0x1) {
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg2_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_misc1_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_test_imp + 0x40*i);
+ }
+ csi_lane_mask >>= 1;
+ i++;
+ }
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg2_addr);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_glbl_pwr_cfg_addr);
+ }
+ if (csiphy_dev->csiphy_sof_debug == SOF_DEBUG_ENABLE)
+ rc = msm_camera_enable_irq(csiphy_dev->irq, false);
+
+ msm_camera_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, false);
+ if ((csiphy_dev->csiphy_3phase == CSI_3PHASE_HW) &&
+ (csiphy_dev->csi_3phase == 1)) {
+ msm_camera_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_3p_clk_info,
+ csiphy_dev->csiphy_3p_clk, 2, false);
+ }
+
+ csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
+
+ if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
+ CAM_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return 0;
+}
+
+#endif
+static int32_t msm_csiphy_cmd(struct csiphy_device *csiphy_dev, void *arg)
+{
+ int rc = 0;
+ struct csiphy_cfg_data *cdata = (struct csiphy_cfg_data *)arg;
+ struct msm_camera_csiphy_params csiphy_params;
+ struct msm_camera_csi_lane_params csi_lane_params;
+
+ if (!csiphy_dev || !cdata) {
+ pr_err("%s: csiphy_dev NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ CDBG("%s cfgtype = %d\n", __func__, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CSIPHY_INIT:
+ rc = msm_csiphy_init(csiphy_dev);
+ break;
+ case CSIPHY_CFG:
+ if (copy_from_user(&csiphy_params,
+ (void *)cdata->cfg.csiphy_params,
+ sizeof(struct msm_camera_csiphy_params))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ csiphy_dev->csiphy_sof_debug = SOF_DEBUG_DISABLE;
+ csiphy_dev->is_combo_mode = csiphy_params.combo_mode;
+ csiphy_dev->csiphy_params = csiphy_params;
+ rc = 0;
+
+ CDBG("CSI_PHY: combo mode = %d\n",
+ csiphy_params.combo_mode);
+ CDBG("CSI_PHY: lane_cnt = %d\n",
+ csiphy_params.lane_cnt);
+ CDBG("CSI_PHY: settle_cnt = %d\n",
+ csiphy_params.settle_cnt);
+ CDBG("CSI_PHY: lane_mask = %d\n",
+ csiphy_params.lane_mask);
+ CDBG("CSI_PHY: csid_core = %d\n",
+ csiphy_params.csid_core);
+ CDBG("CSI_PHY: csiphy_clk = %d\n",
+ csiphy_params.csiphy_clk);
+ CDBG("CSI_PHY: csi_3phase = %d\n",
+ csiphy_params.csi_3phase);
+ break;
+ case CSIPHY_START:
+ rc = msm_csiphy_lane_config(csiphy_dev,
+ &csiphy_dev->csiphy_params);
+ break;
+ case CSIPHY_STOP:
+ csi_lane_params.csi_lane_assign = 0;
+ csi_lane_params.csi_lane_mask = csiphy_dev->csiphy_params.
+ lane_mask;
+ rc = msm_csiphy_release(csiphy_dev, &csi_lane_params);
+ break;
+ case CSIPHY_RELEASE:
+ csi_lane_params.csi_lane_assign = 0;
+ csi_lane_params.csi_lane_mask = csiphy_dev->csiphy_params.
+ lane_mask;
+
+ CDBG("CSI_PHY: csi_lane_assign =%d\n",
+ csi_lane_params.csi_lane_assign);
+ CDBG("CSI_PHY: csi_lane_mask =%d\n",
+ csi_lane_params.csi_lane_mask);
+
+ if ((csiphy_dev->is_combo_mode == 1) &&
+ (csiphy_dev->ref_count == 2)) {
+ /* CSIPHY is running in combo mode; do not
+ * power down the core.
+ */
+ csiphy_dev->ref_count--;
+ } else {
+ rc = msm_csiphy_release(csiphy_dev, &csi_lane_params);
+ }
+
+ break;
+ default:
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+ return rc;
+}
+
+static int32_t msm_csiphy_get_subdev_id(struct csiphy_device *csiphy_dev,
+ void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ if (!subdev_id) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ *subdev_id = csiphy_dev->pdev->id;
+ CDBG("%s:%d subdev_id %d\n", __func__, __LINE__, *subdev_id);
+ return 0;
+}
+
+static long msm_csiphy_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = -ENOIOCTLCMD;
+ struct csiphy_device *csiphy_dev = v4l2_get_subdevdata(sd);
+
+ if (!csiphy_dev) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ mutex_lock(&csiphy_dev->mutex);
+ CDBG("%s:%d id %d\n", __func__, __LINE__, csiphy_dev->pdev->id);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ rc = msm_csiphy_get_subdev_id(csiphy_dev, arg);
+ break;
+ case VIDIOC_MSM_CSIPHY_IO_CFG:
+ rc = msm_csiphy_cmd(csiphy_dev, arg);
+ break;
+ case VIDIOC_MSM_CSIPHY_RELEASE:
+ case MSM_SD_SHUTDOWN:
+ rc = msm_csiphy_release(csiphy_dev, arg);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ if (!csiphy_dev || !csiphy_dev->ctrl_reg ||
+ !csiphy_dev->ref_count)
+ break;
+ if (csiphy_dev->csiphy_sof_debug == SOF_DEBUG_DISABLE) {
+ csiphy_dev->csiphy_sof_debug = SOF_DEBUG_ENABLE;
+ rc = msm_camera_enable_irq(csiphy_dev->irq, true);
+ }
+ break;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ if (!csiphy_dev || !csiphy_dev->ctrl_reg ||
+ !csiphy_dev->ref_count)
+ break;
+ csiphy_dev->csiphy_sof_debug = SOF_DEBUG_DISABLE;
+ rc = msm_camera_enable_irq(csiphy_dev->irq, false);
+ break;
+ default:
+ pr_err_ratelimited("%s: command not found\n", __func__);
+ }
+ mutex_unlock(&csiphy_dev->mutex);
+ CDBG("%s:%d\n", __func__, __LINE__);
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_csiphy_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct csiphy_cfg_data32 *u32 =
+ (struct csiphy_cfg_data32 *)arg;
+ struct csiphy_cfg_data csiphy_data;
+
+ switch (cmd) {
+ case VIDIOC_MSM_CSIPHY_IO_CFG32:
+ cmd = VIDIOC_MSM_CSIPHY_IO_CFG;
+ csiphy_data.cfgtype = u32->cfgtype;
+ csiphy_data.cfg.csiphy_params =
+ compat_ptr(u32->cfg.csiphy_params);
+ return msm_csiphy_subdev_ioctl(sd, cmd, &csiphy_data);
+ default:
+ return msm_csiphy_subdev_ioctl(sd, cmd, arg);
+ }
+}
+
+static long msm_csiphy_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_csiphy_subdev_do_ioctl);
+}
+#endif
+
+static const struct v4l2_subdev_internal_ops msm_csiphy_internal_ops;
+
+static struct v4l2_subdev_core_ops msm_csiphy_subdev_core_ops = {
+ .ioctl = &msm_csiphy_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_csiphy_subdev_ops = {
+ .core = &msm_csiphy_subdev_core_ops,
+};
+
+static int msm_csiphy_get_clk_info(struct csiphy_device *csiphy_dev,
+ struct platform_device *pdev)
+{
+ int i, rc = 0;
+ char *csi_3p_clk_name = "csi_phy_3p_clk";
+ char *csi_3p_clk_src_name = "csiphy_3p_clk_src";
+ uint32_t clk_cnt = 0;
+
+ rc = msm_camera_get_clk_info(csiphy_dev->pdev,
+ &csiphy_dev->csiphy_all_clk_info,
+ &csiphy_dev->csiphy_all_clk,
+ &csiphy_dev->num_all_clk);
+ if (rc < 0) {
+ pr_err("%s:%d, failed\n", __func__, __LINE__);
+ return rc;
+ }
+ if (csiphy_dev->num_all_clk > CSIPHY_NUM_CLK_MAX) {
+ pr_err("%s: invalid count=%zu, max is %d\n", __func__,
+ csiphy_dev->num_all_clk, CSIPHY_NUM_CLK_MAX);
+ rc = -EINVAL;
+ goto MAX_CLK_ERROR;
+ }
+
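+ /*
+ * Partition the clocks parsed from DT: the 3-phase clocks
+ * (csiphy_3p_clk_src and csi_phy_3p_clk) go into a dedicated table,
+ * everything else into csiphy_clk_info[]. The rate and index of
+ * csiphy_timer_src_clk are recorded for later use.
+ */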
+ for (i = 0; i < csiphy_dev->num_all_clk; i++) {
+ if (!strcmp(csiphy_dev->csiphy_all_clk_info[i].clk_name,
+ csi_3p_clk_src_name)) {
+ csiphy_dev->csiphy_3p_clk_info[0].clk_name =
+ csiphy_dev->csiphy_all_clk_info[i].clk_name;
+ csiphy_dev->csiphy_3p_clk_info[0].clk_rate =
+ csiphy_dev->csiphy_all_clk_info[i].clk_rate;
+ csiphy_dev->csiphy_3p_clk[0] =
+ csiphy_dev->csiphy_all_clk[i];
+ continue;
+ } else if (!strcmp(csiphy_dev->csiphy_all_clk_info[i].clk_name,
+ csi_3p_clk_name)) {
+ csiphy_dev->csiphy_3p_clk_info[1].clk_name =
+ csiphy_dev->csiphy_all_clk_info[i].clk_name;
+ csiphy_dev->csiphy_3p_clk_info[1].clk_rate =
+ csiphy_dev->csiphy_all_clk_info[i].clk_rate;
+ csiphy_dev->csiphy_3p_clk[1] =
+ csiphy_dev->csiphy_all_clk[i];
+ continue;
+ }
+ csiphy_dev->csiphy_clk_info[clk_cnt].clk_name =
+ csiphy_dev->csiphy_all_clk_info[i].clk_name;
+ csiphy_dev->csiphy_clk_info[clk_cnt].clk_rate =
+ csiphy_dev->csiphy_all_clk_info[i].clk_rate;
+ csiphy_dev->csiphy_clk[clk_cnt] =
+ csiphy_dev->csiphy_all_clk[clk_cnt];
+ if (!strcmp(csiphy_dev->csiphy_clk_info[clk_cnt].clk_name,
+ "csiphy_timer_src_clk")) {
+ CDBG("%s:%d, copy csiphy_timer_src_clk",
+ __func__, __LINE__);
+ csiphy_dev->csiphy_max_clk =
+ csiphy_dev->csiphy_clk_info[clk_cnt].clk_rate;
+ csiphy_dev->csiphy_clk_index = clk_cnt;
+ }
+ CDBG("%s: clk_rate[%d] = %ld\n", __func__, clk_cnt,
+ csiphy_dev->csiphy_clk_info[clk_cnt].clk_rate);
+ clk_cnt++;
+ }
+
+ csiphy_dev->num_clk = clk_cnt;
+ return rc;
+MAX_CLK_ERROR:
+ msm_camera_put_clk_info(csiphy_dev->pdev,
+ &csiphy_dev->csiphy_all_clk_info,
+ &csiphy_dev->csiphy_all_clk,
+ csiphy_dev->num_all_clk);
+
+ return rc;
+}
+
+static int csiphy_probe(struct platform_device *pdev)
+{
+ struct csiphy_device *new_csiphy_dev;
+ int rc = 0;
+
+ new_csiphy_dev = kzalloc(sizeof(struct csiphy_device), GFP_KERNEL);
+ if (!new_csiphy_dev)
+ return -ENOMEM;
+ new_csiphy_dev->is_3_1_20nm_hw = 0;
+ new_csiphy_dev->ctrl_reg = NULL;
+ new_csiphy_dev->ctrl_reg = kzalloc(sizeof(struct csiphy_ctrl_t),
+ GFP_KERNEL);
+ if (!new_csiphy_dev->ctrl_reg) {
+ kfree(new_csiphy_dev);
+ return -ENOMEM;
+ }
+ v4l2_subdev_init(&new_csiphy_dev->msm_sd.sd, &msm_csiphy_subdev_ops);
+ v4l2_set_subdevdata(&new_csiphy_dev->msm_sd.sd, new_csiphy_dev);
+ platform_set_drvdata(pdev, &new_csiphy_dev->msm_sd.sd);
+
+ mutex_init(&new_csiphy_dev->mutex);
+
+ if (pdev->dev.of_node) {
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+ CDBG("%s: device id = %d\n", __func__, pdev->id);
+ }
+
+ new_csiphy_dev->pdev = pdev;
+ new_csiphy_dev->msm_sd.sd.internal_ops = &msm_csiphy_internal_ops;
+ new_csiphy_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(new_csiphy_dev->msm_sd.sd.name,
+ ARRAY_SIZE(new_csiphy_dev->msm_sd.sd.name), "msm_csiphy");
+ media_entity_init(&new_csiphy_dev->msm_sd.sd.entity, 0, NULL, 0);
+ new_csiphy_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ new_csiphy_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_CSIPHY;
+ new_csiphy_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x4;
+ msm_sd_register(&new_csiphy_dev->msm_sd);
+
+ new_csiphy_dev->csiphy_3phase = 0;
+ new_csiphy_dev->num_irq_registers = 0x8;
+
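+ /*
+ * Pick the register table and HW version from the DT compatible
+ * string; the v3.4.2 and newer entries below also select a 3-phase
+ * register table and flag 3-phase support.
+ */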
+ if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v2.0")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v2_0;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V20;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v2.2")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v2_2;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V22;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.0")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_0;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V30;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.1")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_1;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V31;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.1.1")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_1;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V31;
+ new_csiphy_dev->is_3_1_20nm_hw = 1;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.2")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_2;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V32;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.4.2")) {
+ new_csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_v3_4_2_3ph;
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_4_2;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V342;
+ new_csiphy_dev->csiphy_3phase = CSI_3PHASE_HW;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.4.2.1")) {
+ new_csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_v3_4_2_1_3ph;
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_4_2_1;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V342_1;
+ new_csiphy_dev->csiphy_3phase = CSI_3PHASE_HW;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.5")) {
+ new_csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_v3_5_3ph;
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_5;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V35;
+ new_csiphy_dev->csiphy_3phase = CSI_3PHASE_HW;
+ } else {
+ pr_err("%s:%d, invalid hw version : 0x%x\n", __func__, __LINE__,
+ new_csiphy_dev->hw_dts_version);
+ rc = -EINVAL;
+ goto csiphy_no_resource;
+ }
+
+ /* TODO: Enable 3-phase clock for dynamic clock enable/disable */
+ rc = msm_csiphy_get_clk_info(new_csiphy_dev, pdev);
+ if (rc < 0) {
+ pr_err("%s: msm_csiphy_get_clk_info() failed", __func__);
+ rc = -EFAULT;
+ goto csiphy_no_resource;
+ }
+
+ new_csiphy_dev->base = msm_camera_get_reg_base(pdev, "csiphy", true);
+ if (!new_csiphy_dev->base) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto csiphy_no_resource;
+ }
+
+ if (new_csiphy_dev->hw_dts_version >= CSIPHY_VERSION_V30) {
+ new_csiphy_dev->clk_mux_base = msm_camera_get_reg_base(pdev,
+ "csiphy_clk_mux", true);
+ if (!new_csiphy_dev->clk_mux_base) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto csiphy_no_mux_resource;
+ }
+ }
+ new_csiphy_dev->irq = msm_camera_get_irq(pdev, "csiphy");
+ if (!new_csiphy_dev->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto csiphy_no_irq_resource;
+ }
+ rc = msm_camera_register_irq(pdev, new_csiphy_dev->irq,
+ msm_csiphy_irq, IRQF_TRIGGER_RISING, "csiphy", new_csiphy_dev);
+ if (rc < 0) {
+ pr_err("%s: irq request fail\n", __func__);
+ rc = -EBUSY;
+ goto csiphy_no_irq_resource;
+ }
+ msm_camera_enable_irq(new_csiphy_dev->irq, false);
+
+ msm_cam_copy_v4l2_subdev_fops(&msm_csiphy_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_csiphy_v4l2_subdev_fops.compat_ioctl32 =
+ msm_csiphy_subdev_fops_ioctl;
+#endif
+ new_csiphy_dev->msm_sd.sd.devnode->fops =
+ &msm_csiphy_v4l2_subdev_fops;
+ new_csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
+
+ return 0;
+
+csiphy_no_irq_resource:
+ if (new_csiphy_dev->hw_dts_version >= CSIPHY_VERSION_V30) {
+ msm_camera_put_reg_base(pdev, new_csiphy_dev->clk_mux_base,
+ "csiphy_clk_mux", true);
+ }
+csiphy_no_mux_resource:
+ msm_camera_put_reg_base(pdev, new_csiphy_dev->base, "csiphy", true);
+csiphy_no_resource:
+ mutex_destroy(&new_csiphy_dev->mutex);
+ kfree(new_csiphy_dev->ctrl_reg);
+ kfree(new_csiphy_dev);
+ return rc;
+}
+
+static int msm_csiphy_exit(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+ struct csiphy_device *csiphy_dev =
+ v4l2_get_subdevdata(subdev);
+
+ msm_camera_put_clk_info(pdev,
+ &csiphy_dev->csiphy_all_clk_info,
+ &csiphy_dev->csiphy_all_clk,
+ csiphy_dev->num_all_clk);
+
+ msm_camera_put_reg_base(pdev, csiphy_dev->base, "csiphy", true);
+ if (csiphy_dev->hw_dts_version >= CSIPHY_VERSION_V30) {
+ msm_camera_put_reg_base(pdev, csiphy_dev->clk_mux_base,
+ "csiphy_clk_mux", true);
+ }
+ kfree(csiphy_dev);
+ return 0;
+}
+
+static const struct of_device_id msm_csiphy_dt_match[] = {
+ {.compatible = "qcom,csiphy"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_csiphy_dt_match);
+
+static struct platform_driver csiphy_driver = {
+ .probe = csiphy_probe,
+ .remove = msm_csiphy_exit,
+ .driver = {
+ .name = MSM_CSIPHY_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_csiphy_dt_match,
+ },
+};
+
+static int __init msm_csiphy_init_module(void)
+{
+ return platform_driver_register(&csiphy_driver);
+}
+
+static void __exit msm_csiphy_exit_module(void)
+{
+ platform_driver_unregister(&csiphy_driver);
+}
+
+module_init(msm_csiphy_init_module);
+module_exit(msm_csiphy_exit_module);
+MODULE_DESCRIPTION("MSM CSIPHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.h
new file mode 100644
index 000000000000..9b38fa50ce98
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.h
@@ -0,0 +1,177 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_H
+#define MSM_CSIPHY_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais_sensor.h>
+#include "msm_sd.h"
+#include "msm_camera_io_util.h"
+#include "cam_soc_api.h"
+
+#define MAX_CSIPHY 3
+#define CSIPHY_NUM_CLK_MAX 16
+
+struct csiphy_reg_t {
+ uint32_t addr;
+ uint32_t data;
+};
+
+struct csiphy_reg_parms_t {
+ /* MIPI CSI PHY registers */
+ uint32_t mipi_csiphy_lnn_cfg1_addr;
+ uint32_t mipi_csiphy_lnn_cfg2_addr;
+ uint32_t mipi_csiphy_lnn_cfg3_addr;
+ uint32_t mipi_csiphy_lnn_cfg4_addr;
+ uint32_t mipi_csiphy_lnn_cfg5_addr;
+ uint32_t mipi_csiphy_lnck_cfg1_addr;
+ uint32_t mipi_csiphy_lnck_cfg2_addr;
+ uint32_t mipi_csiphy_lnck_cfg3_addr;
+ uint32_t mipi_csiphy_lnck_cfg4_addr;
+ uint32_t mipi_csiphy_lnn_test_imp;
+ uint32_t mipi_csiphy_lnn_misc1_addr;
+ uint32_t mipi_csiphy_glbl_reset_addr;
+ uint32_t mipi_csiphy_glbl_pwr_cfg_addr;
+ uint32_t mipi_csiphy_glbl_irq_cmd_addr;
+ uint32_t mipi_csiphy_hw_version_addr;
+ uint32_t mipi_csiphy_interrupt_status0_addr;
+ uint32_t mipi_csiphy_interrupt_mask0_addr;
+ uint32_t mipi_csiphy_interrupt_mask_val;
+ uint32_t mipi_csiphy_interrupt_mask_addr;
+ uint32_t mipi_csiphy_interrupt_clear0_addr;
+ uint32_t mipi_csiphy_interrupt_clear_addr;
+ uint32_t mipi_csiphy_mode_config_shift;
+ uint32_t mipi_csiphy_glbl_t_init_cfg0_addr;
+ uint32_t mipi_csiphy_t_wakeup_cfg0_addr;
+ uint32_t csiphy_version;
+ uint32_t combo_clk_mask;
+};
+
+struct csiphy_reg_3ph_parms_t {
+/* MIPI CSI PHY registers */
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl5;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl6;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl34;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl35;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl36;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl1;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl2;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl3;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl5;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl6;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl7;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl8;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl9;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl10;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl11;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl12;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl13;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl14;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl15;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl16;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl17;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl18;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl19;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl21;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl23;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl24;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl25;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl26;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl27;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl28;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl29;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl30;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl31;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl32;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl33;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl51;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl7;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl11;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl12;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl13;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl14;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl15;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl16;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl17;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl18;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl19;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl20;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl21;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_misc1;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl0;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg1;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg2;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg3;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg4;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg5;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg6;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg7;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg8;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg9;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_ctrl15;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_test_imp;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_test_force;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_ctrl5;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnck_cfg1;
+};
+
+struct csiphy_ctrl_t {
+ struct csiphy_reg_parms_t csiphy_reg;
+ struct csiphy_reg_3ph_parms_t csiphy_3ph_reg;
+};
+
+enum msm_csiphy_state_t {
+ CSIPHY_POWER_UP,
+ CSIPHY_POWER_DOWN,
+};
+
+struct csiphy_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct v4l2_subdev subdev;
+ struct resource *irq;
+ void __iomem *base;
+ void __iomem *clk_mux_base;
+ struct mutex mutex;
+ uint32_t hw_version;
+ uint32_t hw_dts_version;
+ enum msm_csiphy_state_t csiphy_state;
+ struct csiphy_ctrl_t *ctrl_reg;
+ size_t num_all_clk;
+ struct clk **csiphy_all_clk;
+ struct msm_cam_clk_info *csiphy_all_clk_info;
+ uint32_t num_clk;
+ struct clk *csiphy_clk[CSIPHY_NUM_CLK_MAX];
+ struct msm_cam_clk_info csiphy_clk_info[CSIPHY_NUM_CLK_MAX];
+ struct clk *csiphy_3p_clk[2];
+ struct msm_cam_clk_info csiphy_3p_clk_info[2];
+ unsigned char csi_3phase;
+ int32_t ref_count;
+ uint16_t lane_mask[MAX_CSIPHY];
+ uint32_t is_3_1_20nm_hw;
+ uint32_t csiphy_clk_index;
+ uint32_t csiphy_max_clk;
+ uint8_t csiphy_3phase;
+ uint8_t num_irq_registers;
+ uint32_t csiphy_sof_debug;
+ uint32_t csiphy_sof_debug_count;
+ uint32_t is_combo_mode;
+ struct msm_camera_csiphy_params csiphy_params;
+};
+
+#define VIDIOC_MSM_CSIPHY_RELEASE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 9, void *)
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/eeprom/Makefile b/drivers/media/platform/msm/ais/sensor/eeprom/Makefile
new file mode 100644
index 000000000000..c93cfe564b3d
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/eeprom/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+obj-$(CONFIG_MSM_AIS_EEPROM) += msm_eeprom.o
diff --git a/drivers/media/platform/msm/ais/sensor/eeprom/msm_eeprom.c b/drivers/media/platform/msm/ais/sensor/eeprom/msm_eeprom.c
new file mode 100644
index 000000000000..0e2202cd1c17
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/eeprom/msm_eeprom.c
@@ -0,0 +1,1879 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include "msm_sd.h"
+#include "msm_cci.h"
+#include "msm_eeprom.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+DEFINE_MSM_MUTEX(msm_eeprom_mutex);
+#ifdef CONFIG_COMPAT
+static struct v4l2_file_operations msm_eeprom_v4l2_subdev_fops;
+#endif
+
+/**
+ * msm_get_read_mem_size - Get the total size for allocation
+ * @eeprom_map_array: mem map
+ *
+ * Returns the total read size in bytes, or a negative error code on failure.
+ */
+static int msm_get_read_mem_size
+ (struct msm_eeprom_memory_map_array *eeprom_map_array)
+{
+ int size = 0, i, j;
+ struct msm_eeprom_mem_map_t *eeprom_map;
+
+ if (eeprom_map_array->msm_size_of_max_mappings >
+ MSM_EEPROM_MAX_MEM_MAP_CNT) {
+ pr_err("%s:%d Memory map cnt greter then expected: %d",
+ __func__, __LINE__,
+ eeprom_map_array->msm_size_of_max_mappings);
+ return -EINVAL;
+ }
+ for (j = 0; j < eeprom_map_array->msm_size_of_max_mappings; j++) {
+ eeprom_map = &(eeprom_map_array->memory_map[j]);
+ if (eeprom_map->memory_map_size >
+ MSM_EEPROM_MEMORY_MAP_MAX_SIZE) {
+ pr_err("%s:%d Memory map size greter then expected: %d",
+ __func__, __LINE__,
+ eeprom_map->memory_map_size);
+ return -EINVAL;
+ }
+ for (i = 0; i < eeprom_map->memory_map_size; i++) {
+ if (eeprom_map->mem_settings[i].i2c_operation ==
+ MSM_CAM_READ) {
+ size += eeprom_map->mem_settings[i].reg_data;
+ }
+ }
+ }
+ CDBG("Total Data Size: %d\n", size);
+ return size;
+}
+
+/**
+ * msm_eeprom_verify_sum - verify crc32 checksum
+ * @mem: data buffer
+ * @size: size of data buffer
+ * @sum: expected checksum
+ *
+ * Returns 0 if the checksum matches, -EINVAL otherwise.
+ */
+static int msm_eeprom_verify_sum(const char *mem, uint32_t size, uint32_t sum)
+{
+ uint32_t crc = ~0;
+
+ /* reject sizes where data plus the 4-byte checksum would overflow u32 */
+ if (size > crc - sizeof(uint32_t))
+ return -EINVAL;
+
+ crc = crc32_le(crc, mem, size);
+ if (~crc != sum) {
+ CDBG("%s: expect 0x%x, result 0x%x\n", __func__, sum, ~crc);
+ return -EINVAL;
+ }
+ CDBG("%s: checksum pass 0x%x\n", __func__, sum);
+ return 0;
+}
+
+/**
+ * msm_eeprom_match_crc - verify multiple regions using crc
+ * @data: data block to be verified
+ *
+ * Iterates through the regions stored in @data in consecutive pairs: the
+ * first region of each pair holds data and the region that follows holds its
+ * checksum, so checksum regions must have a valid_size of 4, or 0 to skip
+ * verification. Returns a bitmask of verified pairs, starting from the LSB:
+ * 1 indicates a checksum match, 0 a mismatch or an unverified pair.
+ */
+static uint32_t msm_eeprom_match_crc(struct msm_eeprom_memory_block_t *data)
+{
+ int j, rc;
+ uint32_t *sum;
+ uint32_t ret = 0;
+ uint8_t *memptr;
+ struct msm_eeprom_memory_map_t *map;
+
+ if (!data) {
+ pr_err("%s data is NULL", __func__);
+ return -EINVAL;
+ }
+ map = data->map;
+ memptr = data->mapdata;
+
+ for (j = 0; j + 1 < data->num_map; j += 2) {
+ /* empty table or no checksum */
+ if (!map[j].mem.valid_size || !map[j+1].mem.valid_size) {
+ memptr += map[j].mem.valid_size
+ + map[j+1].mem.valid_size;
+ continue;
+ }
+ if (map[j+1].mem.valid_size != sizeof(uint32_t)) {
+ CDBG("%s: malformatted data mapping\n", __func__);
+ return -EINVAL;
+ }
+ sum = (uint32_t *) (memptr + map[j].mem.valid_size);
+ rc = msm_eeprom_verify_sum(memptr, map[j].mem.valid_size,
+ *sum);
+ if (!rc)
+ ret |= 1 << (j/2);
+ memptr += map[j].mem.valid_size + map[j+1].mem.valid_size;
+ }
+ return ret;
+}
+
+/**
+ * read_eeprom_memory() - read map data into buffer
+ * @e_ctrl: eeprom control struct
+ * @block: block to be read
+ *
+ * This function iterates through blocks stored in block->map, reads each
+ * region and concatenates the results into the pre-allocated block->mapdata
+ */
+static int read_eeprom_memory(struct msm_eeprom_ctrl_t *e_ctrl,
+ struct msm_eeprom_memory_block_t *block)
+{
+ int rc = 0;
+ int j;
+ struct msm_eeprom_memory_map_t *emap = block->map;
+ struct msm_eeprom_board_info *eb_info;
+ uint8_t *memptr = block->mapdata;
+
+ if (!e_ctrl) {
+ pr_err("%s e_ctrl is NULL", __func__);
+ return -EINVAL;
+ }
+
+ eb_info = e_ctrl->eboard_info;
+
+ for (j = 0; j < block->num_map; j++) {
+ if (emap[j].saddr.addr) {
+ eb_info->i2c_slaveaddr = emap[j].saddr.addr;
+ e_ctrl->i2c_client.cci_client->sid =
+ eb_info->i2c_slaveaddr >> 1;
+ pr_err("qcom,slave-addr = 0x%X\n",
+ eb_info->i2c_slaveaddr);
+ }
+
+ if (emap[j].page.valid_size) {
+ e_ctrl->i2c_client.addr_type = emap[j].page.addr_t;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &(e_ctrl->i2c_client), emap[j].page.addr,
+ emap[j].page.data, emap[j].page.data_t);
+ msleep(emap[j].page.delay);
+ if (rc < 0) {
+ pr_err("%s: page write failed\n", __func__);
+ return rc;
+ }
+ }
+ if (emap[j].pageen.valid_size) {
+ e_ctrl->i2c_client.addr_type = emap[j].pageen.addr_t;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &(e_ctrl->i2c_client), emap[j].pageen.addr,
+ emap[j].pageen.data, emap[j].pageen.data_t);
+ msleep(emap[j].pageen.delay);
+ if (rc < 0) {
+ pr_err("%s: page enable failed\n", __func__);
+ return rc;
+ }
+ }
+ if (emap[j].poll.valid_size) {
+ e_ctrl->i2c_client.addr_type = emap[j].poll.addr_t;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
+ &(e_ctrl->i2c_client), emap[j].poll.addr,
+ emap[j].poll.data, emap[j].poll.data_t,
+ emap[j].poll.delay);
+ if (rc < 0) {
+ pr_err("%s: poll failed\n", __func__);
+ return rc;
+ }
+ }
+
+ if (emap[j].mem.valid_size) {
+ e_ctrl->i2c_client.addr_type = emap[j].mem.addr_t;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_read_seq(
+ &(e_ctrl->i2c_client), emap[j].mem.addr,
+ memptr, emap[j].mem.valid_size);
+ if (rc < 0) {
+ pr_err("%s: read failed\n", __func__);
+ return rc;
+ }
+ memptr += emap[j].mem.valid_size;
+ }
+ if (emap[j].pageen.valid_size) {
+ e_ctrl->i2c_client.addr_type = emap[j].pageen.addr_t;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &(e_ctrl->i2c_client), emap[j].pageen.addr,
+ 0, emap[j].pageen.data_t);
+ if (rc < 0) {
+ pr_err("%s: page disable failed\n", __func__);
+ return rc;
+ }
+ }
+ }
+ return rc;
+}
+
+/**
+ * msm_eeprom_parse_memory_map() - parse memory map in device node
+ * @of: device node
+ * @data: memory block for output
+ *
+ * This function parses @of to fill @data: it allocates the map itself,
+ * parses the @of node, calculates the total data length, and allocates the
+ * required buffer. It only fills the map; the actual read happens later.
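+ *
+ * The qcom,page%d, qcom,pageen%d, qcom,poll%d and qcom,mem%d properties are
+ * each read as 6-cell arrays, and qcom,saddr%d as a single cell (see the
+ * of_property_read_u32_array() calls below). A minimal, illustrative DT
+ * sketch for one block (the cell values are placeholders, not real settings):
+ *
+ *	qcom,num-blocks = <1>;
+ *	qcom,saddr0 = <0xa0>;
+ *	qcom,page0 = <1 0x0100 2 0x01 1 1>;
+ *	qcom,poll0 = <0 0x0 2 0x0 1 1>;
+ *	qcom,mem0 = <256 0x0000 2 0x0 1 0>;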
+ */
+static int msm_eeprom_parse_memory_map(struct device_node *of,
+ struct msm_eeprom_memory_block_t *data)
+{
+ int i, rc = 0;
+ char property[PROPERTY_MAXSIZE];
+ uint32_t count = 6;
+ struct msm_eeprom_memory_map_t *map;
+
+ snprintf(property, PROPERTY_MAXSIZE, "qcom,num-blocks");
+ rc = of_property_read_u32(of, property, &data->num_map);
+ CDBG("%s: %s %d\n", __func__, property, data->num_map);
+ if (rc < 0) {
+ pr_err("%s failed rc %d\n", __func__, rc);
+ return rc;
+ }
+
+ map = kzalloc((sizeof(*map) * data->num_map), GFP_KERNEL);
+ if (!map) {
+ rc = -ENOMEM;
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return rc;
+ }
+ data->map = map;
+
+ for (i = 0; i < data->num_map; i++) {
+ snprintf(property, PROPERTY_MAXSIZE, "qcom,page%d", i);
+ rc = of_property_read_u32_array(of, property,
+ (uint32_t *) &map[i].page, count);
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ snprintf(property, PROPERTY_MAXSIZE,
+ "qcom,pageen%d", i);
+ rc = of_property_read_u32_array(of, property,
+ (uint32_t *) &map[i].pageen, count);
+ if (rc < 0)
+ CDBG("%s: pageen not needed\n", __func__);
+
+ snprintf(property, PROPERTY_MAXSIZE, "qcom,saddr%d", i);
+ rc = of_property_read_u32_array(of, property,
+ (uint32_t *) &map[i].saddr.addr, 1);
+ if (rc < 0)
+ CDBG("%s: saddr not needed - block %d\n", __func__, i);
+
+ snprintf(property, PROPERTY_MAXSIZE, "qcom,poll%d", i);
+ rc = of_property_read_u32_array(of, property,
+ (uint32_t *) &map[i].poll, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ snprintf(property, PROPERTY_MAXSIZE, "qcom,mem%d", i);
+ rc = of_property_read_u32_array(of, property,
+ (uint32_t *) &map[i].mem, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ data->num_data += map[i].mem.valid_size;
+ }
+
+ CDBG("%s num_bytes %d\n", __func__, data->num_data);
+
+ data->mapdata = kzalloc(data->num_data, GFP_KERNEL);
+ if (!data->mapdata) {
+ rc = -ENOMEM;
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ return rc;
+
+ERROR:
+ kfree(data->map);
+ memset(data, 0, sizeof(*data));
+ return rc;
+}
+
+/**
+ * eeprom_parse_memory_map - Parse mem map
+ * @e_ctrl: ctrl structure
+ * @eeprom_map_array: eeprom map
+ *
+ * Returns success or failure
+ */
+static int eeprom_parse_memory_map(struct msm_eeprom_ctrl_t *e_ctrl,
+ struct msm_eeprom_memory_map_array *eeprom_map_array)
+{
+ int rc = 0, i, j;
+ uint8_t *memptr;
+ struct msm_eeprom_mem_map_t *eeprom_map;
+
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.num_data = msm_get_read_mem_size(eeprom_map_array);
+ if (e_ctrl->cal_data.num_data <= 0) {
+ pr_err("%s:%d Error in reading mem size\n",
+ __func__, __LINE__);
+ e_ctrl->cal_data.num_data = 0;
+ return -EINVAL;
+ }
+ e_ctrl->cal_data.mapdata =
+ kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
+ if (!e_ctrl->cal_data.mapdata)
+ return -ENOMEM;
+
+ memptr = e_ctrl->cal_data.mapdata;
+ for (j = 0; j < eeprom_map_array->msm_size_of_max_mappings; j++) {
+ eeprom_map = &(eeprom_map_array->memory_map[j]);
+ if (e_ctrl->i2c_client.cci_client) {
+ e_ctrl->i2c_client.cci_client->sid =
+ eeprom_map->slave_addr >> 1;
+ } else if (e_ctrl->i2c_client.client) {
+ e_ctrl->i2c_client.client->addr =
+ eeprom_map->slave_addr >> 1;
+ }
+ CDBG("Slave Addr: 0x%X\n", eeprom_map->slave_addr);
+ CDBG("Memory map Size: %d",
+ eeprom_map->memory_map_size);
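+ /*
+ * Walk the per-slave settings in order: WRITE entries program
+ * registers, POLL entries wait on a register value, and READ
+ * entries append reg_data bytes to the mapdata buffer.
+ */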
+ for (i = 0; i < eeprom_map->memory_map_size; i++) {
+ switch (eeprom_map->mem_settings[i].i2c_operation) {
+ case MSM_CAM_WRITE: {
+ e_ctrl->i2c_client.addr_type =
+ eeprom_map->mem_settings[i].addr_type;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &(e_ctrl->i2c_client),
+ eeprom_map->mem_settings[i].reg_addr,
+ eeprom_map->mem_settings[i].reg_data,
+ eeprom_map->mem_settings[i].data_type);
+ msleep(eeprom_map->mem_settings[i].delay);
+ if (rc < 0) {
+ pr_err("%s: page write failed\n",
+ __func__);
+ goto clean_up;
+ }
+ }
+ break;
+ case MSM_CAM_POLL: {
+ e_ctrl->i2c_client.addr_type =
+ eeprom_map->mem_settings[i].addr_type;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
+ &(e_ctrl->i2c_client),
+ eeprom_map->mem_settings[i].reg_addr,
+ eeprom_map->mem_settings[i].reg_data,
+ eeprom_map->mem_settings[i].data_type,
+ eeprom_map->mem_settings[i].delay);
+ if (rc < 0) {
+ pr_err("%s: poll failed\n",
+ __func__);
+ goto clean_up;
+ }
+ }
+ break;
+ case MSM_CAM_READ: {
+ e_ctrl->i2c_client.addr_type =
+ eeprom_map->mem_settings[i].addr_type;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->
+ i2c_read_seq(&(e_ctrl->i2c_client),
+ eeprom_map->mem_settings[i].reg_addr,
+ memptr,
+ eeprom_map->mem_settings[i].reg_data);
+ msleep(eeprom_map->mem_settings[i].delay);
+ if (rc < 0) {
+ pr_err("%s: read failed\n",
+ __func__);
+ goto clean_up;
+ }
+ memptr += eeprom_map->mem_settings[i].reg_data;
+ }
+ break;
+ default:
+ pr_err("%s: %d Invalid i2c operation LC:%d\n",
+ __func__, __LINE__, i);
+ return -EINVAL;
+ }
+ }
+ }
+ memptr = e_ctrl->cal_data.mapdata;
+ for (i = 0; i < e_ctrl->cal_data.num_data; i++)
+ CDBG("memory_data[%d] = 0x%X\n", i, memptr[i]);
+ return rc;
+
+clean_up:
+ kfree(e_ctrl->cal_data.mapdata);
+ e_ctrl->cal_data.num_data = 0;
+ e_ctrl->cal_data.mapdata = NULL;
+ return rc;
+}
+
+/**
+ * msm_eeprom_power_up - Do eeprom power up here
+ * @e_ctrl: ctrl structure
+ * @power_info: power up info for eeprom
+ *
+ * Returns success or failure
+ */
+static int msm_eeprom_power_up(struct msm_eeprom_ctrl_t *e_ctrl,
+ struct msm_camera_power_ctrl_t *power_info)
+{
+ int32_t rc = 0;
+
+ rc = msm_camera_fill_vreg_params(
+ power_info->cam_vreg, power_info->num_vreg,
+ power_info->power_setting, power_info->power_setting_size);
+ if (rc < 0) {
+ pr_err("%s:%d failed in camera_fill_vreg_params rc %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ /* Parse and fill vreg params for power down settings */
+ rc = msm_camera_fill_vreg_params(
+ power_info->cam_vreg, power_info->num_vreg,
+ power_info->power_down_setting,
+ power_info->power_down_setting_size);
+ if (rc < 0) {
+ pr_err("%s:%d failed msm_camera_fill_vreg_params for PDOWN rc %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ rc = msm_camera_power_up(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+ if (rc) {
+ pr_err("%s:%d failed in eeprom Power up rc %d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ return rc;
+}
+
+/**
+ * eeprom_init_config - Power up the eeprom, parse its memory map, power down
+ * @e_ctrl: ctrl structure
+ * @argp: user-space pointer to struct msm_eeprom_cfg_data
+ *
+ * Returns success or failure
+ */
+static int eeprom_init_config(struct msm_eeprom_ctrl_t *e_ctrl,
+ void __user *argp)
+{
+ int rc = 0;
+ struct msm_eeprom_cfg_data *cdata = argp;
+ struct msm_sensor_power_setting_array *power_setting_array = NULL;
+ struct msm_camera_power_ctrl_t *power_info;
+ struct msm_eeprom_memory_map_array *memory_map_arr = NULL;
+
+ power_setting_array =
+ kzalloc(sizeof(struct msm_sensor_power_setting_array),
+ GFP_KERNEL);
+ if (!power_setting_array) {
+ pr_err("%s:%d Mem Alloc Fail\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ memory_map_arr = kzalloc(sizeof(struct msm_eeprom_memory_map_array),
+ GFP_KERNEL);
+ if (!memory_map_arr) {
+ rc = -ENOMEM;
+ pr_err("%s:%d Mem Alloc Fail\n", __func__, __LINE__);
+ goto free_mem;
+ }
+
+ if (copy_from_user(power_setting_array,
+ cdata->cfg.eeprom_info.power_setting_array,
+ sizeof(struct msm_sensor_power_setting_array))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ goto free_mem;
+ }
+ CDBG("%s:%d Size of power setting array: %d\n",
+ __func__, __LINE__, power_setting_array->size);
+ if (copy_from_user(memory_map_arr,
+ cdata->cfg.eeprom_info.mem_map_array,
+ sizeof(struct msm_eeprom_memory_map_array))) {
+ rc = -EINVAL;
+ pr_err("%s copy_from_user failed for memory map%d\n",
+ __func__, __LINE__);
+ goto free_mem;
+ }
+
+ power_info = &(e_ctrl->eboard_info->power_info);
+
+ power_info->power_setting =
+ power_setting_array->power_setting_a;
+ power_info->power_down_setting =
+ power_setting_array->power_down_setting_a;
+
+ power_info->power_setting_size =
+ power_setting_array->size;
+ power_info->power_down_setting_size =
+ power_setting_array->size_down;
+
+ if ((power_info->power_setting_size >
+ MAX_POWER_CONFIG) ||
+ (power_info->power_down_setting_size >
+ MAX_POWER_CONFIG) ||
+ (!power_info->power_down_setting_size) ||
+ (!power_info->power_setting_size)) {
+ rc = -EINVAL;
+ pr_err("%s:%d Invalid power setting size :%d, %d\n",
+ __func__, __LINE__,
+ power_info->power_setting_size,
+ power_info->power_down_setting_size);
+ goto free_mem;
+ }
+
+ if (e_ctrl->i2c_client.cci_client) {
+ e_ctrl->i2c_client.cci_client->i2c_freq_mode =
+ cdata->cfg.eeprom_info.i2c_freq_mode;
+ if (e_ctrl->i2c_client.cci_client->i2c_freq_mode >
+ I2C_MAX_MODES) {
+ pr_err("%s::%d Improper I2C freq mode\n",
+ __func__, __LINE__);
+ e_ctrl->i2c_client.cci_client->i2c_freq_mode =
+ I2C_STANDARD_MODE;
+ }
+ }
+
+ /* Fill vreg power info and power up here */
+ rc = msm_eeprom_power_up(e_ctrl, power_info);
+ if (rc < 0) {
+ pr_err("Power Up failed for eeprom\n");
+ goto free_mem;
+ }
+
+ rc = eeprom_parse_memory_map(e_ctrl, memory_map_arr);
+ if (rc < 0)
+ pr_err("%s::%d memory map parse failed\n", __func__, __LINE__);
+
+ rc = msm_camera_power_down(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+ if (rc < 0) {
+ pr_err("%s:%d Power down failed rc %d\n",
+ __func__, __LINE__, rc);
+ }
+
+free_mem:
+ kfree(power_setting_array);
+ kfree(memory_map_arr);
+ power_setting_array = NULL;
+ memory_map_arr = NULL;
+ return rc;
+}
+
+static int msm_eeprom_get_cmm_data(struct msm_eeprom_ctrl_t *e_ctrl,
+ struct msm_eeprom_cfg_data *cdata)
+{
+ int rc = 0;
+ struct msm_eeprom_cmm_t *cmm_data = &e_ctrl->eboard_info->cmm_data;
+
+ cdata->cfg.get_cmm_data.cmm_support = cmm_data->cmm_support;
+ cdata->cfg.get_cmm_data.cmm_compression = cmm_data->cmm_compression;
+ cdata->cfg.get_cmm_data.cmm_size = cmm_data->cmm_size;
+ return rc;
+}
+
+static int eeprom_config_read_cal_data(struct msm_eeprom_ctrl_t *e_ctrl,
+ struct msm_eeprom_cfg_data *cdata)
+{
+ int rc;
+
+ /* check range */
+ if (cdata->cfg.read_data.num_bytes >
+ e_ctrl->cal_data.num_data) {
+ CDBG("%s: Invalid size. exp %u, req %u\n", __func__,
+ e_ctrl->cal_data.num_data,
+ cdata->cfg.read_data.num_bytes);
+ return -EINVAL;
+ }
+
+ rc = copy_to_user(cdata->cfg.read_data.dbuffer,
+ e_ctrl->cal_data.mapdata,
+ cdata->cfg.read_data.num_bytes);
+
+ return rc;
+}
+
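+/*
+ * Typical user-space flow, as implied by the switch below: CFG_EEPROM_GET_INFO
+ * fetches the eeprom name, CFG_EEPROM_GET_CAL_DATA returns the calibration
+ * data size, CFG_EEPROM_READ_CAL_DATA copies the data out, and
+ * CFG_EEPROM_INIT drives a user-space-triggered probe and read when the
+ * eeprom was not probed at kernel boot.
+ */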
+static int msm_eeprom_config(struct msm_eeprom_ctrl_t *e_ctrl,
+ void __user *argp)
+{
+ struct msm_eeprom_cfg_data *cdata =
+ (struct msm_eeprom_cfg_data *)argp;
+ int rc = 0;
+ size_t length = 0;
+
+ CDBG("%s E\n", __func__);
+ switch (cdata->cfgtype) {
+ case CFG_EEPROM_GET_INFO:
+ if (e_ctrl->userspace_probe == 1) {
+ pr_err("%s:%d Eeprom name should be module driver",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ CDBG("%s E CFG_EEPROM_GET_INFO\n", __func__);
+ cdata->is_supported = e_ctrl->is_supported;
+ length = strlen(e_ctrl->eboard_info->eeprom_name) + 1;
+ if (length > MAX_EEPROM_NAME) {
+ pr_err("%s:%d invalid eeprom_name length %d\n",
+ __func__, __LINE__, (int)length);
+ rc = -EINVAL;
+ break;
+ }
+ memcpy(cdata->cfg.eeprom_name,
+ e_ctrl->eboard_info->eeprom_name, length);
+ break;
+ case CFG_EEPROM_GET_CAL_DATA:
+ CDBG("%s E CFG_EEPROM_GET_CAL_DATA\n", __func__);
+ cdata->cfg.get_data.num_bytes =
+ e_ctrl->cal_data.num_data;
+ break;
+ case CFG_EEPROM_READ_CAL_DATA:
+ CDBG("%s E CFG_EEPROM_READ_CAL_DATA\n", __func__);
+ rc = eeprom_config_read_cal_data(e_ctrl, cdata);
+ break;
+ case CFG_EEPROM_GET_MM_INFO:
+ CDBG("%s E CFG_EEPROM_GET_MM_INFO\n", __func__);
+ rc = msm_eeprom_get_cmm_data(e_ctrl, cdata);
+ break;
+ case CFG_EEPROM_INIT:
+ if (e_ctrl->userspace_probe == 0) {
+ pr_err("%s:%d Eeprom already probed at kernel boot",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ if (e_ctrl->cal_data.num_data == 0) {
+ rc = eeprom_init_config(e_ctrl, argp);
+ if (rc < 0) {
+ pr_err("%s:%d Eeprom init failed\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ } else {
+ CDBG("%s:%d Already read eeprom\n",
+ __func__, __LINE__);
+ }
+ break;
+ default:
+ break;
+ }
+
+ CDBG("%s X rc: %d\n", __func__, rc);
+ return rc;
+}
+
+static int msm_eeprom_get_subdev_id(struct msm_eeprom_ctrl_t *e_ctrl,
+ void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ CDBG("%s E\n", __func__);
+ if (!subdev_id) {
+ pr_err("%s failed\n", __func__);
+ return -EINVAL;
+ }
+ *subdev_id = e_ctrl->subdev_id;
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("%s X\n", __func__);
+ return 0;
+}
+
+static long msm_eeprom_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+ void __user *argp = (void __user *)arg;
+
+ CDBG("%s E\n", __func__);
+ CDBG("%s:%d a_ctrl %pK argp %pK\n", __func__, __LINE__, e_ctrl, argp);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_eeprom_get_subdev_id(e_ctrl, argp);
+ case VIDIOC_MSM_EEPROM_CFG:
+ return msm_eeprom_config(e_ctrl, argp);
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ CDBG("%s X\n", __func__);
+}
+
+static struct msm_camera_i2c_fn_t msm_eeprom_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_seq = msm_camera_cci_i2c_write_seq,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_poll = msm_camera_cci_i2c_poll,
+};
+
+static struct msm_camera_i2c_fn_t msm_eeprom_qup_func_tbl = {
+ .i2c_read = msm_camera_qup_i2c_read,
+ .i2c_read_seq = msm_camera_qup_i2c_read_seq,
+ .i2c_write = msm_camera_qup_i2c_write,
+ .i2c_write_table = msm_camera_qup_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_qup_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_qup_i2c_write_table_w_microdelay,
+};
+
+static struct msm_camera_i2c_fn_t msm_eeprom_spi_func_tbl = {
+ .i2c_read = msm_camera_spi_read,
+ .i2c_read_seq = msm_camera_spi_read_seq,
+};
+
+static int msm_eeprom_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+
+ CDBG("%s E\n", __func__);
+ if (!e_ctrl) {
+ pr_err("%s failed e_ctrl is NULL\n", __func__);
+ return -EINVAL;
+ }
+ CDBG("%s X\n", __func__);
+ return rc;
+}
+
+static int msm_eeprom_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+
+ CDBG("%s E\n", __func__);
+ if (!e_ctrl) {
+ pr_err("%s failed e_ctrl is NULL\n", __func__);
+ return -EINVAL;
+ }
+ CDBG("%s X\n", __func__);
+ return rc;
+}
+
+static const struct v4l2_subdev_internal_ops msm_eeprom_internal_ops = {
+ .open = msm_eeprom_open,
+ .close = msm_eeprom_close,
+};
+
+static struct v4l2_subdev_core_ops msm_eeprom_subdev_core_ops = {
+ .ioctl = msm_eeprom_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_eeprom_subdev_ops = {
+ .core = &msm_eeprom_subdev_core_ops,
+};
+
+static int msm_eeprom_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ struct msm_eeprom_ctrl_t *e_ctrl = NULL;
+
+ CDBG("%s E\n", __func__);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("%s i2c_check_functionality failed\n", __func__);
+ goto probe_failure;
+ }
+
+ e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+ if (!e_ctrl)
+ return -ENOMEM;
+ e_ctrl->eeprom_v4l2_subdev_ops = &msm_eeprom_subdev_ops;
+ e_ctrl->eeprom_mutex = &msm_eeprom_mutex;
+ CDBG("%s client = 0x%pK\n", __func__, client);
+ e_ctrl->eboard_info = (struct msm_eeprom_board_info *)(id->driver_data);
+ if (!e_ctrl->eboard_info) {
+ pr_err("%s:%d board info NULL\n", __func__, __LINE__);
+ rc = -EINVAL;
+ goto ectrl_free;
+ }
+ e_ctrl->i2c_client.client = client;
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.map = NULL;
+ e_ctrl->userspace_probe = 0;
+ e_ctrl->is_supported = 1;
+
+ /* Set device type as I2C */
+ e_ctrl->eeprom_device_type = MSM_CAMERA_I2C_DEVICE;
+ e_ctrl->i2c_client.i2c_func_tbl = &msm_eeprom_qup_func_tbl;
+
+ if (e_ctrl->eboard_info->i2c_slaveaddr != 0)
+ e_ctrl->i2c_client.client->addr =
+ e_ctrl->eboard_info->i2c_slaveaddr;
+
+ /* Get clocks information */
+ rc = msm_camera_i2c_dev_get_clk_info(
+ &e_ctrl->i2c_client.client->dev,
+ &e_ctrl->eboard_info->power_info.clk_info,
+ &e_ctrl->eboard_info->power_info.clk_ptr,
+ &e_ctrl->eboard_info->power_info.clk_info_size);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_get_clk_info rc %d", rc);
+ goto ectrl_free;
+ }
+
+ /* IMPLEMENT READING PART */
+ /* Initialize sub device */
+ v4l2_i2c_subdev_init(&e_ctrl->msm_sd.sd,
+ e_ctrl->i2c_client.client,
+ e_ctrl->eeprom_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&e_ctrl->msm_sd.sd, e_ctrl);
+ e_ctrl->msm_sd.sd.internal_ops = &msm_eeprom_internal_ops;
+ e_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&e_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ e_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ e_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_EEPROM;
+ msm_sd_register(&e_ctrl->msm_sd);
+ CDBG("%s success result=%d X\n", __func__, rc);
+ return rc;
+
+ectrl_free:
+ kfree(e_ctrl);
+probe_failure:
+ pr_err("%s failed! rc = %d\n", __func__, rc);
+ return rc;
+}
+
+static int msm_eeprom_i2c_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct msm_eeprom_ctrl_t *e_ctrl;
+
+ if (!sd) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return 0;
+ }
+
+ e_ctrl = (struct msm_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+ if (!e_ctrl) {
+ pr_err("%s: eeprom device is NULL\n", __func__);
+ return 0;
+ }
+
+ if (!e_ctrl->eboard_info) {
+ pr_err("%s: eboard_info is NULL\n", __func__);
+ return 0;
+ }
+
+ msm_camera_i2c_dev_put_clk_info(&e_ctrl->i2c_client.client->dev,
+ &e_ctrl->eboard_info->power_info.clk_info,
+ &e_ctrl->eboard_info->power_info.clk_ptr,
+ e_ctrl->eboard_info->power_info.clk_info_size);
+
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+ if (e_ctrl->eboard_info) {
+ kfree(e_ctrl->eboard_info->power_info.gpio_conf);
+ kfree(e_ctrl->eboard_info);
+ }
+ e_ctrl->cal_data.mapdata = NULL;
+ kfree(e_ctrl);
+ e_ctrl = NULL;
+
+ return 0;
+}
+
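+/*
+ * Unpacks a 3-cell <opcode addr_len dummy_len> tuple read from DT into the
+ * SPI command table entry named by @name; the str and size arguments are
+ * currently unused by the macro body.
+ */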
+#define msm_eeprom_spi_parse_cmd(spic, str, name, out, size) \
+ { \
+ spic->cmd_tbl.name.opcode = out[0]; \
+ spic->cmd_tbl.name.addr_len = out[1]; \
+ spic->cmd_tbl.name.dummy_len = out[2]; \
+ }
+
+static int msm_eeprom_spi_parse_of(struct msm_camera_spi_client *spic)
+{
+ int rc = -EFAULT;
+ uint32_t tmp[3];
+
+ if (of_property_read_u32_array(spic->spi_master->dev.of_node,
+ "qcom,spiop,read", tmp, 3))
+ return -EFAULT;
+ msm_eeprom_spi_parse_cmd(spic, "qcom,spiop,read", read, tmp, 3);
+ if (of_property_read_u32_array(spic->spi_master->dev.of_node,
+ "qcom,spiop,readseq", tmp, 3))
+ return -EFAULT;
+ msm_eeprom_spi_parse_cmd(spic, "qcom,spiop,readseq", read_seq, tmp, 3);
+ if (of_property_read_u32_array(spic->spi_master->dev.of_node,
+ "qcom,spiop,queryid", tmp, 3))
+ return -EFAULT;
+ msm_eeprom_spi_parse_cmd(spic, "qcom,spiop,queryid", query_id, tmp, 3);
+
+ rc = of_property_read_u32_array(spic->spi_master->dev.of_node,
+ "qcom,eeprom-id", tmp, 2);
+ if (rc) {
+ pr_err("%s: Failed to get eeprom id\n", __func__);
+ return rc;
+ }
+ spic->mfr_id0 = tmp[0];
+ spic->device_id0 = tmp[1];
+
+ return 0;
+}
+
+static int msm_eeprom_match_id(struct msm_eeprom_ctrl_t *e_ctrl)
+{
+ int rc;
+ struct msm_camera_i2c_client *client = &e_ctrl->i2c_client;
+ uint8_t id[2];
+
+ rc = msm_camera_spi_query_id(client, 0, &id[0], 2);
+ if (rc < 0)
+ return rc;
+ CDBG("%s: read 0x%x 0x%x, check 0x%x 0x%x\n", __func__, id[0],
+ id[1], client->spi_client->mfr_id0,
+ client->spi_client->device_id0);
+ if (id[0] != client->spi_client->mfr_id0
+ || id[1] != client->spi_client->device_id0)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int msm_eeprom_get_dt_data(struct msm_eeprom_ctrl_t *e_ctrl)
+{
+ int rc = 0, i = 0;
+ struct msm_eeprom_board_info *eb_info;
+ struct msm_camera_power_ctrl_t *power_info =
+ &e_ctrl->eboard_info->power_info;
+ struct device_node *of_node = NULL;
+ struct msm_camera_gpio_conf *gconf = NULL;
+ int8_t gpio_array_size = 0;
+ uint16_t *gpio_array = NULL;
+
+ eb_info = e_ctrl->eboard_info;
+ if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE)
+ of_node = e_ctrl->i2c_client.
+ spi_client->spi_master->dev.of_node;
+ else if (e_ctrl->eeprom_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ of_node = e_ctrl->pdev->dev.of_node;
+
+ if (!of_node) {
+ pr_err("%s: %d of_node is NULL\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ rc = msm_camera_get_dt_vreg_data(of_node, &power_info->cam_vreg,
+ &power_info->num_vreg);
+ if (rc < 0)
+ return rc;
+
+ if (e_ctrl->userspace_probe == 0) {
+ rc = msm_camera_get_dt_power_setting_data(of_node,
+ power_info->cam_vreg, power_info->num_vreg,
+ power_info);
+ if (rc < 0)
+ goto ERROR1;
+ }
+
+ power_info->gpio_conf = kzalloc(sizeof(struct msm_camera_gpio_conf),
+ GFP_KERNEL);
+ if (!power_info->gpio_conf) {
+ rc = -ENOMEM;
+ goto ERROR2;
+ }
+ gconf = power_info->gpio_conf;
+ gpio_array_size = of_gpio_count(of_node);
+ CDBG("%s gpio count %d\n", __func__, gpio_array_size);
+
+ if (gpio_array_size > 0) {
+ gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!gpio_array) {
+ rc = -ENOMEM;
+ goto ERROR3;
+ }
+ for (i = 0; i < gpio_array_size; i++) {
+ gpio_array[i] = of_get_gpio(of_node, i);
+ CDBG("%s gpio_array[%d] = %d\n", __func__, i,
+ gpio_array[i]);
+ }
+
+ rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf,
+ gpio_array, gpio_array_size);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR4;
+ }
+
+ rc = msm_camera_init_gpio_pin_tbl(of_node, gconf,
+ gpio_array, gpio_array_size);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR4;
+ }
+ kfree(gpio_array);
+ }
+
+ return rc;
+ERROR4:
+ kfree(gpio_array);
+ERROR3:
+ kfree(power_info->gpio_conf);
+ERROR2:
+ kfree(power_info->cam_vreg);
+ERROR1:
+ kfree(power_info->power_setting);
+ return rc;
+}
+
+static int msm_eeprom_cmm_dts(struct msm_eeprom_board_info *eb_info,
+ struct device_node *of_node)
+{
+ int rc = 0;
+ struct msm_eeprom_cmm_t *cmm_data = &eb_info->cmm_data;
+
+ cmm_data->cmm_support =
+ of_property_read_bool(of_node, "qcom,cmm-data-support");
+ if (!cmm_data->cmm_support)
+ return -EINVAL;
+ cmm_data->cmm_compression =
+ of_property_read_bool(of_node, "qcom,cmm-data-compressed");
+ if (!cmm_data->cmm_compression)
+ CDBG("No MM compression data\n");
+
+ rc = of_property_read_u32(of_node, "qcom,cmm-data-offset",
+ &cmm_data->cmm_offset);
+ if (rc < 0)
+ CDBG("No MM offset data\n");
+
+ rc = of_property_read_u32(of_node, "qcom,cmm-data-size",
+ &cmm_data->cmm_size);
+ if (rc < 0)
+ CDBG("No MM size data\n");
+
+ CDBG("cmm_support: cmm_compr %d, cmm_offset %d, cmm_size %d\n",
+ cmm_data->cmm_compression,
+ cmm_data->cmm_offset,
+ cmm_data->cmm_size);
+ return 0;
+}
+
+static int msm_eeprom_spi_setup(struct spi_device *spi)
+{
+ struct msm_eeprom_ctrl_t *e_ctrl = NULL;
+ struct msm_camera_i2c_client *client = NULL;
+ struct msm_camera_spi_client *spi_client;
+ struct msm_eeprom_board_info *eb_info;
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+ int rc = 0;
+
+ e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+ if (!e_ctrl)
+ return -ENOMEM;
+ e_ctrl->eeprom_v4l2_subdev_ops = &msm_eeprom_subdev_ops;
+ e_ctrl->eeprom_mutex = &msm_eeprom_mutex;
+ client = &e_ctrl->i2c_client;
+ e_ctrl->is_supported = 0;
+ e_ctrl->userspace_probe = 0;
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.map = NULL;
+
+ spi_client = kzalloc(sizeof(*spi_client), GFP_KERNEL);
+ if (!spi_client) {
+ kfree(e_ctrl);
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32(spi->dev.of_node, "cell-index",
+ &e_ctrl->subdev_id);
+ CDBG("cell-index %d, rc %d\n", e_ctrl->subdev_id, rc);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+
+ eb_info = kzalloc(sizeof(*eb_info), GFP_KERNEL);
+ if (!eb_info)
+ goto spi_free;
+ e_ctrl->eboard_info = eb_info;
+
+ rc = of_property_read_string(spi->dev.of_node, "qcom,eeprom-name",
+ &eb_info->eeprom_name);
+ CDBG("%s qcom,eeprom-name %s, rc %d\n", __func__,
+ eb_info->eeprom_name, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ e_ctrl->userspace_probe = 1;
+ goto board_free;
+ }
+
+ e_ctrl->eeprom_device_type = MSM_CAMERA_SPI_DEVICE;
+ client->spi_client = spi_client;
+ spi_client->spi_master = spi;
+ client->i2c_func_tbl = &msm_eeprom_spi_func_tbl;
+ client->addr_type = MSM_CAMERA_I2C_3B_ADDR;
+
+ rc = msm_eeprom_cmm_dts(e_ctrl->eboard_info, spi->dev.of_node);
+ if (rc < 0)
+ CDBG("%s MM data miss:%d\n", __func__, __LINE__);
+
+ power_info = &eb_info->power_info;
+ power_info->dev = &spi->dev;
+
+ /* Get clocks information */
+ rc = msm_camera_i2c_dev_get_clk_info(
+ &spi->dev,
+ &power_info->clk_info,
+ &power_info->clk_ptr,
+ &power_info->clk_info_size);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_get_clk_info rc %d", rc);
+ goto board_free;
+ }
+
+ rc = msm_eeprom_get_dt_data(e_ctrl);
+ if (rc < 0)
+ goto board_free;
+
+ /* set spi instruction info */
+ spi_client->retry_delay = 1;
+ spi_client->retries = 0;
+
+ rc = msm_eeprom_spi_parse_of(spi_client);
+ if (rc < 0) {
+ dev_err(&spi->dev,
+ "%s: Error parsing device properties\n", __func__);
+ goto board_free;
+ }
+
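+ /*
+ * Kernel-time probe path: parse the calibration memory map from DT,
+ * power the eeprom up, verify its ID against qcom,eeprom-id, read and
+ * CRC-check the map, then power back down. With userspace_probe set,
+ * all of this is deferred to the CFG_EEPROM_INIT ioctl instead.
+ */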
+ if (e_ctrl->userspace_probe == 0) {
+ /* prepare memory buffer */
+ rc = msm_eeprom_parse_memory_map(spi->dev.of_node,
+ &e_ctrl->cal_data);
+ if (rc < 0)
+ CDBG("%s: no cal memory map\n", __func__);
+
+ /* power up eeprom for reading */
+ rc = msm_camera_power_up(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto caldata_free;
+ }
+
+ /* check eeprom id */
+ rc = msm_eeprom_match_id(e_ctrl);
+ if (rc < 0) {
+ CDBG("%s: eeprom not matching %d\n", __func__, rc);
+ goto power_down;
+ }
+ /* read eeprom */
+ if (e_ctrl->cal_data.map) {
+ rc = read_eeprom_memory(e_ctrl, &e_ctrl->cal_data);
+ if (rc < 0) {
+ pr_err("%s: read cal data failed\n", __func__);
+ goto power_down;
+ }
+ e_ctrl->is_supported |= msm_eeprom_match_crc(
+ &e_ctrl->cal_data);
+ }
+
+ rc = msm_camera_power_down(power_info,
+ e_ctrl->eeprom_device_type, &e_ctrl->i2c_client);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto caldata_free;
+ }
+ } else
+ e_ctrl->is_supported = 1;
+
+ /* initialize subdev */
+ v4l2_spi_subdev_init(&e_ctrl->msm_sd.sd,
+ e_ctrl->i2c_client.spi_client->spi_master,
+ e_ctrl->eeprom_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&e_ctrl->msm_sd.sd, e_ctrl);
+ e_ctrl->msm_sd.sd.internal_ops = &msm_eeprom_internal_ops;
+ e_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&e_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ e_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ e_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_EEPROM;
+ msm_sd_register(&e_ctrl->msm_sd);
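+ /*
+ * Bit 0 marks a successful probe; any CRC-match bitmask gathered
+ * above is shifted up by one so it sits in the remaining bits.
+ */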
+ e_ctrl->is_supported = (e_ctrl->is_supported << 1) | 1;
+ CDBG("%s success result=%d supported=%x X\n", __func__, rc,
+ e_ctrl->is_supported);
+
+ return 0;
+
+power_down:
+ msm_camera_power_down(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+caldata_free:
+ msm_camera_i2c_dev_put_clk_info(
+ &e_ctrl->i2c_client.spi_client->spi_master->dev,
+ &e_ctrl->eboard_info->power_info.clk_info,
+ &e_ctrl->eboard_info->power_info.clk_ptr,
+ e_ctrl->eboard_info->power_info.clk_info_size);
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+board_free:
+ kfree(e_ctrl->eboard_info);
+spi_free:
+ kfree(spi_client);
+ kfree(e_ctrl);
+ return rc;
+}
+
+static int msm_eeprom_spi_probe(struct spi_device *spi)
+{
+ int irq, cs, cpha, cpol, cs_high;
+
+ CDBG("%s\n", __func__);
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ spi_setup(spi);
+
+ irq = spi->irq;
+ cs = spi->chip_select;
+ cpha = (spi->mode & SPI_CPHA) ? 1 : 0;
+ cpol = (spi->mode & SPI_CPOL) ? 1 : 0;
+ cs_high = (spi->mode & SPI_CS_HIGH) ? 1 : 0;
+ CDBG("%s: irq[%d] cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x]\n",
+ __func__, irq, cs, cpha, cpol, cs_high);
+ CDBG("%s: max_speed[%u]\n", __func__, spi->max_speed_hz);
+
+ return msm_eeprom_spi_setup(spi);
+}
+
+static int msm_eeprom_spi_remove(struct spi_device *sdev)
+{
+ struct v4l2_subdev *sd = spi_get_drvdata(sdev);
+ struct msm_eeprom_ctrl_t *e_ctrl;
+
+ if (!sd) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return 0;
+ }
+
+ e_ctrl = (struct msm_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+ if (!e_ctrl) {
+ pr_err("%s: eeprom device is NULL\n", __func__);
+ return 0;
+ }
+
+ if (!e_ctrl->eboard_info) {
+ pr_err("%s: eboard_info is NULL\n", __func__);
+ return 0;
+ }
+ msm_camera_i2c_dev_put_clk_info(
+ &e_ctrl->i2c_client.spi_client->spi_master->dev,
+ &e_ctrl->eboard_info->power_info.clk_info,
+ &e_ctrl->eboard_info->power_info.clk_ptr,
+ e_ctrl->eboard_info->power_info.clk_info_size);
+
+ kfree(e_ctrl->i2c_client.spi_client);
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+ if (e_ctrl->eboard_info) {
+ kfree(e_ctrl->eboard_info->power_info.gpio_conf);
+ kfree(e_ctrl->eboard_info);
+ }
+ e_ctrl->cal_data.mapdata = NULL;
+ kfree(e_ctrl);
+ e_ctrl = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static void msm_eeprom_copy_power_settings_compat(
+ struct msm_sensor_power_setting_array *ps,
+ struct msm_sensor_power_setting_array32 *ps32)
+{
+ uint16_t i = 0;
+
+ ps->size = ps32->size;
+ for (i = 0; i < ps32->size; i++) {
+ ps->power_setting_a[i].config_val =
+ ps32->power_setting_a[i].config_val;
+ ps->power_setting_a[i].delay =
+ ps32->power_setting_a[i].delay;
+ ps->power_setting_a[i].seq_type =
+ ps32->power_setting_a[i].seq_type;
+ ps->power_setting_a[i].seq_val =
+ ps32->power_setting_a[i].seq_val;
+ }
+
+ ps->size_down = ps32->size_down;
+ for (i = 0; i < ps32->size_down; i++) {
+ ps->power_down_setting_a[i].config_val =
+ ps32->power_down_setting_a[i].config_val;
+ ps->power_down_setting_a[i].delay =
+ ps32->power_down_setting_a[i].delay;
+ ps->power_down_setting_a[i].seq_type =
+ ps32->power_down_setting_a[i].seq_type;
+ ps->power_down_setting_a[i].seq_val =
+ ps32->power_down_setting_a[i].seq_val;
+ }
+}
+
+static int eeprom_config_read_cal_data32(struct msm_eeprom_ctrl_t *e_ctrl,
+ void __user *arg)
+{
+ int rc;
+ uint8_t *ptr_dest = NULL;
+ struct msm_eeprom_cfg_data32 *cdata32 =
+ (struct msm_eeprom_cfg_data32 *) arg;
+ struct msm_eeprom_cfg_data cdata;
+
+ cdata.cfgtype = cdata32->cfgtype;
+ cdata.is_supported = cdata32->is_supported;
+ cdata.cfg.read_data.num_bytes = cdata32->cfg.read_data.num_bytes;
+ /* check range */
+ if (cdata.cfg.read_data.num_bytes >
+ e_ctrl->cal_data.num_data) {
+ CDBG("%s: Invalid size. exp %u, req %u\n", __func__,
+ e_ctrl->cal_data.num_data,
+ cdata.cfg.read_data.num_bytes);
+ return -EINVAL;
+ }
+ if (!e_ctrl->cal_data.mapdata)
+ return -EFAULT;
+
+ ptr_dest = (uint8_t *) compat_ptr(cdata32->cfg.read_data.dbuffer);
+
+ if (copy_to_user(ptr_dest, e_ctrl->cal_data.mapdata,
+ cdata.cfg.read_data.num_bytes))
+ rc = -EFAULT;
+
+ return rc;
+}
+
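+/*
+ * Compat CFG_EEPROM_INIT: pull the power settings and memory map layout
+ * from 32-bit userspace, power the EEPROM up, parse and read the map,
+ * then power it back down.
+ */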
+static int eeprom_init_config32(struct msm_eeprom_ctrl_t *e_ctrl,
+ void __user *argp)
+{
+ int rc = 0;
+ struct msm_eeprom_cfg_data32 *cdata32 = argp;
+ struct msm_sensor_power_setting_array *power_setting_array = NULL;
+ struct msm_sensor_power_setting_array32 *power_setting_array32 = NULL;
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+ struct msm_eeprom_memory_map_array *mem_map_array = NULL;
+
+ power_setting_array32 =
+ kzalloc(sizeof(struct msm_sensor_power_setting_array32),
+ GFP_KERNEL);
+ if (!power_setting_array32) {
+ pr_err("%s:%d Mem Alloc Fail\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ power_setting_array =
+ kzalloc(sizeof(struct msm_sensor_power_setting_array),
+ GFP_KERNEL);
+ if (power_setting_array == NULL) {
+ pr_err("%s:%d Mem Alloc Fail\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto free_mem;
+ }
+ mem_map_array =
+ kzalloc(sizeof(struct msm_eeprom_memory_map_array),
+ GFP_KERNEL);
+ if (mem_map_array == NULL) {
+ pr_err("%s:%d Mem Alloc Fail\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto free_mem;
+ }
+
+ if (copy_from_user(power_setting_array32,
+ (void *)compat_ptr(cdata32->cfg.eeprom_info.
+ power_setting_array),
+ sizeof(struct msm_sensor_power_setting_array32))) {
+ pr_err("%s:%d copy_from_user failed\n",
+ __func__, __LINE__);
+ rc = -EFAULT;
+ goto free_mem;
+ }
+ CDBG("%s:%d Size of power setting array: %d",
+ __func__, __LINE__, power_setting_array32->size);
+ if (copy_from_user(mem_map_array,
+ (void *)compat_ptr(cdata32->cfg.eeprom_info.mem_map_array),
+ sizeof(struct msm_eeprom_memory_map_array))) {
+ pr_err("%s:%d copy_from_user failed for memory map\n",
+ __func__, __LINE__);
+ rc = -EFAULT;
+ goto free_mem;
+ }
+
+ power_info = &(e_ctrl->eboard_info->power_info);
+
+ if ((power_setting_array32->size > MAX_POWER_CONFIG) ||
+ (power_setting_array32->size_down > MAX_POWER_CONFIG) ||
+ (!power_setting_array32->size) ||
+ (!power_setting_array32->size_down)) {
+ pr_err("%s:%d invalid power setting size=%d size_down=%d\n",
+ __func__, __LINE__, power_setting_array32->size,
+ power_setting_array32->size_down);
+ rc = -EINVAL;
+ goto free_mem;
+ }
+ msm_eeprom_copy_power_settings_compat(
+ power_setting_array,
+ power_setting_array32);
+
+ power_info->power_setting =
+ power_setting_array->power_setting_a;
+ power_info->power_down_setting =
+ power_setting_array->power_down_setting_a;
+
+ power_info->power_setting_size =
+ power_setting_array->size;
+ power_info->power_down_setting_size =
+ power_setting_array->size_down;
+
+ if (e_ctrl->i2c_client.cci_client) {
+ e_ctrl->i2c_client.cci_client->i2c_freq_mode =
+ cdata32->cfg.eeprom_info.i2c_freq_mode;
+ if (e_ctrl->i2c_client.cci_client->i2c_freq_mode >
+ I2C_MAX_MODES) {
+ pr_err("%s::%d Improper I2C Freq Mode\n",
+ __func__, __LINE__);
+ e_ctrl->i2c_client.cci_client->i2c_freq_mode =
+ I2C_STANDARD_MODE;
+ }
+ CDBG("%s:%d Not CCI probe", __func__, __LINE__);
+ }
+ /* Fill vreg power info and power up here */
+ rc = msm_eeprom_power_up(e_ctrl, power_info);
+ if (rc < 0) {
+ pr_err("%s:%d Power Up failed for eeprom\n",
+ __func__, __LINE__);
+ goto free_mem;
+ }
+
+ rc = eeprom_parse_memory_map(e_ctrl, mem_map_array);
+ if (rc < 0) {
+ pr_err("%s:%d memory map parse failed\n",
+ __func__, __LINE__);
+ /* Don't leave the EEPROM powered on the error path */
+ msm_camera_power_down(power_info,
+ e_ctrl->eeprom_device_type, &e_ctrl->i2c_client);
+ goto free_mem;
+ }
+
+ rc = msm_camera_power_down(power_info,
+ e_ctrl->eeprom_device_type, &e_ctrl->i2c_client);
+ if (rc < 0)
+ pr_err("%s:%d Power down failed rc %d\n",
+ __func__, __LINE__, rc);
+
+free_mem:
+ kfree(power_setting_array32);
+ kfree(power_setting_array);
+ kfree(mem_map_array);
+ power_setting_array32 = NULL;
+ power_setting_array = NULL;
+ mem_map_array = NULL;
+ return rc;
+}
+
+static int msm_eeprom_config32(struct msm_eeprom_ctrl_t *e_ctrl,
+ void __user *argp)
+{
+ struct msm_eeprom_cfg_data32 *cdata =
+ (struct msm_eeprom_cfg_data32 *)argp;
+ int rc = 0;
+ size_t length = 0;
+
+ CDBG("%s E\n", __func__);
+ switch (cdata->cfgtype) {
+ case CFG_EEPROM_GET_INFO:
+ if (e_ctrl->userspace_probe == 1) {
+ pr_err("%s:%d Eeprom name should be module driver",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ CDBG("%s E CFG_EEPROM_GET_INFO\n", __func__);
+ cdata->is_supported = e_ctrl->is_supported;
+ length = strlen(e_ctrl->eboard_info->eeprom_name) + 1;
+ if (length > MAX_EEPROM_NAME) {
+ pr_err("%s:%d invalid eeprom_name length %d\n",
+ __func__, __LINE__, (int)length);
+ rc = -EINVAL;
+ break;
+ }
+ memcpy(cdata->cfg.eeprom_name,
+ e_ctrl->eboard_info->eeprom_name, length);
+ break;
+ case CFG_EEPROM_GET_CAL_DATA:
+ CDBG("%s E CFG_EEPROM_GET_CAL_DATA\n", __func__);
+ cdata->cfg.get_data.num_bytes =
+ e_ctrl->cal_data.num_data;
+ break;
+ case CFG_EEPROM_READ_CAL_DATA:
+ CDBG("%s E CFG_EEPROM_READ_CAL_DATA\n", __func__);
+ rc = eeprom_config_read_cal_data32(e_ctrl, argp);
+ break;
+ case CFG_EEPROM_INIT:
+ if (e_ctrl->userspace_probe == 0) {
+ pr_err("%s:%d Eeprom already probed at kernel boot",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ if (e_ctrl->cal_data.num_data == 0) {
+ rc = eeprom_init_config32(e_ctrl, argp);
+ if (rc < 0)
+ pr_err("%s:%d Eeprom init failed\n",
+ __func__, __LINE__);
+ } else {
+ CDBG("%s:%d Already read eeprom\n",
+ __func__, __LINE__);
+ }
+ break;
+ default:
+ break;
+ }
+
+ CDBG("%s X rc: %d\n", __func__, rc);
+ return rc;
+}
+
+static long msm_eeprom_subdev_ioctl32(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+ void __user *argp = (void __user *)arg;
+
+ CDBG("%s E\n", __func__);
+ CDBG("%s:%d a_ctrl %pK argp %pK\n", __func__, __LINE__, e_ctrl, argp);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_eeprom_get_subdev_id(e_ctrl, argp);
+ case VIDIOC_MSM_EEPROM_CFG32:
+ return msm_eeprom_config32(e_ctrl, argp);
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ CDBG("%s X\n", __func__);
+}
+
+static long msm_eeprom_subdev_do_ioctl32(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return msm_eeprom_subdev_ioctl32(sd, cmd, arg);
+}
+
+static long msm_eeprom_subdev_fops_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_eeprom_subdev_do_ioctl32);
+}
+
+#endif
+
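+/*
+ * Platform (CCI) probe: allocate the control structure, parse the DT
+ * properties (cell-index, CCI master, EEPROM name, slave address) and,
+ * for kernel-probed modules, read and CRC-check the calibration data
+ * before registering the v4l2 subdev.
+ */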
+static int msm_eeprom_platform_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ int j = 0;
+ uint32_t temp;
+
+ struct msm_camera_cci_client *cci_client = NULL;
+ struct msm_eeprom_ctrl_t *e_ctrl = NULL;
+ struct msm_eeprom_board_info *eb_info = NULL;
+ struct device_node *of_node = pdev->dev.of_node;
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+
+ CDBG("%s E\n", __func__);
+
+ e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+ if (!e_ctrl)
+ return -ENOMEM;
+ e_ctrl->eeprom_v4l2_subdev_ops = &msm_eeprom_subdev_ops;
+ e_ctrl->eeprom_mutex = &msm_eeprom_mutex;
+
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.map = NULL;
+ e_ctrl->userspace_probe = 0;
+ e_ctrl->is_supported = 0;
+ if (!of_node) {
+ pr_err("%s dev.of_node NULL\n", __func__);
+ rc = -EINVAL;
+ goto ectrl_free;
+ }
+
+ /* Set platform device handle */
+ e_ctrl->pdev = pdev;
+ /* Set device type as platform device */
+ e_ctrl->eeprom_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ e_ctrl->i2c_client.i2c_func_tbl = &msm_eeprom_cci_func_tbl;
+ e_ctrl->i2c_client.cci_client = kzalloc(sizeof(
+ struct msm_camera_cci_client), GFP_KERNEL);
+ if (!e_ctrl->i2c_client.cci_client) {
+ rc = -ENOMEM;
+ goto ectrl_free;
+ }
+
+ e_ctrl->eboard_info = kzalloc(sizeof(
+ struct msm_eeprom_board_info), GFP_KERNEL);
+ if (!e_ctrl->eboard_info) {
+ rc = -ENOMEM;
+ goto cciclient_free;
+ }
+
+ eb_info = e_ctrl->eboard_info;
+ power_info = &eb_info->power_info;
+ cci_client = e_ctrl->i2c_client.cci_client;
+ cci_client->cci_subdev = msm_cci_get_subdev();
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ power_info->dev = &pdev->dev;
+
+ /* Get clocks information */
+ rc = msm_camera_get_clk_info(e_ctrl->pdev,
+ &power_info->clk_info,
+ &power_info->clk_ptr,
+ &power_info->clk_info_size);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_get_clk_info rc %d", rc);
+ goto board_free;
+ }
+
+ rc = of_property_read_u32(of_node, "cell-index",
+ &pdev->id);
+ CDBG("cell-index %d, rc %d\n", pdev->id, rc);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto board_free;
+ }
+ e_ctrl->subdev_id = pdev->id;
+
+ rc = of_property_read_u32(of_node, "qcom,cci-master",
+ &e_ctrl->cci_master);
+ CDBG("qcom,cci-master %d, rc %d\n", e_ctrl->cci_master, rc);
+ if (rc < 0) {
+ pr_err("%s failed rc %d\n", __func__, rc);
+ goto board_free;
+ }
+ cci_client->cci_i2c_master = e_ctrl->cci_master;
+
+ rc = of_property_read_string(of_node, "qcom,eeprom-name",
+ &eb_info->eeprom_name);
+ CDBG("%s qcom,eeprom-name %s, rc %d\n", __func__,
+ eb_info->eeprom_name, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ e_ctrl->userspace_probe = 1;
+ }
+
+ rc = msm_eeprom_get_dt_data(e_ctrl);
+ if (rc < 0)
+ goto board_free;
+
+ if (e_ctrl->userspace_probe == 0) {
+ rc = of_property_read_u32(of_node, "qcom,slave-addr",
+ &temp);
+ if (rc < 0) {
+ pr_err("%s failed rc %d\n", __func__, rc);
+ goto board_free;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,i2c-freq-mode",
+ &e_ctrl->i2c_freq_mode);
+ CDBG("qcom,i2c_freq_mode %d, rc %d\n",
+ e_ctrl->i2c_freq_mode, rc);
+ if (rc < 0) {
+ pr_err("%s qcom,i2c-freq-mode read fail. Setting to 0 %d\n",
+ __func__, rc);
+ e_ctrl->i2c_freq_mode = 0;
+ }
+ if (e_ctrl->i2c_freq_mode >= I2C_MAX_MODES) {
+ pr_err("%s:%d invalid i2c_freq_mode = %d\n",
+ __func__, __LINE__, e_ctrl->i2c_freq_mode);
+ e_ctrl->i2c_freq_mode = 0;
+ }
+ eb_info->i2c_slaveaddr = temp;
+ CDBG("qcom,slave-addr = 0x%X\n", eb_info->i2c_slaveaddr);
+ eb_info->i2c_freq_mode = e_ctrl->i2c_freq_mode;
+ cci_client->i2c_freq_mode = e_ctrl->i2c_freq_mode;
+ cci_client->sid = eb_info->i2c_slaveaddr >> 1;
+
+ rc = msm_eeprom_parse_memory_map(of_node, &e_ctrl->cal_data);
+ if (rc < 0)
+ goto board_free;
+
+ rc = msm_camera_power_up(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+ if (rc) {
+ pr_err("failed rc %d\n", rc);
+ goto memdata_free;
+ }
+ rc = read_eeprom_memory(e_ctrl, &e_ctrl->cal_data);
+ if (rc < 0) {
+ pr_err("%s read_eeprom_memory failed\n", __func__);
+ goto power_down;
+ }
+ for (j = 0; j < e_ctrl->cal_data.num_data; j++)
+ CDBG("memory_data[%d] = 0x%X\n", j,
+ e_ctrl->cal_data.mapdata[j]);
+
+ e_ctrl->is_supported |= msm_eeprom_match_crc(&e_ctrl->cal_data);
+
+ rc = msm_camera_power_down(power_info,
+ e_ctrl->eeprom_device_type, &e_ctrl->i2c_client);
+ if (rc) {
+ pr_err("failed rc %d\n", rc);
+ goto memdata_free;
+ }
+ } else {
+ e_ctrl->is_supported = 1;
+ }
+
+ v4l2_subdev_init(&e_ctrl->msm_sd.sd,
+ e_ctrl->eeprom_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&e_ctrl->msm_sd.sd, e_ctrl);
+ platform_set_drvdata(pdev, &e_ctrl->msm_sd.sd);
+ e_ctrl->msm_sd.sd.internal_ops = &msm_eeprom_internal_ops;
+ e_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(e_ctrl->msm_sd.sd.name,
+ ARRAY_SIZE(e_ctrl->msm_sd.sd.name), "msm_eeprom");
+ media_entity_init(&e_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ e_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ e_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_EEPROM;
+ msm_sd_register(&e_ctrl->msm_sd);
+
+#ifdef CONFIG_COMPAT
+ msm_cam_copy_v4l2_subdev_fops(&msm_eeprom_v4l2_subdev_fops);
+ msm_eeprom_v4l2_subdev_fops.compat_ioctl32 =
+ msm_eeprom_subdev_fops_ioctl32;
+ e_ctrl->msm_sd.sd.devnode->fops = &msm_eeprom_v4l2_subdev_fops;
+#endif
+
+ e_ctrl->is_supported = (e_ctrl->is_supported << 1) | 1;
+ CDBG("%s X\n", __func__);
+ return rc;
+
+power_down:
+ msm_camera_power_down(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+memdata_free:
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+board_free:
+ kfree(e_ctrl->eboard_info);
+cciclient_free:
+ kfree(e_ctrl->i2c_client.cci_client);
+ectrl_free:
+ kfree(e_ctrl);
+ return rc;
+}
+
+static int msm_eeprom_platform_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct msm_eeprom_ctrl_t *e_ctrl;
+
+ if (!sd) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return 0;
+ }
+
+ e_ctrl = (struct msm_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+ if (!e_ctrl) {
+ pr_err("%s: eeprom device is NULL\n", __func__);
+ return 0;
+ }
+
+ if (!e_ctrl->eboard_info) {
+ pr_err("%s: eboard_info is NULL\n", __func__);
+ return 0;
+ }
+ msm_camera_put_clk_info(e_ctrl->pdev,
+ &e_ctrl->eboard_info->power_info.clk_info,
+ &e_ctrl->eboard_info->power_info.clk_ptr,
+ e_ctrl->eboard_info->power_info.clk_info_size);
+
+ kfree(e_ctrl->i2c_client.cci_client);
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+ if (e_ctrl->eboard_info) {
+ kfree(e_ctrl->eboard_info->power_info.gpio_conf);
+ kfree(e_ctrl->eboard_info);
+ }
+ kfree(e_ctrl);
+ return 0;
+}
+
+static const struct of_device_id msm_eeprom_dt_match[] = {
+ { .compatible = "qcom,eeprom" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, msm_eeprom_dt_match);
+
+static struct platform_driver msm_eeprom_platform_driver = {
+ .driver = {
+ .name = "qcom,eeprom",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_eeprom_dt_match,
+ },
+ .probe = msm_eeprom_platform_probe,
+ .remove = msm_eeprom_platform_remove,
+};
+
+static const struct i2c_device_id msm_eeprom_i2c_id[] = {
+ { "msm_eeprom", (kernel_ulong_t)NULL},
+ { }
+};
+
+static struct i2c_driver msm_eeprom_i2c_driver = {
+ .id_table = msm_eeprom_i2c_id,
+ .probe = msm_eeprom_i2c_probe,
+ .remove = msm_eeprom_i2c_remove,
+ .driver = {
+ .name = "msm_eeprom",
+ },
+};
+
+static struct spi_driver msm_eeprom_spi_driver = {
+ .driver = {
+ .name = "qcom_eeprom",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_eeprom_dt_match,
+ },
+ .probe = msm_eeprom_spi_probe,
+ .remove = msm_eeprom_spi_remove,
+};
+
+static int __init msm_eeprom_init_module(void)
+{
+ int rc = 0;
+
+ CDBG("%s E\n", __func__);
+ rc = platform_driver_register(&msm_eeprom_platform_driver);
+ CDBG("%s:%d platform rc %d\n", __func__, __LINE__, rc);
+ rc = spi_register_driver(&msm_eeprom_spi_driver);
+ CDBG("%s:%d spi rc %d\n", __func__, __LINE__, rc);
+ return i2c_add_driver(&msm_eeprom_i2c_driver);
+}
+
+static void __exit msm_eeprom_exit_module(void)
+{
+ platform_driver_unregister(&msm_eeprom_platform_driver);
+ spi_unregister_driver(&msm_eeprom_spi_driver);
+ i2c_del_driver(&msm_eeprom_i2c_driver);
+}
+
+module_init(msm_eeprom_init_module);
+module_exit(msm_eeprom_exit_module);
+MODULE_DESCRIPTION("MSM EEPROM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/eeprom/msm_eeprom.h b/drivers/media/platform/msm/ais/sensor/eeprom/msm_eeprom.h
new file mode 100644
index 000000000000..302a663cde93
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/eeprom/msm_eeprom.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_EEPROM_H
+#define MSM_EEPROM_H
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <soc/qcom/ais.h>
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais.h>
+#include "msm_camera_i2c.h"
+#include "msm_camera_spi.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_dt_util.h"
+
+struct msm_eeprom_ctrl_t;
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+#define PROPERTY_MAXSIZE 32
+
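+/* Per-device EEPROM state shared by the platform, I2C and SPI probe paths */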
+struct msm_eeprom_ctrl_t {
+ struct platform_device *pdev;
+ struct mutex *eeprom_mutex;
+
+ struct v4l2_subdev sdev;
+ struct v4l2_subdev_ops *eeprom_v4l2_subdev_ops;
+ enum msm_camera_device_type_t eeprom_device_type;
+ struct msm_sd_subdev msm_sd;
+ enum cci_i2c_master_t cci_master;
+ enum i2c_freq_mode_t i2c_freq_mode;
+
+ struct msm_camera_i2c_client i2c_client;
+ struct msm_eeprom_board_info *eboard_info;
+ uint32_t subdev_id;
+ int32_t userspace_probe;
+ struct msm_eeprom_memory_block_t cal_data;
+ uint8_t is_supported;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/flash/Makefile b/drivers/media/platform/msm/ais/sensor/flash/Makefile
new file mode 100644
index 000000000000..fd6f381e6bdc
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/flash/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_flash.o
diff --git a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
new file mode 100644
index 000000000000..b97156cbd486
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
@@ -0,0 +1,1223 @@
+/* Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include "msm_flash.h"
+#include "msm_camera_dt_util.h"
+#include "msm_cci.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+DEFINE_MSM_MUTEX(msm_flash_mutex);
+
+static struct v4l2_file_operations msm_flash_v4l2_subdev_fops;
+static struct led_trigger *torch_trigger;
+
+static const struct of_device_id msm_flash_dt_match[] = {
+ {.compatible = "qcom,camera-flash", .data = NULL},
+ {}
+};
+
+static struct msm_flash_table msm_i2c_flash_table;
+static struct msm_flash_table msm_gpio_flash_table;
+static struct msm_flash_table msm_pmic_flash_table;
+
+static struct msm_flash_table *flash_table[] = {
+ &msm_i2c_flash_table,
+ &msm_gpio_flash_table,
+ &msm_pmic_flash_table
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_poll = msm_camera_cci_i2c_poll,
+};
+
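+/* Forward LED class brightness requests to the active torch trigger */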
+void msm_torch_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (!torch_trigger) {
+ pr_err("No torch trigger found, can't set brightness\n");
+ return;
+ }
+
+ led_trigger_event(torch_trigger, value);
+};
+
+static struct led_classdev msm_torch_led[MAX_LED_TRIGGERS] = {
+ {
+ .name = "torch-light0",
+ .brightness_set = msm_torch_brightness_set,
+ .brightness = LED_OFF,
+ },
+ {
+ .name = "torch-light1",
+ .brightness_set = msm_torch_brightness_set,
+ .brightness = LED_OFF,
+ },
+ {
+ .name = "torch-light2",
+ .brightness_set = msm_torch_brightness_set,
+ .brightness = LED_OFF,
+ },
+};
+
+static int32_t msm_torch_create_classdev(struct platform_device *pdev,
+ void *data)
+{
+ int32_t rc = 0;
+ int32_t i = 0;
+ struct msm_flash_ctrl_t *fctrl =
+ (struct msm_flash_ctrl_t *)data;
+
+ if (!fctrl) {
+ pr_err("Invalid fctrl\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < fctrl->torch_num_sources; i++) {
+ if (fctrl->torch_trigger[i]) {
+ torch_trigger = fctrl->torch_trigger[i];
+ CDBG("%s:%d msm_torch_brightness_set for torch %d",
+ __func__, __LINE__, i);
+ msm_torch_brightness_set(&msm_torch_led[i],
+ LED_OFF);
+
+ rc = led_classdev_register(&pdev->dev,
+ &msm_torch_led[i]);
+ if (rc) {
+ pr_err("Failed to register %d led dev. rc = %d\n",
+ i, rc);
+ return rc;
+ }
+ } else {
+ pr_err("Invalid fctrl->torch_trigger[%d]\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+};
+
+static int32_t msm_flash_get_subdev_id(
+ struct msm_flash_ctrl_t *flash_ctrl, void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ CDBG("Enter\n");
+ if (!subdev_id) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ if (flash_ctrl->flash_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ *subdev_id = flash_ctrl->pdev->id;
+ else
+ *subdev_id = flash_ctrl->subdev_id;
+
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_flash_i2c_write_table(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_camera_i2c_reg_setting_array *settings)
+{
+ struct msm_camera_i2c_reg_setting conf_array;
+
+ conf_array.addr_type = settings->addr_type;
+ conf_array.data_type = settings->data_type;
+ conf_array.delay = settings->delay;
+ conf_array.reg_setting = settings->reg_setting_a;
+ conf_array.size = settings->size;
+
+ return flash_ctrl->flash_i2c_client.i2c_func_tbl->i2c_write_table(
+ &flash_ctrl->flash_i2c_client, &conf_array);
+}
+
+#ifdef CONFIG_COMPAT
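+/* Flatten a 32-bit power-setting array into the native representation */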
+static void msm_flash_copy_power_settings_compat(
+ struct msm_sensor_power_setting *ps,
+ struct msm_sensor_power_setting32 *ps32, uint32_t size)
+{
+ uint16_t i = 0;
+
+ for (i = 0; i < size; i++) {
+ ps[i].config_val = ps32[i].config_val;
+ ps[i].delay = ps32[i].delay;
+ ps[i].seq_type = ps32[i].seq_type;
+ ps[i].seq_val = ps32[i].seq_val;
+ }
+}
+#endif
+
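+/*
+ * I2C flash init: copy the (possibly compat) power-setting array from
+ * userspace, validate its sizes, power the device up and apply the
+ * optional init register settings.
+ */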
+static int32_t msm_flash_i2c_init(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t rc = 0;
+ struct msm_flash_init_info_t *flash_init_info =
+ flash_data->cfg.flash_init_info;
+ struct msm_camera_i2c_reg_setting_array *settings = NULL;
+ struct msm_camera_cci_client *cci_client = NULL;
+
+ if (!flash_init_info || !flash_init_info->power_setting_array) {
+ pr_err("%s:%d failed: Null pointer\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ struct msm_sensor_power_setting_array32
+ *power_setting_array32 = NULL;
+
+ power_setting_array32 = kzalloc(
+ sizeof(struct msm_sensor_power_setting_array32),
+ GFP_KERNEL);
+ if (!power_setting_array32) {
+ pr_err("%s mem allocation failed %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(power_setting_array32,
+ (void *)flash_init_info->power_setting_array,
+ sizeof(struct msm_sensor_power_setting_array32))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ kfree(power_setting_array32);
+ return -EFAULT;
+ }
+
+ flash_ctrl->power_setting_array.size =
+ power_setting_array32->size;
+ flash_ctrl->power_setting_array.size_down =
+ power_setting_array32->size_down;
+ flash_ctrl->power_setting_array.power_down_setting =
+ compat_ptr(power_setting_array32->power_down_setting);
+ flash_ctrl->power_setting_array.power_setting =
+ compat_ptr(power_setting_array32->power_setting);
+
+ /* Validate power_up array size and power_down array size */
+ if ((!flash_ctrl->power_setting_array.size) ||
+ (flash_ctrl->power_setting_array.size >
+ MAX_POWER_CONFIG) ||
+ (!flash_ctrl->power_setting_array.size_down) ||
+ (flash_ctrl->power_setting_array.size_down >
+ MAX_POWER_CONFIG)) {
+
+ pr_err("failed: invalid size %d, size_down %d",
+ flash_ctrl->power_setting_array.size,
+ flash_ctrl->power_setting_array.size_down);
+ kfree(power_setting_array32);
+ return -EINVAL;
+ }
+ /* Copy the settings from compat struct to regular struct */
+ msm_flash_copy_power_settings_compat(
+ flash_ctrl->power_setting_array.power_setting_a,
+ power_setting_array32->power_setting_a,
+ flash_ctrl->power_setting_array.size);
+
+ msm_flash_copy_power_settings_compat(
+ flash_ctrl->power_setting_array.power_down_setting_a,
+ power_setting_array32->power_down_setting_a,
+ flash_ctrl->power_setting_array.size_down);
+
+ kfree(power_setting_array32);
+ } else
+#endif
+ if (copy_from_user(&flash_ctrl->power_setting_array,
+ (void *)flash_init_info->power_setting_array,
+ sizeof(struct msm_sensor_power_setting_array))) {
+ pr_err("%s copy_from_user failed %d\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if (flash_ctrl->flash_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ cci_client = flash_ctrl->flash_i2c_client.cci_client;
+ cci_client->sid = flash_init_info->slave_addr >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->i2c_freq_mode = flash_init_info->i2c_freq_mode;
+ }
+
+ flash_ctrl->power_info.power_setting =
+ flash_ctrl->power_setting_array.power_setting_a;
+ flash_ctrl->power_info.power_down_setting =
+ flash_ctrl->power_setting_array.power_down_setting_a;
+ flash_ctrl->power_info.power_setting_size =
+ flash_ctrl->power_setting_array.size;
+ flash_ctrl->power_info.power_down_setting_size =
+ flash_ctrl->power_setting_array.size_down;
+
+ if ((flash_ctrl->power_info.power_setting_size > MAX_POWER_CONFIG) ||
+ (flash_ctrl->power_info.power_down_setting_size > MAX_POWER_CONFIG)) {
+ pr_err("%s:%d invalid power setting size=%d size_down=%d\n",
+ __func__, __LINE__,
+ flash_ctrl->power_info.power_setting_size,
+ flash_ctrl->power_info.power_down_setting_size);
+ rc = -EINVAL;
+ goto msm_flash_i2c_init_fail;
+ }
+
+ rc = msm_camera_power_up(&flash_ctrl->power_info,
+ flash_ctrl->flash_device_type,
+ &flash_ctrl->flash_i2c_client);
+ if (rc < 0) {
+ pr_err("%s msm_camera_power_up failed %d\n",
+ __func__, __LINE__);
+ goto msm_flash_i2c_init_fail;
+ }
+
+ if (flash_data->cfg.flash_init_info->settings) {
+ settings = kzalloc(sizeof(
+ struct msm_camera_i2c_reg_setting_array), GFP_KERNEL);
+ if (!settings) {
+ rc = -ENOMEM;
+ goto msm_flash_i2c_init_fail;
+ }
+
+ if (copy_from_user(settings, (void *)flash_init_info->settings,
+ sizeof(struct msm_camera_i2c_reg_setting_array))) {
+ kfree(settings);
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ rc = msm_flash_i2c_write_table(flash_ctrl, settings);
+ kfree(settings);
+
+ if (rc < 0) {
+ pr_err("%s:%d msm_flash_i2c_write_table rc %d failed\n",
+ __func__, __LINE__, rc);
+ }
+ }
+
+ return 0;
+
+msm_flash_i2c_init_fail:
+ return rc;
+}
+
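+/*
+ * GPIO flash init: seed default flash/torch currents, fall back to the
+ * flash triggers where no dedicated torch trigger exists, and leave all
+ * sources switched off.
+ */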
+static int32_t msm_flash_gpio_init(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t i = 0;
+ int32_t rc = 0;
+
+ CDBG("Enter");
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+ flash_ctrl->flash_op_current[i] = LED_FULL;
+
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+ flash_ctrl->torch_op_current[i] = LED_HALF;
+
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++) {
+ if (!flash_ctrl->torch_trigger[i]) {
+ if (i < flash_ctrl->flash_num_sources)
+ flash_ctrl->torch_trigger[i] =
+ flash_ctrl->flash_trigger[i];
+ else
+ flash_ctrl->torch_trigger[i] =
+ flash_ctrl->flash_trigger[
+ flash_ctrl->flash_num_sources - 1];
+ }
+ }
+
+ rc = flash_ctrl->func_tbl->camera_flash_off(flash_ctrl, flash_data);
+
+ CDBG("Exit");
+ return rc;
+}
+
+static int32_t msm_flash_i2c_release(
+ struct msm_flash_ctrl_t *flash_ctrl)
+{
+ int32_t rc = 0;
+
+ if (!(&flash_ctrl->power_info) || !(&flash_ctrl->flash_i2c_client)) {
+ pr_err("%s:%d failed: %pK %pK\n",
+ __func__, __LINE__, &flash_ctrl->power_info,
+ &flash_ctrl->flash_i2c_client);
+ return -EINVAL;
+ }
+
+ rc = msm_camera_power_down(&flash_ctrl->power_info,
+ flash_ctrl->flash_device_type,
+ &flash_ctrl->flash_i2c_client);
+ if (rc < 0) {
+ pr_err("%s msm_camera_power_down failed %d\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
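+/* Turn every flash and torch source off and release the switch trigger */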
+static int32_t msm_flash_off(struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t i = 0;
+
+ CDBG("Enter\n");
+
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+ if (flash_ctrl->flash_trigger[i])
+ led_trigger_event(flash_ctrl->flash_trigger[i], 0);
+
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+ if (flash_ctrl->torch_trigger[i])
+ led_trigger_event(flash_ctrl->torch_trigger[i], 0);
+ if (flash_ctrl->switch_trigger)
+ led_trigger_event(flash_ctrl->switch_trigger, 0);
+
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_flash_i2c_write_setting_array(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t rc = 0;
+ struct msm_camera_i2c_reg_setting_array *settings = NULL;
+
+ if (!flash_data->cfg.settings) {
+ pr_err("%s:%d failed: Null pointer\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ settings = kzalloc(sizeof(struct msm_camera_i2c_reg_setting_array),
+ GFP_KERNEL);
+ if (!settings)
+ return -ENOMEM;
+
+ if (copy_from_user(settings, (void *)flash_data->cfg.settings,
+ sizeof(struct msm_camera_i2c_reg_setting_array))) {
+ kfree(settings);
+ pr_err("%s copy_from_user failed %d\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ rc = msm_flash_i2c_write_table(flash_ctrl, settings);
+ kfree(settings);
+
+ if (rc < 0) {
+ pr_err("%s:%d msm_flash_i2c_write_table rc = %d failed\n",
+ __func__, __LINE__, rc);
+ }
+ return rc;
+}
+
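+/*
+ * Common init: resolve the flash driver type, exchange current/duration
+ * limits with userspace, bind the matching function table and run its
+ * driver-specific init hook.
+ */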
+static int32_t msm_flash_init(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ uint32_t i = 0;
+ int32_t rc = -EFAULT;
+ enum msm_flash_driver_type flash_driver_type = FLASH_DRIVER_DEFAULT;
+
+ CDBG("Enter");
+
+ if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT) {
+ pr_err("%s:%d Invalid flash state = %d",
+ __func__, __LINE__, flash_ctrl->flash_state);
+ return 0;
+ }
+
+ if (flash_data->cfg.flash_init_info->flash_driver_type ==
+ FLASH_DRIVER_DEFAULT) {
+ flash_driver_type = flash_ctrl->flash_driver_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data->flash_current[i] =
+ flash_ctrl->flash_max_current[i];
+ flash_data->flash_duration[i] =
+ flash_ctrl->flash_max_duration[i];
+ }
+ } else if (flash_data->cfg.flash_init_info->flash_driver_type ==
+ flash_ctrl->flash_driver_type) {
+ flash_driver_type = flash_ctrl->flash_driver_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_ctrl->flash_max_current[i] =
+ flash_data->flash_current[i];
+ flash_ctrl->flash_max_duration[i] =
+ flash_data->flash_duration[i];
+ }
+ }
+
+ if (flash_driver_type == FLASH_DRIVER_DEFAULT) {
+ pr_err("%s:%d invalid flash_driver_type", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(flash_table); i++) {
+ if (flash_driver_type == flash_table[i]->flash_driver_type) {
+ flash_ctrl->func_tbl = &flash_table[i]->func_tbl;
+ rc = 0;
+ }
+ }
+
+ if (rc < 0) {
+ pr_err("%s:%d failed invalid flash_driver_type %d\n",
+ __func__, __LINE__,
+ flash_data->cfg.flash_init_info->flash_driver_type);
+ }
+
+ if (flash_ctrl->func_tbl->camera_flash_init) {
+ rc = flash_ctrl->func_tbl->camera_flash_init(
+ flash_ctrl, flash_data);
+ if (rc < 0) {
+ pr_err("%s:%d camera_flash_init failed rc = %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ }
+
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_INIT;
+
+ CDBG("Exit");
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static int32_t msm_flash_init_prepare(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ return msm_flash_init(flash_ctrl, flash_data);
+}
+#else
+static int32_t msm_flash_init_prepare(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ struct msm_flash_cfg_data_t flash_data_k;
+ struct msm_flash_init_info_t flash_init_info;
+ int32_t i = 0;
+
+ flash_data_k.cfg_type = flash_data->cfg_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data_k.flash_current[i] =
+ flash_data->flash_current[i];
+ flash_data_k.flash_duration[i] =
+ flash_data->flash_duration[i];
+ }
+
+ flash_data_k.cfg.flash_init_info = &flash_init_info;
+ if (copy_from_user(&flash_init_info,
+ (void *)(flash_data->cfg.flash_init_info),
+ sizeof(struct msm_flash_init_info_t))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ return msm_flash_init(flash_ctrl, &flash_data_k);
+}
+#endif
+
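+/*
+ * Low (torch) mode: flash sources are switched off and each torch source
+ * is driven at the requested current, clamped to its operational current
+ * when the request is out of range.
+ */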
+static int32_t msm_flash_low(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ uint32_t curr = 0, max_current = 0;
+ int32_t i = 0;
+
+ CDBG("Enter\n");
+ /* Turn off flash triggers */
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+ if (flash_ctrl->flash_trigger[i])
+ led_trigger_event(flash_ctrl->flash_trigger[i], 0);
+
+ /* Turn on flash triggers */
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++) {
+ if (flash_ctrl->torch_trigger[i]) {
+ max_current = flash_ctrl->torch_max_current[i];
+ if (flash_data->flash_current[i] >= 0 &&
+ flash_data->flash_current[i] <
+ max_current) {
+ curr = flash_data->flash_current[i];
+ } else {
+ curr = flash_ctrl->torch_op_current[i];
+ pr_debug("LED current clamped to %d\n",
+ curr);
+ }
+ CDBG("low_flash_current[%d] = %d", i, curr);
+ led_trigger_event(flash_ctrl->torch_trigger[i],
+ curr);
+ }
+ }
+ if (flash_ctrl->switch_trigger)
+ led_trigger_event(flash_ctrl->switch_trigger, 1);
+ CDBG("Exit\n");
+ return 0;
+}
+
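+/*
+ * High (flash) mode: torch sources are switched off and each flash source
+ * is driven at the requested current, clamped to its operational current
+ * when the request is out of range.
+ */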
+static int32_t msm_flash_high(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t curr = 0;
+ int32_t max_current = 0;
+ int32_t i = 0;
+
+ /* Turn off torch triggers */
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+ if (flash_ctrl->torch_trigger[i])
+ led_trigger_event(flash_ctrl->torch_trigger[i], 0);
+
+ /* Turn on flash triggers */
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++) {
+ if (flash_ctrl->flash_trigger[i]) {
+ max_current = flash_ctrl->flash_max_current[i];
+ if (flash_data->flash_current[i] >= 0 &&
+ flash_data->flash_current[i] <
+ max_current) {
+ curr = flash_data->flash_current[i];
+ } else {
+ curr = flash_ctrl->flash_op_current[i];
+ pr_debug("LED flash_current[%d] clamped %d\n",
+ i, curr);
+ }
+ CDBG("high_flash_current[%d] = %d", i, curr);
+ led_trigger_event(flash_ctrl->flash_trigger[i],
+ curr);
+ }
+ }
+ if (flash_ctrl->switch_trigger)
+ led_trigger_event(flash_ctrl->switch_trigger, 1);
+ return 0;
+}
+
+static int32_t msm_flash_release(
+ struct msm_flash_ctrl_t *flash_ctrl)
+{
+ int32_t rc = 0;
+
+ rc = flash_ctrl->func_tbl->camera_flash_off(flash_ctrl, NULL);
+ if (rc < 0) {
+ pr_err("%s:%d camera_flash_init failed rc = %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_RELEASE;
+ return 0;
+}
+
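+/*
+ * Dispatch VIDIOC_MSM_FLASH_CFG requests under the flash mutex, enforcing
+ * the INIT/OFF/LOW/HIGH/RELEASE state machine before calling into the
+ * per-driver function table.
+ */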
+static int32_t msm_flash_config(struct msm_flash_ctrl_t *flash_ctrl,
+ void __user *argp)
+{
+ int32_t rc = 0;
+ struct msm_flash_cfg_data_t *flash_data =
+ (struct msm_flash_cfg_data_t *) argp;
+
+ mutex_lock(flash_ctrl->flash_mutex);
+
+ CDBG("Enter %s type %d\n", __func__, flash_data->cfg_type);
+
+ switch (flash_data->cfg_type) {
+ case CFG_FLASH_INIT:
+ rc = msm_flash_init_prepare(flash_ctrl, flash_data);
+ break;
+ case CFG_FLASH_RELEASE:
+ if (flash_ctrl->flash_state != MSM_CAMERA_FLASH_RELEASE) {
+ rc = flash_ctrl->func_tbl->camera_flash_release(
+ flash_ctrl);
+ } else {
+ CDBG(pr_fmt("Invalid state : %d\n"),
+ flash_ctrl->flash_state);
+ }
+ break;
+ case CFG_FLASH_OFF:
+ if ((flash_ctrl->flash_state != MSM_CAMERA_FLASH_RELEASE) &&
+ (flash_ctrl->flash_state != MSM_CAMERA_FLASH_OFF)) {
+ rc = flash_ctrl->func_tbl->camera_flash_off(
+ flash_ctrl, flash_data);
+ if (!rc)
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_OFF;
+ } else {
+ CDBG(pr_fmt("Invalid state : %d\n"),
+ flash_ctrl->flash_state);
+ }
+ break;
+ case CFG_FLASH_LOW:
+ if ((flash_ctrl->flash_state == MSM_CAMERA_FLASH_OFF) ||
+ (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)) {
+ rc = flash_ctrl->func_tbl->camera_flash_low(
+ flash_ctrl, flash_data);
+ if (!rc)
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_LOW;
+ } else {
+ CDBG(pr_fmt("Invalid state : %d\n"),
+ flash_ctrl->flash_state);
+ }
+ break;
+ case CFG_FLASH_HIGH:
+ if ((flash_ctrl->flash_state == MSM_CAMERA_FLASH_OFF) ||
+ (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)) {
+ rc = flash_ctrl->func_tbl->camera_flash_high(
+ flash_ctrl, flash_data);
+ if (!rc)
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_HIGH;
+ } else {
+ CDBG(pr_fmt("Invalid state : %d\n"),
+ flash_ctrl->flash_state);
+ }
+ break;
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+ mutex_unlock(flash_ctrl->flash_mutex);
+
+ CDBG("Exit %s type %d\n", __func__, flash_data->cfg_type);
+
+ return rc;
+}
+
+static long msm_flash_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_flash_ctrl_t *fctrl = NULL;
+ void __user *argp = (void __user *)arg;
+
+ CDBG("Enter\n");
+
+ if (!sd) {
+ pr_err("sd NULL\n");
+ return -EINVAL;
+ }
+ fctrl = v4l2_get_subdevdata(sd);
+ if (!fctrl) {
+ pr_err("fctrl NULL\n");
+ return -EINVAL;
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_flash_get_subdev_id(fctrl, argp);
+ case VIDIOC_MSM_FLASH_CFG:
+ return msm_flash_config(fctrl, argp);
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_SHUTDOWN:
+ if (!fctrl->func_tbl) {
+ pr_err("fctrl->func_tbl NULL\n");
+ return -EINVAL;
+ } else {
+ return fctrl->func_tbl->camera_flash_release(fctrl);
+ }
+ default:
+ pr_err_ratelimited("invalid cmd %d\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+ CDBG("Exit\n");
+}
+
+static struct v4l2_subdev_core_ops msm_flash_subdev_core_ops = {
+ .ioctl = msm_flash_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_flash_subdev_ops = {
+ .core = &msm_flash_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_flash_internal_ops;
+
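+/*
+ * Parse the qcom,flash-source and qcom,torch-source phandles: resolve the
+ * LED trigger names and per-source current limits and register the
+ * triggers used to drive the PMIC LEDs.
+ */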
+static int32_t msm_flash_get_pmic_source_info(
+ struct device_node *of_node,
+ struct msm_flash_ctrl_t *fctrl)
+{
+ int32_t rc = 0;
+ uint32_t count = 0, i = 0;
+ struct device_node *flash_src_node = NULL;
+ struct device_node *torch_src_node = NULL;
+ struct device_node *switch_src_node = NULL;
+
+ switch_src_node = of_parse_phandle(of_node, "qcom,switch-source", 0);
+ if (!switch_src_node) {
+ CDBG("%s:%d switch_src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_string(switch_src_node,
+ "qcom,default-led-trigger",
+ &fctrl->switch_trigger_name);
+ if (rc < 0) {
+ rc = of_property_read_string(switch_src_node,
+ "linux,default-trigger",
+ &fctrl->switch_trigger_name);
+ if (rc < 0)
+ pr_err("default-trigger read failed\n");
+ }
+ of_node_put(switch_src_node);
+ switch_src_node = NULL;
+ if (!rc) {
+ CDBG("switch trigger %s\n",
+ fctrl->switch_trigger_name);
+ led_trigger_register_simple(
+ fctrl->switch_trigger_name,
+ &fctrl->switch_trigger);
+ }
+ }
+
+ if (of_get_property(of_node, "qcom,flash-source", &count)) {
+ count /= sizeof(uint32_t);
+ CDBG("count %d\n", count);
+ if (count > MAX_LED_TRIGGERS) {
+ pr_err("invalid count\n");
+ return -EINVAL;
+ }
+ fctrl->flash_num_sources = count;
+ CDBG("%s:%d flash_num_sources = %d",
+ __func__, __LINE__, fctrl->flash_num_sources);
+ for (i = 0; i < count; i++) {
+ flash_src_node = of_parse_phandle(of_node,
+ "qcom,flash-source", i);
+ if (!flash_src_node) {
+ pr_err("flash_src_node NULL\n");
+ continue;
+ }
+
+ rc = of_property_read_string(flash_src_node,
+ "qcom,default-led-trigger",
+ &fctrl->flash_trigger_name[i]);
+ if (rc < 0) {
+ rc = of_property_read_string(flash_src_node,
+ "linux,default-trigger",
+ &fctrl->flash_trigger_name[i]);
+ if (rc < 0) {
+ pr_err("default-trigger read failed\n");
+ of_node_put(flash_src_node);
+ continue;
+ }
+ }
+
+ CDBG("default trigger %s\n",
+ fctrl->flash_trigger_name[i]);
+
+ /* Read operational-current */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,current",
+ &fctrl->flash_op_current[i]);
+ if (rc < 0) {
+ pr_err("current: read failed\n");
+ of_node_put(flash_src_node);
+ continue;
+ }
+
+ /* Read max-current */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,max-current",
+ &fctrl->flash_max_current[i]);
+ if (rc < 0) {
+ pr_err("current: read failed\n");
+ of_node_put(flash_src_node);
+ continue;
+ }
+
+ /* Read max-duration */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,duration",
+ &fctrl->flash_max_duration[i]);
+ if (rc < 0) {
+ pr_err("duration: read failed\n");
+ of_node_put(flash_src_node);
+ /* Non-fatal; this property is optional */
+ }
+
+ of_node_put(flash_src_node);
+
+ CDBG("max_current[%d] %d\n",
+ i, fctrl->flash_op_current[i]);
+
+ led_trigger_register_simple(
+ fctrl->flash_trigger_name[i],
+ &fctrl->flash_trigger[i]);
+ }
+ if (fctrl->flash_driver_type == FLASH_DRIVER_DEFAULT)
+ fctrl->flash_driver_type = FLASH_DRIVER_PMIC;
+ CDBG("%s:%d fctrl->flash_driver_type = %d", __func__, __LINE__,
+ fctrl->flash_driver_type);
+ }
+
+ if (of_get_property(of_node, "qcom,torch-source", &count)) {
+ count /= sizeof(uint32_t);
+ CDBG("count %d\n", count);
+ if (count > MAX_LED_TRIGGERS) {
+ pr_err("invalid count\n");
+ return -EINVAL;
+ }
+ fctrl->torch_num_sources = count;
+ CDBG("%s:%d torch_num_sources = %d",
+ __func__, __LINE__, fctrl->torch_num_sources);
+ for (i = 0; i < count; i++) {
+ torch_src_node = of_parse_phandle(of_node,
+ "qcom,torch-source", i);
+ if (!torch_src_node) {
+ pr_err("torch_src_node NULL\n");
+ continue;
+ }
+
+ rc = of_property_read_string(torch_src_node,
+ "qcom,default-led-trigger",
+ &fctrl->torch_trigger_name[i]);
+ if (rc < 0) {
+ rc = of_property_read_string(torch_src_node,
+ "linux,default-trigger",
+ &fctrl->torch_trigger_name[i]);
+ if (rc < 0) {
+ pr_err("default-trigger read failed\n");
+ of_node_put(torch_src_node);
+ continue;
+ }
+ }
+
+ CDBG("default trigger %s\n",
+ fctrl->torch_trigger_name[i]);
+
+ /* Read operational-current */
+ rc = of_property_read_u32(torch_src_node,
+ "qcom,current",
+ &fctrl->torch_op_current[i]);
+ if (rc < 0) {
+ pr_err("current: read failed\n");
+ of_node_put(torch_src_node);
+ continue;
+ }
+
+ /* Read max-current */
+ rc = of_property_read_u32(torch_src_node,
+ "qcom,max-current",
+ &fctrl->torch_max_current[i]);
+ if (rc < 0) {
+ pr_err("current: read failed\n");
+ of_node_put(torch_src_node);
+ continue;
+ }
+
+ of_node_put(torch_src_node);
+
+ CDBG("max_current[%d] %d\n",
+ i, fctrl->torch_op_current[i]);
+
+ led_trigger_register_simple(
+ fctrl->torch_trigger_name[i],
+ &fctrl->torch_trigger[i]);
+ }
+ if (fctrl->flash_driver_type == FLASH_DRIVER_DEFAULT)
+ fctrl->flash_driver_type = FLASH_DRIVER_PMIC;
+ CDBG("%s:%d fctrl->flash_driver_type = %d", __func__, __LINE__,
+ fctrl->flash_driver_type);
+ }
+
+ return 0;
+}
+
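+/*
+ * Top-level DT parsing: read cell-index and the optional CCI master, then
+ * gather PMIC source and GPIO information to settle on the flash driver
+ * type (PMIC, I2C or GPIO).
+ */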
+static int32_t msm_flash_get_dt_data(struct device_node *of_node,
+ struct msm_flash_ctrl_t *fctrl)
+{
+ int32_t rc = 0;
+
+ CDBG("called\n");
+
+ if (!of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ /* Read the sub device */
+ rc = of_property_read_u32(of_node, "cell-index", &fctrl->pdev->id);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+
+ CDBG("subdev id %d\n", fctrl->subdev_id);
+
+ fctrl->flash_driver_type = FLASH_DRIVER_DEFAULT;
+
+ /* Read the CCI master. Use M0 if not available in the node */
+ rc = of_property_read_u32(of_node, "qcom,cci-master",
+ &fctrl->cci_i2c_master);
+ CDBG("%s qcom,cci-master %d, rc %d\n", __func__, fctrl->cci_i2c_master,
+ rc);
+ if (rc < 0) {
+ /* Set default master 0 */
+ fctrl->cci_i2c_master = MASTER_0;
+ rc = 0;
+ } else {
+ fctrl->flash_driver_type = FLASH_DRIVER_I2C;
+ }
+
+ /* Read the flash and torch source info from device tree node */
+ rc = msm_flash_get_pmic_source_info(of_node, fctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_flash_get_pmic_source_info failed rc %d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ /* Read the gpio information from device tree */
+ rc = msm_sensor_driver_get_gpio_data(
+ &(fctrl->power_info.gpio_conf), of_node);
+ if (-ENODEV == rc) {
+ pr_notice("No valid flash GPIOs data\n");
+ rc = 0;
+ } else if (rc < 0) {
+ pr_err("Error flash GPIOs rc %d\n", rc);
+ return rc;
+ }
+
+ if (fctrl->flash_driver_type == FLASH_DRIVER_DEFAULT)
+ fctrl->flash_driver_type = FLASH_DRIVER_GPIO;
+ CDBG("%s:%d fctrl->flash_driver_type = %d", __func__, __LINE__,
+ fctrl->flash_driver_type);
+
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
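+/*
+ * Translate 32-bit VIDIOC_MSM_FLASH_CFG32 payloads into the native
+ * structures before forwarding them to msm_flash_subdev_ioctl().
+ */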
+static long msm_flash_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ int32_t i = 0;
+ int32_t rc = 0;
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ struct msm_flash_cfg_data_t32 *u32;
+ struct msm_flash_cfg_data_t flash_data;
+ struct msm_flash_init_info_t32 flash_init_info32;
+ struct msm_flash_init_info_t flash_init_info;
+
+ CDBG("Enter");
+
+ if (!file || !arg) {
+ pr_err("%s:failed NULL parameter\n", __func__);
+ return -EINVAL;
+ }
+ vdev = video_devdata(file);
+ sd = vdev_to_v4l2_subdev(vdev);
+ u32 = (struct msm_flash_cfg_data_t32 *)arg;
+
+ flash_data.cfg_type = u32->cfg_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data.flash_current[i] = u32->flash_current[i];
+ flash_data.flash_duration[i] = u32->flash_duration[i];
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_FLASH_CFG32:
+ cmd = VIDIOC_MSM_FLASH_CFG;
+ switch (flash_data.cfg_type) {
+ case CFG_FLASH_OFF:
+ case CFG_FLASH_LOW:
+ case CFG_FLASH_HIGH:
+ flash_data.cfg.settings = compat_ptr(u32->cfg.settings);
+ break;
+ case CFG_FLASH_INIT:
+ flash_data.cfg.flash_init_info = &flash_init_info;
+ if (copy_from_user(&flash_init_info32,
+ (void *)compat_ptr(u32->cfg.flash_init_info),
+ sizeof(struct msm_flash_init_info_t32))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ flash_init_info.flash_driver_type =
+ flash_init_info32.flash_driver_type;
+ flash_init_info.slave_addr =
+ flash_init_info32.slave_addr;
+ flash_init_info.i2c_freq_mode =
+ flash_init_info32.i2c_freq_mode;
+ flash_init_info.settings =
+ compat_ptr(flash_init_info32.settings);
+ flash_init_info.power_setting_array =
+ compat_ptr(
+ flash_init_info32.power_setting_array);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ return msm_flash_subdev_ioctl(sd, cmd, arg);
+ }
+
+ rc = msm_flash_subdev_ioctl(sd, cmd, &flash_data);
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ u32->flash_current[i] = flash_data.flash_current[i];
+ u32->flash_duration[i] = flash_data.flash_duration[i];
+ }
+ CDBG("Exit");
+ return rc;
+}
+
+static long msm_flash_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_flash_subdev_do_ioctl);
+}
+#endif
+
+static int32_t msm_flash_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_flash_ctrl_t *flash_ctrl = NULL;
+ struct msm_camera_cci_client *cci_client = NULL;
+
+ CDBG("Enter");
+ if (!pdev->dev.of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ flash_ctrl = kzalloc(sizeof(struct msm_flash_ctrl_t), GFP_KERNEL);
+ if (!flash_ctrl)
+ return -ENOMEM;
+
+ flash_ctrl->pdev = pdev;
+
+ rc = msm_flash_get_dt_data(pdev->dev.of_node, flash_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_flash_get_dt_data failed\n",
+ __func__, __LINE__);
+ kfree(flash_ctrl);
+ return -EINVAL;
+ }
+
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_RELEASE;
+ flash_ctrl->power_info.dev = &flash_ctrl->pdev->dev;
+ flash_ctrl->flash_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ flash_ctrl->flash_mutex = &msm_flash_mutex;
+ flash_ctrl->flash_i2c_client.i2c_func_tbl = &msm_sensor_cci_func_tbl;
+ flash_ctrl->flash_i2c_client.cci_client = kzalloc(
+ sizeof(struct msm_camera_cci_client), GFP_KERNEL);
+ if (!flash_ctrl->flash_i2c_client.cci_client) {
+ kfree(flash_ctrl->power_info.gpio_conf);
+ kfree(flash_ctrl);
+ pr_err("failed no memory\n");
+ return -ENOMEM;
+ }
+
+ cci_client = flash_ctrl->flash_i2c_client.cci_client;
+ cci_client->cci_subdev = msm_cci_get_subdev();
+ cci_client->cci_i2c_master = flash_ctrl->cci_i2c_master;
+
+ /* Initialize sub device */
+ v4l2_subdev_init(&flash_ctrl->msm_sd.sd, &msm_flash_subdev_ops);
+ v4l2_set_subdevdata(&flash_ctrl->msm_sd.sd, flash_ctrl);
+
+ flash_ctrl->msm_sd.sd.internal_ops = &msm_flash_internal_ops;
+ flash_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(flash_ctrl->msm_sd.sd.name,
+ ARRAY_SIZE(flash_ctrl->msm_sd.sd.name),
+ "msm_camera_flash");
+ media_entity_init(&flash_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ flash_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ flash_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_FLASH;
+ flash_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x1;
+ msm_sd_register(&flash_ctrl->msm_sd);
+
+ CDBG("%s:%d flash sd name = %s", __func__, __LINE__,
+ flash_ctrl->msm_sd.sd.entity.name);
+ msm_cam_copy_v4l2_subdev_fops(&msm_flash_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_flash_v4l2_subdev_fops.compat_ioctl32 =
+ msm_flash_subdev_fops_ioctl;
+#endif
+ flash_ctrl->msm_sd.sd.devnode->fops = &msm_flash_v4l2_subdev_fops;
+
+ if (flash_ctrl->flash_driver_type == FLASH_DRIVER_PMIC)
+ rc = msm_torch_create_classdev(pdev, flash_ctrl);
+
+ CDBG("probe success\n");
+ return rc;
+}
+
+MODULE_DEVICE_TABLE(of, msm_flash_dt_match);
+
+static struct platform_driver msm_flash_platform_driver = {
+ .probe = msm_flash_platform_probe,
+ .driver = {
+ .name = "qcom,camera-flash",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_flash_dt_match,
+ },
+};
+
+static int __init msm_flash_init_module(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_flash_platform_driver);
+ if (rc)
+ pr_err("platform probe for flash failed");
+
+ return rc;
+}
+
+static void __exit msm_flash_exit_module(void)
+{
+ platform_driver_unregister(&msm_flash_platform_driver);
+}
+
+static struct msm_flash_table msm_pmic_flash_table = {
+ .flash_driver_type = FLASH_DRIVER_PMIC,
+ .func_tbl = {
+ .camera_flash_init = NULL,
+ .camera_flash_release = msm_flash_release,
+ .camera_flash_off = msm_flash_off,
+ .camera_flash_low = msm_flash_low,
+ .camera_flash_high = msm_flash_high,
+ },
+};
+
+static struct msm_flash_table msm_gpio_flash_table = {
+ .flash_driver_type = FLASH_DRIVER_GPIO,
+ .func_tbl = {
+ .camera_flash_init = msm_flash_gpio_init,
+ .camera_flash_release = msm_flash_release,
+ .camera_flash_off = msm_flash_off,
+ .camera_flash_low = msm_flash_low,
+ .camera_flash_high = msm_flash_high,
+ },
+};
+
+static struct msm_flash_table msm_i2c_flash_table = {
+ .flash_driver_type = FLASH_DRIVER_I2C,
+ .func_tbl = {
+ .camera_flash_init = msm_flash_i2c_init,
+ .camera_flash_release = msm_flash_i2c_release,
+ .camera_flash_off = msm_flash_i2c_write_setting_array,
+ .camera_flash_low = msm_flash_i2c_write_setting_array,
+ .camera_flash_high = msm_flash_i2c_write_setting_array,
+ },
+};
+
+module_init(msm_flash_init_module);
+module_exit(msm_flash_exit_module);
+MODULE_DESCRIPTION("MSM FLASH");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.h b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.h
new file mode 100644
index 000000000000..6571b34f55ff
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MSM_FLASH_H
+#define MSM_FLASH_H
+
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/ais/msm_ais_sensor.h>
+#include <soc/qcom/ais.h>
+#include "msm_camera_i2c.h"
+#include "msm_sd.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+enum msm_camera_flash_state_t {
+ MSM_CAMERA_FLASH_INIT,
+ MSM_CAMERA_FLASH_OFF,
+ MSM_CAMERA_FLASH_LOW,
+ MSM_CAMERA_FLASH_HIGH,
+ MSM_CAMERA_FLASH_RELEASE,
+};
+
+struct msm_flash_ctrl_t;
+
+struct msm_flash_func_t {
+ int32_t (*camera_flash_init)(struct msm_flash_ctrl_t *,
+ struct msm_flash_cfg_data_t *);
+ int32_t (*camera_flash_release)(struct msm_flash_ctrl_t *);
+ int32_t (*camera_flash_off)(struct msm_flash_ctrl_t *,
+ struct msm_flash_cfg_data_t *);
+ int32_t (*camera_flash_low)(struct msm_flash_ctrl_t *,
+ struct msm_flash_cfg_data_t *);
+ int32_t (*camera_flash_high)(struct msm_flash_ctrl_t *,
+ struct msm_flash_cfg_data_t *);
+};
+
+struct msm_flash_table {
+ enum msm_flash_driver_type flash_driver_type;
+ struct msm_flash_func_t func_tbl;
+};
+
+struct msm_flash_reg_t {
+ struct msm_camera_i2c_reg_setting *init_setting;
+ struct msm_camera_i2c_reg_setting *off_setting;
+ struct msm_camera_i2c_reg_setting *release_setting;
+ struct msm_camera_i2c_reg_setting *low_setting;
+ struct msm_camera_i2c_reg_setting *high_setting;
+};
+
+struct msm_flash_ctrl_t {
+ struct msm_camera_i2c_client flash_i2c_client;
+ struct msm_sd_subdev msm_sd;
+ struct platform_device *pdev;
+ struct msm_flash_func_t *func_tbl;
+ struct msm_camera_power_ctrl_t power_info;
+
+ /* Switch node to trigger led */
+ const char *switch_trigger_name;
+ struct led_trigger *switch_trigger;
+
+ /* Flash */
+ uint32_t flash_num_sources;
+ const char *flash_trigger_name[MAX_LED_TRIGGERS];
+ struct led_trigger *flash_trigger[MAX_LED_TRIGGERS];
+ uint32_t flash_op_current[MAX_LED_TRIGGERS];
+ uint32_t flash_max_current[MAX_LED_TRIGGERS];
+ uint32_t flash_max_duration[MAX_LED_TRIGGERS];
+
+ /* Torch */
+ uint32_t torch_num_sources;
+ const char *torch_trigger_name[MAX_LED_TRIGGERS];
+ struct led_trigger *torch_trigger[MAX_LED_TRIGGERS];
+ uint32_t torch_op_current[MAX_LED_TRIGGERS];
+ uint32_t torch_max_current[MAX_LED_TRIGGERS];
+
+ void *data;
+ enum msm_camera_device_type_t flash_device_type;
+ enum cci_i2c_master_t cci_i2c_master;
+ uint32_t subdev_id;
+ struct mutex *flash_mutex;
+ struct msm_sensor_power_setting_array power_setting_array;
+
+ /* flash driver type */
+ enum msm_flash_driver_type flash_driver_type;
+
+ /* flash state */
+ enum msm_camera_flash_state_t flash_state;
+};
+
+int msm_flash_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id);
+
+int msm_flash_probe(struct platform_device *pdev, const void *data);
+
+int32_t msm_flash_create_v4lsubdev(struct platform_device *pdev,
+ void *data);
+int32_t msm_led_i2c_flash_create_v4lsubdev(void *data);
+
+int32_t msm_led_i2c_trigger_get_subdev_id(struct msm_flash_ctrl_t *fctrl,
+ void *arg);
+
+int32_t msm_led_i2c_trigger_config(struct msm_flash_ctrl_t *fctrl,
+ void *data);
+
+int msm_flash_led_init(struct msm_flash_ctrl_t *fctrl);
+int msm_flash_led_release(struct msm_flash_ctrl_t *fctrl);
+int msm_flash_led_off(struct msm_flash_ctrl_t *fctrl);
+int msm_flash_led_low(struct msm_flash_ctrl_t *fctrl);
+int msm_flash_led_high(struct msm_flash_ctrl_t *fctrl);
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/io/Makefile b/drivers/media/platform/msm/ais/sensor/io/Makefile
new file mode 100644
index 000000000000..86e3f9ad48f9
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/ais/
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+ccflags-y += -Idrivers/misc/
+obj-$(CONFIG_MSM_AIS) += msm_camera_cci_i2c.o msm_camera_qup_i2c.o msm_camera_spi.o msm_camera_dt_util.o msm_camera_tz_i2c.o
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c
new file mode 100644
index 000000000000..955be342e8cf
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c
@@ -0,0 +1,578 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/ais.h>
+#include "msm_camera_i2c.h"
+#include "msm_cci.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#define S_I2C_DBG(fmt, args...) pr_debug(fmt, ##args)
+
+int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ unsigned char buf[client->addr_type+data_type];
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_3B_ADDR)
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ cci_ctrl.cmd = MSM_CCI_I2C_READ;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ rc = cci_ctrl.status;
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA)
+ *data = buf[0];
+ else
+ *data = buf[0] << 8 | buf[1];
+
+ S_I2C_DBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ unsigned char *buf = NULL;
+ int i;
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_3B_ADDR)
+ || num_byte == 0)
+ return rc;
+
+ if (num_byte > I2C_REG_DATA_MAX) {
+ S_I2C_DBG("%s: Error num_byte:0x%x exceeds 8K\n",
+ __func__, num_byte);
+ S_I2C_DBG("%s: max supported:0x%x\n",
+ __func__, I2C_REG_DATA_MAX);
+ return rc;
+ }
+
+ buf = kzalloc(num_byte, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ cci_ctrl.cmd = MSM_CCI_I2C_READ;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
+ cci_ctrl.status = -EFAULT;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = cci_ctrl.status;
+
+ S_I2C_DBG("%s addr = 0x%x", __func__, addr);
+ for (i = 0; i < num_byte; i++) {
+ data[i] = buf[i];
+ S_I2C_DBG("Byte %d: 0x%x\n", i, buf[i]);
+ S_I2C_DBG("Data: 0x%x\n", data[i]);
+ }
+ kfree(buf);
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ struct msm_camera_cci_ctrl cci_ctrl;
+ struct msm_camera_i2c_reg_array reg_conf_tbl;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ CDBG("%s:%d reg addr = 0x%x data type: %d\n",
+ __func__, __LINE__, addr, data_type);
+ reg_conf_tbl.reg_addr = addr;
+ reg_conf_tbl.reg_data = data;
+ reg_conf_tbl.delay = 0;
+ cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting = &reg_conf_tbl;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = data_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = 1;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ rc = cci_ctrl.status;
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ uint32_t i = 0;
+ struct msm_camera_cci_ctrl cci_ctrl;
+ struct msm_camera_i2c_reg_array *reg_conf_tbl = NULL;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || num_byte == 0)
+ return rc;
+
+ if (num_byte > I2C_SEQ_REG_DATA_MAX) {
+		pr_err("%s: num_byte=%d exceeds max supported %d\n",
+ __func__, num_byte, I2C_SEQ_REG_DATA_MAX);
+ return rc;
+ }
+
+ S_I2C_DBG("%s reg addr = 0x%x num bytes: %d\n",
+ __func__, addr, num_byte);
+
+ reg_conf_tbl = kzalloc(num_byte *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!reg_conf_tbl)
+ return -ENOMEM;
+
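+	/* Entry 0 holds the register address; each entry holds one data byte */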
+ reg_conf_tbl[0].reg_addr = addr;
+ for (i = 0; i < num_byte; i++) {
+ reg_conf_tbl[i].reg_data = data[i];
+ reg_conf_tbl[i].delay = 0;
+ }
+ cci_ctrl.cmd = MSM_CCI_I2C_WRITE_SEQ;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting = reg_conf_tbl;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = MSM_CAMERA_I2C_BYTE_DATA;
+ cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = num_byte;
+ cci_ctrl.status = -EFAULT;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = cci_ctrl.status;
+ kfree(reg_conf_tbl);
+ reg_conf_tbl = NULL;
+ return rc;
+}
+
+static int32_t msm_camera_cci_i2c_write_table_cmd(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting,
+ enum msm_cci_cmd_type cmd)
+{
+ int32_t rc = -EFAULT;
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((write_setting->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && write_setting->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (write_setting->data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && write_setting->data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ cci_ctrl.cmd = cmd;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting =
+ write_setting->reg_setting;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = write_setting->data_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = write_setting->size;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ rc = cci_ctrl.status;
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000, (write_setting->delay
+ * 1000) + 1000);
+
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_write_table_async(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ return msm_camera_cci_i2c_write_table_cmd(client, write_setting,
+ MSM_CCI_I2C_WRITE_ASYNC);
+}
+
+int32_t msm_camera_cci_i2c_write_table_sync(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ return msm_camera_cci_i2c_write_table_cmd(client, write_setting,
+ MSM_CCI_I2C_WRITE_SYNC);
+}
+
+int32_t msm_camera_cci_i2c_write_table_sync_block(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ return msm_camera_cci_i2c_write_table_cmd(client, write_setting,
+ MSM_CCI_I2C_WRITE_SYNC_BLOCK);
+}
+
+int32_t msm_camera_cci_i2c_write_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ return msm_camera_cci_i2c_write_table_cmd(client, write_setting,
+ MSM_CCI_I2C_WRITE);
+}
+
+int32_t msm_camera_cci_i2c_write_seq_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EFAULT;
+ struct msm_camera_i2c_seq_reg_array *reg_setting;
+ uint16_t client_addr_type;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((write_setting->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && write_setting->addr_type != MSM_CAMERA_I2C_WORD_ADDR)) {
+ pr_err("%s Invalid addr type %d\n", __func__,
+ write_setting->addr_type);
+ return rc;
+ }
+
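+	/* Save the client address type and use the table's for this sequence */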
+ reg_setting = write_setting->reg_setting;
+ client_addr_type = client->addr_type;
+ client->addr_type = write_setting->addr_type;
+
+ if (reg_setting->reg_data_size > I2C_SEQ_REG_DATA_MAX) {
+		pr_err("%s: number of bytes %u exceeds the max supported %d\n",
+ __func__, reg_setting->reg_data_size, I2C_SEQ_REG_DATA_MAX);
+ return rc;
+ }
+
+ for (i = 0; i < write_setting->size; i++) {
+ rc = msm_camera_cci_i2c_write_seq(client, reg_setting->reg_addr,
+ reg_setting->reg_data, reg_setting->reg_data_size);
+ if (rc < 0)
+ return rc;
+ reg_setting++;
+ }
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000, (write_setting->delay
+ * 1000) + 1000);
+
+ client->addr_type = client_addr_type;
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (write_setting->data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && write_setting->data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting =
+ write_setting->reg_setting;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = write_setting->data_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = write_setting->size;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ rc = cci_ctrl.status;
+ return rc;
+}
+
+static int32_t msm_camera_cci_i2c_compare(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc;
+ uint16_t reg_data = 0;
+ int data_len = 0;
+
+ switch (data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ data_len = data_type;
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ data_len = MSM_CAMERA_I2C_BYTE_DATA;
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ data_len = MSM_CAMERA_I2C_WORD_DATA;
+ break;
+ default:
+		pr_err("%s: Unsupported data type: %d\n", __func__, data_type);
+ break;
+ }
+
+ rc = msm_camera_cci_i2c_read(client, addr, &reg_data, data_len);
+ if (rc < 0)
+ return rc;
+
+ rc = I2C_COMPARE_MISMATCH;
+ switch (data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ if (data == reg_data)
+ rc = I2C_COMPARE_MATCH;
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ if ((reg_data & data) == data)
+ rc = I2C_COMPARE_MATCH;
+ break;
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ if (!(reg_data & data))
+ rc = I2C_COMPARE_MATCH;
+ break;
+ default:
+		pr_err("%s: Unsupported data type: %d\n", __func__, data_type);
+ break;
+ }
+
+ S_I2C_DBG("%s: Register and data match result %d\n", __func__,
+ rc);
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type, uint32_t delay_ms)
+{
+ int32_t rc = -EFAULT;
+ int32_t i = 0;
+
+ S_I2C_DBG("%s: addr: 0x%x data: 0x%x dt: %d\n",
+ __func__, addr, data, data_type);
+
+ if (delay_ms > MAX_POLL_DELAY_MS) {
+ pr_err("%s:%d invalid delay = %d max_delay = %d\n",
+ __func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+ return -EINVAL;
+ }
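+	/* Poll every 1 ms until the value matches or delay_ms expires */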
+ for (i = 0; i < delay_ms; i++) {
+ rc = msm_camera_cci_i2c_compare(client,
+ addr, data, data_type);
+ if (!rc)
+ return rc;
+ usleep_range(1000, 1010);
+ }
+
+	/* If rc is 1, the read succeeded but the poll did not match */
+ if (rc == 1)
+ pr_err("%s:%d poll failed rc=%d(non-fatal)\n",
+ __func__, __LINE__, rc);
+
+ if (rc < 0)
+ pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+
+ return rc;
+}
+
+static int32_t msm_camera_cci_i2c_set_mask(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t mask,
+ enum msm_camera_i2c_data_type data_type, uint16_t set_mask)
+{
+ int32_t rc = -EFAULT;
+ uint16_t reg_data;
+
+ rc = msm_camera_cci_i2c_read(client, addr, &reg_data, data_type);
+ if (rc < 0) {
+ S_I2C_DBG("%s read fail\n", __func__);
+ return rc;
+ }
+ S_I2C_DBG("%s addr: 0x%x data: 0x%x setmask: 0x%x\n",
+ __func__, addr, reg_data, mask);
+
+ if (set_mask)
+ reg_data |= mask;
+ else
+ reg_data &= ~mask;
+ S_I2C_DBG("%s write: 0x%x\n", __func__, reg_data);
+
+ rc = msm_camera_cci_i2c_write(client, addr, reg_data, data_type);
+ if (rc < 0)
+ S_I2C_DBG("%s write fail\n", __func__);
+
+ return rc;
+}
+
+static int32_t msm_camera_cci_i2c_set_write_mask_data(
+ struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data, int16_t mask,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc;
+ uint16_t reg_data;
+
+ CDBG("%s\n", __func__);
+ if (mask == -1)
+ return 0;
+ if (mask == 0) {
+ rc = msm_camera_cci_i2c_write(client, addr, data, data_type);
+ } else {
+ rc = msm_camera_cci_i2c_read(client, addr, &reg_data,
+ data_type);
+ if (rc < 0) {
+ CDBG("%s read fail\n", __func__);
+ return rc;
+ }
+ reg_data &= ~mask;
+ reg_data |= (data & mask);
+ rc = msm_camera_cci_i2c_write(client, addr, reg_data,
+ data_type);
+ if (rc < 0)
+ CDBG("%s write fail\n", __func__);
+ }
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int i;
+ int32_t rc = -EFAULT;
+
+ for (i = 0; i < size; i++) {
+ enum msm_camera_i2c_data_type dt;
+
+ if (reg_conf_tbl->cmd_type == MSM_CAMERA_I2C_CMD_POLL) {
+ rc = msm_camera_cci_i2c_poll(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ reg_conf_tbl->dt, I2C_POLL_TIME_MS);
+ } else {
+ if (reg_conf_tbl->dt == 0)
+ dt = data_type;
+ else
+ dt = reg_conf_tbl->dt;
+ switch (dt) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ rc = msm_camera_cci_i2c_write(
+ client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data, dt);
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ rc = msm_camera_cci_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_BYTE_DATA, 1);
+ break;
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ rc = msm_camera_cci_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_BYTE_DATA, 0);
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ rc = msm_camera_cci_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_WORD_DATA, 1);
+ break;
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ rc = msm_camera_cci_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_WORD_DATA, 0);
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
+ rc = msm_camera_cci_i2c_set_write_mask_data(
+ client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ reg_conf_tbl->mask,
+ MSM_CAMERA_I2C_BYTE_DATA);
+ break;
+ default:
+				pr_err("%s: Unsupported data type: %d\n",
+ __func__, dt);
+ break;
+ }
+ }
+ if (rc < 0)
+ break;
+ reg_conf_tbl++;
+ }
+ return rc;
+}
+
+int32_t msm_sensor_cci_i2c_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd)
+{
+ int32_t rc = 0;
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ CDBG("%s line %d\n", __func__, __LINE__);
+ cci_ctrl.cmd = cci_cmd;
+ cci_ctrl.cci_info = client->cci_client;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ return cci_ctrl.status;
+}
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c
new file mode 100644
index 000000000000..071600ed5221
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c
@@ -0,0 +1,1734 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_i2c_mux.h"
+#include "msm_cci.h"
+
+#define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
+#define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
+/* #define CONFIG_MSM_CAMERA_DT_DEBUG */
+
+#define VALIDATE_VOLTAGE(min, max, config_val) ((config_val) && \
+	((config_val) >= (min)) && ((config_val) <= (max)))
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+int msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
+ int num_vreg, struct msm_sensor_power_setting *power_setting,
+ uint16_t power_setting_size)
+{
+ uint16_t i = 0;
+ int j = 0;
+
+ /* Validate input parameters */
+ if (!cam_vreg || !power_setting) {
+ pr_err("%s:%d failed: cam_vreg %pK power_setting %pK", __func__,
+ __LINE__, cam_vreg, power_setting);
+ return -EINVAL;
+ }
+
+ /* Validate size of num_vreg */
+ if (num_vreg <= 0) {
+ pr_err("failed: num_vreg %d", num_vreg);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < power_setting_size; i++) {
+ if (power_setting[i].seq_type != SENSOR_VREG)
+ continue;
+
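+		/* Map the symbolic seq_val to its index in cam_vreg[] */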
+ switch (power_setting[i].seq_val) {
+ case CAM_VDIG:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name, "cam_vdig")) {
+ CDBG("%s:%d i %d j %d cam_vdig\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ case CAM_VIO:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name, "cam_vio")) {
+ CDBG("%s:%d i %d j %d cam_vio\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ case CAM_VANA:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name, "cam_vana")) {
+ CDBG("%s:%d i %d j %d cam_vana\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ case CAM_VAF:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name, "cam_vaf")) {
+ CDBG("%s:%d i %d j %d cam_vaf\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ case CAM_V_CUSTOM1:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name,
+ "cam_v_custom1")) {
+ CDBG("%s:%d i %d j %d cam_vcustom1\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+ case CAM_V_CUSTOM2:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name,
+ "cam_v_custom2")) {
+ CDBG("%s:%d i %d j %d cam_vcustom2\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ default:
+ pr_err("%s:%d invalid seq_val %d\n", __func__,
+ __LINE__, power_setting[i].seq_val);
+ break;
+ }
+ }
+ return 0;
+}
+
+int msm_sensor_get_sub_module_index(struct device_node *of_node,
+ struct msm_sensor_info_t **s_info)
+{
+ int rc = 0, i = 0;
+ uint32_t val = 0, count = 0;
+ uint32_t *val_array = NULL;
+ struct device_node *src_node = NULL;
+ struct msm_sensor_info_t *sensor_info;
+
+ sensor_info = kzalloc(sizeof(*sensor_info), GFP_KERNEL);
+ if (!sensor_info)
+ return -ENOMEM;
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ sensor_info->subdev_id[i] = -1;
+		/* A subdev may expose an extra interface for the sub module */
+ sensor_info->subdev_intf[i] = -1;
+ }
+
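+	/* Each *-src phandle gives the cell-index of an optional sub module */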
+ src_node = of_parse_phandle(of_node, "qcom,actuator-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,actuator cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ if (of_device_is_available(src_node))
+ sensor_info->subdev_id[SUB_MODULE_ACTUATOR] = val;
+ else
+ CDBG("%s:%d actuator disabled!\n", __func__, __LINE__);
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ src_node = of_parse_phandle(of_node, "qcom,ois-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,ois cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_OIS] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ src_node = of_parse_phandle(of_node, "qcom,eeprom-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d eeprom src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,eeprom cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_EEPROM] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,eeprom-sd-index", &val);
+ if (rc != -EINVAL) {
+ CDBG("%s qcom,eeprom-sd-index %d, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_EEPROM] = val;
+ } else {
+ rc = 0;
+ }
+
+ src_node = of_parse_phandle(of_node, "qcom,led-flash-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,led flash cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_LED_FLASH] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ src_node = of_parse_phandle(of_node, "qcom,ir-led-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,ir led cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_IR_LED] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ src_node = of_parse_phandle(of_node, "qcom,ir-cut-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,ir cut cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_IR_CUT] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,strobe-flash-sd-index", &val);
+ if (rc != -EINVAL) {
+ CDBG("%s qcom,strobe-flash-sd-index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_STROBE_FLASH] = val;
+ } else {
+ rc = 0;
+ }
+
+ if (of_get_property(of_node, "qcom,csiphy-sd-index", &count)) {
+ count /= sizeof(uint32_t);
+ if (count > 2) {
+ pr_err("%s qcom,csiphy-sd-index count %d > 2\n",
+ __func__, count);
+			rc = -EINVAL;
+			goto ERROR;
+ }
+ val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+ if (!val_array) {
+ rc = -ENOMEM;
+ goto ERROR;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,csiphy-sd-index",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ kfree(val_array);
+ goto ERROR;
+ }
+ for (i = 0; i < count; i++) {
+ sensor_info->subdev_id[SUB_MODULE_CSIPHY + i] =
+ val_array[i];
+ CDBG("%s csiphy_core[%d] = %d\n",
+ __func__, i, val_array[i]);
+ }
+ kfree(val_array);
+ } else {
+ pr_err("%s:%d qcom,csiphy-sd-index not present\n", __func__,
+ __LINE__);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+
+ if (of_get_property(of_node, "qcom,csid-sd-index", &count)) {
+ count /= sizeof(uint32_t);
+ if (count > 2) {
+ pr_err("%s qcom,csid-sd-index count %d > 2\n",
+ __func__, count);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+ if (!val_array) {
+ rc = -ENOMEM;
+ goto ERROR;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,csid-sd-index",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ kfree(val_array);
+ goto ERROR;
+ }
+ for (i = 0; i < count; i++) {
+ sensor_info->subdev_id
+ [SUB_MODULE_CSID + i] = val_array[i];
+ CDBG("%s csid_core[%d] = %d\n",
+ __func__, i, val_array[i]);
+ }
+ kfree(val_array);
+ } else {
+ pr_err("%s:%d qcom,csid-sd-index not present\n", __func__,
+ __LINE__);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+
+ *s_info = sensor_info;
+ return rc;
+ERROR:
+ kfree(sensor_info);
+ return rc;
+}
+
+int msm_sensor_get_dt_actuator_data(struct device_node *of_node,
+ struct msm_actuator_info **act_info)
+{
+ int rc = 0;
+ uint32_t val = 0;
+ struct msm_actuator_info *actuator_info;
+
+ rc = of_property_read_u32(of_node, "qcom,actuator-cam-name", &val);
+ CDBG("%s qcom,actuator-cam-name %d, rc %d\n", __func__, val, rc);
+ if (rc < 0)
+ return 0;
+
+ actuator_info = kzalloc(sizeof(*actuator_info), GFP_KERNEL);
+ if (!actuator_info) {
+ rc = -ENOMEM;
+ goto ERROR;
+ }
+
+ actuator_info->cam_name = val;
+
+ rc = of_property_read_u32(of_node, "qcom,actuator-vcm-pwd", &val);
+ CDBG("%s qcom,actuator-vcm-pwd %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ actuator_info->vcm_pwd = val;
+
+ rc = of_property_read_u32(of_node, "qcom,actuator-vcm-enable", &val);
+ CDBG("%s qcom,actuator-vcm-enable %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ actuator_info->vcm_enable = val;
+
+ *act_info = actuator_info;
+ return 0;
+ERROR:
+ kfree(actuator_info);
+ return rc;
+}
+
+int msm_sensor_get_dt_csi_data(struct device_node *of_node,
+ struct msm_camera_csi_lane_params **csi_lane_params)
+{
+ int rc = 0;
+ uint32_t val = 0;
+ struct msm_camera_csi_lane_params *clp;
+
+ clp = kzalloc(sizeof(*clp), GFP_KERNEL);
+ if (!clp)
+ return -ENOMEM;
+ *csi_lane_params = clp;
+
+ rc = of_property_read_u32(of_node, "qcom,csi-lane-assign", &val);
+ CDBG("%s qcom,csi-lane-assign 0x%x, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ clp->csi_lane_assign = val;
+
+ rc = of_property_read_u32(of_node, "qcom,csi-lane-mask", &val);
+ CDBG("%s qcom,csi-lane-mask 0x%x, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ clp->csi_lane_mask = val;
+
+ return rc;
+ERROR:
+ kfree(clp);
+ return rc;
+}
+
+int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
+ struct camera_vreg_t *cam_vreg, int num_vreg,
+ struct msm_camera_power_ctrl_t *power_info)
+{
+ int rc = 0, i, j;
+ int count = 0;
+ const char *seq_name = NULL;
+ uint32_t *array = NULL;
+ struct msm_sensor_power_setting *ps;
+
+ struct msm_sensor_power_setting *power_setting;
+ uint16_t *power_setting_size, size = 0;
+	bool need_reverse = false;
+
+ if (!power_info)
+ return -EINVAL;
+
+ power_setting = power_info->power_setting;
+ power_setting_size = &power_info->power_setting_size;
+
+	count = of_property_count_strings(of_node, "qcom,cam-power-seq-type");
+
+	CDBG("%s qcom,cam-power-seq-type count %d\n", __func__, count);
+
+	if (count <= 0)
+		return 0;
+	*power_setting_size = count;
+
+ ps = kcalloc(count, sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ return -ENOMEM;
+ power_setting = ps;
+ power_info->power_setting = ps;
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,cam-power-seq-type", i,
+ &seq_name);
+ CDBG("%s seq_name[%d] = %s\n", __func__, i,
+ seq_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ if (!strcmp(seq_name, "sensor_vreg")) {
+ ps[i].seq_type = SENSOR_VREG;
+ CDBG("%s:%d seq_type[%d] %d\n", __func__, __LINE__,
+ i, ps[i].seq_type);
+ } else if (!strcmp(seq_name, "sensor_gpio")) {
+ ps[i].seq_type = SENSOR_GPIO;
+ CDBG("%s:%d seq_type[%d] %d\n", __func__, __LINE__,
+ i, ps[i].seq_type);
+ } else if (!strcmp(seq_name, "sensor_clk")) {
+ ps[i].seq_type = SENSOR_CLK;
+ CDBG("%s:%d seq_type[%d] %d\n", __func__, __LINE__,
+ i, ps[i].seq_type);
+ } else if (!strcmp(seq_name, "sensor_i2c_mux")) {
+ ps[i].seq_type = SENSOR_I2C_MUX;
+ CDBG("%s:%d seq_type[%d] %d\n", __func__, __LINE__,
+ i, ps[i].seq_type);
+ } else {
+ CDBG("%s: unrecognized seq-type\n", __func__);
+ rc = -EILSEQ;
+ goto ERROR1;
+ }
+ }
+
+
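+	/* Resolve each qcom,cam-power-seq-val name against its seq_type */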
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,cam-power-seq-val", i,
+ &seq_name);
+ CDBG("%s seq_name[%d] = %s\n", __func__, i,
+ seq_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ switch (ps[i].seq_type) {
+ case SENSOR_VREG:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(seq_name, cam_vreg[j].reg_name))
+ break;
+ }
+ if (j < num_vreg)
+ ps[i].seq_val = j;
+ else
+ rc = -EILSEQ;
+ break;
+ case SENSOR_GPIO:
+ if (!strcmp(seq_name, "sensor_gpio_reset"))
+ ps[i].seq_val = SENSOR_GPIO_RESET;
+ else if (!strcmp(seq_name, "sensor_gpio_standby"))
+ ps[i].seq_val = SENSOR_GPIO_STANDBY;
+ else if (!strcmp(seq_name, "sensor_gpio_vdig"))
+ ps[i].seq_val = SENSOR_GPIO_VDIG;
+ else if (!strcmp(seq_name, "sensor_gpio_vana"))
+ ps[i].seq_val = SENSOR_GPIO_VANA;
+ else if (!strcmp(seq_name, "sensor_gpio_vaf"))
+ ps[i].seq_val = SENSOR_GPIO_VAF;
+ else if (!strcmp(seq_name, "sensor_gpio_vio"))
+ ps[i].seq_val = SENSOR_GPIO_VIO;
+ else if (!strcmp(seq_name, "sensor_gpio_custom1"))
+ ps[i].seq_val = SENSOR_GPIO_CUSTOM1;
+ else if (!strcmp(seq_name, "sensor_gpio_custom2"))
+ ps[i].seq_val = SENSOR_GPIO_CUSTOM2;
+ else
+ rc = -EILSEQ;
+ break;
+ case SENSOR_CLK:
+ if (!strcmp(seq_name, "sensor_cam_mclk"))
+ ps[i].seq_val = SENSOR_CAM_MCLK;
+ else if (!strcmp(seq_name, "sensor_cam_clk"))
+ ps[i].seq_val = SENSOR_CAM_CLK;
+ else
+ rc = -EILSEQ;
+ break;
+ case SENSOR_I2C_MUX:
+ if (!strcmp(seq_name, "none"))
+ ps[i].seq_val = 0;
+ else
+ rc = -EILSEQ;
+ break;
+ default:
+ rc = -EILSEQ;
+ break;
+ }
+ if (rc < 0) {
+ CDBG("%s: unrecognized seq-val\n", __func__);
+ goto ERROR1;
+ }
+ }
+
+ array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+ if (!array) {
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-cfg-val",
+ array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ if (ps[i].seq_type == SENSOR_GPIO) {
+ if (array[i] == 0)
+ ps[i].config_val = GPIO_OUT_LOW;
+ else if (array[i] == 1)
+ ps[i].config_val = GPIO_OUT_HIGH;
+ } else {
+ ps[i].config_val = array[i];
+ }
+ CDBG("%s power_setting[%d].config_val = %ld\n", __func__, i,
+ ps[i].config_val);
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-delay",
+ array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ ps[i].delay = array[i];
+ CDBG("%s power_setting[%d].delay = %d\n", __func__,
+ i, ps[i].delay);
+ }
+ kfree(array);
+
+ size = *power_setting_size;
+
+	if (ps && size)
+		need_reverse = true;
+
+ power_info->power_down_setting =
+ kzalloc(sizeof(*ps) * size, GFP_KERNEL);
+
+ if (!power_info->power_down_setting) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ memcpy(power_info->power_down_setting,
+ ps, sizeof(*ps) * size);
+
+ power_info->power_down_setting_size = size;
+
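+	/* Default power-down sequence is the power-up sequence in reverse */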
+ if (need_reverse) {
+ int c, end = size - 1;
+ struct msm_sensor_power_setting power_down_setting_t;
+
+ for (c = 0; c < size/2; c++) {
+ power_down_setting_t =
+ power_info->power_down_setting[c];
+ power_info->power_down_setting[c] =
+ power_info->power_down_setting[end];
+ power_info->power_down_setting[end] =
+ power_down_setting_t;
+ end--;
+ }
+ }
+ return rc;
+ERROR2:
+ kfree(array);
+ERROR1:
+ kfree(ps);
+	*power_setting_size = 0;
+ return rc;
+}
+
+int msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size)
+{
+ int rc = 0, i = 0;
+ uint32_t count = 0;
+ uint32_t *val_array = NULL;
+
+ if (!of_get_property(of_node, "qcom,gpio-req-tbl-num", &count))
+ return 0;
+
+ count /= sizeof(uint32_t);
+ if (!count) {
+ pr_err("%s qcom,gpio-req-tbl-num 0\n", __func__);
+ return 0;
+ }
+
+ val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+ if (!val_array)
+ return -ENOMEM;
+
+ gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
+ GFP_KERNEL);
+ if (!gconf->cam_gpio_req_tbl) {
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+ gconf->cam_gpio_req_tbl_size = count;
+
+ rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-num",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ if (val_array[i] >= gpio_array_size) {
+ pr_err("%s gpio req tbl index %d invalid\n",
+ __func__, val_array[i]);
+ return -EINVAL;
+ }
+ gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
+ CDBG("%s cam_gpio_req_tbl[%d].gpio = %d\n", __func__, i,
+ gconf->cam_gpio_req_tbl[i].gpio);
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-flags",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+ CDBG("%s cam_gpio_req_tbl[%d].flags = %ld\n", __func__, i,
+ gconf->cam_gpio_req_tbl[i].flags);
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,gpio-req-tbl-label", i,
+ &gconf->cam_gpio_req_tbl[i].label);
+ CDBG("%s cam_gpio_req_tbl[%d].label = %s\n", __func__, i,
+ gconf->cam_gpio_req_tbl[i].label);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ }
+
+ kfree(val_array);
+ return rc;
+
+ERROR2:
+ kfree(gconf->cam_gpio_req_tbl);
+ERROR1:
+ kfree(val_array);
+ gconf->cam_gpio_req_tbl_size = 0;
+ return rc;
+}
+
+int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size)
+{
+ int rc = 0, val = 0;
+
+ gconf->gpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
+ GFP_KERNEL);
+ if (!gconf->gpio_num_info) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
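+	/* Each optional property maps a DT GPIO index into gpio_num_info */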
+ rc = of_property_read_u32(of_node, "qcom,gpio-ir-p", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-ir-p failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-ir-p invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+
+ gconf->gpio_num_info->gpio_num[IR_CUT_FILTER_GPIO_P] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[IR_CUT_FILTER_GPIO_P] = 1;
+
+ CDBG("%s qcom,gpio-ir-p %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[IR_CUT_FILTER_GPIO_P]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-ir-m", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-ir-m failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-ir-m invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+
+ gconf->gpio_num_info->gpio_num[IR_CUT_FILTER_GPIO_M] =
+ gpio_array[val];
+
+ gconf->gpio_num_info->valid[IR_CUT_FILTER_GPIO_M] = 1;
+
+ CDBG("%s qcom,gpio-ir-m %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[IR_CUT_FILTER_GPIO_M]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-vana", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-vana failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-vana invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VANA] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_VANA] = 1;
+ CDBG("%s qcom,gpio-vana %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VANA]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-vio", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-vio failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-vio invalid %d\n",
+ __func__, __LINE__, val);
+			rc = -EINVAL;
+			goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VIO] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_VIO] = 1;
+ CDBG("%s qcom,gpio-vio %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VIO]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-vaf", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-vaf failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-vaf invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VAF] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_VAF] = 1;
+ CDBG("%s qcom,gpio-vaf %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VAF]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-vdig", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-vdig failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-vdig invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VDIG] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_VDIG] = 1;
+ CDBG("%s qcom,gpio-vdig %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VDIG]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-reset", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-reset failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-reset invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_RESET] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_RESET] = 1;
+ CDBG("%s qcom,gpio-reset %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_RESET]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-standby", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-standby failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-standby invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_STANDBY] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_STANDBY] = 1;
+ CDBG("%s qcom,gpio-standby %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_STANDBY]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-af-pwdm", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-af-pwdm failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-af-pwdm invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_AF_PWDM] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_AF_PWDM] = 1;
+ CDBG("%s qcom,gpio-af-pwdm %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_AF_PWDM]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-flash-en", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-flash-en failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-flash-en invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_FL_EN] = 1;
+ CDBG("%s qcom,gpio-flash-en %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-flash-now", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-flash-now failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-flash-now invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_FL_NOW] = 1;
+ CDBG("%s qcom,gpio-flash-now %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-flash-reset", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+			pr_err("%s:%d read qcom,gpio-flash-reset failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-flash-reset invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_RESET] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_FL_RESET] = 1;
+ CDBG("%s qcom,gpio-flash-reset %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_RESET]);
+	} else {
+		rc = 0;
+	}
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-custom1", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-custom1 failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-custom1 invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM1] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_CUSTOM1] = 1;
+ CDBG("%s qcom,gpio-custom1 %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM1]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-custom2", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-custom2 failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-custom2 invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM2] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_CUSTOM2] = 1;
+ CDBG("%s qcom,gpio-custom2 %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM2]);
+ } else {
+ rc = 0;
+ }
+
+ return rc;
+
+ERROR:
+ kfree(gconf->gpio_num_info);
+ gconf->gpio_num_info = NULL;
+ return rc;
+}
+
+int msm_camera_get_dt_vreg_data(struct device_node *of_node,
+ struct camera_vreg_t **cam_vreg, int *num_vreg)
+{
+ int rc = 0, i = 0;
+ int32_t count = 0;
+ uint32_t *vreg_array = NULL;
+ struct camera_vreg_t *vreg = NULL;
+ bool custom_vreg_name = false;
+
+ count = of_property_count_strings(of_node, "qcom,cam-vreg-name");
+ CDBG("%s qcom,cam-vreg-name count %d\n", __func__, count);
+
+ if (!count || (count == -EINVAL)) {
+ pr_err("%s:%d number of entries is 0 or not present in dts\n",
+ __func__, __LINE__);
+ *num_vreg = 0;
+ return 0;
+ }
+
+ vreg = kcalloc(count, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+ *cam_vreg = vreg;
+ *num_vreg = count;
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,cam-vreg-name", i,
+ &vreg[i].reg_name);
+ CDBG("%s reg_name[%d] = %s\n", __func__, i,
+ vreg[i].reg_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ }
+
+ custom_vreg_name = of_property_read_bool(of_node,
+ "qcom,cam-custom-vreg-name");
+ if (custom_vreg_name) {
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,cam-custom-vreg-name", i,
+ &vreg[i].custom_vreg_name);
+ CDBG("%s sub reg_name[%d] = %s\n", __func__, i,
+ vreg[i].custom_vreg_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ }
+ }
+
+ vreg_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+ if (!vreg_array) {
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ for (i = 0; i < count; i++)
+ vreg[i].type = VREG_TYPE_DEFAULT;
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-type",
+ vreg_array, count);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ } else {
+ for (i = 0; i < count; i++) {
+ vreg[i].type = vreg_array[i];
+ CDBG("%s cam_vreg[%d].type = %d\n",
+ __func__, i, vreg[i].type);
+ }
+ }
+ } else {
+ CDBG("%s:%d no qcom,cam-vreg-type entries in dts\n",
+ __func__, __LINE__);
+ rc = 0;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-min-voltage",
+ vreg_array, count);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ } else {
+ for (i = 0; i < count; i++) {
+ vreg[i].min_voltage = vreg_array[i];
+ CDBG("%s cam_vreg[%d].min_voltage = %d\n",
+ __func__, i, vreg[i].min_voltage);
+ }
+ }
+ } else {
+ CDBG("%s:%d no qcom,cam-vreg-min-voltage entries in dts\n",
+ __func__, __LINE__);
+ rc = 0;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-max-voltage",
+ vreg_array, count);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ } else {
+ for (i = 0; i < count; i++) {
+ vreg[i].max_voltage = vreg_array[i];
+ CDBG("%s cam_vreg[%d].max_voltage = %d\n",
+ __func__, i, vreg[i].max_voltage);
+ }
+ }
+ } else {
+ CDBG("%s:%d no qcom,cam-vreg-max-voltage entries in dts\n",
+ __func__, __LINE__);
+ rc = 0;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-op-mode",
+ vreg_array, count);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ } else {
+ for (i = 0; i < count; i++) {
+ vreg[i].op_mode = vreg_array[i];
+ CDBG("%s cam_vreg[%d].op_mode = %d\n",
+ __func__, i, vreg[i].op_mode);
+ }
+ }
+ } else {
+ CDBG("%s:%d no qcom,cam-vreg-op-mode entries in dts\n",
+ __func__, __LINE__);
+ rc = 0;
+ }
+
+ kfree(vreg_array);
+ return rc;
+ERROR2:
+ kfree(vreg_array);
+ERROR1:
+ kfree(vreg);
+ *num_vreg = 0;
+ return rc;
+}
+
+static int msm_camera_enable_i2c_mux(struct msm_camera_i2c_conf *i2c_conf)
+{
+ struct v4l2_subdev *i2c_mux_sd =
+ dev_get_drvdata(&i2c_conf->mux_dev->dev);
+ v4l2_subdev_call(i2c_mux_sd, core, ioctl,
+ VIDIOC_MSM_I2C_MUX_INIT, NULL);
+ v4l2_subdev_call(i2c_mux_sd, core, ioctl,
+ VIDIOC_MSM_I2C_MUX_CFG, (void *)&i2c_conf->i2c_mux_mode);
+ return 0;
+}
+
+static int msm_camera_disable_i2c_mux(struct msm_camera_i2c_conf *i2c_conf)
+{
+ struct v4l2_subdev *i2c_mux_sd =
+ dev_get_drvdata(&i2c_conf->mux_dev->dev);
+ v4l2_subdev_call(i2c_mux_sd, core, ioctl,
+ VIDIOC_MSM_I2C_MUX_RELEASE, NULL);
+ return 0;
+}
+
+int msm_camera_pinctrl_init(
+ struct msm_pinctrl_info *sensor_pctrl, struct device *dev) {
+
+ sensor_pctrl->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) {
+ pr_err("%s:%d Getting pinctrl handle failed\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ sensor_pctrl->gpio_state_active =
+ pinctrl_lookup_state(sensor_pctrl->pinctrl,
+ CAM_SENSOR_PINCTRL_STATE_DEFAULT);
+ if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_active)) {
+ pr_err("%s:%d Failed to get the active state pinctrl handle\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ sensor_pctrl->gpio_state_suspend
+ = pinctrl_lookup_state(sensor_pctrl->pinctrl,
+ CAM_SENSOR_PINCTRL_STATE_SLEEP);
+ if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_suspend)) {
+ pr_err("%s:%d Failed to get the suspend state pinctrl handle\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int msm_cam_sensor_handle_reg_gpio(int seq_val,
+ struct msm_camera_gpio_conf *gconf, int val) {
+
+ int gpio_offset = -1;
+
+ if (!gconf) {
+		pr_err("ERR:%s: invalid input parameters\n", __func__);
+ return -EINVAL;
+ }
+	CDBG("%s: %d Seq val: %d, config: %d\n", __func__, __LINE__,
+ seq_val, val);
+
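+	/* Map the VREG sequence value to its companion enable GPIO, if any */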
+ switch (seq_val) {
+ case CAM_VDIG:
+ gpio_offset = SENSOR_GPIO_VDIG;
+ break;
+
+ case CAM_VIO:
+ gpio_offset = SENSOR_GPIO_VIO;
+ break;
+
+ case CAM_VANA:
+ gpio_offset = SENSOR_GPIO_VANA;
+ break;
+
+ case CAM_VAF:
+ gpio_offset = SENSOR_GPIO_VAF;
+ break;
+
+ case CAM_V_CUSTOM1:
+ gpio_offset = SENSOR_GPIO_CUSTOM1;
+ break;
+
+ case CAM_V_CUSTOM2:
+ gpio_offset = SENSOR_GPIO_CUSTOM2;
+ break;
+
+ default:
+ pr_err("%s:%d Invalid VREG seq val %d\n", __func__,
+ __LINE__, seq_val);
+ return -EINVAL;
+ }
+
+ CDBG("%s: %d GPIO offset: %d, seq_val: %d\n", __func__, __LINE__,
+ gpio_offset, seq_val);
+
+	if (gconf->gpio_num_info->valid[gpio_offset] == 1) {
+ gpio_set_value_cansleep(
+ gconf->gpio_num_info->gpio_num
+ [gpio_offset], val);
+ }
+ return 0;
+}
+
+int32_t msm_sensor_driver_get_gpio_data(
+ struct msm_camera_gpio_conf **gpio_conf,
+ struct device_node *of_node)
+{
+ int32_t rc = 0, i = 0;
+ uint16_t *gpio_array = NULL;
+ int16_t gpio_array_size = 0;
+ struct msm_camera_gpio_conf *gconf = NULL;
+
+ /* Validate input parameters */
+ if (!of_node) {
+ pr_err("failed: invalid param of_node %pK", of_node);
+ return -EINVAL;
+ }
+
+ gpio_array_size = of_gpio_count(of_node);
+ CDBG("gpio count %d\n", gpio_array_size);
+ if (gpio_array_size <= 0)
+ return -ENODEV;
+
+ gconf = kzalloc(sizeof(struct msm_camera_gpio_conf),
+ GFP_KERNEL);
+ if (!gconf)
+ return -ENOMEM;
+
+ *gpio_conf = gconf;
+
+ gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
+	if (!gpio_array) {
+		rc = -ENOMEM;
+		goto FREE_GPIO_CONF;
+	}
+
+ for (i = 0; i < gpio_array_size; i++) {
+ gpio_array[i] = of_get_gpio(of_node, i);
+ CDBG("gpio_array[%d] = %d", i, gpio_array[i]);
+ }
+ rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
+ gpio_array_size);
+ if (rc < 0) {
+ pr_err("failed in msm_camera_get_dt_gpio_req_tbl\n");
+ goto FREE_GPIO_CONF;
+ }
+
+ rc = msm_camera_init_gpio_pin_tbl(of_node, gconf, gpio_array,
+ gpio_array_size);
+ if (rc < 0) {
+ pr_err("failed in msm_camera_init_gpio_pin_tbl\n");
+ goto FREE_GPIO_REQ_TBL;
+ }
+ kfree(gpio_array);
+ return rc;
+
+FREE_GPIO_REQ_TBL:
+ kfree(gconf->cam_gpio_req_tbl);
+FREE_GPIO_CONF:
+ kfree(gconf);
+ kfree(gpio_array);
+ *gpio_conf = NULL;
+ return rc;
+}
+
+int msm_camera_power_up(struct msm_camera_power_ctrl_t *ctrl,
+ enum msm_camera_device_type_t device_type,
+ struct msm_camera_i2c_client *sensor_i2c_client)
+{
+ int rc = 0, index = 0, no_gpio = 0, ret = 0;
+ struct msm_sensor_power_setting *power_setting = NULL;
+
+ CDBG("%s:%d\n", __func__, __LINE__);
+ if (!ctrl || !sensor_i2c_client) {
+ pr_err("failed ctrl %pK sensor_i2c_client %pK\n", ctrl,
+ sensor_i2c_client);
+ return -EINVAL;
+ }
+ if (ctrl->gpio_conf->cam_gpiomux_conf_tbl != NULL)
+ pr_err("%s:%d mux install\n", __func__, __LINE__);
+
+ ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
+ if (ret < 0) {
+ pr_err("%s:%d Initialization of pinctrl failed\n",
+ __func__, __LINE__);
+ ctrl->cam_pinctrl_status = 0;
+ } else {
+ ctrl->cam_pinctrl_status = 1;
+ }
+ rc = msm_camera_request_gpio_table(
+ ctrl->gpio_conf->cam_gpio_req_tbl,
+ ctrl->gpio_conf->cam_gpio_req_tbl_size, 1);
+ if (rc < 0)
+ no_gpio = rc;
+ if (ctrl->cam_pinctrl_status) {
+ ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+ ctrl->pinctrl_info.gpio_state_active);
+ if (ret)
+ pr_err("%s:%d cannot set pin to active state",
+ __func__, __LINE__);
+ }
+ for (index = 0; index < ctrl->power_setting_size; index++) {
+ CDBG("%s index %d\n", __func__, index);
+ power_setting = &ctrl->power_setting[index];
+ CDBG("%s type %d\n", __func__, power_setting->seq_type);
+ switch (power_setting->seq_type) {
+ case SENSOR_CLK:
+ if (power_setting->seq_val >= ctrl->clk_info_size) {
+ pr_err("%s clk index %d >= max %zu\n", __func__,
+ power_setting->seq_val,
+ ctrl->clk_info_size);
+ goto power_up_failed;
+ }
+ if (power_setting->config_val)
+ ctrl->clk_info[power_setting->seq_val].
+ clk_rate = power_setting->config_val;
+ rc = msm_camera_clk_enable(ctrl->dev,
+ ctrl->clk_info, ctrl->clk_ptr,
+ ctrl->clk_info_size, true);
+ if (rc < 0) {
+ pr_err("%s: clk enable failed\n", __func__);
+ goto power_up_failed;
+ }
+ break;
+ case SENSOR_GPIO:
+ if (no_gpio) {
+ pr_err("%s: request gpio failed\n", __func__);
+ return no_gpio;
+ }
+ if (power_setting->seq_val >= SENSOR_GPIO_MAX ||
+ !ctrl->gpio_conf->gpio_num_info) {
+ pr_err("%s gpio index %d >= max %d\n", __func__,
+ power_setting->seq_val,
+ SENSOR_GPIO_MAX);
+ goto power_up_failed;
+ }
+ if (!ctrl->gpio_conf->gpio_num_info->valid
+ [power_setting->seq_val])
+ continue;
+ CDBG("%s:%d gpio set val %d\n", __func__, __LINE__,
+ ctrl->gpio_conf->gpio_num_info->gpio_num
+ [power_setting->seq_val]);
+ gpio_set_value_cansleep(
+ ctrl->gpio_conf->gpio_num_info->gpio_num
+ [power_setting->seq_val],
+ (int) power_setting->config_val);
+ break;
+ case SENSOR_VREG:
+ if (power_setting->seq_val == INVALID_VREG)
+ break;
+
+ if (power_setting->seq_val >= CAM_VREG_MAX) {
+ pr_err("%s vreg index %d >= max %d\n", __func__,
+ power_setting->seq_val,
+					CAM_VREG_MAX);
+ goto power_up_failed;
+ }
+ if (power_setting->seq_val < ctrl->num_vreg)
+ msm_camera_config_single_vreg(ctrl->dev,
+ &ctrl->cam_vreg
+ [power_setting->seq_val],
+ (struct regulator **)
+ &power_setting->data[0],
+ 1);
+ else
+ pr_err("%s: %d usr_idx:%d dts_idx:%d\n",
+ __func__, __LINE__,
+ power_setting->seq_val, ctrl->num_vreg);
+
+ rc = msm_cam_sensor_handle_reg_gpio(
+ power_setting->seq_val,
+ ctrl->gpio_conf, 1);
+ if (rc < 0) {
+ pr_err("ERR:%s Error in handling VREG GPIO\n",
+ __func__);
+ goto power_up_failed;
+ }
+ break;
+ case SENSOR_I2C_MUX:
+ if (ctrl->i2c_conf && ctrl->i2c_conf->use_i2c_mux)
+ msm_camera_enable_i2c_mux(ctrl->i2c_conf);
+ break;
+ default:
+ pr_err("%s error power seq type %d\n", __func__,
+ power_setting->seq_type);
+ break;
+ }
+ if (power_setting->delay > 20) {
+ msleep(power_setting->delay);
+ } else if (power_setting->delay) {
+ usleep_range(power_setting->delay * 1000,
+ (power_setting->delay * 1000) + 1000);
+ }
+ }
+
+ if (device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ rc = sensor_i2c_client->i2c_func_tbl->i2c_util(
+ sensor_i2c_client, MSM_CCI_INIT);
+ if (rc < 0) {
+ pr_err("%s cci_init failed\n", __func__);
+ goto power_up_failed;
+ }
+ }
+ CDBG("%s exit\n", __func__);
+ return 0;
+power_up_failed:
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ for (index--; index >= 0; index--) {
+ CDBG("%s index %d\n", __func__, index);
+ power_setting = &ctrl->power_setting[index];
+ CDBG("%s type %d\n", __func__, power_setting->seq_type);
+ switch (power_setting->seq_type) {
+ case SENSOR_GPIO:
+ if (!ctrl->gpio_conf->gpio_num_info)
+ continue;
+ if (!ctrl->gpio_conf->gpio_num_info->valid
+ [power_setting->seq_val])
+ continue;
+ gpio_set_value_cansleep(
+ ctrl->gpio_conf->gpio_num_info->gpio_num
+ [power_setting->seq_val], GPIOF_OUT_INIT_LOW);
+ break;
+ case SENSOR_VREG:
+ if (power_setting->seq_val < ctrl->num_vreg)
+ msm_camera_config_single_vreg(ctrl->dev,
+ &ctrl->cam_vreg
+ [power_setting->seq_val],
+ (struct regulator **)
+ &power_setting->data[0],
+ 0);
+ else
+ pr_err("%s:%d:seq_val: %d > num_vreg: %d\n",
+ __func__, __LINE__,
+ power_setting->seq_val, ctrl->num_vreg);
+
+ msm_cam_sensor_handle_reg_gpio(power_setting->seq_val,
+ ctrl->gpio_conf, GPIOF_OUT_INIT_LOW);
+ break;
+ case SENSOR_I2C_MUX:
+ if (ctrl->i2c_conf && ctrl->i2c_conf->use_i2c_mux)
+ msm_camera_disable_i2c_mux(ctrl->i2c_conf);
+ break;
+ default:
+ pr_err("%s error power seq type %d\n", __func__,
+ power_setting->seq_type);
+ break;
+ }
+ if (power_setting->delay > 20) {
+ msleep(power_setting->delay);
+ } else if (power_setting->delay) {
+ usleep_range(power_setting->delay * 1000,
+ (power_setting->delay * 1000) + 1000);
+ }
+ }
+ if (ctrl->cam_pinctrl_status) {
+ ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+ ctrl->pinctrl_info.gpio_state_suspend);
+ if (ret)
+ pr_err("%s:%d cannot set pin to suspend state\n",
+ __func__, __LINE__);
+ devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+ }
+ ctrl->cam_pinctrl_status = 0;
+ msm_camera_request_gpio_table(
+ ctrl->gpio_conf->cam_gpio_req_tbl,
+ ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+ return rc;
+}
+
+static struct msm_sensor_power_setting*
+msm_camera_get_power_settings(struct msm_camera_power_ctrl_t *ctrl,
+ enum msm_sensor_power_seq_type_t seq_type,
+ uint16_t seq_val)
+{
+ struct msm_sensor_power_setting *power_setting, *ps = NULL;
+ int idx;
+
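+ /* Linear search of the power-up table for the entry matching this
+  * sequence type and value.
+  */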
+ for (idx = 0; idx < ctrl->power_setting_size; idx++) {
+ power_setting = &ctrl->power_setting[idx];
+ if ((power_setting->seq_type == seq_type) &&
+ (power_setting->seq_val == seq_val)) {
+ ps = power_setting;
+ return ps;
+ }
+
+ }
+ return ps;
+}
+
+int msm_camera_power_down(struct msm_camera_power_ctrl_t *ctrl,
+ enum msm_camera_device_type_t device_type,
+ struct msm_camera_i2c_client *sensor_i2c_client)
+{
+ int index = 0, ret = 0;
+ struct msm_sensor_power_setting *pd = NULL;
+ struct msm_sensor_power_setting *ps;
+
+ CDBG("%s:%d\n", __func__, __LINE__);
+ if (!ctrl || !sensor_i2c_client) {
+ pr_err("failed ctrl %pK sensor_i2c_client %pK\n", ctrl,
+ sensor_i2c_client);
+ return -EINVAL;
+ }
+ if (device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ sensor_i2c_client->i2c_func_tbl->i2c_util(
+ sensor_i2c_client, MSM_CCI_RELEASE);
+
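+ /* Apply the power-down sequence; regulator handles are looked up from
+  * the matching power-up entry.
+  */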
+ for (index = 0; index < ctrl->power_down_setting_size; index++) {
+ CDBG("%s index %d\n", __func__, index);
+ pd = &ctrl->power_down_setting[index];
+ ps = NULL;
+ CDBG("%s type %d\n", __func__, pd->seq_type);
+ switch (pd->seq_type) {
+ case SENSOR_CLK:
+ msm_camera_clk_enable(ctrl->dev,
+ ctrl->clk_info, ctrl->clk_ptr,
+ ctrl->clk_info_size, false);
+ break;
+ case SENSOR_GPIO:
+ if (pd->seq_val >= SENSOR_GPIO_MAX ||
+ !ctrl->gpio_conf->gpio_num_info) {
+ pr_err("%s gpio index %d >= max %d\n", __func__,
+ pd->seq_val,
+ SENSOR_GPIO_MAX);
+ continue;
+ }
+ if (!ctrl->gpio_conf->gpio_num_info->valid
+ [pd->seq_val])
+ continue;
+ gpio_set_value_cansleep(
+ ctrl->gpio_conf->gpio_num_info->gpio_num
+ [pd->seq_val],
+ (int) pd->config_val);
+ break;
+ case SENSOR_VREG:
+ if (pd->seq_val == INVALID_VREG)
+ break;
+ if (pd->seq_val >= CAM_VREG_MAX) {
+ pr_err("%s vreg index %d >= max %d\n", __func__,
+ pd->seq_val,
+ CAM_VREG_MAX);
+ continue;
+ }
+
+ ps = msm_camera_get_power_settings(ctrl,
+ pd->seq_type,
+ pd->seq_val);
+ if (ps) {
+ if (pd->seq_val < ctrl->num_vreg)
+ msm_camera_config_single_vreg(ctrl->dev,
+ &ctrl->cam_vreg
+ [pd->seq_val],
+ (struct regulator **)
+ &ps->data[0],
+ 0);
+ else
+ pr_err("%s:%d:seq_val:%d > num_vreg: %d\n",
+ __func__, __LINE__, pd->seq_val,
+ ctrl->num_vreg);
+ } else
+ pr_err("%s error in power up/down seq data\n",
+ __func__);
+ ret = msm_cam_sensor_handle_reg_gpio(pd->seq_val,
+ ctrl->gpio_conf, GPIOF_OUT_INIT_LOW);
+ if (ret < 0)
+ pr_err("ERR:%s Error while disabling VREG GPIO\n",
+ __func__);
+ break;
+ case SENSOR_I2C_MUX:
+ if (ctrl->i2c_conf && ctrl->i2c_conf->use_i2c_mux)
+ msm_camera_disable_i2c_mux(ctrl->i2c_conf);
+ break;
+ default:
+ pr_err("%s error power seq type %d\n", __func__,
+ pd->seq_type);
+ break;
+ }
+ if (pd->delay > 20) {
+ msleep(pd->delay);
+ } else if (pd->delay) {
+ usleep_range(pd->delay * 1000,
+ (pd->delay * 1000) + 1000);
+ }
+ }
+ if (ctrl->cam_pinctrl_status) {
+ ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+ ctrl->pinctrl_info.gpio_state_suspend);
+ if (ret)
+ pr_err("%s:%d cannot set pin to suspend state",
+ __func__, __LINE__);
+ devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+ }
+ ctrl->cam_pinctrl_status = 0;
+ msm_camera_request_gpio_table(
+ ctrl->gpio_conf->cam_gpio_req_tbl,
+ ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+ CDBG("%s exit\n", __func__);
+ return 0;
+}
+
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h
new file mode 100644
index 000000000000..a29ef21274c2
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h
@@ -0,0 +1,68 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CAMERA_DT_UTIL_H__
+#define MSM_CAMERA_DT_UTIL_H__
+
+#include <soc/qcom/ais.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include "msm_camera_i2c.h"
+#include "cam_soc_api.h"
+
+
+#define INVALID_VREG 100
+
+int msm_sensor_get_sub_module_index(struct device_node *of_node,
+ struct msm_sensor_info_t **s_info);
+
+int msm_sensor_get_dt_actuator_data(struct device_node *of_node,
+ struct msm_actuator_info **act_info);
+
+int msm_sensor_get_dt_csi_data(struct device_node *of_node,
+ struct msm_camera_csi_lane_params **csi_lane_params);
+
+int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
+ struct camera_vreg_t *cam_vreg, int num_vreg,
+ struct msm_camera_power_ctrl_t *power_info);
+
+int msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size);
+
+int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size);
+
+int msm_camera_get_dt_vreg_data(struct device_node *of_node,
+ struct camera_vreg_t **cam_vreg, int *num_vreg);
+
+int msm_camera_power_up(struct msm_camera_power_ctrl_t *ctrl,
+ enum msm_camera_device_type_t device_type,
+ struct msm_camera_i2c_client *sensor_i2c_client);
+
+int msm_camera_power_down(struct msm_camera_power_ctrl_t *ctrl,
+ enum msm_camera_device_type_t device_type,
+ struct msm_camera_i2c_client *sensor_i2c_client);
+
+int msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
+ int num_vreg, struct msm_sensor_power_setting *power_setting,
+ uint16_t power_setting_size);
+
+int msm_camera_pinctrl_init
+ (struct msm_pinctrl_info *sensor_pctrl, struct device *dev);
+
+int32_t msm_sensor_driver_get_gpio_data(
+ struct msm_camera_gpio_conf **gpio_conf,
+ struct device_node *of_node);
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c.h b/drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c.h
new file mode 100644
index 000000000000..2597d2351aaa
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c.h
@@ -0,0 +1,211 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CAMERA_CCI_I2C_H
+#define MSM_CAMERA_CCI_I2C_H
+
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais_sensor.h>
+
+#define I2C_POLL_TIME_MS 5
+#define MAX_POLL_DELAY_MS 100
+
+#define I2C_COMPARE_MATCH 0
+#define I2C_COMPARE_MISMATCH 1
+
+struct msm_camera_i2c_client {
+ struct msm_camera_i2c_fn_t *i2c_func_tbl;
+ struct i2c_client *client;
+ struct msm_camera_cci_client *cci_client;
+ struct msm_camera_spi_client *spi_client;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+};
+
+struct msm_camera_i2c_fn_t {
+ int (*i2c_read)(struct msm_camera_i2c_client *, uint32_t, uint16_t *,
+ enum msm_camera_i2c_data_type);
+ int32_t (*i2c_read_seq)(struct msm_camera_i2c_client *, uint32_t,
+ uint8_t *, uint32_t);
+ int (*i2c_write)(struct msm_camera_i2c_client *, uint32_t, uint16_t,
+ enum msm_camera_i2c_data_type);
+ int (*i2c_write_seq)(struct msm_camera_i2c_client *, uint32_t,
+ uint8_t *, uint32_t);
+ int32_t (*i2c_write_table)(struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_reg_setting *);
+ int32_t (*i2c_write_seq_table)(struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_seq_reg_setting *);
+ int32_t (*i2c_write_table_w_microdelay)
+ (struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_reg_setting *);
+ int32_t (*i2c_util)(struct msm_camera_i2c_client *, uint16_t);
+ int32_t (*i2c_write_conf_tbl)(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type);
+ int32_t (*i2c_poll)(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type, uint32_t delay_ms);
+ int32_t (*i2c_read_burst)(struct msm_camera_i2c_client *client,
+ uint32_t read_byte, uint8_t *buffer, uint32_t addr,
+ enum msm_camera_i2c_data_type data_type);
+ int32_t (*i2c_write_burst)(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
+ uint32_t buf_len, uint32_t addr,
+ enum msm_camera_i2c_data_type data_type);
+ int32_t (*i2c_write_table_async)(struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_reg_setting *);
+ int32_t (*i2c_write_table_sync)(struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_reg_setting *);
+ int32_t (*i2c_write_table_sync_block)(struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_reg_setting *);
+};
+
+int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_cci_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_cci_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_cci_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_cci_i2c_write_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_table_async(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_table_sync(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_table_sync_block(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_seq_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_sensor_cci_i2c_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd);
+
+int32_t msm_camera_cci_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type, uint32_t delay_ms);
+
+int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_qup_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_qup_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_qup_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_qup_i2c_write_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_qup_i2c_write_seq_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting);
+
+int32_t msm_camera_qup_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_qup_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_qup_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type, uint32_t delay_ms);
+
+int32_t msm_camera_tz_i2c_register_sensor(void *s_ctrl_p);
+
+int32_t msm_camera_tz_i2c_power_up(struct msm_camera_i2c_client *client);
+
+int32_t msm_camera_tz_i2c_power_down(struct msm_camera_i2c_client *client);
+
+int32_t msm_camera_tz_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_tz_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_tz_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_tz_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_tz_i2c_write_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_async(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_sync(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_sync_block(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_seq_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_sensor_tz_i2c_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd);
+
+int32_t msm_camera_tz_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c_mux.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c_mux.c
new file mode 100644
index 000000000000..d0d1e4003de6
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c_mux.c
@@ -0,0 +1,185 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include "msm_camera_i2c_mux.h"
+
+/* TODO move this somewhere else */
+#define MSM_I2C_MUX_DRV_NAME "msm_cam_i2c_mux"
+static int msm_i2c_mux_config(struct i2c_mux_device *mux_device, uint8_t *mode)
+{
+ uint32_t val;
+
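+ /* The two low bits of the control register select the routing: both
+  * set for MODE_DUAL, bit 1 for the left bus, bit 0 for the right bus.
+  */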
+ val = msm_camera_io_r(mux_device->ctl_base);
+ if (*mode == MODE_DUAL) {
+ msm_camera_io_w(val | 0x3, mux_device->ctl_base);
+ } else if (*mode == MODE_L) {
+ msm_camera_io_w(((val | 0x2) & ~(0x1)), mux_device->ctl_base);
+ val = msm_camera_io_r(mux_device->ctl_base);
+ CDBG("the camio mode config left value is %d\n", val);
+ } else {
+ msm_camera_io_w(((val | 0x1) & ~(0x2)), mux_device->ctl_base);
+ val = msm_camera_io_r(mux_device->ctl_base);
+ CDBG("the camio mode config right value is %d\n", val);
+ }
+ return 0;
+}
+
+static int msm_i2c_mux_init(struct i2c_mux_device *mux_device)
+{
+ int val = 0;
+
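+ /* Program the hardware only for the first user; later callers just
+  * bump the use count.
+  */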
+ if (mux_device->use_count == 0) {
+ val = msm_camera_io_r(mux_device->rw_base);
+ msm_camera_io_w((val | 0x200), mux_device->rw_base);
+ }
+ mux_device->use_count++;
+ return 0;
+}
+
+static int msm_i2c_mux_release(struct i2c_mux_device *mux_device)
+{
+ int val = 0;
+
+ mux_device->use_count--;
+ if (mux_device->use_count == 0) {
+ val = msm_camera_io_r(mux_device->rw_base);
+ msm_camera_io_w((val & ~0x200), mux_device->rw_base);
+ }
+ return 0;
+}
+
+static long msm_i2c_mux_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct i2c_mux_device *mux_device;
+ int rc = 0;
+
+ mux_device = v4l2_get_subdevdata(sd);
+ if (mux_device == NULL) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ mutex_lock(&mux_device->mutex);
+ switch (cmd) {
+ case VIDIOC_MSM_I2C_MUX_CFG:
+ rc = msm_i2c_mux_config(mux_device, (uint8_t *) arg);
+ break;
+ case VIDIOC_MSM_I2C_MUX_INIT:
+ rc = msm_i2c_mux_init(mux_device);
+ break;
+ case VIDIOC_MSM_I2C_MUX_RELEASE:
+ rc = msm_i2c_mux_release(mux_device);
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ }
+ mutex_unlock(&mux_device->mutex);
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_i2c_mux_subdev_core_ops = {
+ .ioctl = &msm_i2c_mux_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_i2c_mux_subdev_ops = {
+ .core = &msm_i2c_mux_subdev_core_ops,
+};
+
+static int i2c_mux_probe(struct platform_device *pdev)
+{
+ struct i2c_mux_device *mux_device;
+ int rc = 0;
+
+ CDBG("%s: device id = %d\n", __func__, pdev->id);
+ mux_device = kzalloc(sizeof(struct i2c_mux_device), GFP_KERNEL);
+ if (!mux_device) {
+ pr_err("%s: no enough memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ v4l2_subdev_init(&mux_device->subdev, &msm_i2c_mux_subdev_ops);
+ v4l2_set_subdevdata(&mux_device->subdev, mux_device);
+ platform_set_drvdata(pdev, &mux_device->subdev);
+ mutex_init(&mux_device->mutex);
+
+ mux_device->ctl_base = msm_camera_get_reg_base(pdev,
+ "i2c_mux_ctl", true);
+ if (!mux_device->ctl_base) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto ctl_base_failed;
+ }
+ mux_device->rw_base = msm_camera_get_reg_base(pdev, "i2c_mux_rw", true);
+ if (!mux_device->rw_base) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto rw_base_failed;
+ }
+ mux_device->pdev = pdev;
+ return 0;
+
+rw_base_failed:
+ msm_camera_put_reg_base(pdev, mux_device->ctl_base,
+ "i2c_mux_ctl", true);
+ctl_base_failed:
+ mutex_destroy(&mux_device->mutex);
+ kfree(mux_device);
+ return rc;
+}
+
+static int i2c_mux_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sub_dev = platform_get_drvdata(pdev);
+ struct i2c_mux_device *mux_device;
+
+ if (!sub_dev) {
+ pr_err("%s: sub device is NULL\n", __func__);
+ return 0;
+ }
+
+ mux_device = (struct i2c_mux_device *)v4l2_get_subdevdata(sub_dev);
+ if (!mux_device) {
+ pr_err("%s: mux device is NULL\n", __func__);
+ return 0;
+ }
+
+ msm_camera_put_reg_base(pdev, mux_device->rw_base, "i2c_mux_rw", true);
+ msm_camera_put_reg_base(pdev, mux_device->ctl_base, "i2c_mux_ctl", true);
+ return 0;
+}
+
+static struct platform_driver i2c_mux_driver = {
+ .probe = i2c_mux_probe,
+ .remove = i2c_mux_remove,
+ .driver = {
+ .name = MSM_I2C_MUX_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_camera_i2c_mux_init_module(void)
+{
+ return platform_driver_register(&i2c_mux_driver);
+}
+
+static void __exit msm_camera_i2c_mux_exit_module(void)
+{
+ platform_driver_unregister(&i2c_mux_driver);
+}
+
+module_init(msm_camera_i2c_mux_init_module);
+module_exit(msm_camera_i2c_mux_exit_module);
+MODULE_DESCRIPTION("MSM Camera I2C mux driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c_mux.h b/drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c_mux.h
new file mode 100644
index 000000000000..706fd9298630
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_i2c_mux.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_I2C_MUX_H
+#define MSM_I2C_MUX_H
+
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+
+struct i2c_mux_device {
+ struct platform_device *pdev;
+ struct v4l2_subdev subdev;
+ void __iomem *ctl_base;
+ void __iomem *rw_base;
+ struct mutex mutex;
+ unsigned int use_count;
+};
+
+struct i2c_mux_cfg_params {
+ struct v4l2_subdev *subdev;
+ void *parms;
+};
+
+#define VIDIOC_MSM_I2C_MUX_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct i2c_mux_cfg_params)
+
+#define VIDIOC_MSM_I2C_MUX_INIT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 14, struct v4l2_subdev*)
+
+#define VIDIOC_MSM_I2C_MUX_RELEASE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct v4l2_subdev*)
+
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c
new file mode 100644
index 000000000000..9098b23dbc67
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c
@@ -0,0 +1,608 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/ais.h>
+#include "msm_camera_i2c.h"
+
+#undef CDBG
+#ifdef CONFIG_MSM_AIS_DEBUG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#define S_I2C_DBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define CDBG(fmt, args...)
+#define S_I2C_DBG(fmt, args...)
+#endif
+
+static int32_t msm_camera_qup_i2c_rxdata(
+ struct msm_camera_i2c_client *dev_client, unsigned char *rxdata,
+ int data_length)
+{
+ int32_t rc = 0;
+ uint16_t saddr = dev_client->client->addr >> 1;
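+ /* Addressed read: the first message writes the register address, the
+  * second reads data_length bytes back into the same buffer.
+  */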
+ struct i2c_msg msgs[] = {
+ {
+ .addr = saddr,
+ .flags = 0,
+ .len = dev_client->addr_type,
+ .buf = rxdata,
+ },
+ {
+ .addr = saddr,
+ .flags = I2C_M_RD,
+ .len = data_length,
+ .buf = rxdata,
+ },
+ };
+
+ rc = i2c_transfer(dev_client->client->adapter, msgs, 2);
+ if (rc < 0)
+ S_I2C_DBG("msm_camera_qup_i2c_rxdata failed 0x%x\n", saddr);
+ return rc;
+}
+
+static int32_t msm_camera_qup_i2c_txdata(
+ struct msm_camera_i2c_client *dev_client, unsigned char *txdata,
+ int length)
+{
+ int32_t rc = 0;
+ uint16_t saddr = dev_client->client->addr >> 1;
+ struct i2c_msg msg[] = {
+ {
+ .addr = saddr,
+ .flags = 0,
+ .len = length,
+ .buf = txdata,
+ },
+ };
+
+ rc = i2c_transfer(dev_client->client->adapter, msg, 1);
+ if (rc < 0)
+ S_I2C_DBG("msm_camera_qup_i2c_txdata failed 0x%x\n", saddr);
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ unsigned char *buf = NULL;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ if (client->addr_type > UINT_MAX - data_type) {
+ S_I2C_DBG("%s: integer overflow prevented\n", __func__);
+ return rc;
+ }
+
+ buf = kzalloc(client->addr_type+data_type, GFP_KERNEL);
+ if (!buf) {
+ S_I2C_DBG("%s:%d no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ }
+ rc = msm_camera_qup_i2c_rxdata(client, buf, data_type);
+ if (rc < 0) {
+ S_I2C_DBG("%s fail\n", __func__);
+ kfree(buf);
+ buf = NULL;
+ return rc;
+ }
+
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA)
+ *data = buf[0];
+ else
+ *data = buf[0] << 8 | buf[1];
+
+ S_I2C_DBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+ kfree(buf);
+ buf = NULL;
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ unsigned char *buf = NULL;
+ int i;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || num_byte == 0)
+ return rc;
+
+ if (num_byte > I2C_REG_DATA_MAX) {
+ S_I2C_DBG("%s: Error num_byte:0x%x exceeds 8K\n",
+ __func__, num_byte);
+ S_I2C_DBG("%s: max supported:0x%x\n",
+ __func__, I2C_REG_DATA_MAX);
+ return rc;
+ }
+ if (client->addr_type > UINT_MAX - num_byte) {
+ S_I2C_DBG("%s: integer overflow prevented\n", __func__);
+ return rc;
+ }
+
+ buf = kzalloc(client->addr_type+num_byte, GFP_KERNEL);
+ if (!buf) {
+ S_I2C_DBG("%s:%d no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ }
+ rc = msm_camera_qup_i2c_rxdata(client, buf, num_byte);
+ if (rc < 0) {
+ S_I2C_DBG("%s fail\n", __func__);
+ kfree(buf);
+ buf = NULL;
+ return rc;
+ }
+
+ S_I2C_DBG("%s addr = 0x%x", __func__, addr);
+ for (i = 0; i < num_byte; i++) {
+ data[i] = buf[i];
+ S_I2C_DBG("Byte %d: 0x%x\n", i, buf[i]);
+ S_I2C_DBG("Data: 0x%x\n", data[i]);
+ }
+ kfree(buf);
+ buf = NULL;
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ unsigned char buf[client->addr_type+data_type];
+ uint8_t len = 0;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
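+ /* Pack the register address (MSB first) followed by the data into one
+  * transmit buffer.
+  */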
+ S_I2C_DBG("%s reg addr = 0x%x data type: %d\n",
+ __func__, addr, data_type);
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ len = 1;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len+1, buf[len+1]);
+ len = 2;
+ }
+ S_I2C_DBG("Data: 0x%x\n", data);
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+ buf[len] = data;
+ S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
+ len += 1;
+ } else if (data_type == MSM_CAMERA_I2C_WORD_DATA) {
+ buf[len] = data >> BITS_PER_BYTE;
+ buf[len+1] = data;
+ S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
+ S_I2C_DBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+ len += 2;
+ }
+ rc = msm_camera_qup_i2c_txdata(client, buf, len);
+ if (rc < 0)
+ S_I2C_DBG("%s fail\n", __func__);
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ unsigned char buf[client->addr_type+num_byte];
+ uint8_t len = 0, i = 0;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || num_byte == 0)
+ return rc;
+
+ S_I2C_DBG("%s reg addr = 0x%x num bytes: %d\n",
+ __func__, addr, num_byte);
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ len = 1;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len+1, buf[len+1]);
+ len = 2;
+ }
+ if (num_byte > I2C_SEQ_REG_DATA_MAX) {
+ pr_err("%s: num_byte=%d clamped to max supported %d\n",
+ __func__, num_byte, I2C_SEQ_REG_DATA_MAX);
+ num_byte = I2C_SEQ_REG_DATA_MAX;
+ }
+ for (i = 0; i < num_byte; i++) {
+ buf[i+len] = data[i];
+ S_I2C_DBG("Byte %d: 0x%x\n", i+len, buf[i+len]);
+ S_I2C_DBG("Data: 0x%x\n", data[i]);
+ }
+ rc = msm_camera_qup_i2c_txdata(client, buf, len+num_byte);
+ if (rc < 0)
+ S_I2C_DBG("%s fail\n", __func__);
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_write_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EFAULT;
+ struct msm_camera_i2c_reg_array *reg_setting;
+ uint16_t client_addr_type;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((write_setting->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && write_setting->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (write_setting->data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && write_setting->data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
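+ /* Temporarily adopt the table's address type; the client's original
+  * setting is restored before returning.
+  */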
+ reg_setting = write_setting->reg_setting;
+ client_addr_type = client->addr_type;
+ client->addr_type = write_setting->addr_type;
+
+ for (i = 0; i < write_setting->size; i++) {
+ CDBG("%s addr 0x%x data 0x%x\n", __func__,
+ reg_setting->reg_addr, reg_setting->reg_data);
+
+ rc = msm_camera_qup_i2c_write(client, reg_setting->reg_addr,
+ reg_setting->reg_data, write_setting->data_type);
+ if (rc < 0)
+ break;
+ reg_setting++;
+ }
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000, (write_setting->delay
+ * 1000) + 1000);
+
+ client->addr_type = client_addr_type;
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_write_seq_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EFAULT;
+ struct msm_camera_i2c_seq_reg_array *reg_setting;
+ uint16_t client_addr_type;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((write_setting->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && write_setting->addr_type != MSM_CAMERA_I2C_WORD_ADDR)) {
+ pr_err("%s Invalid addr type %d\n", __func__,
+ write_setting->addr_type);
+ return rc;
+ }
+
+ reg_setting = write_setting->reg_setting;
+ client_addr_type = client->addr_type;
+ client->addr_type = write_setting->addr_type;
+
+ if (reg_setting->reg_data_size > I2C_SEQ_REG_DATA_MAX) {
+ pr_err("%s: number of bytes %u exceeding the max supported %d\n",
+ __func__, reg_setting->reg_data_size, I2C_SEQ_REG_DATA_MAX);
+ return rc;
+ }
+
+ for (i = 0; i < write_setting->size; i++) {
+ rc = msm_camera_qup_i2c_write_seq(client, reg_setting->reg_addr,
+ reg_setting->reg_data, reg_setting->reg_data_size);
+ if (rc < 0)
+ break;
+ reg_setting++;
+ }
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000, (write_setting->delay
+ * 1000) + 1000);
+
+ client->addr_type = client_addr_type;
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EFAULT;
+
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (write_setting->data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && write_setting->data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ reg_setting = write_setting->reg_setting;
+ for (i = 0; i < write_setting->size; i++) {
+ rc = msm_camera_qup_i2c_write(client, reg_setting->reg_addr,
+ reg_setting->reg_data, write_setting->data_type);
+ if (rc < 0)
+ break;
+ if (reg_setting->delay)
+ usleep_range(reg_setting->delay,
+ reg_setting->delay + 1000);
+ reg_setting++;
+ }
+ return rc;
+}
+
+static int32_t msm_camera_qup_i2c_compare(
+ struct msm_camera_i2c_client *client, uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc;
+ uint16_t reg_data = 0;
+ int data_len = 0;
+
+ switch (data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ data_len = data_type;
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ data_len = MSM_CAMERA_I2C_BYTE_DATA;
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ data_len = MSM_CAMERA_I2C_WORD_DATA;
+ break;
+ default:
+ pr_err("%s: Unsupport data type: %d\n", __func__, data_type);
+ break;
+ }
+
+ rc = msm_camera_qup_i2c_read(client, addr, &reg_data, data_len);
+ if (rc < 0)
+ return rc;
+
+ rc = I2C_COMPARE_MISMATCH;
+ switch (data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ if (data == reg_data)
+ rc = I2C_COMPARE_MATCH;
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ if ((reg_data & data) == data)
+ rc = I2C_COMPARE_MATCH;
+ break;
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ if (!(reg_data & data))
+ rc = I2C_COMPARE_MATCH;
+ break;
+ default:
+ pr_err("%s: Unsupport data type: %d\n", __func__, data_type);
+ break;
+ }
+
+ S_I2C_DBG("%s: Register and data match result %d\n", __func__,
+ rc);
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type, uint32_t delay_ms)
+{
+ int32_t rc = 0;
+ int i;
+
+ S_I2C_DBG("%s: addr: 0x%x data: 0x%x dt: %d\n",
+ __func__, addr, data, data_type);
+
+ if (delay_ms > MAX_POLL_DELAY_MS) {
+ pr_err("%s:%d invalid delay = %d max_delay = %d\n",
+ __func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < delay_ms; i++) {
+ rc = msm_camera_qup_i2c_compare(client,
+ addr, data, data_type);
+ if (rc < 0) {
+ pr_err("%s:%d qup_i2c_compare failed rc = %d", __func__,
+ __LINE__, rc);
+ break;
+ }
+ if (rc == I2C_COMPARE_MATCH)
+ break;
+ usleep_range(1000, 1010);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_qup_i2c_set_mask(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t mask,
+ enum msm_camera_i2c_data_type data_type, uint16_t set_mask)
+{
+ int32_t rc;
+ uint16_t reg_data;
+
+ rc = msm_camera_qup_i2c_read(client, addr, &reg_data, data_type);
+ if (rc < 0) {
+ S_I2C_DBG("%s read fail\n", __func__);
+ return rc;
+ }
+ S_I2C_DBG("%s addr: 0x%x data: 0x%x setmask: 0x%x\n",
+ __func__, addr, reg_data, mask);
+
+ if (set_mask)
+ reg_data |= mask;
+ else
+ reg_data &= ~mask;
+ S_I2C_DBG("%s write: 0x%x\n", __func__, reg_data);
+
+ rc = msm_camera_qup_i2c_write(client, addr, reg_data, data_type);
+ if (rc < 0)
+ S_I2C_DBG("%s write fail\n", __func__);
+
+ return rc;
+}
+
+static int32_t msm_camera_qup_i2c_set_write_mask_data(
+ struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data, int16_t mask,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc;
+ uint16_t reg_data;
+
+ CDBG("%s\n", __func__);
+ if (mask == -1)
+ return 0;
+ if (mask == 0) {
+ rc = msm_camera_qup_i2c_write(client, addr, data, data_type);
+ } else {
+ rc = msm_camera_qup_i2c_read(client, addr, &reg_data,
+ data_type);
+ if (rc < 0) {
+ CDBG("%s read fail\n", __func__);
+ return rc;
+ }
+ reg_data &= ~mask;
+ reg_data |= (data & mask);
+ rc = msm_camera_qup_i2c_write(client, addr, reg_data,
+ data_type);
+ if (rc < 0)
+ CDBG("%s write fail\n", __func__);
+ }
+ return rc;
+}
+
+
+int32_t msm_camera_qup_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int i;
+ int32_t rc = -EFAULT;
+
+ pr_err("%s, E. ", __func__);
+ for (i = 0; i < size; i++) {
+ enum msm_camera_i2c_data_type dt;
+
+ if (reg_conf_tbl->cmd_type == MSM_CAMERA_I2C_CMD_POLL) {
+ rc = msm_camera_qup_i2c_poll(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ reg_conf_tbl->dt, I2C_POLL_TIME_MS);
+ } else {
+ if (reg_conf_tbl->dt == 0)
+ dt = data_type;
+ else
+ dt = reg_conf_tbl->dt;
+ switch (dt) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ rc = msm_camera_qup_i2c_write(
+ client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data, dt);
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ rc = msm_camera_qup_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_BYTE_DATA, 1);
+ break;
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ rc = msm_camera_qup_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_BYTE_DATA, 0);
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ rc = msm_camera_qup_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_WORD_DATA, 1);
+ break;
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ rc = msm_camera_qup_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_WORD_DATA, 0);
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
+ rc = msm_camera_qup_i2c_set_write_mask_data(
+ client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ reg_conf_tbl->mask,
+ MSM_CAMERA_I2C_BYTE_DATA);
+ break;
+ default:
+ pr_err("%s: Unsupport data type: %d\n",
+ __func__, dt);
+ break;
+ }
+ }
+ if (rc < 0)
+ break;
+ reg_conf_tbl++;
+ }
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c
new file mode 100644
index 000000000000..cd277f0ca0da
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c
@@ -0,0 +1,851 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/ais.h>
+#include "msm_camera_spi.h"
+
+#undef SPIDBG
+#ifdef CONFIG_MSM_AIS_DEBUG
+#define SPIDBG(fmt, args...) pr_debug(fmt, ##args)
+#define S_I2C_DBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define SPIDBG(fmt, args...)
+#define S_I2C_DBG(fmt, args...)
+#endif
+
+static int msm_camera_spi_txfr(struct spi_device *spi, char *txbuf,
+ char *rxbuf, int num_byte)
+{
+ struct spi_transfer t;
+ struct spi_message m;
+
+ memset(&t, 0, sizeof(t));
+ t.tx_buf = txbuf;
+ t.rx_buf = rxbuf;
+ t.len = num_byte;
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int msm_camera_spi_txfr_read(struct spi_device *spi, char *txbuf,
+ char *rxbuf, int txlen, int rxlen)
+{
+ struct spi_transfer tx;
+ struct spi_transfer rx;
+ struct spi_message m;
+
+ memset(&tx, 0, sizeof(tx));
+ memset(&rx, 0, sizeof(rx));
+ tx.tx_buf = txbuf;
+ rx.rx_buf = rxbuf;
+ tx.len = txlen;
+ rx.len = rxlen;
+ spi_message_init(&m);
+ spi_message_add_tail(&tx, &m);
+ spi_message_add_tail(&rx, &m);
+ return spi_sync(spi, &m);
+}
+
+
+/**
+ * msm_camera_set_addr() - helper function to set transfer address
+ * @addr: device address
+ * @addr_len: the addr field length of an instruction
+ * @type: type (i.e. byte-length) of @addr
+ * @str: shifted address output, must be zeroed when passed in
+ *
+ * This helper function sets @str based on the addr field length of an
+ * instruction and the data length.
+ */
+static void msm_camera_set_addr(uint32_t addr, uint8_t addr_len,
+ enum msm_camera_i2c_reg_addr_type type,
+ char *str)
+{
+ int i, len;
+
+ if (!addr_len)
+ return;
+
+ if (addr_len < type)
+ SPIDBG("%s: omitting higher bits in address\n", __func__);
+
+ /* only support transfer MSB first for now */
+ len = addr_len - type;
+ for (i = len; i < addr_len; i++) {
+ if (i >= 0)
+ str[i] = (addr >> (BITS_PER_BYTE * (addr_len - i - 1)))
+ & 0xFF;
+ }
+
+}
+
+/**
+ * msm_camera_spi_tx_helper() - wrapper for SPI transaction
+ * @client: io client
+ * @inst: inst of this transaction
+ * @addr: device addr following the inst
+ * @data: output byte array (could be NULL)
+ * @num_byte: size of @data
+ * @tx, rx: optional transfer buffer. It must be at least header
+ * + @num_byte long.
+ *
+ * This is the core function for SPI transaction, except for writes. It first
+ * checks address type, then allocates required memory for tx/rx buffers.
+ * It sends out <opcode><addr>, and optionally receives @num_byte of response,
+ * if @data is not NULL. This function does not check for wait conditions,
+ * and will return immediately once bus transaction finishes.
+ *
+ * This function will allocate buffers of header + @num_byte long. For
+ * large transfers, the allocation could fail. External buffer @tx, @rx
+ * should be passed in to bypass allocation. The size of buffer should be
+ * at least header + num_byte long. Since buffer is managed externally,
+ * @data will be ignored, and read results will be in @rx.
+ * @tx, @rx also can be used for repeated transfers to improve performance.
+ */
+int32_t msm_camera_spi_tx_helper(struct msm_camera_i2c_client *client,
+ struct msm_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+ uint32_t num_byte, char *tx, char *rx)
+{
+ int32_t rc = -EINVAL;
+ struct spi_device *spi = client->spi_client->spi_master;
+ char *ctx = NULL, *crx = NULL;
+ uint32_t len, hlen;
+ uint8_t retries = client->spi_client->retries;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_3B_ADDR))
+ return rc;
+
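+ /* Transfer length is the instruction header returned by
+  * msm_camera_spi_get_hlen() plus the optional read payload.
+  */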
+ hlen = msm_camera_spi_get_hlen(inst);
+ len = hlen + num_byte;
+
+ if (tx)
+ ctx = tx;
+ else
+ ctx = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (num_byte) {
+ if (rx)
+ crx = rx;
+ else
+ crx = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!crx) {
+ if (!tx)
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ } else {
+ crx = NULL;
+ }
+
+ ctx[0] = inst->opcode;
+ msm_camera_set_addr(addr, inst->addr_len, client->addr_type, ctx + 1);
+ while ((rc = msm_camera_spi_txfr(spi, ctx, crx, len)) && retries) {
+ retries--;
+ msleep(client->spi_client->retry_delay);
+ }
+ if (rc < 0) {
+ SPIDBG("%s: failed %d\n", __func__, rc);
+ goto out;
+ }
+ if (data && num_byte && !rx)
+ memcpy(data, crx + hlen, num_byte);
+
+out:
+ if (!tx)
+ kfree(ctx);
+ if (!rx)
+ kfree(crx);
+ return rc;
+}
+
+int32_t msm_camera_spi_tx_read(struct msm_camera_i2c_client *client,
+ struct msm_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+ uint32_t num_byte, char *tx, char *rx)
+{
+ int32_t rc = -EINVAL;
+ struct spi_device *spi = client->spi_client->spi_master;
+ char *ctx = NULL, *crx = NULL;
+ uint32_t hlen;
+ uint8_t retries = client->spi_client->retries;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_3B_ADDR))
+ return rc;
+
+ hlen = msm_camera_spi_get_hlen(inst);
+ if (tx)
+ ctx = tx;
+ else
+ ctx = kzalloc(hlen, GFP_KERNEL | GFP_DMA);
+ if (!ctx)
+ return -ENOMEM;
+ if (num_byte) {
+ if (rx)
+ crx = rx;
+ else
+ crx = kzalloc(num_byte, GFP_KERNEL | GFP_DMA);
+ if (!crx) {
+ if (!tx)
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ } else {
+ crx = NULL;
+ }
+
+ ctx[0] = inst->opcode;
+ if (client->addr_type == MSM_CAMERA_I2C_3B_ADDR) {
+ msm_camera_set_addr(addr, inst->addr_len, client->addr_type,
+ ctx + 1);
+ } else {
+ ctx[1] = (addr >> BITS_PER_BYTE) & 0xFF;
+ ctx[2] = (addr & 0xFF);
+ ctx[3] = 0;
+ }
+ SPIDBG("%s: tx(%u): %02x %02x %02x %02x\n", __func__,
+ hlen, ctx[0], ctx[1], ctx[2], ctx[3]);
+ while ((rc = msm_camera_spi_txfr_read(spi, ctx, crx, hlen, num_byte))
+ && retries) {
+ retries--;
+ msleep(client->spi_client->retry_delay);
+ }
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, rc);
+ goto out;
+ }
+ if (data && num_byte && !rx)
+ memcpy(data, crx, num_byte);
+out:
+ if (!tx)
+ kfree(ctx);
+ if (!rx)
+ kfree(crx);
+ return rc;
+}
+
+int32_t msm_camera_spi_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EINVAL;
+ uint8_t temp[2];
+
+ if ((data_type != MSM_CAMERA_I2C_BYTE_DATA)
+ && (data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ rc = msm_camera_spi_tx_read(client,
+ &client->spi_client->cmd_tbl.read, addr, &temp[0],
+ data_type, NULL, NULL);
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, rc);
+ return rc;
+ }
+
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA)
+ *data = temp[0];
+ else
+ *data = (temp[0] << BITS_PER_BYTE) | temp[1];
+
+ SPIDBG("%s: addr 0x%x, data %u\n", __func__, addr, *data);
+ return rc;
+}
+
+int32_t msm_camera_spi_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ return msm_camera_spi_tx_helper(client,
+ &client->spi_client->cmd_tbl.read_seq, addr, data, num_byte,
+ NULL, NULL);
+}
+
+/**
+ * msm_camera_spi_read_seq_l()- function for large SPI reads
+ * @client: io client
+ * @addr: device address to read
+ * @num_byte: read length
+ * @tx,rx: pre-allocated SPI buffer. Its size must be at least
+ * header + num_byte
+ *
+ * This function is used for large transactions. Instead of allocating SPI
+ * buffer each time, caller is responsible for pre-allocating memory buffers.
+ * Memory buffer must be at least header + num_byte. Header length can be
+ * obtained by msm_camera_spi_get_hlen().
+ */
+int32_t msm_camera_spi_read_seq_l(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint32_t num_byte, char *tx, char *rx)
+{
+ return msm_camera_spi_tx_helper(client,
+ &client->spi_client->cmd_tbl.read_seq, addr, NULL, num_byte,
+ tx, rx);
+}
+
+int32_t msm_camera_spi_query_id(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ return msm_camera_spi_tx_helper(client,
+ &client->spi_client->cmd_tbl.query_id, addr, data, num_byte,
+ NULL, NULL);
+}
+
+static int32_t msm_camera_spi_read_status_reg(
+ struct msm_camera_i2c_client *client, uint8_t *status)
+{
+ struct msm_camera_spi_inst *rs =
+ &client->spi_client->cmd_tbl.read_status;
+
+ if (rs->addr_len != 0) {
+ pr_err("%s: not implemented yet\n", __func__);
+ return -EINVAL;
+ }
+ return msm_camera_spi_tx_helper(client, rs, 0, status, 1, NULL, NULL);
+}
+
+static int32_t msm_camera_spi_device_busy(struct msm_camera_i2c_client *client,
+ uint8_t *busy)
+{
+ int rc;
+ uint8_t st = 0;
+
+ rc = msm_camera_spi_read_status_reg(client, &st);
+ if (rc < 0) {
+ pr_err("%s: failed to read status reg\n", __func__);
+ return rc;
+ }
+ *busy = st & client->spi_client->busy_mask;
+ return 0;
+}
+
+static int32_t msm_camera_spi_wait(struct msm_camera_i2c_client *client,
+ struct msm_camera_spi_inst *inst)
+{
+ uint8_t busy;
+ int i, rc;
+
+ SPIDBG("%s: op 0x%x wait start\n", __func__, inst->opcode);
+ for (i = 0; i < inst->delay_count; i++) {
+ rc = msm_camera_spi_device_busy(client, &busy);
+ if (rc < 0)
+ return rc;
+ if (!busy)
+ break;
+ msleep(inst->delay_intv);
+ SPIDBG("%s: op 0x%x wait\n", __func__, inst->opcode);
+ }
+ if (i >= inst->delay_count) {
+ pr_err("%s: op %x timed out\n", __func__, inst->opcode);
+ return -ETIMEDOUT;
+ }
+ SPIDBG("%s: op %x finished\n", __func__, inst->opcode);
+ return 0;
+}
+
+static int32_t msm_camera_spi_write_enable(
+ struct msm_camera_i2c_client *client)
+{
+ struct msm_camera_spi_inst *we =
+ &client->spi_client->cmd_tbl.write_enable;
+ int rc;
+
+ if (we->opcode == 0)
+ return 0;
+ if (we->addr_len != 0) {
+ pr_err("%s: not implemented yet\n", __func__);
+ return -EINVAL;
+ }
+ rc = msm_camera_spi_tx_helper(client, we, 0, NULL, 0, NULL, NULL);
+ if (rc < 0)
+ pr_err("%s: write enable failed\n", __func__);
+ return rc;
+}
+
+int32_t msm_camera_spi_erase(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint32_t size)
+{
+ struct msm_camera_spi_inst *se = &client->spi_client->cmd_tbl.erase;
+ int rc = 0;
+ uint32_t cur;
+ uint32_t end = addr + size;
+ uint32_t erase_size = client->spi_client->erase_size;
+
+ for (cur = rounddown(addr, erase_size); cur < end; cur += erase_size) {
+ SPIDBG("%s: erasing 0x%x\n", __func__, cur);
+ rc = msm_camera_spi_write_enable(client);
+ if (rc < 0)
+ return rc;
+ rc = msm_camera_spi_tx_helper(client, se, cur, NULL, 0,
+ NULL, NULL);
+ if (rc < 0) {
+ pr_err("%s: erase failed\n", __func__);
+ return rc;
+ }
+ rc = msm_camera_spi_wait(client, se);
+ if (rc < 0) {
+ pr_err("%s: erase timedout\n", __func__);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+/**
+ * msm_camera_spi_page_program() - core function to perform write
+ * @client: need for obtaining SPI device
+ * @addr: address to program on device
+ * @data: data to write
+ * @len: size of data
+ * @tx: tx buffer, size >= header + len
+ *
+ * This function performs SPI write, and has no boundary check. Writing range
+ * should not cross page boundary, or data will be corrupted. Transaction is
+ * guaranteed to be finished when it returns. This function should never be
+ * used outside msm_camera_spi_write_seq().
+ */
+static int32_t msm_camera_spi_page_program(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint16_t len, uint8_t *tx)
+{
+ int rc;
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ struct spi_device *spi = client->spi_client->spi_master;
+ uint8_t retries = client->spi_client->retries;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+
+ SPIDBG("%s: addr 0x%x, size 0x%x\n", __func__, addr, len);
+ rc = msm_camera_spi_write_enable(client);
+ if (rc < 0)
+ return rc;
+ memset(tx, 0, header_len);
+ tx[0] = pg->opcode;
+ msm_camera_set_addr(addr, pg->addr_len, client->addr_type, tx + 1);
+ memcpy(tx + header_len, data, len);
+ SPIDBG("%s: tx(%u): %02x %02x %02x %02x\n", __func__,
+ len, tx[0], tx[1], tx[2], tx[3]);
+ while ((rc = spi_write(spi, tx, len + header_len)) && retries) {
+ rc = msm_camera_spi_wait(client, pg);
+ msleep(client->spi_client->retry_delay);
+ retries--;
+ }
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, rc);
+ return rc;
+ }
+ rc = msm_camera_spi_wait(client, pg);
+ return rc;
+}
+
+int32_t msm_camera_spi_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ const uint32_t page_size = client->spi_client->page_size;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ uint16_t len;
+ uint32_t cur_len, end;
+ char *tx, *pdata = data;
+ int rc = -EINVAL;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_3B_ADDR))
+ return rc;
+ /* single page write */
+ if ((addr % page_size) + num_byte <= page_size) {
+ len = header_len + num_byte;
+ tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!tx)
+ goto NOMEM;
+ rc = msm_camera_spi_page_program(client, addr, data,
+ num_byte, tx);
+ if (rc < 0)
+ goto ERROR;
+ goto OUT;
+ }
+ /* multi page write */
+ len = header_len + page_size;
+ tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!tx)
+ goto NOMEM;
+ while (num_byte) {
+ end = min(page_size, (addr % page_size) + num_byte);
+ cur_len = end - (addr % page_size);
+ rc = msm_camera_spi_page_program(client, addr, pdata,
+ cur_len, tx);
+ if (rc < 0)
+ goto ERROR;
+ addr += cur_len;
+ pdata += cur_len;
+ num_byte -= cur_len;
+ }
+ goto OUT;
+NOMEM:
+ pr_err("%s: memory allocation failed\n", __func__);
+ return -ENOMEM;
+ERROR:
+ pr_err("%s: error write\n", __func__);
+OUT:
+ kfree(tx);
+ return rc;
+}
+
+int32_t msm_camera_spi_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data, enum msm_camera_i2c_data_type data_type)
+{
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ uint16_t len = 0;
+ char buf[data_type];
+ char *tx;
+ int rc = -EINVAL;
+
+ if (((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_3B_ADDR))
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+ S_I2C_DBG("Data: 0x%x\n", data);
+ len = header_len + (uint8_t)data_type;
+ tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!tx)
+ goto NOMEM;
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+ buf[0] = data;
+ SPIDBG("Byte %d: 0x%x\n", len, buf[0]);
+ } else if (data_type == MSM_CAMERA_I2C_WORD_DATA) {
+ buf[0] = (data >> BITS_PER_BYTE) & 0x00FF;
+ buf[1] = (data & 0x00FF);
+ }
+ rc = msm_camera_spi_page_program(client, addr, buf,
+ (uint16_t)data_type, tx);
+ if (rc < 0)
+ goto ERROR;
+ goto OUT;
+NOMEM:
+ pr_err("%s: memory allocation failed\n", __func__);
+ return -ENOMEM;
+ERROR:
+ pr_err("%s: error write\n", __func__);
+OUT:
+ kfree(tx);
+ return rc;
+}
+int32_t msm_camera_spi_write_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EFAULT;
+ struct msm_camera_i2c_reg_array *reg_setting;
+ uint16_t client_addr_type;
+
+ if (!client || !write_setting)
+ return rc;
+ if ((write_setting->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && write_setting->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (write_setting->data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && write_setting->data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+ reg_setting = write_setting->reg_setting;
+ client_addr_type = client->addr_type;
+ client->addr_type = write_setting->addr_type;
+ for (i = 0; i < write_setting->size; i++) {
+ SPIDBG("%s addr %x data %x\n", __func__,
+ reg_setting->reg_addr, reg_setting->reg_data);
+ rc = msm_camera_spi_write(client, reg_setting->reg_addr,
+ reg_setting->reg_data, write_setting->data_type);
+ if (rc < 0)
+ break;
+ reg_setting++;
+ }
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000,
+ (write_setting->delay
+ * 1000) + 1000);
+ client->addr_type = client_addr_type;
+ return rc;
+}
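+
+/*
+ * msm_get_burst_size() - count how many consecutive entries, starting at
+ * @index, target @burst_addr and can therefore be sent as one burst.
+ */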
+uint32_t msm_get_burst_size(struct msm_camera_i2c_reg_array *reg_setting,
+ uint32_t reg_size, uint32_t index, uint16_t burst_addr)
+{
+ uint32_t i;
+ uint32_t cnt = 0;
+
+ for (i = index; i < reg_size; i++) {
+ if (reg_setting[i].reg_addr == burst_addr)
+ cnt++;
+ else
+ break;
+ }
+ return cnt;
+}
+
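+/*
+ * Two implementations of msm_camera_spi_send_burst() are provided: with
+ * SPI_DYNAMIC_ALLOC the transmit buffer is kmalloc'd per burst, otherwise
+ * a fixed-size msm_spi_write_burst_packet on the stack is used. Both split
+ * the burst into chunk_size pieces plus a residue transfer.
+ */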
+#ifdef SPI_DYNAMIC_ALLOC
+int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
+ struct msm_camera_burst_info *info,
+ enum msm_camera_i2c_data_type data_type)
+{
+ uint32_t i, j, k;
+ int32_t rc = 0;
+ uint32_t chunk_num, residue;
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ uint8_t *ctx, *data;
+ uint32_t len;
+
+ if (info->burst_len == 0 || info->chunk_size == 0) {
+ pr_err("%s:%d Invalid argument\n", __func__, __LINE__);
+ return rc;
+ }
+ if (info->burst_start + info->burst_len > reg_size) {
+ pr_err("%s too big burst size, index=%d, size=%d\n", __func__,
+ info->burst_start, info->burst_len);
+ return rc;
+ }
+ chunk_num = info->burst_len / info->chunk_size;
+ residue = info->burst_len % info->chunk_size;
+ SPIDBG("%s header_len=%d, chunk nb=%d, residue=%d\n",
+ __func__, header_len, chunk_num, residue);
+ len = info->chunk_size * data_type + header_len;
+ SPIDBG("buffer allocation size = %d\n", len);
+ ctx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!ctx) {
+ pr_err("%s %d memory alloc fail!\n", __func__, __LINE__);
+ return rc;
+ }
+ ctx[0] = pg->opcode;
+ ctx[1] = (info->burst_addr >> 8) & 0xff;
+ ctx[2] = info->burst_addr & 0xff;
+ k = info->burst_start;
+ for (i = 0; i < chunk_num; i++) {
+ data = ctx + header_len;
+ for (j = 0; j < info->chunk_size; j++) {
+ *data++ = (reg_setting[k+j].reg_data >> 8) & 0xff;
+ *data++ = reg_setting[k+j].reg_data & 0xff;
+ }
+ rc = msm_camera_spi_txfr(client->spi_client->spi_master,
+ (void *) ctx, NULL,
+ info->chunk_size * data_type + header_len);
+ if (rc < 0) {
+ pr_err("%s %d spi sending error = %d!!\n",
+ __func__, __LINE__, rc);
+ goto fail;
+ }
+ k += info->chunk_size;
+ }
+ SPIDBG("%s burst chunk start=%d, residue=%d\n",
+ __func__, k, residue);
+ if (residue) {
+ data = ctx + header_len;
+ for (j = 0; j < residue; j++) {
+ *data++ = (reg_setting[k+j].reg_data >> 8) & 0xff;
+ *data++ = reg_setting[k+j].reg_data & 0xff;
+ }
+ rc = msm_camera_spi_txfr(client->spi_client->spi_master,
+ (void *)ctx, NULL,
+ residue*data_type+header_len);
+ if (rc < 0) {
+ pr_err("%s %d spi sending error = %d!!\n", __func__,
+ __LINE__, rc);
+ goto fail;
+ }
+ }
+fail:
+ kfree(ctx);
+ return rc;
+}
+#else /* SPI_DYNAMIC_ALLOC */
+int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
+ struct msm_camera_burst_info *info,
+ enum msm_camera_i2c_data_type data_type)
+{
+ uint32_t i, j, k;
+ int32_t rc = 0;
+ uint32_t chunk_num, residue;
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ struct msm_spi_write_burst_packet tx_buf;
+
+ if (info->burst_len == 0 || info->chunk_size == 0) {
+ pr_err("%s %d Invalid argument\n", __func__, __LINE__);
+ return rc;
+ }
+ if (info->burst_start + info->burst_len > reg_size) {
+ pr_err("%s too big burst size, index=%d, size=%d\n", __func__,
+ info->burst_start, info->burst_len);
+ return rc;
+ }
+ chunk_num = info->burst_len / info->chunk_size;
+ residue = info->burst_len % info->chunk_size;
+ SPIDBG("%s header_len=%d, chunk nb=%d, residue=%d\n",
+ __func__, header_len, chunk_num, residue);
+ tx_buf.cmd = pg->opcode;
+ tx_buf.addr_msb = (info->burst_addr >> 8) & 0xff;
+ tx_buf.addr_lsb = info->burst_addr & 0xff;
+ SPIDBG("%s cmd=%d, addr_msb=0x%x, addr_lsb=0x%x\n", __func__,
+ tx_buf.cmd, tx_buf.addr_msb, tx_buf.addr_lsb);
+ k = info->burst_start;
+ for (i = 0; i < chunk_num; i++) {
+ SPIDBG("%s burst chunk start=%d, chunk_size=%d, chunk_num=%d\n",
+ __func__,
+ k, info->chunk_size, i);
+ for (j = 0; j < info->chunk_size; j++) {
+ tx_buf.data_arr[j].data_msb =
+ (reg_setting[k+j].reg_data >> 8) & 0xff;
+ tx_buf.data_arr[j].data_lsb =
+ reg_setting[k+j].reg_data & 0xff;
+ }
+ rc = msm_camera_spi_txfr(client->spi_client->spi_master,
+ (void *)&tx_buf, NULL,
+ info->chunk_size * data_type+header_len);
+ if (rc < 0) {
+ pr_err("%s %d spi sending error = %d!!\n", __func__,
+ __LINE__, rc);
+ goto fail;
+ }
+ k += info->chunk_size;
+ }
+ SPIDBG("%s burst chunk start=%d, residue=%d\n", __func__, k, residue);
+ if (residue) {
+ for (j = 0; j < residue; j++) {
+ tx_buf.data_arr[j].data_msb = (reg_setting[k+j].reg_data
+ >> 8) & 0xff;
+ tx_buf.data_arr[j].data_lsb = reg_setting[k+j].reg_data
+ & 0xff;
+ }
+ rc = msm_camera_spi_txfr(client->spi_client->spi_master,
+ (void *)&tx_buf, NULL,
+ residue * data_type+header_len);
+ if (rc < 0) {
+ pr_err("%s %d spi sending error = %d!!\n", __func__,
+ __LINE__, rc);
+ goto fail;
+ }
+ }
+fail:
+ return rc;
+}
+#endif /* SPI_DYNAMIC_ALLOC */
+
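+/*
+ * msm_camera_spi_write_burst() - walk the register array, coalescing runs
+ * of writes to @burst_addr into burst transfers and issuing ordinary word
+ * writes for all other registers. Word address/data types only.
+ */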
+int32_t msm_camera_spi_write_burst(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
+ uint32_t buf_len, uint32_t burst_addr,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int k = 0;
+ int32_t rc = -EFAULT;
+ struct msm_camera_burst_info burst_info;
+
+ SPIDBG(" %s: start\n", __func__);
+ if (buf_len <= 0) {
+ pr_err("%s Invalid parameter, buf_len = %d\n",
+ __func__, buf_len);
+ return rc;
+ }
+ if (reg_size <= 0 || reg_setting == NULL) {
+ pr_err("%s Invalid parameter, array_size = %d\n",
+ __func__, reg_size);
+ return rc;
+ }
+
+ if ((client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ SPIDBG(" %s: buf_len=%d, reg_size=%d\n", __func__, buf_len, reg_size);
+ while (k < reg_size) {
+ if (reg_setting[k].reg_addr == burst_addr) {
+ memset(&burst_info, 0x00,
+ sizeof(struct msm_camera_burst_info));
+ burst_info.burst_addr = burst_addr;
+ burst_info.burst_start = k;
+ burst_info.chunk_size = buf_len;
+ burst_info.burst_len =
+ msm_get_burst_size(reg_setting, reg_size, k,
+ burst_addr);
+ SPIDBG("%s burst start = %d, length = %d\n", __func__,
+ k, burst_info.burst_len);
+ rc = msm_camera_spi_send_burst(client, reg_setting,
+ reg_size, &burst_info, data_type);
+ if (rc < 0) {
+ pr_err("[%s::%d][spi_sync Error::%d]\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ k += burst_info.burst_len;
+ } else {
+ SPIDBG("%s word write, start = %d\n", __func__, k);
+ msm_camera_spi_write(client, reg_setting[k].reg_addr,
+ reg_setting[k].reg_data, data_type);
+ k++;
+ }
+ }
+ SPIDBG("%s: end\n", __func__);
+ return rc;
+}
+
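+/*
+ * msm_camera_spi_read_burst() - read @read_byte bytes from @burst_addr in
+ * a single transfer built from the read instruction header. Word
+ * address/data types only.
+ */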
+int32_t msm_camera_spi_read_burst(struct msm_camera_i2c_client *client,
+ uint32_t read_byte, uint8_t *buffer, uint32_t burst_addr,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.read;
+ uint32_t len = msm_camera_spi_get_hlen(pg);
+ uint8_t *tx_buf = NULL;
+ uint8_t *r = buffer;
+
+ SPIDBG("%s: start\n", __func__);
+
+ if (buffer == NULL || read_byte == 0 || len == 0) {
+ pr_err("%s %d Invalid parameters!!\n", __func__, __LINE__);
+ return rc;
+ }
+
+ if ((client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+ tx_buf = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!tx_buf)
+ return -ENOMEM;
+
+ tx_buf[0] = pg->opcode;
+ tx_buf[1] = (burst_addr >> 8) & 0xff;
+ tx_buf[2] = burst_addr & 0xff;
+ tx_buf[3] = 0; /* dummy */
+ rc = msm_camera_spi_txfr_read(client->spi_client->spi_master,
+ &tx_buf[0], r, len, read_byte);
+ if (rc < 0)
+ pr_err("[%s::%d][spi_sync Error::%d]\n", __func__,
+ __LINE__, rc);
+
+ kfree(tx_buf);
+
+ SPIDBG("%s: end\n", __func__);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h
new file mode 100644
index 000000000000..28aa184ce630
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CAMERA_SPI_H
+#define __MSM_CAMERA_SPI_H
+
+#include <linux/spi/spi.h>
+#include <media/ais/msm_ais_sensor.h>
+#include "msm_camera_i2c.h"
+
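+/*
+ * MAX_SPI_SIZE bounds the data_arr of msm_spi_write_burst_packet used by
+ * the static burst path; SPI_DYNAMIC_ALLOC selects the kmalloc'd path in
+ * msm_camera_spi_send_burst() instead.
+ */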
+#define MAX_SPI_SIZE 110
+#define SPI_DYNAMIC_ALLOC
+
+/**
+ * Common SPI communication scheme
+ * tx: <opcode>[addr][wait][write buffer]
+ * rx: [read buffer]
+ * Some instructions require polling the busy bit in the status
+ * register until the operation completes.
+ */
+struct msm_camera_spi_inst {
+ uint8_t opcode; /* one-byte opcode */
+ uint8_t addr_len; /* addr len in bytes */
+ uint8_t dummy_len; /* setup cycles */
+ uint8_t delay_intv; /* delay intv for this inst (ms) */
+ uint8_t delay_count; /* total delay count for this inst */
+};
+
+struct msm_spi_write_burst_data {
+ u8 data_msb;
+ u8 data_lsb;
+};
+
+struct msm_spi_write_burst_packet {
+ u8 cmd;
+ u8 addr_msb;
+ u8 addr_lsb;
+ struct msm_spi_write_burst_data data_arr[MAX_SPI_SIZE];
+};
+
+struct msm_camera_burst_info {
+ uint32_t burst_addr;
+ uint32_t burst_start;
+ uint32_t burst_len;
+ uint32_t chunk_size;
+};
+
+struct msm_camera_spi_inst_tbl {
+ struct msm_camera_spi_inst read;
+ struct msm_camera_spi_inst read_seq;
+ struct msm_camera_spi_inst query_id;
+ struct msm_camera_spi_inst page_program;
+ struct msm_camera_spi_inst write_enable;
+ struct msm_camera_spi_inst read_status;
+ struct msm_camera_spi_inst erase;
+};
+
+struct msm_camera_spi_client {
+ struct spi_device *spi_master;
+ struct msm_camera_spi_inst_tbl cmd_tbl;
+ uint8_t device_id0;
+ uint8_t device_id1;
+ uint8_t mfr_id0;
+ uint8_t mfr_id1;
+ uint8_t retry_delay; /* ms */
+ uint8_t retries; /* retry times upon failure */
+ uint8_t busy_mask; /* busy bit in status reg */
+ uint16_t page_size; /* page size for page program */
+ uint32_t erase_size; /* minimal erase size */
+};
+
+static __always_inline
+uint16_t msm_camera_spi_get_hlen(struct msm_camera_spi_inst *inst)
+{
+ return sizeof(inst->opcode) + inst->addr_len + inst->dummy_len;
+}
+
+int32_t msm_camera_spi_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_spi_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_spi_read_seq_l(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint32_t num_byte, char *tx, char *rx);
+
+int32_t msm_camera_spi_query_id(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_spi_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_spi_erase(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint32_t size);
+
+int32_t msm_camera_spi_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data, enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_spi_write_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_spi_write_burst(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
+ uint32_t buf_len, uint32_t addr,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_spi_read_burst(struct msm_camera_i2c_client *client,
+ uint32_t read_byte, uint8_t *buffer, uint32_t addr,
+ enum msm_camera_i2c_data_type data_type);
+
+#endif /* __MSM_CAMERA_SPI_H */
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_tz_i2c.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_tz_i2c.c
new file mode 100644
index 000000000000..75589c176e22
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_tz_i2c.c
@@ -0,0 +1,1096 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <soc/qcom/ais.h>
+#include "qseecom_kernel.h"
+#include "msm_camera_i2c.h"
+#include "msm_camera_io_util.h"
+#include "msm_cci.h"
+#include "msm_sensor.h"
+
+#define QSEECOM_SBUFF_SIZE SZ_128K
+#define MAX_TA_NAME 32
+#define EMPTY_QSEECOM_HANDLE NULL
+
+#ifndef CONFIG_MSM_AIS_SEC_CCI_TA_NAME
+ #define CONFIG_MSM_AIS_SEC_CCI_TA_NAME "seccamdemo64"
+#endif /* CONFIG_MSM_AIS_SEC_CCI_TA_NAME */
+
+/* Update version major number in case the HLOS-TA interface is changed */
+#define TA_IF_VERSION_MAJ 0
+#define TA_IF_VERSION_MIN 1
+
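+/*
+ * With CONFIG_MSM_AIS_SEC_CCI_DEBUG set, TZ_I2C_FN_RETURN() falls back to
+ * the plain CCI function when the TA call fails; otherwise the failure is
+ * reported as -EFAULT and no bypass is attempted.
+ */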
+#undef CDBG
+#ifdef CONFIG_MSM_AIS_SEC_CCI_DEBUG
+
+#define CDBG(fmt, args...) \
+ pr_info(CONFIG_MSM_AIS_SEC_CCI_TA_NAME "::%s:%d - " fmt,\
+ __func__, __LINE__, ##args)
+#define TZ_I2C_FN_RETURN(ret, i2c_fn, ...) \
+ ((ret < 0) ? i2c_fn(__VA_ARGS__):ret)
+
+#else /* CONFIG_MSM_AIS_SEC_CCI_DEBUG */
+
+#define CDBG(fmt, args...) \
+ pr_info("%s:%d - " fmt, __func__, __LINE__, ##args)
+#define TZ_I2C_FN_RETURN(ret, i2c_fn, ...) \
+ ((ret < 0) ? -EFAULT:ret)
+
+#endif /* CONFIG_MSM_AIS_SEC_CCI_DEBUG */
+
+#pragma pack(push, msm_camera_tz_i2c, 1)
+
+enum msm_camera_tz_i2c_cmd_id_t {
+ TZ_I2C_CMD_GET_NONE,
+ TZ_I2C_CMD_GET_IF_VERSION,
+ TZ_I2C_CMD_POWER_UP,
+ TZ_I2C_CMD_POWER_DOWN,
+ TZ_I2C_CMD_CCI_GENERIC,
+ TZ_I2C_CMD_CCI_READ,
+ TZ_I2C_CMD_CCI_READ_SEQ,
+ TZ_I2C_CMD_CCI_WRITE,
+ TZ_I2C_CMD_CCI_WRITE_SEQ,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_ASYNC,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC_BLOCK,
+ TZ_I2C_CMD_CCI_WRITE_TABLE,
+ TZ_I2C_CMD_CCI_WRITE_SEQ_TABLE,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_W_MICRODELAY,
+ TZ_I2C_CMD_CCI_POLL,
+ TZ_I2C_CMD_CCI_WRITE_CONF_TBL,
+ TZ_I2C_CMD_CCI_UTIL,
+};
+
+enum msm_camera_tz_i2c_status_t {
+ TZ_I2C_STATUS_SUCCESS = 0,
+ TZ_I2C_STATUS_GENERAL_FAILURE = -1,
+ TZ_I2C_STATUS_INVALID_INPUT_PARAMS = -2,
+ TZ_I2C_STATUS_INVALID_SENSOR_ID = -3,
+ TZ_I2C_STATUS_BYPASS = -4,
+ TZ_I2C_STATUS_ERR_SIZE = 0x7FFFFFFF
+};
+
+struct msm_camera_tz_i2c_generic_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+};
+
+struct msm_camera_tz_i2c_generic_rsp_t {
+ enum msm_camera_tz_i2c_status_t rc;
+};
+
+#define msm_camera_tz_i2c_get_if_version_req_t msm_camera_tz_i2c_generic_req_t
+
+struct msm_camera_tz_i2c_get_if_version_rsp_t {
+ enum msm_camera_tz_i2c_status_t rc;
+ uint32_t if_version_maj;
+ uint32_t if_version_min;
+};
+
+struct msm_camera_tz_i2c_power_up_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+};
+
+#define msm_camera_tz_i2c_power_up_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_power_down_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+};
+
+#define msm_camera_tz_i2c_power_down_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_cci_generic_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ enum msm_camera_tz_i2c_cmd_id_t cci_cmd_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+};
+
+#define msm_camera_tz_i2c_cci_generic_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_cci_read_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint32_t addr;
+ uint32_t data_type;
+};
+
+struct msm_camera_tz_i2c_cci_read_rsp_t {
+ enum msm_camera_tz_i2c_status_t rc;
+ uint16_t data;
+};
+
+struct msm_camera_tz_i2c_cci_write_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint32_t addr;
+ uint16_t data;
+ uint32_t data_type;
+};
+
+#define msm_camera_tz_i2c_cci_write_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_cci_util_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint16_t cci_cmd;
+};
+
+#define msm_camera_tz_i2c_cci_util_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+#pragma pack(pop, msm_camera_tz_i2c)
+
+struct msm_camera_tz_i2c_sensor_info_t {
+ struct msm_sensor_ctrl_t *s_ctrl;
+ struct msm_camera_i2c_fn_t *saved_sensor_i2c_fn;
+ uint32_t secure;
+ uint32_t ta_enabled;
+ struct qseecom_handle *ta_qseecom_handle;
+ const char *ta_name;
+};
+
+struct msm_camera_tz_i2c_ctrl_t {
+ struct mutex lock;
+ uint32_t lock_ready;
+ uint32_t secure_mode;
+};
+
+static struct msm_camera_tz_i2c_ctrl_t msm_camera_tz_i2c_ctrl;
+
+static struct msm_camera_tz_i2c_sensor_info_t sensor_info[MAX_CAMERAS] = {
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_AIS_SEC_CCI_TA_NAME},
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_AIS_SEC_CCI_TA_NAME},
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_AIS_SEC_CCI_TA_NAME},
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_AIS_SEC_CCI_TA_NAME},
+};
+
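+/*
+ * msm_camera_tz_i2c_is_sensor_secure() - return the sensor id of the
+ * registered secure sensor owning @client, or -EINVAL if none matches.
+ */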
+static int32_t msm_camera_tz_i2c_is_sensor_secure(
+ struct msm_camera_i2c_client *client)
+{
+ uint32_t index;
+
+ if (client == NULL) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Enter\n");
+ for (index = 0; index < MAX_CAMERAS; index++) {
+ if ((sensor_info[index].s_ctrl != NULL) &&
+ sensor_info[index].secure &&
+ (sensor_info[index].s_ctrl->sensor_i2c_client ==
+ client)) {
+ CDBG("Found secure sensor ID = %d\n",
+ sensor_info[index].s_ctrl->id);
+ return sensor_info[index].s_ctrl->id;
+ }
+ }
+ return -EINVAL;
+}
+
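+/*
+ * get_cmd_rsp_buffers() - carve QSEECOM-aligned command and response
+ * buffers out of the TA's shared buffer, failing if they do not fit in
+ * QSEECOM_SBUFF_SIZE.
+ */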
+static int32_t get_cmd_rsp_buffers(
+ struct qseecom_handle *ta_qseecom_handle,
+ void **cmd, int *cmd_len,
+ void **rsp, int *rsp_len)
+{
+ CDBG("Enter\n");
+ if ((ta_qseecom_handle == NULL) ||
+ (cmd == NULL) || (cmd_len == NULL) ||
+ (rsp == NULL) || (rsp_len == NULL)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (*cmd_len & QSEECOM_ALIGN_MASK)
+ *cmd_len = QSEECOM_ALIGN(*cmd_len);
+
+ if (*rsp_len & QSEECOM_ALIGN_MASK)
+ *rsp_len = QSEECOM_ALIGN(*rsp_len);
+
+ if ((*rsp_len + *cmd_len) > QSEECOM_SBUFF_SIZE) {
+ pr_err("%s:%d - Shared buffer too small to hold cmd=%d and rsp=%d\n",
+ __func__, __LINE__,
+ *cmd_len, *rsp_len);
+ return -ENOMEM;
+ }
+
+ *cmd = ta_qseecom_handle->sbuf;
+ *rsp = ta_qseecom_handle->sbuf + *cmd_len;
+ return 0;
+}
+
+static int32_t msm_camera_tz_i2c_ta_get_if_version(
+ struct qseecom_handle *ta_qseecom_handle,
+ uint32_t *if_version_maj,
+ uint32_t *if_version_min)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_get_if_version_req_t *cmd;
+ struct msm_camera_tz_i2c_get_if_version_rsp_t *rsp;
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ if ((ta_qseecom_handle == NULL) ||
+ (if_version_maj == NULL) || (if_version_min == NULL)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cmd_len = sizeof(struct msm_camera_tz_i2c_get_if_version_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_get_if_version_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_GET_IF_VERSION;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Unable to get if version info, rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+
+ if (rsp->rc < 0) {
+ CDBG("TZ I2C App error, rc=%d\n", rsp->rc);
+ rc = -EFAULT;
+ } else {
+ *if_version_maj = rsp->if_version_maj;
+ *if_version_min = rsp->if_version_min;
+ CDBG("TZ I2C If version %d.%d\n", *if_version_maj,
+ *if_version_min);
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_power_up(
+ struct qseecom_handle *ta_qseecom_handle,
+ int32_t sensor_id,
+ uint32_t *sensor_secure)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_power_up_req_t *cmd;
+ struct msm_camera_tz_i2c_power_up_rsp_t *rsp;
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+
+ if (sensor_secure == NULL)
+ return -EINVAL;
+
+ *sensor_secure = 0;
+ if ((ta_qseecom_handle == NULL) ||
+ (sensor_secure == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cmd_len = sizeof(struct msm_camera_tz_i2c_power_up_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_power_up_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_POWER_UP;
+ cmd->sensor_id = sensor_id;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Unable to get sensor secure status, rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+
+ if (rsp->rc == TZ_I2C_STATUS_SUCCESS)
+ *sensor_secure = 1;
+ CDBG("Sensor %d is %s\n", sensor_id,
+ (*sensor_secure)?"SECURE":"NON-SECURE");
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_power_down(
+ struct qseecom_handle *ta_qseecom_handle,
+ int32_t sensor_id)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_power_down_req_t *cmd;
+ struct msm_camera_tz_i2c_power_down_rsp_t *rsp;
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+
+ if ((ta_qseecom_handle == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cmd_len = sizeof(struct msm_camera_tz_i2c_power_down_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_power_down_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_POWER_DOWN;
+ cmd->sensor_id = sensor_id;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_generic(
+ struct msm_camera_i2c_client *client,
+ enum msm_camera_tz_i2c_cmd_id_t cci_cmd_id)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_generic_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_generic_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, cci_cmd_id=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ cci_cmd_id);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_generic_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_generic_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_GENERIC;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_cmd_id = cci_cmd_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+ CDBG("Done: rc=%d, cci_cmd_id=%d\n", rc, cci_cmd_id);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_read(
+ struct msm_camera_i2c_client *client,
+ uint32_t addr,
+ uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_read_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_read_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (data == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, Addr=0x%X, Type=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ data_type);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_read_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_read_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_READ;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+ cmd->addr = addr;
+ cmd->data_type = data_type;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+ *data = rsp->data;
+
+ CDBG("Done: rc=%d, addr=0x%X, data=0x%X\n", rc,
+ addr, *data);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_write(
+ struct msm_camera_i2c_client *client,
+ uint32_t addr,
+ uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_write_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_write_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, Addr=0x%X, Data=0x%X Type=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ data,
+ data_type);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_write_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_write_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_WRITE;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+ cmd->addr = addr;
+ cmd->data = data;
+ cmd->data_type = data_type;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed:, rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+
+ CDBG("Done: rc=%d, addr=0x%X, data=0x%X\n", rc,
+ addr, data);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_util(
+ struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_util_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_util_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, cci_cmd=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ cci_cmd);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_util_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_util_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_UTIL;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+ cmd->cci_cmd = cci_cmd;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+ CDBG("Done: rc=%d, cci_cmd=%d\n", rc, cci_cmd);
+ }
+ return rc;
+}
+
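+/*
+ * msm_camera_tz_i2c_ta_probe() - take the TZ I2C lock and return the
+ * sensor id when the client belongs to a secure sensor whose TA is
+ * enabled; a successful probe must be paired with
+ * msm_camera_tz_i2c_ta_done().
+ */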
+static int32_t msm_camera_tz_i2c_ta_probe(
+ struct msm_camera_i2c_client *client)
+{
+ int32_t sensor_id = -1;
+
+ CDBG("Enter\n");
+ sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+ if ((sensor_id >= 0) && sensor_info[sensor_id].ta_enabled
+ && msm_camera_tz_i2c_ctrl.lock_ready) {
+ mutex_lock(&msm_camera_tz_i2c_ctrl.lock);
+ return sensor_id;
+ }
+ return -EINVAL;
+}
+
+static int32_t msm_camera_tz_i2c_ta_done(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ if (msm_camera_tz_i2c_ctrl.lock_ready)
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ return rc;
+}
+
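+/*
+ * msm_camera_tz_i2c_power_up() - on the first secure power-up, load the
+ * TA, verify the interface major version and query the sensor's secure
+ * status; secure_mode counts nested secure sessions.
+ */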
+int32_t msm_camera_tz_i2c_power_up(
+ struct msm_camera_i2c_client *client)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if (!msm_camera_tz_i2c_ctrl.lock_ready) {
+ msm_camera_tz_i2c_ctrl.lock_ready = 1;
+ mutex_init(&msm_camera_tz_i2c_ctrl.lock);
+ }
+
+ CDBG("Enter (sensor_id=%d)\n", sensor_id);
+ if (sensor_id >= 0) {
+ ktime_t startTime;
+
+ mutex_lock(&msm_camera_tz_i2c_ctrl.lock);
+ if (msm_camera_tz_i2c_ctrl.secure_mode) {
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ return rc;
+ }
+ startTime = ktime_get();
+
+ CDBG("Switch to secure mode (secure sensor=%d)\n",
+ sensor_id);
+ /* Start the TA */
+ if ((sensor_info[sensor_id].ta_qseecom_handle == NULL)
+ && (sensor_info[sensor_id].ta_name != NULL) &&
+ ('\0' != sensor_info[sensor_id].ta_name[0])) {
+ uint32_t if_version_maj = 0;
+ uint32_t if_version_min = 0;
+
+ sensor_info[sensor_id].ta_enabled = 0;
+ rc = qseecom_start_app(
+ &sensor_info[sensor_id].ta_qseecom_handle,
+ (char *)sensor_info[sensor_id].ta_name,
+ QSEECOM_SBUFF_SIZE);
+ if (!rc) {
+ rc = msm_camera_tz_i2c_ta_get_if_version(
+ sensor_info[sensor_id].
+ ta_qseecom_handle,
+ &if_version_maj, &if_version_min);
+ }
+
+ if (!rc) {
+ if (if_version_maj != TA_IF_VERSION_MAJ) {
+ CDBG("TA ver mismatch %d.%d != %d.%d\n",
+ if_version_maj, if_version_min,
+ TA_IF_VERSION_MAJ,
+ TA_IF_VERSION_MIN);
+ rc = qseecom_shutdown_app(
+ &sensor_info[sensor_id].
+ ta_qseecom_handle);
+ sensor_info[sensor_id].ta_qseecom_handle
+ = EMPTY_QSEECOM_HANDLE;
+ rc = -EFAULT;
+ } else {
+ uint32_t sensor_secure = 0;
+ /* Notify TA */
+ /* Get sensor secure status */
+ rc = msm_camera_tz_i2c_ta_power_up(
+ sensor_info[sensor_id].
+ ta_qseecom_handle,
+ sensor_id,
+ &sensor_secure);
+ if (!rc && sensor_secure)
+ /* Sensor validated by TA*/
+ sensor_info[sensor_id].
+ ta_enabled = 1;
+ else {
+ qseecom_shutdown_app(
+ &sensor_info[sensor_id].
+ ta_qseecom_handle);
+ sensor_info[sensor_id].
+ ta_qseecom_handle
+ = EMPTY_QSEECOM_HANDLE;
+ rc = -EFAULT;
+ }
+ }
+ }
+ }
+ CDBG("Init TA %s - %s(%d) - %llu\n",
+ sensor_info[sensor_id].ta_name,
+ (sensor_info[sensor_id].ta_enabled)?"Ok" :
+ "Failed", rc, ktime_us_delta(ktime_get(),
+ startTime));
+ if (!rc)
+ msm_camera_tz_i2c_ctrl.secure_mode++;
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ }
+ return rc;
+}
+
+int32_t msm_camera_tz_i2c_power_down(
+ struct msm_camera_i2c_client *client)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if (!msm_camera_tz_i2c_ctrl.lock_ready) {
+ msm_camera_tz_i2c_ctrl.lock_ready = 1;
+ mutex_init(&msm_camera_tz_i2c_ctrl.lock);
+ }
+
+ CDBG("Enter (sensor_id=%d)\n", sensor_id);
+ if ((sensor_id >= 0) && (msm_camera_tz_i2c_ctrl.secure_mode != 0)) {
+ mutex_lock(&msm_camera_tz_i2c_ctrl.lock);
+ if (msm_camera_tz_i2c_ctrl.secure_mode == 1) {
+ ktime_t startTime = ktime_get();
+
+ CDBG("Switch to non-secure mode (secure sensor=%d)\n",
+ sensor_id);
+ /* Shutdown the TA */
+ if (sensor_info[sensor_id].ta_qseecom_handle != NULL) {
+ msm_camera_tz_i2c_ta_power_down(
+ sensor_info[sensor_id].
+ ta_qseecom_handle,
+ sensor_id);
+ rc = qseecom_shutdown_app(&sensor_info[
+ sensor_id].ta_qseecom_handle);
+ sensor_info[sensor_id].ta_qseecom_handle
+ = EMPTY_QSEECOM_HANDLE;
+ }
+ CDBG("Unload TA %s - %s(%d) - %llu\n",
+ sensor_info[sensor_id].ta_name,
+ (!rc)?"Ok":"Failed", rc,
+ ktime_us_delta(ktime_get(), startTime));
+ }
+ msm_camera_tz_i2c_ctrl.secure_mode--;
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ }
+ return rc;
+}
+
+int32_t msm_camera_tz_i2c_register_sensor(
+ void *s_ctrl_p)
+{
+ struct msm_sensor_ctrl_t *s_ctrl = (struct msm_sensor_ctrl_t *)s_ctrl_p;
+
+ if (s_ctrl == NULL) {
+ pr_err("%s:%d - invalid parameter)\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (s_ctrl->id >= MAX_CAMERAS) {
+ pr_err("%s:%d - invalid ID: %d\n",
+ __func__, __LINE__, s_ctrl->id);
+ return -EINVAL;
+ }
+
+ CDBG("id=%d, client=%pK\n", s_ctrl->id, s_ctrl);
+ sensor_info[s_ctrl->id].s_ctrl = s_ctrl;
+ sensor_info[s_ctrl->id].secure = s_ctrl->is_secure;
+ return 0;
+}
+
+int32_t msm_camera_tz_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_read(
+ client, addr, data, data_type);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_read, client, addr, data, data_type);
+}
+
+int32_t msm_camera_tz_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X, num=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ num_byte);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_READ_SEQ);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_read_seq, client, addr, data, num_byte);
+}
+
+int32_t msm_camera_tz_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_write(
+ client, addr, data, data_type);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write, client, addr, data, data_type);
+}
+
+int32_t msm_camera_tz_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X, num=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ num_byte);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_SEQ);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_seq, client, addr, data, num_byte);
+}
+
+int32_t msm_camera_tz_i2c_write_table_async(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_ASYNC);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_async, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table_sync(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_sync, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table_sync_block(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC_BLOCK);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_sync_block, client,
+ write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_seq_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_SEQ_TABLE);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_seq_table, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_W_MICRODELAY);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_w_microdelay, client,
+ write_setting);
+}
+
+int32_t msm_camera_tz_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_POLL);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_poll, client, addr, data, data_type);
+}
+
+int32_t msm_camera_tz_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_CONF_TBL);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_conf_tbl, client, reg_conf_tbl, size,
+ data_type);
+}
+
+int32_t msm_sensor_tz_i2c_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, cci_cmd=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid, cci_cmd);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_util(client, cci_cmd);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_sensor_cci_i2c_util, client, cci_cmd);
+}
diff --git a/drivers/media/platform/msm/ais/sensor/ir_cut/Makefile b/drivers/media/platform/msm/ais/sensor/ir_cut/Makefile
new file mode 100644
index 000000000000..32ce6226088a
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/ir_cut/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_ir_cut.o
diff --git a/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c b/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c
new file mode 100644
index 000000000000..bfb960ea862a
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c
@@ -0,0 +1,662 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include "msm_ir_cut.h"
+#include "msm_camera_dt_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+DEFINE_MSM_MUTEX(msm_ir_cut_mutex);
+
+static struct v4l2_file_operations msm_ir_cut_v4l2_subdev_fops;
+
+static const struct of_device_id msm_ir_cut_dt_match[] = {
+ {.compatible = "qcom,ir-cut", .data = NULL},
+ {}
+};
+
+static struct msm_ir_cut_table msm_gpio_ir_cut_table;
+
+static struct msm_ir_cut_table *ir_cut_table[] = {
+ &msm_gpio_ir_cut_table,
+};
+
+static int32_t msm_ir_cut_get_subdev_id(
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl, void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ CDBG("Enter\n");
+ if (!subdev_id) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ if (ir_cut_ctrl->ir_cut_device_type != MSM_CAMERA_PLATFORM_DEVICE) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+
+ *subdev_id = ir_cut_ctrl->pdev->id;
+
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_ir_cut_init(
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
+ struct msm_ir_cut_cfg_data_t *ir_cut_data)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter");
+
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_on(ir_cut_ctrl, ir_cut_data);
+
+ CDBG("Exit");
+ return rc;
+}
+
+static int32_t msm_ir_cut_release(
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl)
+{
+ int32_t rc = 0;
+
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_RELEASE) {
+ pr_err("%s:%d Invalid ir_cut state = %d",
+ __func__, __LINE__, ir_cut_ctrl->ir_cut_state);
+ return 0;
+ }
+
+ if (rc < 0) {
+ pr_err("%s:%d camera_ir_cut_on failed rc = %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ ir_cut_ctrl->ir_cut_state = MSM_CAMERA_IR_CUT_RELEASE;
+ return 0;
+}
+
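+/*
+ * msm_ir_cut_off() - briefly request the IR-cut GPIOs, drive the plus
+ * line low and the minus line high, then release the GPIO table.
+ */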
+static int32_t msm_ir_cut_off(struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
+ struct msm_ir_cut_cfg_data_t *ir_cut_data)
+{
+ int rc = 0;
+
+ CDBG("Enter cut off\n");
+
+ if (ir_cut_ctrl->gconf) {
+ rc = msm_camera_request_gpio_table(
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl_size, 1);
+
+ if (rc < 0) {
+ pr_err("ERR:%s:Failed in selecting state: %d\n",
+ __func__, rc);
+
+ return rc;
+ }
+ } else {
+ pr_err("%s: No IR CUT GPIOs\n", __func__);
+ return 0;
+ }
+
+ if (ir_cut_ctrl->cam_pinctrl_status) {
+ rc = pinctrl_select_state(
+ ir_cut_ctrl->pinctrl_info.pinctrl,
+ ir_cut_ctrl->pinctrl_info.gpio_state_active);
+
+ if (rc < 0)
+ pr_err("ERR:%s:%d cannot set pin to active state: %d",
+ __func__, __LINE__, rc);
+ }
+
+ CDBG("ERR:%s:gpio_conf->gpio_num_info->gpio_num[0] = %d",
+ __func__,
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_P]);
+
+ CDBG("ERR:%s:gpio_conf->gpio_num_info->gpio_num[1] = %d",
+ __func__,
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_M]);
+
+ gpio_set_value_cansleep(
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_P],
+ 0);
+
+ gpio_set_value_cansleep(
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_M],
+ 1);
+
+ if (ir_cut_ctrl->gconf) {
+ rc = msm_camera_request_gpio_table(
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl_size, 0);
+
+ if (rc < 0) {
+ pr_err("ERR:%s:Failed in selecting state: %d\n",
+ __func__, rc);
+
+ return rc;
+ }
+ } else {
+ pr_err("%s: No IR CUT GPIOs\n", __func__);
+ return 0;
+ }
+
+ CDBG("Exit\n");
+ return 0;
+}
+
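+/*
+ * msm_ir_cut_on() - same sequence as msm_ir_cut_off(), but with both the
+ * plus and minus lines driven high.
+ */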
+static int32_t msm_ir_cut_on(
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
+ struct msm_ir_cut_cfg_data_t *ir_cut_data)
+{
+ int rc = 0;
+
+ CDBG("Enter ir cut on\n");
+
+ if (ir_cut_ctrl->gconf) {
+ rc = msm_camera_request_gpio_table(
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl_size, 1);
+
+ if (rc < 0) {
+ pr_err("ERR:%s:Failed in selecting state: %d\n",
+ __func__, rc);
+
+ return rc;
+ }
+ } else {
+ pr_err("%s: No IR CUT GPIOs\n", __func__);
+ return 0;
+ }
+
+ if (ir_cut_ctrl->cam_pinctrl_status) {
+ rc = pinctrl_select_state(
+ ir_cut_ctrl->pinctrl_info.pinctrl,
+ ir_cut_ctrl->pinctrl_info.gpio_state_active);
+
+ if (rc < 0)
+ pr_err("ERR:%s:%d cannot set pin to active state: %d",
+ __func__, __LINE__, rc);
+ }
+
+ CDBG("ERR:%s: gpio_conf->gpio_num_info->gpio_num[0] = %d",
+ __func__,
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_P]);
+
+ CDBG("ERR:%s: gpio_conf->gpio_num_info->gpio_num[1] = %d",
+ __func__,
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_M]);
+
+ gpio_set_value_cansleep(
+ ir_cut_ctrl->gconf->gpio_num_info->
+ gpio_num[IR_CUT_FILTER_GPIO_P],
+ 1);
+
+ gpio_set_value_cansleep(
+ ir_cut_ctrl->gconf->
+ gpio_num_info->gpio_num[IR_CUT_FILTER_GPIO_M],
+ 1);
+
+ if (ir_cut_ctrl->gconf) {
+ rc = msm_camera_request_gpio_table(
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl_size, 0);
+
+ if (rc < 0) {
+ pr_err("ERR:%s:Failed in selecting state: %d\n",
+ __func__, rc);
+
+ return rc;
+ }
+ } else {
+ pr_err("%s: No IR CUT GPIOs\n", __func__);
+ return 0;
+ }
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_ir_cut_handle_init(
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
+ struct msm_ir_cut_cfg_data_t *ir_cut_data)
+{
+ uint32_t i = 0;
+ int32_t rc = -EFAULT;
+ enum msm_ir_cut_driver_type ir_cut_driver_type =
+ ir_cut_ctrl->ir_cut_driver_type;
+
+ CDBG("Enter");
+
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_INIT) {
+ pr_err("%s:%d Invalid ir_cut state = %d",
+ __func__, __LINE__, ir_cut_ctrl->ir_cut_state);
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ir_cut_table); i++) {
+ if (ir_cut_driver_type == ir_cut_table[i]->ir_cut_driver_type) {
+ ir_cut_ctrl->func_tbl = &ir_cut_table[i]->func_tbl;
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc < 0) {
+ pr_err("%s:%d failed invalid ir_cut_driver_type %d\n",
+ __func__, __LINE__, ir_cut_driver_type);
+ return -EINVAL;
+ }
+
+ if (rc < 0) {
+ pr_err("%s:%d camera_ir_cut_init failed rc = %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ ir_cut_ctrl->ir_cut_state = MSM_CAMERA_IR_CUT_INIT;
+
+ CDBG("Exit");
+ return 0;
+}
+
+static int32_t msm_ir_cut_config(struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
+ void __user *argp)
+{
+ int32_t rc = -EINVAL;
+ struct msm_ir_cut_cfg_data_t *ir_cut_data =
+ (struct msm_ir_cut_cfg_data_t *) argp;
+
+ mutex_lock(ir_cut_ctrl->ir_cut_mutex);
+
+ CDBG("Enter %s type %d\n", __func__, ir_cut_data->cfg_type);
+
+ switch (ir_cut_data->cfg_type) {
+ case CFG_IR_CUT_INIT:
+ rc = msm_ir_cut_handle_init(ir_cut_ctrl, ir_cut_data);
+ break;
+ case CFG_IR_CUT_RELEASE:
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_INIT)
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_release(
+ ir_cut_ctrl);
+ break;
+ case CFG_IR_CUT_OFF:
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_INIT)
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_off(
+ ir_cut_ctrl, ir_cut_data);
+ break;
+ case CFG_IR_CUT_ON:
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_INIT)
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_on(
+ ir_cut_ctrl, ir_cut_data);
+ break;
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+ mutex_unlock(ir_cut_ctrl->ir_cut_mutex);
+
+ CDBG("Exit %s type %d\n", __func__, ir_cut_data->cfg_type);
+
+ return rc;
+}
+
+static long msm_ir_cut_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_ir_cut_ctrl_t *fctrl = NULL;
+ void __user *argp = (void __user *)arg;
+
+ CDBG("Enter\n");
+
+ if (!sd) {
+ pr_err("sd NULL\n");
+ return -EINVAL;
+ }
+ fctrl = v4l2_get_subdevdata(sd);
+ if (!fctrl) {
+ pr_err("fctrl NULL\n");
+ return -EINVAL;
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_ir_cut_get_subdev_id(fctrl, argp);
+ case VIDIOC_MSM_IR_CUT_CFG:
+ return msm_ir_cut_config(fctrl, argp);
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_SHUTDOWN:
+ if (!fctrl->func_tbl) {
+ pr_err("fctrl->func_tbl NULL\n");
+ return -EINVAL;
+ } else {
+ return fctrl->func_tbl->camera_ir_cut_release(fctrl);
+ }
+ default:
+ pr_err_ratelimited("invalid cmd %d\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+ CDBG("Exit\n");
+}
+
+static struct v4l2_subdev_core_ops msm_ir_cut_subdev_core_ops = {
+ .ioctl = msm_ir_cut_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_ir_cut_subdev_ops = {
+ .core = &msm_ir_cut_subdev_core_ops,
+};
+
+static int msm_ir_cut_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl = v4l2_get_subdevdata(sd);
+
+ CDBG("Enter\n");
+
+ if (!ir_cut_ctrl) {
+ pr_err("%s: failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ir_cut_ctrl->ir_cut_state == MSM_CAMERA_IR_CUT_INIT)
+ rc = ir_cut_ctrl->func_tbl->camera_ir_cut_release(
+ ir_cut_ctrl);
+
+ CDBG("Exit\n");
+
+ return rc;
+}
+
+static const struct v4l2_subdev_internal_ops msm_ir_cut_internal_ops = {
+ .close = msm_ir_cut_close,
+};
+
+static int32_t msm_ir_cut_get_gpio_dt_data(struct device_node *of_node,
+ struct msm_ir_cut_ctrl_t *fctrl)
+{
+ int32_t rc = 0, i = 0;
+ uint16_t *gpio_array = NULL;
+ int16_t gpio_array_size = 0;
+ struct msm_camera_gpio_conf *gconf = NULL;
+
+ gpio_array_size = of_gpio_count(of_node);
+ CDBG("%s gpio count %d\n", __func__, gpio_array_size);
+
+ if (gpio_array_size > 0) {
+ fctrl->power_info.gpio_conf =
+ kzalloc(sizeof(struct msm_camera_gpio_conf),
+ GFP_KERNEL);
+ if (!fctrl->power_info.gpio_conf) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ gconf = fctrl->power_info.gpio_conf;
+
+ gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!gpio_array)
+ return -ENOMEM;
+ for (i = 0; i < gpio_array_size; i++) {
+ gpio_array[i] = of_get_gpio(of_node, i);
+ if (((int16_t)gpio_array[i]) < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -EINVAL;
+ goto free_gpio_array;
+ }
+ CDBG("%s gpio_array[%d] = %d\n", __func__, i,
+ gpio_array[i]);
+ }
+
+ rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf,
+ gpio_array, gpio_array_size);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_gpio_array;
+ }
+ kfree(gpio_array);
+
+ if (fctrl->ir_cut_driver_type == IR_CUT_DRIVER_DEFAULT)
+ fctrl->ir_cut_driver_type = IR_CUT_DRIVER_GPIO;
+ CDBG("%s:%d fctrl->ir_cut_driver_type = %d", __func__, __LINE__,
+ fctrl->ir_cut_driver_type);
+ }
+
+ return rc;
+
+free_gpio_array:
+ kfree(gpio_array);
+ return rc;
+}
+
+static int32_t msm_ir_cut_get_dt_data(struct device_node *of_node,
+ struct msm_ir_cut_ctrl_t *fctrl)
+{
+ int32_t rc = 0;
+
+ CDBG("called\n");
+
+ if (!of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ /* Read the sub device */
+ rc = of_property_read_u32(of_node, "cell-index", &fctrl->pdev->id);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+
+ fctrl->ir_cut_driver_type = IR_CUT_DRIVER_DEFAULT;
+
+ /* Read the gpio information from device tree */
+ rc = msm_ir_cut_get_gpio_dt_data(of_node, fctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_ir_cut_get_gpio_dt_data failed rc %d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_ir_cut_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ int32_t rc = 0;
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct msm_ir_cut_cfg_data_t32 *u32 =
+ (struct msm_ir_cut_cfg_data_t32 *)arg;
+ struct msm_ir_cut_cfg_data_t ir_cut_data;
+
+ CDBG("Enter");
+ ir_cut_data.cfg_type = u32->cfg_type;
+
+ switch (cmd) {
+ case VIDIOC_MSM_IR_CUT_CFG32:
+ cmd = VIDIOC_MSM_IR_CUT_CFG;
+ break;
+ default:
+ return msm_ir_cut_subdev_ioctl(sd, cmd, arg);
+ }
+
+ rc = msm_ir_cut_subdev_ioctl(sd, cmd, &ir_cut_data);
+
+ CDBG("Exit");
+ return rc;
+}
+
+static long msm_ir_cut_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_ir_cut_subdev_do_ioctl);
+}
+#endif
+
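+/*
+ * msm_ir_cut_platform_probe() - parse the device-tree cell-index and GPIO
+ * data, set up pinctrl, and register the IR-cut v4l2 subdevice.
+ */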
+static int32_t msm_ir_cut_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0, i = 0;
+ struct msm_ir_cut_ctrl_t *ir_cut_ctrl = NULL;
+
+ CDBG("Enter");
+ if (!pdev->dev.of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ ir_cut_ctrl = kzalloc(sizeof(struct msm_ir_cut_ctrl_t), GFP_KERNEL);
+ if (!ir_cut_ctrl)
+ return -ENOMEM;
+
+ memset(ir_cut_ctrl, 0, sizeof(struct msm_ir_cut_ctrl_t));
+
+ ir_cut_ctrl->pdev = pdev;
+
+ rc = msm_ir_cut_get_dt_data(pdev->dev.of_node, ir_cut_ctrl);
+
+ if (rc < 0) {
+ pr_err("%s:%d msm_ir_cut_get_dt_data failed\n",
+ __func__, __LINE__);
+ kfree(ir_cut_ctrl);
+ return -EINVAL;
+ }
+
+ rc = msm_sensor_driver_get_gpio_data(&(ir_cut_ctrl->gconf),
+ (&pdev->dev)->of_node);
+
+ if ((rc < 0) || (ir_cut_ctrl->gconf == NULL)) {
+ pr_err("%s: No IR CUT GPIOs\n", __func__);
+
+ kfree(ir_cut_ctrl);
+ return -EINVAL;
+ }
+
+ CDBG("%s: gpio_request_table_size = %d\n",
+ __func__,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl_size);
+
+ for (i = 0;
+ i < ir_cut_ctrl->gconf->cam_gpio_req_tbl_size; i++) {
+ CDBG("%s: gpio = %d\n", __func__,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl[i].gpio);
+ CDBG("%s: gpio-flags = %lu\n", __func__,
+ ir_cut_ctrl->gconf->cam_gpio_req_tbl[i].flags);
+ CDBG("%s: gconf->gpio_num_info->gpio_num[%d] = %d\n",
+ __func__, i,
+ ir_cut_ctrl->gconf->gpio_num_info->gpio_num[i]);
+ }
+
+ ir_cut_ctrl->cam_pinctrl_status = 1;
+
+ rc = msm_camera_pinctrl_init(
+ &(ir_cut_ctrl->pinctrl_info), &(pdev->dev));
+
+ if (rc < 0) {
+ pr_err("ERR:%s: Error in reading IR CUT pinctrl\n",
+ __func__);
+ ir_cut_ctrl->cam_pinctrl_status = 0;
+ }
+
+ ir_cut_ctrl->ir_cut_state = MSM_CAMERA_IR_CUT_RELEASE;
+ ir_cut_ctrl->power_info.dev = &ir_cut_ctrl->pdev->dev;
+ ir_cut_ctrl->ir_cut_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ ir_cut_ctrl->ir_cut_mutex = &msm_ir_cut_mutex;
+
+ /* Initialize sub device */
+ v4l2_subdev_init(&ir_cut_ctrl->msm_sd.sd, &msm_ir_cut_subdev_ops);
+ v4l2_set_subdevdata(&ir_cut_ctrl->msm_sd.sd, ir_cut_ctrl);
+
+ ir_cut_ctrl->msm_sd.sd.internal_ops = &msm_ir_cut_internal_ops;
+ ir_cut_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(ir_cut_ctrl->msm_sd.sd.name,
+ ARRAY_SIZE(ir_cut_ctrl->msm_sd.sd.name),
+ "msm_camera_ir_cut");
+ media_entity_init(&ir_cut_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ ir_cut_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ir_cut_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_IR_CUT;
+ ir_cut_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x1;
+ rc = msm_sd_register(&ir_cut_ctrl->msm_sd);
+ if (rc < 0) {
+ pr_err("%s: msm_sd_register failed for ir_cut device\n", __func__);
+ kfree(ir_cut_ctrl);
+ return rc;
+ }
+
+ CDBG("%s:%d ir_cut sd name = %s", __func__, __LINE__,
+ ir_cut_ctrl->msm_sd.sd.entity.name);
+ msm_ir_cut_v4l2_subdev_fops = v4l2_subdev_fops;
+#ifdef CONFIG_COMPAT
+ msm_ir_cut_v4l2_subdev_fops.compat_ioctl32 =
+ msm_ir_cut_subdev_fops_ioctl;
+#endif
+ ir_cut_ctrl->msm_sd.sd.devnode->fops = &msm_ir_cut_v4l2_subdev_fops;
+
+ CDBG("probe success\n");
+ return rc;
+}
+
+MODULE_DEVICE_TABLE(of, msm_ir_cut_dt_match);
+
+static struct platform_driver msm_ir_cut_platform_driver = {
+ .probe = msm_ir_cut_platform_probe,
+ .driver = {
+ .name = "qcom,ir-cut",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ir_cut_dt_match,
+ },
+};
+
+static int __init msm_ir_cut_init_module(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_ir_cut_platform_driver);
+ if (!rc)
+ return rc;
+
+ pr_err("platform probe for ir_cut failed");
+
+ return rc;
+}
+
+static void __exit msm_ir_cut_exit_module(void)
+{
+ platform_driver_unregister(&msm_ir_cut_platform_driver);
+}
+
+static struct msm_ir_cut_table msm_gpio_ir_cut_table = {
+ .ir_cut_driver_type = IR_CUT_DRIVER_GPIO,
+ .func_tbl = {
+ .camera_ir_cut_init = msm_ir_cut_init,
+ .camera_ir_cut_release = msm_ir_cut_release,
+ .camera_ir_cut_off = msm_ir_cut_off,
+ .camera_ir_cut_on = msm_ir_cut_on,
+ },
+};
+
+module_init(msm_ir_cut_init_module);
+module_exit(msm_ir_cut_exit_module);
+MODULE_DESCRIPTION("MSM IR CUT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.h b/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.h
new file mode 100644
index 000000000000..e74af5b3c2e4
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MSM_IR_CUT_H
+#define MSM_IR_CUT_H
+
+#include <soc/qcom/ais.h>
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+#include "msm_sd.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+enum msm_camera_ir_cut_state_t {
+ MSM_CAMERA_IR_CUT_INIT,
+ MSM_CAMERA_IR_CUT_RELEASE,
+};
+
+enum msm_ir_cut_driver_type {
+ IR_CUT_DRIVER_GPIO,
+ IR_CUT_DRIVER_DEFAULT,
+};
+
+struct msm_ir_cut_ctrl_t;
+
+struct msm_ir_cut_func_t {
+ int32_t (*camera_ir_cut_init)(struct msm_ir_cut_ctrl_t *,
+ struct msm_ir_cut_cfg_data_t *);
+ int32_t (*camera_ir_cut_release)(struct msm_ir_cut_ctrl_t *);
+ int32_t (*camera_ir_cut_off)(struct msm_ir_cut_ctrl_t *,
+ struct msm_ir_cut_cfg_data_t *);
+ int32_t (*camera_ir_cut_on)(struct msm_ir_cut_ctrl_t *,
+ struct msm_ir_cut_cfg_data_t *);
+};
+
+struct msm_ir_cut_table {
+ enum msm_ir_cut_driver_type ir_cut_driver_type;
+ struct msm_ir_cut_func_t func_tbl;
+};
+
+struct msm_ir_cut_ctrl_t {
+ struct msm_sd_subdev msm_sd;
+ struct platform_device *pdev;
+ struct msm_ir_cut_func_t *func_tbl;
+ struct msm_camera_power_ctrl_t power_info;
+
+ enum msm_camera_device_type_t ir_cut_device_type;
+ struct mutex *ir_cut_mutex;
+
+ /* ir_cut driver type */
+ enum msm_ir_cut_driver_type ir_cut_driver_type;
+
+ /* ir_cut state */
+ enum msm_camera_ir_cut_state_t ir_cut_state;
+
+ struct msm_camera_gpio_conf *gconf;
+ struct msm_pinctrl_info pinctrl_info;
+ uint8_t cam_pinctrl_status;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/ir_led/Makefile b/drivers/media/platform/msm/ais/sensor/ir_led/Makefile
new file mode 100644
index 000000000000..bb3cc3ce40b4
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/ir_led/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_ir_led.o
diff --git a/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c b/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c
new file mode 100644
index 000000000000..803bce440ee1
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c
@@ -0,0 +1,456 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/delay.h>
+#include "msm_ir_led.h"
+#include "msm_camera_dt_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+DEFINE_MSM_MUTEX(msm_ir_led_mutex);
+
+static struct v4l2_file_operations msm_ir_led_v4l2_subdev_fops;
+
+static const struct of_device_id msm_ir_led_dt_match[] = {
+ {.compatible = "qcom,ir-led", .data = NULL},
+ {}
+};
+
+static struct msm_ir_led_table msm_default_ir_led_table;
+
+static struct msm_ir_led_table *ir_led_table[] = {
+ &msm_default_ir_led_table,
+};
+
+static int32_t msm_ir_led_get_subdev_id(
+ struct msm_ir_led_ctrl_t *ir_led_ctrl, void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ if (!subdev_id) {
+ pr_err("subdevice ID is not valid\n");
+ return -EINVAL;
+ }
+ if (ir_led_ctrl->ir_led_device_type != MSM_CAMERA_PLATFORM_DEVICE) {
+ pr_err("device type is not matching\n");
+ return -EINVAL;
+ }
+
+ *subdev_id = ir_led_ctrl->pdev->id;
+
+ CDBG("subdev_id %d\n", *subdev_id);
+ return 0;
+}
+
+static int32_t msm_ir_led_init(
+ struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ struct msm_ir_led_cfg_data_t *ir_led_data)
+{
+ int32_t rc = 0;
+
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_off(ir_led_ctrl, ir_led_data);
+
+ return rc;
+}
+
+static int32_t msm_ir_led_release(
+ struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ struct msm_ir_led_cfg_data_t *ir_led_data)
+{
+ int32_t rc = -EFAULT;
+
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_RELEASE) {
+ pr_err("Invalid ir_led state = %d\n",
+ ir_led_ctrl->ir_led_state);
+ return rc;
+ }
+
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_off(ir_led_ctrl, ir_led_data);
+ if (rc < 0) {
+ pr_err("camera_ir_led_off failed (%d)\n", rc);
+ return rc;
+ }
+ ir_led_ctrl->ir_led_state = MSM_CAMERA_IR_LED_RELEASE;
+
+ return rc;
+}
+
+static int32_t msm_ir_led_off(struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ struct msm_ir_led_cfg_data_t *ir_led_data)
+{
+ int32_t rc = 0;
+
+ CDBG("pwm duty on(ns) %d, pwm period(ns) %d\n",
+ ir_led_data->pwm_duty_on_ns, ir_led_data->pwm_period_ns);
+
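+ /* substitute the default period if none was supplied and force a 0 ns
+ * duty cycle so the LED output is actually driven off
+ */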
+ if (ir_led_data->pwm_period_ns <= 0)
+ ir_led_data->pwm_period_ns = DEFAULT_PWM_TIME_PERIOD_NS;
+
+ if (ir_led_data->pwm_duty_on_ns != 0)
+ ir_led_data->pwm_duty_on_ns = DEFAULT_PWM_DUTY_CYCLE_NS;
+
+ if (ir_led_ctrl->pwm_dev) {
+ rc = pwm_config(ir_led_ctrl->pwm_dev,
+ ir_led_data->pwm_duty_on_ns,
+ ir_led_data->pwm_period_ns);
+
+ if (rc) {
+ pr_err("PWM config failed (%d)\n", rc);
+ return rc;
+ }
+ /* workaround: short delay before disabling the PWM module */
+ udelay(50);
+
+ pwm_disable(ir_led_ctrl->pwm_dev);
+ } else {
+ CDBG("pwm device is null\n");
+ }
+
+ return 0;
+}
+
+static int32_t msm_ir_led_on(
+ struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ struct msm_ir_led_cfg_data_t *ir_led_data)
+{
+ int32_t rc = 0;
+
+ CDBG("pwm duty on(ns) %d, pwm period(ns) %d\n",
+ ir_led_data->pwm_duty_on_ns, ir_led_data->pwm_period_ns);
+
+ if (ir_led_ctrl->pwm_dev) {
+ rc = pwm_config(ir_led_ctrl->pwm_dev,
+ ir_led_data->pwm_duty_on_ns,
+ ir_led_data->pwm_period_ns);
+ if (rc) {
+ pr_err("PWM config failed (%d)\n", rc);
+ return rc;
+ }
+
+ rc = pwm_enable(ir_led_ctrl->pwm_dev);
+ if (rc) {
+ pr_err("PWM enable failed(%d)\n", rc);
+ return rc;
+ }
+ } else {
+ CDBG("pwm device is null\n");
+ }
+ return 0;
+}
+
+static int32_t msm_ir_led_handle_init(
+ struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ struct msm_ir_led_cfg_data_t *ir_led_data)
+{
+ uint32_t i = 0;
+ int32_t rc = -EFAULT;
+ enum msm_ir_led_driver_type ir_led_driver_type =
+ ir_led_ctrl->ir_led_driver_type;
+
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_INIT) {
+ pr_err("Invalid ir_led state = %d\n",
+ ir_led_ctrl->ir_led_state);
+ return rc;
+ }
+
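+ /* select the function table that matches this controller's driver type */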
+ for (i = 0; i < ARRAY_SIZE(ir_led_table); i++) {
+ if (ir_led_driver_type == ir_led_table[i]->ir_led_driver_type) {
+ ir_led_ctrl->func_tbl = &ir_led_table[i]->func_tbl;
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc < 0) {
+ pr_err("failed invalid ir_led_driver_type %d\n",
+ ir_led_driver_type);
+ return -EINVAL;
+ }
+
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_init(
+ ir_led_ctrl, ir_led_data);
+ if (rc < 0) {
+ pr_err("camera_ir_led_init failed (%d)\n", rc);
+ return rc;
+ }
+
+ ir_led_ctrl->ir_led_state = MSM_CAMERA_IR_LED_INIT;
+
+ CDBG("IR LED STATE intialised Successfully\n");
+ return rc;
+}
+
+static int32_t msm_ir_led_config(struct msm_ir_led_ctrl_t *ir_led_ctrl,
+ void __user *argp)
+{
+ int32_t rc = -EINVAL;
+ struct msm_ir_led_cfg_data_t *ir_led_data =
+ (struct msm_ir_led_cfg_data_t *) argp;
+
+ CDBG("type %d\n", ir_led_data->cfg_type);
+
+ mutex_lock(ir_led_ctrl->ir_led_mutex);
+
+ switch (ir_led_data->cfg_type) {
+ case CFG_IR_LED_INIT:
+ rc = msm_ir_led_handle_init(ir_led_ctrl, ir_led_data);
+ break;
+ case CFG_IR_LED_RELEASE:
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_INIT)
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_release(
+ ir_led_ctrl, ir_led_data);
+ break;
+ case CFG_IR_LED_OFF:
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_INIT)
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_off(
+ ir_led_ctrl, ir_led_data);
+ break;
+ case CFG_IR_LED_ON:
+ if (ir_led_ctrl->ir_led_state == MSM_CAMERA_IR_LED_INIT)
+ rc = ir_led_ctrl->func_tbl->camera_ir_led_on(
+ ir_led_ctrl, ir_led_data);
+ break;
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+ mutex_unlock(ir_led_ctrl->ir_led_mutex);
+
+ CDBG("Exit (%d): type %d\n", rc, ir_led_data->cfg_type);
+
+ return rc;
+}
+
+static long msm_ir_led_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_ir_led_ctrl_t *fctrl = NULL;
+ void __user *argp = (void __user *)arg;
+ struct msm_ir_led_cfg_data_t ir_led_data = {0};
+
+ if (!sd) {
+ pr_err(" v4l2 ir led subdevice is NULL\n");
+ return -EINVAL;
+ }
+ fctrl = v4l2_get_subdevdata(sd);
+ if (!fctrl) {
+ pr_err("fctrl NULL\n");
+ return -EINVAL;
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_ir_led_get_subdev_id(fctrl, argp);
+ case VIDIOC_MSM_IR_LED_CFG:
+ return msm_ir_led_config(fctrl, argp);
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_SHUTDOWN:
+ if (!fctrl->func_tbl) {
+ pr_err("No call back funcions\n");
+ return -EINVAL;
+ } else {
+ return fctrl->func_tbl->camera_ir_led_release(fctrl,
+ &ir_led_data);
+ }
+ default:
+ pr_err_ratelimited("invalid cmd %d\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+}
+
+static struct v4l2_subdev_core_ops msm_ir_led_subdev_core_ops = {
+ .ioctl = msm_ir_led_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_ir_led_subdev_ops = {
+ .core = &msm_ir_led_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_ir_led_internal_ops;
+
+static int32_t msm_ir_led_get_dt_data(struct device_node *of_node,
+ struct msm_ir_led_ctrl_t *fctrl)
+{
+ int32_t rc = 0;
+
+ /* Read the sub device */
+ rc = of_property_read_u32(of_node, "cell-index", &fctrl->pdev->id);
+ if (rc < 0) {
+ pr_err("reading cell-index for ir-led node is failed(rc) %d\n",
+ rc);
+ return rc;
+ }
+
+ fctrl->ir_led_driver_type = IR_LED_DRIVER_DEFAULT;
+ return rc;
+}
+
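+/*
+ * 32-bit userspace entry point: unpack the compat cfg structure
+ * (cfg_type and PWM timing) into the native layout before dispatching
+ * to the common subdev ioctl handler.
+ */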
+#ifdef CONFIG_COMPAT
+static long msm_ir_led_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ int32_t rc = 0;
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct msm_ir_led_cfg_data_t32 *u32 =
+ (struct msm_ir_led_cfg_data_t32 *)arg;
+ struct msm_ir_led_cfg_data_t ir_led_data;
+
+ ir_led_data.cfg_type = u32->cfg_type;
+ ir_led_data.pwm_duty_on_ns = u32->pwm_duty_on_ns;
+ ir_led_data.pwm_period_ns = u32->pwm_period_ns;
+
+ switch (cmd) {
+ case VIDIOC_MSM_IR_LED_CFG32:
+ cmd = VIDIOC_MSM_IR_LED_CFG;
+ break;
+ default:
+ return msm_ir_led_subdev_ioctl(sd, cmd, arg);
+ }
+
+ rc = msm_ir_led_subdev_ioctl(sd, cmd, &ir_led_data);
+
+ return rc;
+}
+
+static long msm_ir_led_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_ir_led_subdev_do_ioctl);
+}
+#endif
+
+static int32_t msm_ir_led_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_ir_led_ctrl_t *ir_led_ctrl = NULL;
+
+ if (!pdev->dev.of_node) {
+ pr_err("IR LED device node is not present in device tree\n");
+ return -EINVAL;
+ }
+
+ ir_led_ctrl = devm_kzalloc(&pdev->dev, sizeof(struct msm_ir_led_ctrl_t),
+ GFP_KERNEL);
+ if (!ir_led_ctrl)
+ return -ENOMEM;
+
+ ir_led_ctrl->pdev = pdev;
+
+ /* Reading PWM device node */
+ ir_led_ctrl->pwm_dev = of_pwm_get(pdev->dev.of_node, NULL);
+
+ if (PTR_ERR(ir_led_ctrl->pwm_dev) == -EPROBE_DEFER) {
+ pr_info("Deferring probe...Cannot get PWM device\n");
+ return -EPROBE_DEFER;
+ }
+
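+ /* any other PWM lookup error is non-fatal: continue without a PWM handle */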
+ if (IS_ERR(ir_led_ctrl->pwm_dev)) {
+ rc = PTR_ERR(ir_led_ctrl->pwm_dev);
+ CDBG("Cannot get PWM device (%d)\n", rc);
+ ir_led_ctrl->pwm_dev = NULL;
+ }
+
+ rc = msm_ir_led_get_dt_data(pdev->dev.of_node, ir_led_ctrl);
+ if (rc < 0) {
+ pr_err("msm_ir_led_get_dt_data failed\n");
+ return -EINVAL;
+ }
+
+ ir_led_ctrl->ir_led_state = MSM_CAMERA_IR_LED_RELEASE;
+ ir_led_ctrl->power_info.dev = &ir_led_ctrl->pdev->dev;
+ ir_led_ctrl->ir_led_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ ir_led_ctrl->ir_led_mutex = &msm_ir_led_mutex;
+
+ /* Initialize sub device */
+ v4l2_subdev_init(&ir_led_ctrl->msm_sd.sd, &msm_ir_led_subdev_ops);
+ v4l2_set_subdevdata(&ir_led_ctrl->msm_sd.sd, ir_led_ctrl);
+
+ ir_led_ctrl->msm_sd.sd.internal_ops = &msm_ir_led_internal_ops;
+ ir_led_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(ir_led_ctrl->msm_sd.sd.name,
+ ARRAY_SIZE(ir_led_ctrl->msm_sd.sd.name),
+ "msm_camera_ir_led");
+ media_entity_init(&ir_led_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ ir_led_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ir_led_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_IR_LED;
+ ir_led_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x1;
+
+ rc = msm_sd_register(&ir_led_ctrl->msm_sd);
+ if (rc < 0) {
+ pr_err("sub dev register failed for ir_led device\n");
+ return rc;
+ }
+
+ CDBG("ir_led sd name = %s\n",
+ ir_led_ctrl->msm_sd.sd.entity.name);
+ msm_ir_led_v4l2_subdev_fops = v4l2_subdev_fops;
+#ifdef CONFIG_COMPAT
+ msm_ir_led_v4l2_subdev_fops.compat_ioctl32 =
+ msm_ir_led_subdev_fops_ioctl;
+#endif
+ ir_led_ctrl->msm_sd.sd.devnode->fops = &msm_ir_led_v4l2_subdev_fops;
+
+ CDBG("probe success\n");
+ return rc;
+}
+
+MODULE_DEVICE_TABLE(of, msm_ir_led_dt_match);
+
+static struct platform_driver msm_ir_led_platform_driver = {
+ .probe = msm_ir_led_platform_probe,
+ .driver = {
+ .name = "qcom,ir-led",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ir_led_dt_match,
+ },
+};
+
+static int __init msm_ir_led_init_module(void)
+{
+ int32_t rc = 0;
+
+ rc = platform_driver_register(&msm_ir_led_platform_driver);
+ if (!rc)
+ return rc;
+
+ pr_err("ir-led driver register failed (%d)\n", rc);
+
+ return rc;
+}
+
+static void __exit msm_ir_led_exit_module(void)
+{
+ platform_driver_unregister(&msm_ir_led_platform_driver);
+}
+
+static struct msm_ir_led_table msm_default_ir_led_table = {
+ .ir_led_driver_type = IR_LED_DRIVER_DEFAULT,
+ .func_tbl = {
+ .camera_ir_led_init = msm_ir_led_init,
+ .camera_ir_led_release = msm_ir_led_release,
+ .camera_ir_led_off = msm_ir_led_off,
+ .camera_ir_led_on = msm_ir_led_on,
+ },
+};
+
+module_init(msm_ir_led_init_module);
+module_exit(msm_ir_led_exit_module);
+MODULE_DESCRIPTION("MSM IR LED");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.h b/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.h
new file mode 100644
index 000000000000..15141b97affb
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MSM_IR_LED_H
+#define MSM_IR_LED_H
+
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/ais/msm_ais_sensor.h>
+#include <soc/qcom/ais.h>
+#include "msm_sd.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+/* Default frequency is taken as 15 kHz */
+#define DEFAULT_PWM_TIME_PERIOD_NS 66667
+#define DEFAULT_PWM_DUTY_CYCLE_NS 0
+
+enum msm_camera_ir_led_state_t {
+ MSM_CAMERA_IR_LED_INIT,
+ MSM_CAMERA_IR_LED_RELEASE,
+};
+
+enum msm_ir_led_driver_type {
+ IR_LED_DRIVER_GPIO,
+ IR_LED_DRIVER_DEFAULT,
+};
+
+struct msm_ir_led_ctrl_t;
+
+struct msm_ir_led_func_t {
+ int32_t (*camera_ir_led_init)(struct msm_ir_led_ctrl_t *,
+ struct msm_ir_led_cfg_data_t *);
+ int32_t (*camera_ir_led_release)(struct msm_ir_led_ctrl_t *,
+ struct msm_ir_led_cfg_data_t *);
+ int32_t (*camera_ir_led_off)(struct msm_ir_led_ctrl_t *,
+ struct msm_ir_led_cfg_data_t *);
+ int32_t (*camera_ir_led_on)(struct msm_ir_led_ctrl_t *,
+ struct msm_ir_led_cfg_data_t *);
+};
+
+struct msm_ir_led_table {
+ enum msm_ir_led_driver_type ir_led_driver_type;
+ struct msm_ir_led_func_t func_tbl;
+};
+
+struct msm_ir_led_ctrl_t {
+ struct msm_sd_subdev msm_sd;
+ struct platform_device *pdev;
+ struct pwm_device *pwm_dev;
+ struct msm_ir_led_func_t *func_tbl;
+ struct msm_camera_power_ctrl_t power_info;
+
+ enum msm_camera_device_type_t ir_led_device_type;
+ struct mutex *ir_led_mutex;
+
+ /* ir_led driver type */
+ enum msm_ir_led_driver_type ir_led_driver_type;
+
+ /* ir_led state */
+ enum msm_camera_ir_led_state_t ir_led_state;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor.c b/drivers/media/platform/msm/ais/sensor/msm_sensor.c
new file mode 100644
index 000000000000..c671ea71d2a7
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor.c
@@ -0,0 +1,1583 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_sensor.h"
+#include "msm_sd.h"
+#include "msm_cci.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_i2c_mux.h"
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/regulator/consumer.h>
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl;
+static struct msm_camera_i2c_fn_t msm_sensor_secure_func_tbl;
+
+static void msm_sensor_adjust_mclk(struct msm_camera_power_ctrl_t *ctrl)
+{
+ int idx;
+ struct msm_sensor_power_setting *power_setting;
+
+ for (idx = 0; idx < ctrl->power_setting_size; idx++) {
+ power_setting = &ctrl->power_setting[idx];
+ if ((power_setting->seq_type == SENSOR_CLK) &&
+ (power_setting->seq_val == SENSOR_CAM_MCLK)) {
+ if (power_setting->config_val == 24000000) {
+ power_setting->config_val = 23880000;
+ CDBG("%s MCLK request adjusted to 23.88MHz\n"
+ , __func__);
+ }
+ break;
+ }
+ }
+}
+
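+/*
+ * Switch the sensor's auxiliary RPM regulator to high-power mode while
+ * the sensor is in use, and return it to automatic mode (releasing the
+ * handle) when disabled.
+ */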
+static void msm_sensor_misc_regulator(
+ struct msm_sensor_ctrl_t *sctrl, uint32_t enable)
+{
+ int32_t rc = 0;
+
+ if (enable) {
+ sctrl->misc_regulator = (void *)rpm_regulator_get(
+ &sctrl->pdev->dev, sctrl->sensordata->misc_regulator);
+ if (sctrl->misc_regulator) {
+ rc = rpm_regulator_set_mode(sctrl->misc_regulator,
+ RPM_REGULATOR_MODE_HPM);
+ if (rc < 0) {
+ pr_err("%s: Failed to set for rpm regulator on %s: %d\n",
+ __func__,
+ sctrl->sensordata->misc_regulator, rc);
+ rpm_regulator_put(sctrl->misc_regulator);
+ }
+ } else {
+ pr_err("%s: Failed to vote for rpm regulator on %s: %d\n",
+ __func__,
+ sctrl->sensordata->misc_regulator, rc);
+ }
+ } else {
+ if (sctrl->misc_regulator) {
+ rc = rpm_regulator_set_mode(
+ (struct rpm_regulator *)sctrl->misc_regulator,
+ RPM_REGULATOR_MODE_AUTO);
+ if (rc < 0)
+ pr_err("%s: Failed to set for rpm regulator on %s: %d\n",
+ __func__,
+ sctrl->sensordata->misc_regulator, rc);
+ rpm_regulator_put(sctrl->misc_regulator);
+ }
+ }
+}
+
+int32_t msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ if (!s_ctrl->pdev && !s_ctrl->sensor_i2c_client->client)
+ return 0;
+ kfree(s_ctrl->sensordata->slave_info);
+ kfree(s_ctrl->sensordata->cam_slave_info);
+ kfree(s_ctrl->sensordata->actuator_info);
+ kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info);
+ kfree(s_ctrl->sensordata->power_info.gpio_conf->cam_gpio_req_tbl);
+ kfree(s_ctrl->sensordata->power_info.gpio_conf);
+ kfree(s_ctrl->sensordata->power_info.cam_vreg);
+ kfree(s_ctrl->sensordata->power_info.power_setting);
+ kfree(s_ctrl->sensordata->power_info.power_down_setting);
+ kfree(s_ctrl->sensordata->csi_lane_params);
+ kfree(s_ctrl->sensordata->sensor_info);
+ if (s_ctrl->sensor_device_type == MSM_CAMERA_I2C_DEVICE) {
+ msm_camera_i2c_dev_put_clk_info(
+ &s_ctrl->sensor_i2c_client->client->dev,
+ &s_ctrl->sensordata->power_info.clk_info,
+ &s_ctrl->sensordata->power_info.clk_ptr,
+ s_ctrl->sensordata->power_info.clk_info_size);
+ } else {
+ msm_camera_put_clk_info(s_ctrl->pdev,
+ &s_ctrl->sensordata->power_info.clk_info,
+ &s_ctrl->sensordata->power_info.clk_ptr,
+ s_ctrl->sensordata->power_info.clk_info_size);
+ }
+
+ kfree(s_ctrl->sensordata);
+ return 0;
+}
+
+int msm_sensor_power_down(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ struct msm_camera_power_ctrl_t *power_info;
+ enum msm_camera_device_type_t sensor_device_type;
+ struct msm_camera_i2c_client *sensor_i2c_client;
+
+ if (!s_ctrl) {
+ pr_err("%s:%d failed: s_ctrl %pK\n",
+ __func__, __LINE__, s_ctrl);
+ return -EINVAL;
+ }
+
+ if (s_ctrl->is_csid_tg_mode)
+ return 0;
+
+ power_info = &s_ctrl->sensordata->power_info;
+ sensor_device_type = s_ctrl->sensor_device_type;
+ sensor_i2c_client = s_ctrl->sensor_i2c_client;
+
+ if (!power_info || !sensor_i2c_client) {
+ pr_err("%s:%d failed: power_info %pK sensor_i2c_client %pK\n",
+ __func__, __LINE__, power_info, sensor_i2c_client);
+ return -EINVAL;
+ }
+
+ /* Power down secure session if it exists */
+ if (s_ctrl->is_secure)
+ msm_camera_tz_i2c_power_down(sensor_i2c_client);
+
+ return msm_camera_power_down(power_info, sensor_device_type,
+ sensor_i2c_client);
+}
+
+int msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int rc;
+ struct msm_camera_power_ctrl_t *power_info;
+ struct msm_camera_i2c_client *sensor_i2c_client;
+ struct msm_camera_slave_info *slave_info;
+ const char *sensor_name;
+ uint32_t retry = 0;
+
+ if (!s_ctrl) {
+ pr_err("%s:%d failed: %pK\n",
+ __func__, __LINE__, s_ctrl);
+ return -EINVAL;
+ }
+
+ if (s_ctrl->is_csid_tg_mode)
+ return 0;
+
+ power_info = &s_ctrl->sensordata->power_info;
+ sensor_i2c_client = s_ctrl->sensor_i2c_client;
+ slave_info = s_ctrl->sensordata->slave_info;
+ sensor_name = s_ctrl->sensordata->sensor_name;
+
+ if (!power_info || !sensor_i2c_client || !slave_info ||
+ !sensor_name) {
+ pr_err("%s:%d failed: %pK %pK %pK %pK\n",
+ __func__, __LINE__, power_info,
+ sensor_i2c_client, slave_info, sensor_name);
+ return -EINVAL;
+ }
+
+ if (s_ctrl->set_mclk_23880000)
+ msm_sensor_adjust_mclk(power_info);
+
+ CDBG("Sensor %d tagged as %s\n", s_ctrl->id,
+ (s_ctrl->is_secure)?"SECURE":"NON-SECURE");
+
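+ /* retry power-up and chip-id match up to three times, powering the
+ * sensor back down and waiting 20 ms between attempts
+ */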
+ for (retry = 0; retry < 3; retry++) {
+ if (s_ctrl->is_secure) {
+ rc = msm_camera_tz_i2c_power_up(sensor_i2c_client);
+ if (rc < 0) {
+#ifdef CONFIG_MSM_AIS_SEC_CCI_DEBUG
+ CDBG("Secure Sensor %d use cci\n", s_ctrl->id);
+ /* session is not secure */
+ s_ctrl->sensor_i2c_client->i2c_func_tbl =
+ &msm_sensor_cci_func_tbl;
+#else /* CONFIG_MSM_AIS_SEC_CCI_DEBUG */
+ return rc;
+#endif /* CONFIG_MSM_AIS_SEC_CCI_DEBUG */
+ } else {
+ /* session is secure */
+ s_ctrl->sensor_i2c_client->i2c_func_tbl =
+ &msm_sensor_secure_func_tbl;
+ }
+ }
+ rc = msm_camera_power_up(power_info, s_ctrl->sensor_device_type,
+ sensor_i2c_client);
+ if (rc < 0)
+ return rc;
+ rc = msm_sensor_check_id(s_ctrl);
+ if (rc < 0) {
+ msm_camera_power_down(power_info,
+ s_ctrl->sensor_device_type, sensor_i2c_client);
+ msleep(20);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static uint16_t msm_sensor_id_by_mask(struct msm_sensor_ctrl_t *s_ctrl,
+ uint16_t chipid)
+{
+ uint16_t sensor_id = chipid;
+ int16_t sensor_id_mask = s_ctrl->sensordata->slave_info->sensor_id_mask;
+
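+ /*
+ * A zero mask means "compare every bit". Otherwise keep only the
+ * masked bits of the chip id and shift them down so the result is
+ * aligned with the expected sensor_id from the slave info.
+ */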
+ if (!sensor_id_mask)
+ sensor_id_mask = ~sensor_id_mask;
+
+ sensor_id &= sensor_id_mask;
+ sensor_id_mask &= -sensor_id_mask;
+ sensor_id_mask -= 1;
+ while (sensor_id_mask) {
+ sensor_id_mask >>= 1;
+ sensor_id >>= 1;
+ }
+ return sensor_id;
+}
+
+int msm_sensor_match_id(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int rc = 0;
+ uint16_t chipid = 0;
+ struct msm_camera_i2c_client *sensor_i2c_client;
+ struct msm_camera_slave_info *slave_info;
+ const char *sensor_name;
+
+ if (!s_ctrl) {
+ pr_err("%s:%d failed: %pK\n",
+ __func__, __LINE__, s_ctrl);
+ return -EINVAL;
+ }
+ sensor_i2c_client = s_ctrl->sensor_i2c_client;
+ slave_info = s_ctrl->sensordata->slave_info;
+ sensor_name = s_ctrl->sensordata->sensor_name;
+
+ if (!sensor_i2c_client || !slave_info || !sensor_name) {
+ pr_err("%s:%d failed: %pK %pK %pK\n",
+ __func__, __LINE__, sensor_i2c_client, slave_info,
+ sensor_name);
+ return -EINVAL;
+ }
+
+ rc = sensor_i2c_client->i2c_func_tbl->i2c_read(
+ sensor_i2c_client, slave_info->sensor_id_reg_addr,
+ &chipid, MSM_CAMERA_I2C_WORD_DATA);
+ if (rc < 0) {
+ pr_err("%s: %s: read id failed\n", __func__, sensor_name);
+ return rc;
+ }
+
+ pr_debug("%s: read id: 0x%x expected id 0x%x:\n",
+ __func__, chipid, slave_info->sensor_id);
+ if (msm_sensor_id_by_mask(s_ctrl, chipid) != slave_info->sensor_id) {
+ pr_err("%s chip id %x does not match %x\n",
+ __func__, chipid, slave_info->sensor_id);
+ return -ENODEV;
+ }
+ return rc;
+}
+
+static struct msm_sensor_ctrl_t *get_sctrl(struct v4l2_subdev *sd)
+{
+ return container_of(container_of(sd, struct msm_sd_subdev, sd),
+ struct msm_sensor_ctrl_t, msm_sd);
+}
+
+static void msm_sensor_stop_stream(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+
+ mutex_lock(s_ctrl->msm_sensor_mutex);
+ if (s_ctrl->sensor_state == MSM_SENSOR_POWER_UP) {
+ s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write_table(
+ s_ctrl->sensor_i2c_client, &s_ctrl->stop_setting);
+ kfree(s_ctrl->stop_setting.reg_setting);
+ s_ctrl->stop_setting.reg_setting = NULL;
+
+ if (s_ctrl->func_tbl->sensor_power_down) {
+ if (s_ctrl->sensordata->misc_regulator)
+ msm_sensor_misc_regulator(s_ctrl, 0);
+
+ rc = s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__,
+ __LINE__, rc);
+ }
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_DOWN;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
+ } else {
+ pr_err("s_ctrl->func_tbl NULL\n");
+ }
+ }
+ mutex_unlock(s_ctrl->msm_sensor_mutex);
+}
+
+static int msm_sensor_get_af_status(struct msm_sensor_ctrl_t *s_ctrl,
+ void __user *argp)
+{
+ /* TO-DO: Need to set AF status register address and expected value
+ * We need to check the AF status in the sensor register and
+ * set the status in the *status variable accordingly
+ */
+ return 0;
+}
+
+static int32_t msm_sensor_get_subdev_id(
+ struct msm_sensor_ctrl_t *s_ctrl, void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ if (!subdev_id) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ *subdev_id = s_ctrl->id;
+ pr_debug("%s:%d subdev_id %d\n", __func__, __LINE__, *subdev_id);
+ return 0;
+}
+
+static long msm_sensor_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(sd);
+ void __user *argp = (void __user *)arg;
+
+ if (!s_ctrl) {
+ pr_err("%s s_ctrl NULL\n", __func__);
+ return -EBADF;
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ rc = msm_sensor_get_subdev_id(s_ctrl, arg);
+ return rc;
+ case VIDIOC_MSM_SENSOR_INIT_CFG:
+#ifdef CONFIG_COMPAT
+ case VIDIOC_MSM_SENSOR_INIT_CFG32:
+ if (is_compat_task()) {
+ struct sensor_init_cfg_data32 *u32 =
+ (struct sensor_init_cfg_data32 *)argp;
+ struct sensor_init_cfg_data sensor_init_data;
+
+ memset(&sensor_init_data, 0, sizeof(sensor_init_data));
+ sensor_init_data.cfgtype = u32->cfgtype;
+ sensor_init_data.cfg.setting = compat_ptr(u32->cfg.
+ setting);
+ cmd = VIDIOC_MSM_SENSOR_INIT_CFG;
+ rc = msm_sensor_driver_cmd(&s_ctrl->s_init,
+ &sensor_init_data);
+ } else
+#endif
+ {
+ rc = msm_sensor_driver_cmd(&s_ctrl->s_init, argp);
+ }
+ return rc;
+
+ case VIDIOC_MSM_SENSOR_CFG:
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
+ rc = s_ctrl->func_tbl->sensor_config32(s_ctrl, argp);
+ else
+#endif
+ rc = s_ctrl->func_tbl->sensor_config(s_ctrl, argp);
+ return rc;
+ case VIDIOC_MSM_SENSOR_GET_AF_STATUS:
+ return msm_sensor_get_af_status(s_ctrl, argp);
+ case VIDIOC_MSM_SENSOR_RELEASE:
+ case MSM_SD_SHUTDOWN:
+ msm_sensor_stop_stream(s_ctrl);
+ return 0;
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_UNNOTIFY_FREEZE:
+ return 0;
+ default:
+ pr_err("%s unknown command %d\n", __func__, cmd);
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_sensor_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_CFG32:
+ cmd = VIDIOC_MSM_SENSOR_CFG;
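+ /* fall through to the common handler */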
+ default:
+ return msm_sensor_subdev_ioctl(sd, cmd, arg);
+ }
+}
+
+long msm_sensor_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_sensor_subdev_do_ioctl);
+}
+
+static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
+ void __user *argp)
+{
+ struct sensorb_cfg_data32 *cdata = (struct sensorb_cfg_data32 *)argp;
+ int32_t rc = 0;
+ int32_t i = 0;
+
+ mutex_lock(s_ctrl->msm_sensor_mutex);
+ CDBG("%s:%d %s cfgtype = %d\n", __func__, __LINE__,
+ s_ctrl->sensordata->sensor_name, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CFG_GET_SENSOR_INFO:
+ memcpy(cdata->cfg.sensor_info.sensor_name,
+ s_ctrl->sensordata->sensor_name,
+ sizeof(cdata->cfg.sensor_info.sensor_name));
+ cdata->cfg.sensor_info.session_id =
+ s_ctrl->sensordata->sensor_info->session_id;
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ cdata->cfg.sensor_info.subdev_id[i] =
+ s_ctrl->sensordata->sensor_info->subdev_id[i];
+ cdata->cfg.sensor_info.subdev_intf[i] =
+ s_ctrl->sensordata->sensor_info->subdev_intf[i];
+ }
+ cdata->cfg.sensor_info.is_mount_angle_valid =
+ s_ctrl->sensordata->sensor_info->is_mount_angle_valid;
+ cdata->cfg.sensor_info.sensor_mount_angle =
+ s_ctrl->sensordata->sensor_info->sensor_mount_angle;
+ cdata->cfg.sensor_info.position =
+ s_ctrl->sensordata->sensor_info->position;
+ cdata->cfg.sensor_info.modes_supported =
+ s_ctrl->sensordata->sensor_info->modes_supported;
+ CDBG("%s:%d sensor name %s\n", __func__, __LINE__,
+ cdata->cfg.sensor_info.sensor_name);
+ CDBG("%s:%d session id %d\n", __func__, __LINE__,
+ cdata->cfg.sensor_info.session_id);
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ CDBG("%s:%d subdev_id[%d] %d\n", __func__, __LINE__, i,
+ cdata->cfg.sensor_info.subdev_id[i]);
+ CDBG("%s:%d subdev_intf[%d] %d\n", __func__, __LINE__,
+ i, cdata->cfg.sensor_info.subdev_intf[i]);
+ }
+ CDBG("%s:%d mount angle valid %d value %d\n", __func__,
+ __LINE__, cdata->cfg.sensor_info.is_mount_angle_valid,
+ cdata->cfg.sensor_info.sensor_mount_angle);
+
+ break;
+ case CFG_GET_SENSOR_INIT_PARAMS:
+ cdata->cfg.sensor_init_params.modes_supported =
+ s_ctrl->sensordata->sensor_info->modes_supported;
+ cdata->cfg.sensor_init_params.position =
+ s_ctrl->sensordata->sensor_info->position;
+ cdata->cfg.sensor_init_params.sensor_mount_angle =
+ s_ctrl->sensordata->sensor_info->sensor_mount_angle;
+ CDBG("%s:%d init params mode %d pos %d mount %d\n", __func__,
+ __LINE__,
+ cdata->cfg.sensor_init_params.modes_supported,
+ cdata->cfg.sensor_init_params.position,
+ cdata->cfg.sensor_init_params.sensor_mount_angle);
+ break;
+ case CFG_WRITE_I2C_ARRAY:
+ case CFG_WRITE_I2C_ARRAY_SYNC:
+ case CFG_WRITE_I2C_ARRAY_SYNC_BLOCK:
+ case CFG_WRITE_I2C_ARRAY_ASYNC: {
+ struct msm_camera_i2c_reg_setting32 conf_array32;
+ struct msm_camera_i2c_reg_setting conf_array;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&conf_array32,
+ (void *)compat_ptr(cdata->cfg.setting),
+ sizeof(struct msm_camera_i2c_reg_setting32))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.addr_type = conf_array32.addr_type;
+ conf_array.data_type = conf_array32.data_type;
+ conf_array.delay = conf_array32.delay;
+ conf_array.size = conf_array32.size;
+ conf_array.reg_setting = compat_ptr(conf_array32.reg_setting);
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!reg_setting) {
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting,
+ (void *)(conf_array.reg_setting),
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.reg_setting = reg_setting;
+
+ if (cdata->cfgtype == CFG_WRITE_I2C_ARRAY)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else if (cdata->cfgtype == CFG_WRITE_I2C_ARRAY_ASYNC)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_async(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else if (cdata->cfgtype == CFG_WRITE_I2C_ARRAY_SYNC_BLOCK)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_sync_block(
+ s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_sync(s_ctrl->sensor_i2c_client,
+ &conf_array);
+
+ kfree(reg_setting);
+ break;
+ }
+ case CFG_SLAVE_READ_I2C: {
+ struct msm_camera_i2c_read_config read_config;
+ struct msm_camera_i2c_read_config *read_config_ptr = NULL;
+ uint16_t local_data = 0;
+ uint16_t orig_slave_addr = 0, read_slave_addr = 0;
+ uint16_t orig_addr_type = 0, read_addr_type = 0;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ read_config_ptr =
+ (struct msm_camera_i2c_read_config *)
+ compat_ptr(cdata->cfg.setting);
+
+ if (copy_from_user(&read_config, read_config_ptr,
+ sizeof(struct msm_camera_i2c_read_config))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ read_slave_addr = read_config.slave_addr;
+ read_addr_type = read_config.addr_type;
+
+ CDBG("%s:CFG_SLAVE_READ_I2C:", __func__);
+ CDBG("%s:slave_addr=0x%x reg_addr=0x%x, data_type=%d\n",
+ __func__, read_config.slave_addr,
+ read_config.reg_addr, read_config.data_type);
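+ /* temporarily retarget the CCI/I2C client at the requested slave
+ * address; the original address and addr_type are restored after
+ * the read
+ */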
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->cci_client->sid;
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ read_slave_addr >> 1;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->client->addr;
+ s_ctrl->sensor_i2c_client->client->addr =
+ read_slave_addr >> 1;
+ } else {
+ pr_err("%s: error: no i2c/cci client found.", __func__);
+ rc = -EFAULT;
+ break;
+ }
+ CDBG("%s:orig_slave_addr=0x%x, new_slave_addr=0x%x",
+ __func__, orig_slave_addr,
+ read_slave_addr >> 1);
+
+ orig_addr_type = s_ctrl->sensor_i2c_client->addr_type;
+ s_ctrl->sensor_i2c_client->addr_type = read_addr_type;
+
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_read(
+ s_ctrl->sensor_i2c_client,
+ read_config.reg_addr,
+ &local_data, read_config.data_type);
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ orig_slave_addr;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ s_ctrl->sensor_i2c_client->client->addr =
+ orig_slave_addr;
+ }
+ s_ctrl->sensor_i2c_client->addr_type = orig_addr_type;
+
+ pr_debug("slave_read %x %x %x\n", read_slave_addr,
+ read_config.reg_addr, local_data);
+
+ if (rc < 0) {
+ pr_err("%s:%d: i2c_read failed\n", __func__, __LINE__);
+ break;
+ }
+ read_config_ptr->data = local_data;
+ break;
+ }
+ case CFG_SLAVE_WRITE_I2C_ARRAY: {
+ struct msm_camera_i2c_array_write_config32 write_config32;
+ struct msm_camera_i2c_array_write_config write_config;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+ uint16_t orig_slave_addr = 0, write_slave_addr = 0;
+ uint16_t orig_addr_type = 0, write_addr_type = 0;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (copy_from_user(&write_config32,
+ (void *)compat_ptr(cdata->cfg.setting),
+ sizeof(
+ struct msm_camera_i2c_array_write_config32))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ write_config.slave_addr = write_config32.slave_addr;
+ write_config.conf_array.addr_type =
+ write_config32.conf_array.addr_type;
+ write_config.conf_array.data_type =
+ write_config32.conf_array.data_type;
+ write_config.conf_array.delay =
+ write_config32.conf_array.delay;
+ write_config.conf_array.size =
+ write_config32.conf_array.size;
+ write_config.conf_array.reg_setting =
+ compat_ptr(write_config32.conf_array.reg_setting);
+
+ pr_debug("%s:CFG_SLAVE_WRITE_I2C_ARRAY:\n", __func__);
+ pr_debug("%s:slave_addr=0x%x, array_size=%d addr_type=%d data_type=%d\n",
+ __func__,
+ write_config.slave_addr,
+ write_config.conf_array.size,
+ write_config.conf_array.addr_type,
+ write_config.conf_array.data_type);
+
+ if (!write_config.conf_array.size ||
+ write_config.conf_array.size > I2C_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(write_config.conf_array.size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!reg_setting) {
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting,
+ (void *)(write_config.conf_array.reg_setting),
+ write_config.conf_array.size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+ write_config.conf_array.reg_setting = reg_setting;
+ write_slave_addr = write_config.slave_addr;
+ write_addr_type = write_config.conf_array.addr_type;
+
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->cci_client->sid;
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ write_slave_addr >> 1;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->client->addr;
+ s_ctrl->sensor_i2c_client->client->addr =
+ write_slave_addr >> 1;
+ } else {
+ pr_err("%s: error: no i2c/cci client found.",
+ __func__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ pr_debug("%s:orig_slave_addr=0x%x, new_slave_addr=0x%x\n",
+ __func__, orig_slave_addr,
+ write_slave_addr >> 1);
+ orig_addr_type = s_ctrl->sensor_i2c_client->addr_type;
+ s_ctrl->sensor_i2c_client->addr_type = write_addr_type;
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write_table(
+ s_ctrl->sensor_i2c_client, &(write_config.conf_array));
+
+ s_ctrl->sensor_i2c_client->addr_type = orig_addr_type;
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ orig_slave_addr;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ s_ctrl->sensor_i2c_client->client->addr =
+ orig_slave_addr;
+ } else {
+ pr_err("%s: error: no i2c/cci client found.\n",
+ __func__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+ kfree(reg_setting);
+ break;
+ }
+ case CFG_WRITE_I2C_SEQ_ARRAY: {
+ struct msm_camera_i2c_seq_reg_setting32 conf_array32;
+ struct msm_camera_i2c_seq_reg_setting conf_array;
+ struct msm_camera_i2c_seq_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&conf_array32,
+ (void *)compat_ptr(cdata->cfg.setting),
+ sizeof(struct msm_camera_i2c_seq_reg_setting32))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.addr_type = conf_array32.addr_type;
+ conf_array.delay = conf_array32.delay;
+ conf_array.size = conf_array32.size;
+ conf_array.reg_setting = compat_ptr(conf_array32.reg_setting);
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_SEQ_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_seq_reg_array)),
+ GFP_KERNEL);
+ if (!reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_seq_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.reg_setting = reg_setting;
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_seq_table(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ kfree(reg_setting);
+ break;
+ }
+
+ case CFG_POWER_UP:
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_DOWN) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+ if (s_ctrl->func_tbl->sensor_power_up) {
+ if (s_ctrl->sensordata->misc_regulator)
+ msm_sensor_misc_regulator(s_ctrl, 1);
+
+ rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__,
+ __LINE__, rc);
+ break;
+ }
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_UP;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
+ } else {
+ rc = -EFAULT;
+ }
+ break;
+ case CFG_POWER_DOWN:
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ kfree(s_ctrl->stop_setting.reg_setting);
+ s_ctrl->stop_setting.reg_setting = NULL;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+ if (s_ctrl->func_tbl->sensor_power_down) {
+ if (s_ctrl->sensordata->misc_regulator)
+ msm_sensor_misc_regulator(s_ctrl, 0);
+
+ rc = s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__,
+ __LINE__, rc);
+ break;
+ }
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_DOWN;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
+ } else {
+ rc = -EFAULT;
+ }
+ break;
+ case CFG_SET_STOP_STREAM_SETTING: {
+ struct msm_camera_i2c_reg_setting32 stop_setting32;
+ struct msm_camera_i2c_reg_setting *stop_setting =
+ &s_ctrl->stop_setting;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (copy_from_user(&stop_setting32,
+ (void *)compat_ptr((cdata->cfg.setting)),
+ sizeof(struct msm_camera_i2c_reg_setting32))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ stop_setting->addr_type = stop_setting32.addr_type;
+ stop_setting->data_type = stop_setting32.data_type;
+ stop_setting->delay = stop_setting32.delay;
+ stop_setting->size = stop_setting32.size;
+
+ reg_setting = compat_ptr(stop_setting32.reg_setting);
+
+ if ((!stop_setting->size) ||
+ (stop_setting->size > I2C_SEQ_REG_DATA_MAX)) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ stop_setting->reg_setting = kzalloc(stop_setting->size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!stop_setting->reg_setting) {
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(stop_setting->reg_setting,
+ (void *)reg_setting,
+ stop_setting->size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(stop_setting->reg_setting);
+ stop_setting->reg_setting = NULL;
+ stop_setting->size = 0;
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case CFG_SET_I2C_SYNC_PARAM: {
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ s_ctrl->sensor_i2c_client->cci_client->cid =
+ cdata->cfg.sensor_i2c_sync_params.cid;
+ s_ctrl->sensor_i2c_client->cci_client->id_map =
+ cdata->cfg.sensor_i2c_sync_params.csid;
+
+ CDBG("I2C_SYNC_PARAM CID:%d, line:%d delay:%d, cdid:%d\n",
+ s_ctrl->sensor_i2c_client->cci_client->cid,
+ cdata->cfg.sensor_i2c_sync_params.line,
+ cdata->cfg.sensor_i2c_sync_params.delay,
+ cdata->cfg.sensor_i2c_sync_params.csid);
+
+ cci_ctrl.cmd = MSM_CCI_SET_SYNC_CID;
+ cci_ctrl.cfg.cci_wait_sync_cfg.line =
+ cdata->cfg.sensor_i2c_sync_params.line;
+ cci_ctrl.cfg.cci_wait_sync_cfg.delay =
+ cdata->cfg.sensor_i2c_sync_params.delay;
+ cci_ctrl.cfg.cci_wait_sync_cfg.cid =
+ cdata->cfg.sensor_i2c_sync_params.cid;
+ cci_ctrl.cfg.cci_wait_sync_cfg.csid =
+ cdata->cfg.sensor_i2c_sync_params.csid;
+ rc = v4l2_subdev_call(s_ctrl->sensor_i2c_client->
+ cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+DONE:
+ mutex_unlock(s_ctrl->msm_sensor_mutex);
+
+ return rc;
+}
+#endif
+
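+/*
+ * Native (non-compat) configuration path; mirrors msm_sensor_config32
+ * but reads the userspace structures directly, without 32-bit conversion.
+ */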
+int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
+{
+ struct sensorb_cfg_data *cdata = (struct sensorb_cfg_data *)argp;
+ int32_t rc = 0;
+ int32_t i = 0;
+
+ mutex_lock(s_ctrl->msm_sensor_mutex);
+ CDBG("%s:%d %s cfgtype = %d\n", __func__, __LINE__,
+ s_ctrl->sensordata->sensor_name, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CFG_GET_SENSOR_INFO:
+ memcpy(cdata->cfg.sensor_info.sensor_name,
+ s_ctrl->sensordata->sensor_name,
+ sizeof(cdata->cfg.sensor_info.sensor_name));
+ cdata->cfg.sensor_info.session_id =
+ s_ctrl->sensordata->sensor_info->session_id;
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ cdata->cfg.sensor_info.subdev_id[i] =
+ s_ctrl->sensordata->sensor_info->subdev_id[i];
+ cdata->cfg.sensor_info.subdev_intf[i] =
+ s_ctrl->sensordata->sensor_info->subdev_intf[i];
+ }
+ cdata->cfg.sensor_info.is_mount_angle_valid =
+ s_ctrl->sensordata->sensor_info->is_mount_angle_valid;
+ cdata->cfg.sensor_info.sensor_mount_angle =
+ s_ctrl->sensordata->sensor_info->sensor_mount_angle;
+ cdata->cfg.sensor_info.position =
+ s_ctrl->sensordata->sensor_info->position;
+ cdata->cfg.sensor_info.modes_supported =
+ s_ctrl->sensordata->sensor_info->modes_supported;
+ CDBG("%s:%d sensor name %s\n", __func__, __LINE__,
+ cdata->cfg.sensor_info.sensor_name);
+ CDBG("%s:%d session id %d\n", __func__, __LINE__,
+ cdata->cfg.sensor_info.session_id);
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ CDBG("%s:%d subdev_id[%d] %d\n", __func__, __LINE__, i,
+ cdata->cfg.sensor_info.subdev_id[i]);
+ CDBG("%s:%d subdev_intf[%d] %d\n", __func__, __LINE__,
+ i, cdata->cfg.sensor_info.subdev_intf[i]);
+ }
+ CDBG("%s:%d mount angle valid %d value %d\n", __func__,
+ __LINE__, cdata->cfg.sensor_info.is_mount_angle_valid,
+ cdata->cfg.sensor_info.sensor_mount_angle);
+
+ break;
+ case CFG_GET_SENSOR_INIT_PARAMS:
+ cdata->cfg.sensor_init_params.modes_supported =
+ s_ctrl->sensordata->sensor_info->modes_supported;
+ cdata->cfg.sensor_init_params.position =
+ s_ctrl->sensordata->sensor_info->position;
+ cdata->cfg.sensor_init_params.sensor_mount_angle =
+ s_ctrl->sensordata->sensor_info->sensor_mount_angle;
+ CDBG("%s:%d init params mode %d pos %d mount %d\n", __func__,
+ __LINE__,
+ cdata->cfg.sensor_init_params.modes_supported,
+ cdata->cfg.sensor_init_params.position,
+ cdata->cfg.sensor_init_params.sensor_mount_angle);
+ break;
+
+ case CFG_WRITE_I2C_ARRAY:
+ case CFG_WRITE_I2C_ARRAY_SYNC:
+ case CFG_WRITE_I2C_ARRAY_SYNC_BLOCK:
+ case CFG_WRITE_I2C_ARRAY_ASYNC: {
+ struct msm_camera_i2c_reg_setting conf_array;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&conf_array,
+ (void *)cdata->cfg.setting,
+ sizeof(struct msm_camera_i2c_reg_setting))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!reg_setting) {
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.reg_setting = reg_setting;
+ if (cdata->cfgtype == CFG_WRITE_I2C_ARRAY)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else if (cdata->cfgtype == CFG_WRITE_I2C_ARRAY_ASYNC)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_async(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else if (cdata->cfgtype == CFG_WRITE_I2C_ARRAY_SYNC_BLOCK)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_sync_block(
+ s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_sync(s_ctrl->sensor_i2c_client,
+ &conf_array);
+
+ kfree(reg_setting);
+ break;
+ }
+ case CFG_SLAVE_READ_I2C: {
+ struct msm_camera_i2c_read_config read_config;
+ struct msm_camera_i2c_read_config *read_config_ptr = NULL;
+ uint16_t local_data = 0;
+ uint16_t orig_slave_addr = 0, read_slave_addr = 0;
+ uint16_t orig_addr_type = 0, read_addr_type = 0;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ read_config_ptr =
+ (struct msm_camera_i2c_read_config *)cdata->cfg.setting;
+ if (copy_from_user(&read_config, read_config_ptr,
+ sizeof(struct msm_camera_i2c_read_config))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ read_slave_addr = read_config.slave_addr;
+ read_addr_type = read_config.addr_type;
+ CDBG("%s:CFG_SLAVE_READ_I2C:", __func__);
+ CDBG("%s:slave_addr=0x%x reg_addr=0x%x, data_type=%d\n",
+ __func__, read_config.slave_addr,
+ read_config.reg_addr, read_config.data_type);
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->cci_client->sid;
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ read_slave_addr >> 1;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->client->addr;
+ s_ctrl->sensor_i2c_client->client->addr =
+ read_slave_addr >> 1;
+ } else {
+ pr_err("%s: error: no i2c/cci client found.", __func__);
+ rc = -EFAULT;
+ break;
+ }
+ CDBG("%s:orig_slave_addr=0x%x, new_slave_addr=0x%x",
+ __func__, orig_slave_addr,
+ read_slave_addr >> 1);
+
+ orig_addr_type = s_ctrl->sensor_i2c_client->addr_type;
+ s_ctrl->sensor_i2c_client->addr_type = read_addr_type;
+
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_read(
+ s_ctrl->sensor_i2c_client,
+ read_config.reg_addr,
+ &local_data, read_config.data_type);
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ orig_slave_addr;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ s_ctrl->sensor_i2c_client->client->addr =
+ orig_slave_addr;
+ }
+ s_ctrl->sensor_i2c_client->addr_type = orig_addr_type;
+
+ if (rc < 0) {
+ pr_err("%s:%d: i2c_read failed\n", __func__, __LINE__);
+ break;
+ }
+ read_config_ptr->data = local_data;
+ break;
+ }
+ case CFG_SLAVE_WRITE_I2C_ARRAY: {
+ struct msm_camera_i2c_array_write_config write_config;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+ uint16_t orig_slave_addr = 0, write_slave_addr = 0;
+ uint16_t orig_addr_type = 0, write_addr_type = 0;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (copy_from_user(&write_config,
+ (void *)cdata->cfg.setting,
+ sizeof(struct msm_camera_i2c_array_write_config))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ CDBG("%s:CFG_SLAVE_WRITE_I2C_ARRAY:", __func__);
+ CDBG("%s:slave_addr=0x%x, array_size=%d\n", __func__,
+ write_config.slave_addr,
+ write_config.conf_array.size);
+
+ if (!write_config.conf_array.size ||
+ write_config.conf_array.size > I2C_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(write_config.conf_array.size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!reg_setting) {
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting,
+ (void *)(write_config.conf_array.reg_setting),
+ write_config.conf_array.size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+ write_config.conf_array.reg_setting = reg_setting;
+ write_slave_addr = write_config.slave_addr;
+ write_addr_type = write_config.conf_array.addr_type;
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->cci_client->sid;
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ write_slave_addr >> 1;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->client->addr;
+ s_ctrl->sensor_i2c_client->client->addr =
+ write_slave_addr >> 1;
+ } else {
+ pr_err("%s: error: no i2c/cci client found.", __func__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+ CDBG("%s:orig_slave_addr=0x%x, new_slave_addr=0x%x",
+ __func__, orig_slave_addr,
+ write_slave_addr >> 1);
+ orig_addr_type = s_ctrl->sensor_i2c_client->addr_type;
+ s_ctrl->sensor_i2c_client->addr_type = write_addr_type;
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write_table(
+ s_ctrl->sensor_i2c_client, &(write_config.conf_array));
+ s_ctrl->sensor_i2c_client->addr_type = orig_addr_type;
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ orig_slave_addr;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ s_ctrl->sensor_i2c_client->client->addr =
+ orig_slave_addr;
+ } else {
+ pr_err("%s: error: no i2c/cci client found.", __func__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+ kfree(reg_setting);
+ break;
+ }
+ case CFG_WRITE_I2C_SEQ_ARRAY: {
+ struct msm_camera_i2c_seq_reg_setting conf_array;
+ struct msm_camera_i2c_seq_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&conf_array,
+ (void *)cdata->cfg.setting,
+ sizeof(struct msm_camera_i2c_seq_reg_setting))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_SEQ_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_seq_reg_array)),
+ GFP_KERNEL);
+ if (!reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_seq_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.reg_setting = reg_setting;
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_seq_table(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ kfree(reg_setting);
+ break;
+ }
+
+ case CFG_POWER_UP:
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_DOWN) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+ if (s_ctrl->func_tbl->sensor_power_up) {
+ if (s_ctrl->sensordata->misc_regulator)
+ msm_sensor_misc_regulator(s_ctrl, 1);
+
+ rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__,
+ __LINE__, rc);
+ break;
+ }
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_UP;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
+ } else {
+ rc = -EFAULT;
+ }
+ break;
+
+ case CFG_POWER_DOWN:
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ kfree(s_ctrl->stop_setting.reg_setting);
+ s_ctrl->stop_setting.reg_setting = NULL;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+ if (s_ctrl->func_tbl->sensor_power_down) {
+ if (s_ctrl->sensordata->misc_regulator)
+ msm_sensor_misc_regulator(s_ctrl, 0);
+
+ rc = s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__,
+ __LINE__, rc);
+ break;
+ }
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_DOWN;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
+ } else {
+ rc = -EFAULT;
+ }
+ break;
+
+ case CFG_SET_STOP_STREAM_SETTING: {
+ struct msm_camera_i2c_reg_setting *stop_setting =
+ &s_ctrl->stop_setting;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (copy_from_user(stop_setting,
+ (void *)cdata->cfg.setting,
+ sizeof(struct msm_camera_i2c_reg_setting))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = stop_setting->reg_setting;
+
+ if ((!stop_setting->size) ||
+ (stop_setting->size > I2C_SEQ_REG_DATA_MAX)) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ stop_setting->reg_setting = kzalloc(stop_setting->size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!stop_setting->reg_setting) {
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(stop_setting->reg_setting,
+ (void *)reg_setting,
+ stop_setting->size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(stop_setting->reg_setting);
+ stop_setting->reg_setting = NULL;
+ stop_setting->size = 0;
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case CFG_SET_I2C_SYNC_PARAM: {
+ struct msm_camera_cci_ctrl cci_ctrl;
+
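+ /* Program the CCI wait-sync parameters (cid/csid/line/delay)
+ * from userspace and push them to the CCI subdev.
+ */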
+ s_ctrl->sensor_i2c_client->cci_client->cid =
+ cdata->cfg.sensor_i2c_sync_params.cid;
+ s_ctrl->sensor_i2c_client->cci_client->id_map =
+ cdata->cfg.sensor_i2c_sync_params.csid;
+
+ CDBG("I2C_SYNC_PARAM CID:%d, line:%d delay:%d, cdid:%d\n",
+ s_ctrl->sensor_i2c_client->cci_client->cid,
+ cdata->cfg.sensor_i2c_sync_params.line,
+ cdata->cfg.sensor_i2c_sync_params.delay,
+ cdata->cfg.sensor_i2c_sync_params.csid);
+
+ cci_ctrl.cmd = MSM_CCI_SET_SYNC_CID;
+ cci_ctrl.cfg.cci_wait_sync_cfg.line =
+ cdata->cfg.sensor_i2c_sync_params.line;
+ cci_ctrl.cfg.cci_wait_sync_cfg.delay =
+ cdata->cfg.sensor_i2c_sync_params.delay;
+ cci_ctrl.cfg.cci_wait_sync_cfg.cid =
+ cdata->cfg.sensor_i2c_sync_params.cid;
+ cci_ctrl.cfg.cci_wait_sync_cfg.csid =
+ cdata->cfg.sensor_i2c_sync_params.csid;
+ rc = v4l2_subdev_call(s_ctrl->sensor_i2c_client->
+ cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
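+ /* In CSID test-generator (tg) mode there is no physical sensor,
+ * so the cases above jump here to skip the I2C operations.
+ */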
+DONE:
+ mutex_unlock(s_ctrl->msm_sensor_mutex);
+
+ return rc;
+}
+
+int msm_sensor_check_id(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int rc;
+
+ if (s_ctrl->func_tbl->sensor_match_id)
+ rc = s_ctrl->func_tbl->sensor_match_id(s_ctrl);
+ else
+ rc = msm_sensor_match_id(s_ctrl);
+ if (rc < 0)
+ pr_err("%s:%d match id failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+}
+
+static int msm_sensor_power(struct v4l2_subdev *sd, int on)
+{
+ int rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(sd);
+
+ mutex_lock(s_ctrl->msm_sensor_mutex);
+ if (!on && s_ctrl->sensor_state == MSM_SENSOR_POWER_UP) {
+ s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_DOWN;
+ }
+ mutex_unlock(s_ctrl->msm_sensor_mutex);
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_sensor_subdev_core_ops = {
+ .ioctl = msm_sensor_subdev_ioctl,
+ .s_power = msm_sensor_power,
+};
+
+static struct v4l2_subdev_ops msm_sensor_subdev_ops = {
+ .core = &msm_sensor_subdev_core_ops,
+};
+
+static struct msm_sensor_fn_t msm_sensor_func_tbl = {
+ .sensor_config = msm_sensor_config,
+#ifdef CONFIG_COMPAT
+ .sensor_config32 = msm_sensor_config32,
+#endif
+ .sensor_power_up = msm_sensor_power_up,
+ .sensor_power_down = msm_sensor_power_down,
+ .sensor_match_id = msm_sensor_match_id,
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_write_conf_tbl = msm_camera_cci_i2c_write_conf_tbl,
+ .i2c_write_table_async = msm_camera_cci_i2c_write_table_async,
+ .i2c_write_table_sync = msm_camera_cci_i2c_write_table_sync,
+ .i2c_write_table_sync_block = msm_camera_cci_i2c_write_table_sync_block,
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_qup_func_tbl = {
+ .i2c_read = msm_camera_qup_i2c_read,
+ .i2c_read_seq = msm_camera_qup_i2c_read_seq,
+ .i2c_write = msm_camera_qup_i2c_write,
+ .i2c_write_table = msm_camera_qup_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_qup_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_qup_i2c_write_table_w_microdelay,
+ .i2c_write_conf_tbl = msm_camera_qup_i2c_write_conf_tbl,
+ .i2c_write_table_async = msm_camera_qup_i2c_write_table,
+ .i2c_write_table_sync = msm_camera_qup_i2c_write_table,
+ .i2c_write_table_sync_block = msm_camera_qup_i2c_write_table,
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_secure_func_tbl = {
+ .i2c_read = msm_camera_tz_i2c_read,
+ .i2c_read_seq = msm_camera_tz_i2c_read_seq,
+ .i2c_write = msm_camera_tz_i2c_write,
+ .i2c_write_table = msm_camera_tz_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_tz_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_tz_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_tz_i2c_util,
+ .i2c_write_conf_tbl = msm_camera_tz_i2c_write_conf_tbl,
+ .i2c_write_table_async = msm_camera_tz_i2c_write_table_async,
+ .i2c_write_table_sync = msm_camera_tz_i2c_write_table_sync,
+ .i2c_write_table_sync_block = msm_camera_tz_i2c_write_table_sync_block,
+};
+
+int32_t msm_sensor_init_default_params(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ struct msm_camera_cci_client *cci_client = NULL;
+ unsigned long mount_pos = 0;
+
+ /* Validate input parameters */
+ if (!s_ctrl) {
+ pr_err("%s:%d failed: invalid params s_ctrl %pK\n", __func__,
+ __LINE__, s_ctrl);
+ return -EINVAL;
+ }
+
+ if (!s_ctrl->sensor_i2c_client) {
+ pr_err("%s:%d failed: invalid params sensor_i2c_client %pK\n",
+ __func__, __LINE__, s_ctrl->sensor_i2c_client);
+ return -EINVAL;
+ }
+
+ /* Initialize cci_client */
+ s_ctrl->sensor_i2c_client->cci_client = kzalloc(sizeof(
+ struct msm_camera_cci_client), GFP_KERNEL);
+ if (!s_ctrl->sensor_i2c_client->cci_client)
+ return -ENOMEM;
+
+ if (s_ctrl->sensor_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ cci_client = s_ctrl->sensor_i2c_client->cci_client;
+
+ /* Get CCI subdev */
+ cci_client->cci_subdev = msm_cci_get_subdev();
+
+ if (s_ctrl->is_secure)
+ msm_camera_tz_i2c_register_sensor((void *)s_ctrl);
+
+ /* Update CCI / I2C function table */
+ if (!s_ctrl->sensor_i2c_client->i2c_func_tbl)
+ s_ctrl->sensor_i2c_client->i2c_func_tbl =
+ &msm_sensor_cci_func_tbl;
+ } else {
+ if (!s_ctrl->sensor_i2c_client->i2c_func_tbl) {
+ CDBG("%s:%d\n", __func__, __LINE__);
+ s_ctrl->sensor_i2c_client->i2c_func_tbl =
+ &msm_sensor_qup_func_tbl;
+ }
+ }
+
+ /* Update function table driven by ioctl */
+ if (!s_ctrl->func_tbl)
+ s_ctrl->func_tbl = &msm_sensor_func_tbl;
+
+ /* Update v4l2 subdev ops table */
+ if (!s_ctrl->sensor_v4l2_subdev_ops)
+ s_ctrl->sensor_v4l2_subdev_ops = &msm_sensor_subdev_ops;
+
+ /* Update sensor mount angle and position in media entity flag */
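+ /* flags layout: sensor position in bits 16+, (mount angle / 90)
+ * in bits 8+.
+ */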
+ mount_pos = s_ctrl->sensordata->sensor_info->position << 16;
+ mount_pos = mount_pos | ((s_ctrl->sensordata->sensor_info->
+ sensor_mount_angle / 90) << 8);
+ s_ctrl->msm_sd.sd.entity.flags = mount_pos | MEDIA_ENT_FL_DEFAULT;
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor.h b/drivers/media/platform/msm/ais/sensor/msm_sensor.h
new file mode 100644
index 000000000000..060383b05170
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor.h
@@ -0,0 +1,129 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SENSOR_H
+#define MSM_SENSOR_H
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <soc/qcom/ais.h>
+#include <media/ais/msm_ais_sensor.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include "msm_camera_i2c.h"
+#include "msm_camera_dt_util.h"
+#include "msm_sd.h"
+#include "msm_sensor_init.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+enum msm_sensor_sensor_slave_info_type {
+ MSM_SENSOR_SLAVEADDR_DATA,
+ MSM_SENSOR_IDREGADDR_DATA,
+ MSM_SENSOR_SENSOR_ID_DATA,
+ MSM_SENSOR_SENIDMASK_DATA,
+ MSM_SENSOR_NUM_ID_INFO_DATA,
+};
+
+struct msm_sensor_ctrl_t;
+
+enum msm_sensor_state_t {
+ MSM_SENSOR_POWER_DOWN,
+ MSM_SENSOR_POWER_UP,
+};
+
+struct msm_sensor_fn_t {
+ int (*sensor_config)(struct msm_sensor_ctrl_t *, void __user *);
+#ifdef CONFIG_COMPAT
+ int (*sensor_config32)(struct msm_sensor_ctrl_t *, void __user *);
+#endif
+ int (*sensor_power_down)(struct msm_sensor_ctrl_t *);
+ int (*sensor_power_up)(struct msm_sensor_ctrl_t *);
+ int (*sensor_match_id)(struct msm_sensor_ctrl_t *);
+};
+
+struct msm_sensor_ctrl_t {
+ struct platform_device *pdev;
+ struct mutex *msm_sensor_mutex;
+
+ enum msm_camera_device_type_t sensor_device_type;
+ struct msm_camera_sensor_board_info *sensordata;
+ struct msm_sensor_power_setting_array power_setting_array;
+ struct msm_sensor_packed_cfg_t *cfg_override;
+ struct msm_sd_subdev msm_sd;
+ enum cci_i2c_master_t cci_i2c_master;
+
+ struct msm_camera_i2c_client *sensor_i2c_client;
+ struct v4l2_subdev_info *sensor_v4l2_subdev_info;
+ uint8_t sensor_v4l2_subdev_info_size;
+ struct v4l2_subdev_ops *sensor_v4l2_subdev_ops;
+ struct msm_sensor_fn_t *func_tbl;
+ struct msm_camera_i2c_reg_setting stop_setting;
+ void *misc_regulator;
+ enum msm_sensor_state_t sensor_state;
+ uint8_t is_probe_succeed;
+ uint32_t id;
+ struct device_node *of_node;
+ enum msm_camera_stream_type_t camera_stream_type;
+ uint32_t set_mclk_23880000;
+ uint8_t is_csid_tg_mode;
+ uint32_t is_secure;
+
+ struct msm_sensor_init_t s_init;
+};
+
+int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp);
+
+int msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl);
+
+int msm_sensor_power_down(struct msm_sensor_ctrl_t *s_ctrl);
+
+int msm_sensor_check_id(struct msm_sensor_ctrl_t *s_ctrl);
+
+int msm_sensor_match_id(struct msm_sensor_ctrl_t *s_ctrl);
+
+int msm_sensor_update_cfg(struct msm_sensor_ctrl_t *s_ctrl);
+
+int msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl);
+
+int32_t msm_sensor_init_default_params(struct msm_sensor_ctrl_t *s_ctrl);
+
+int32_t msm_sensor_get_dt_gpio_req_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size);
+
+int32_t msm_sensor_get_dt_gpio_set_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size);
+
+int32_t msm_sensor_init_gpio_pin_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size);
+#ifdef CONFIG_COMPAT
+long msm_sensor_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg);
+#endif
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c
new file mode 100644
index 000000000000..80c15717325c
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c
@@ -0,0 +1,1309 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define SENSOR_DRIVER_I2C "i2c_camera"
+/* Header file declaration */
+#include "msm_sensor.h"
+#include "msm_sd.h"
+#include "camera.h"
+#include "msm_cci.h"
+#include "msm_camera_dt_util.h"
+
+/* Logging macro */
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define SENSOR_MAX_MOUNTANGLE (360)
+
+static struct v4l2_file_operations msm_sensor_v4l2_subdev_fops;
+static int32_t msm_sensor_driver_platform_probe(struct platform_device *pdev);
+
+/* Static declaration */
+static struct msm_sensor_ctrl_t *g_sctrl[MAX_CAMERAS];
+
+static int msm_sensor_platform_remove(struct platform_device *pdev)
+{
+ struct msm_sensor_ctrl_t *s_ctrl;
+
+ pr_err("%s: sensor FREE\n", __func__);
+
+ s_ctrl = g_sctrl[pdev->id];
+ if (!s_ctrl) {
+ pr_err("%s: sensor device is NULL\n", __func__);
+ return 0;
+ }
+
+ msm_sensor_free_sensor_data(s_ctrl);
+ kfree(s_ctrl->msm_sensor_mutex);
+ kfree(s_ctrl->sensor_i2c_client);
+ kfree(s_ctrl);
+ g_sctrl[pdev->id] = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id msm_sensor_driver_dt_match[] = {
+ {.compatible = "qcom,camera"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_sensor_driver_dt_match);
+
+static struct platform_driver msm_sensor_platform_driver = {
+ .probe = msm_sensor_driver_platform_probe,
+ .driver = {
+ .name = "qcom,camera",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_sensor_driver_dt_match,
+ },
+ .remove = msm_sensor_platform_remove,
+};
+
+static struct v4l2_subdev_info msm_sensor_driver_subdev_info[] = {
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .fmt = 1,
+ .order = 0,
+ },
+};
+
+static int32_t msm_sensor_driver_create_i2c_v4l_subdev
+ (struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ uint32_t session_id = 0;
+ struct i2c_client *client = s_ctrl->sensor_i2c_client->client;
+
+ CDBG("%s %s I2c probe succeeded\n", __func__, client->name);
+ rc = camera_init_v4l2(&client->dev, &session_id);
+ if (rc < 0) {
+ pr_err("failed: camera_init_i2c_v4l2 rc %d", rc);
+ return rc;
+ }
+ CDBG("%s rc %d session_id %d\n", __func__, rc, session_id);
+ snprintf(s_ctrl->msm_sd.sd.name,
+ sizeof(s_ctrl->msm_sd.sd.name), "%s",
+ s_ctrl->sensordata->sensor_name);
+ v4l2_i2c_subdev_init(&s_ctrl->msm_sd.sd, client,
+ s_ctrl->sensor_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&s_ctrl->msm_sd.sd, client);
+ s_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&s_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ s_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR;
+ s_ctrl->msm_sd.sd.entity.name = s_ctrl->msm_sd.sd.name;
+ s_ctrl->sensordata->sensor_info->session_id = session_id;
+ s_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x3;
+ msm_sd_register(&s_ctrl->msm_sd);
+ msm_sensor_v4l2_subdev_fops = v4l2_subdev_fops;
+#ifdef CONFIG_COMPAT
+ msm_sensor_v4l2_subdev_fops.compat_ioctl32 =
+ msm_sensor_subdev_fops_ioctl;
+#endif
+ s_ctrl->msm_sd.sd.devnode->fops =
+ &msm_sensor_v4l2_subdev_fops;
+ CDBG("%s:%d\n", __func__, __LINE__);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_create_v4l_subdev
+ (struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+
+ /* Create /dev/v4l-subdevX device */
+ v4l2_subdev_init(&s_ctrl->msm_sd.sd, s_ctrl->sensor_v4l2_subdev_ops);
+ snprintf(s_ctrl->msm_sd.sd.name, sizeof(s_ctrl->msm_sd.sd.name), "%s",
+ s_ctrl->sensordata->sensor_name);
+ v4l2_set_subdevdata(&s_ctrl->msm_sd.sd, s_ctrl->pdev);
+ s_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&s_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ s_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR;
+ s_ctrl->msm_sd.sd.entity.name = s_ctrl->msm_sd.sd.name;
+ s_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x3;
+ msm_sd_register(&s_ctrl->msm_sd);
+ msm_cam_copy_v4l2_subdev_fops(&msm_sensor_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_sensor_v4l2_subdev_fops.compat_ioctl32 =
+ msm_sensor_subdev_fops_ioctl;
+#endif
+ s_ctrl->msm_sd.sd.devnode->fops =
+ &msm_sensor_v4l2_subdev_fops;
+
+ return rc;
+}
+
+static int32_t msm_sensor_fill_eeprom_subdevid_by_name(
+ struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ const char *eeprom_name;
+ struct device_node *src_node = NULL;
+ uint32_t val = 0, eeprom_name_len;
+ int32_t *eeprom_subdev_id, i, userspace_probe = 0;
+ int32_t count = 0;
+ struct msm_sensor_info_t *sensor_info;
+ struct device_node *of_node = s_ctrl->of_node;
+ const void *p;
+
+ if (!s_ctrl->sensordata->eeprom_name || !of_node)
+ return -EINVAL;
+
+ eeprom_name_len = strlen(s_ctrl->sensordata->eeprom_name);
+ if (eeprom_name_len >= MAX_SENSOR_NAME)
+ return -EINVAL;
+
+ sensor_info = s_ctrl->sensordata->sensor_info;
+ eeprom_subdev_id = &sensor_info->subdev_id[SUB_MODULE_EEPROM];
+ /*
+ * The eeprom name string is valid; set the subdev id to -1
+ * and try to find a new id below.
+ */
+ *eeprom_subdev_id = -1;
+
+ if (eeprom_name_len == 0)
+ return 0;
+
+ p = of_get_property(of_node, "qcom,eeprom-src", &count);
+ if (!p || !count)
+ return 0;
+
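+ /* of_get_property() returns the property length in bytes;
+ * convert it to the number of phandle entries.
+ */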
+ count /= sizeof(uint32_t);
+ for (i = 0; i < count; i++) {
+ userspace_probe = 0;
+ eeprom_name = NULL;
+ src_node = of_parse_phandle(of_node, "qcom,eeprom-src", i);
+ if (!src_node) {
+ pr_err("eeprom src node NULL\n");
+ continue;
+ }
+ /* For an eeprom probed from the kernel, the eeprom name must be
+ * present; otherwise the entry is treated as a userspace probe.
+ */
+ rc = of_property_read_string(src_node, "qcom,eeprom-name",
+ &eeprom_name);
+ if (rc < 0) {
+ pr_err("%s:%d Eeprom userspace probe for %s\n",
+ __func__, __LINE__,
+ s_ctrl->sensordata->eeprom_name);
+ of_node_put(src_node);
+ userspace_probe = 1;
+ if (count > 1)
+ return -EINVAL;
+ }
+ if (!userspace_probe &&
+ strcmp(eeprom_name, s_ctrl->sensordata->eeprom_name))
+ continue;
+
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ if (rc < 0) {
+ pr_err("%s qcom,eeprom cell index %d, rc %d\n",
+ __func__, val, rc);
+ of_node_put(src_node);
+ if (userspace_probe)
+ return -EINVAL;
+ continue;
+ }
+
+ *eeprom_subdev_id = val;
+ CDBG("%s:%d Eeprom subdevice id is %d\n",
+ __func__, __LINE__, val);
+ of_node_put(src_node);
+ src_node = NULL;
+ break;
+ }
+
+ return rc;
+}
+
+static int32_t msm_sensor_fill_actuator_subdevid_by_name(
+ struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ struct device_node *src_node = NULL;
+ uint32_t val = 0, actuator_name_len;
+ int32_t *actuator_subdev_id;
+ struct msm_sensor_info_t *sensor_info;
+ struct device_node *of_node = s_ctrl->of_node;
+
+ if (!s_ctrl->sensordata->actuator_name || !of_node)
+ return -EINVAL;
+
+ actuator_name_len = strlen(s_ctrl->sensordata->actuator_name);
+ if (actuator_name_len >= MAX_SENSOR_NAME)
+ return -EINVAL;
+
+ sensor_info = s_ctrl->sensordata->sensor_info;
+ actuator_subdev_id = &sensor_info->subdev_id[SUB_MODULE_ACTUATOR];
+ /*
+ * The actuator name string is valid; set the subdev id to -1
+ * and try to find a new id below.
+ */
+ *actuator_subdev_id = -1;
+
+ if (actuator_name_len == 0)
+ return 0;
+
+ src_node = of_parse_phandle(of_node, "qcom,actuator-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,actuator cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ *actuator_subdev_id = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ return rc;
+}
+
+static int32_t msm_sensor_fill_ois_subdevid_by_name(
+ struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ struct device_node *src_node = NULL;
+ uint32_t val = 0, ois_name_len;
+ int32_t *ois_subdev_id;
+ struct msm_sensor_info_t *sensor_info;
+ struct device_node *of_node = s_ctrl->of_node;
+
+ if (!s_ctrl->sensordata->ois_name || !of_node)
+ return -EINVAL;
+
+ ois_name_len = strlen(s_ctrl->sensordata->ois_name);
+ if (ois_name_len >= MAX_SENSOR_NAME)
+ return -EINVAL;
+
+ sensor_info = s_ctrl->sensordata->sensor_info;
+ ois_subdev_id = &sensor_info->subdev_id[SUB_MODULE_OIS];
+ /*
+ * The ois name string is valid; set the subdev id to -1
+ * and try to find a new id below.
+ */
+ *ois_subdev_id = -1;
+
+ if (ois_name_len == 0)
+ return 0;
+
+ src_node = of_parse_phandle(of_node, "qcom,ois-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,ois cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ *ois_subdev_id = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ return rc;
+}
+
+static int32_t msm_sensor_fill_slave_info_init_params(
+ struct msm_camera_sensor_slave_info *slave_info,
+ struct msm_sensor_info_t *sensor_info)
+{
+ struct msm_sensor_init_params *sensor_init_params;
+
+ if (!slave_info || !sensor_info)
+ return -EINVAL;
+
+ sensor_init_params = &slave_info->sensor_init_params;
+ if (sensor_init_params->position != INVALID_CAMERA_B)
+ sensor_info->position =
+ sensor_init_params->position;
+
+ if (sensor_init_params->sensor_mount_angle < SENSOR_MAX_MOUNTANGLE) {
+ sensor_info->sensor_mount_angle =
+ sensor_init_params->sensor_mount_angle;
+ sensor_info->is_mount_angle_valid = 1;
+ }
+
+ if (sensor_init_params->modes_supported != CAMERA_MODE_INVALID)
+ sensor_info->modes_supported =
+ sensor_init_params->modes_supported;
+
+ return 0;
+}
+
+static int32_t msm_sensor_validate_slave_info(
+ struct msm_sensor_info_t *sensor_info)
+{
+ if (sensor_info->position == INVALID_CAMERA_B) {
+ sensor_info->position = BACK_CAMERA_B;
+ CDBG("%s:%d Set default sensor position\n",
+ __func__, __LINE__);
+ }
+ if (sensor_info->modes_supported == CAMERA_MODE_INVALID) {
+ sensor_info->modes_supported = CAMERA_MODE_2D_B;
+ CDBG("%s:%d Set default sensor modes_supported\n",
+ __func__, __LINE__);
+ }
+ if (sensor_info->sensor_mount_angle >= SENSOR_MAX_MOUNTANGLE) {
+ sensor_info->sensor_mount_angle = 0;
+ CDBG("%s:%d Set default sensor mount angle\n",
+ __func__, __LINE__);
+ sensor_info->is_mount_angle_valid = 1;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static int32_t msm_sensor_get_pw_settings_compat(
+ struct msm_sensor_power_setting *ps,
+ struct msm_sensor_power_setting *us_ps, uint32_t size)
+{
+ int32_t rc = 0, i = 0;
+ struct msm_sensor_power_setting32 *ps32 =
+ kzalloc(sizeof(*ps32) * size, GFP_KERNEL);
+
+ if (!ps32) {
+ pr_err("failed: no memory ps32");
+ return -ENOMEM;
+ }
+ if (copy_from_user(ps32, (void *)us_ps, sizeof(*ps32) * size)) {
+ pr_err("failed: copy_from_user");
+ kfree(ps32);
+ return -EFAULT;
+ }
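+ /* Widen each 32-bit power setting into the native structure. */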
+ for (i = 0; i < size; i++) {
+ ps[i].config_val = ps32[i].config_val;
+ ps[i].delay = ps32[i].delay;
+ ps[i].seq_type = ps32[i].seq_type;
+ ps[i].seq_val = ps32[i].seq_val;
+ }
+ kfree(ps32);
+ return rc;
+}
+#endif
+
+static int32_t msm_sensor_create_pd_settings(void *setting,
+ struct msm_sensor_power_setting *pd, uint32_t size_down,
+ struct msm_sensor_power_setting *pu)
+{
+ int32_t rc = 0;
+ int c, end;
+ struct msm_sensor_power_setting pd_tmp;
+
+ pr_err("Generating power_down_setting");
+
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ int i = 0;
+ struct msm_sensor_power_setting32 *power_setting_iter =
+ (struct msm_sensor_power_setting32 *)compat_ptr((
+ (struct msm_camera_sensor_slave_info32 *)setting)->
+ power_setting_array.power_setting);
+
+ for (i = 0; i < size_down; i++) {
+ pd[i].config_val = power_setting_iter[i].config_val;
+ pd[i].delay = power_setting_iter[i].delay;
+ pd[i].seq_type = power_setting_iter[i].seq_type;
+ pd[i].seq_val = power_setting_iter[i].seq_val;
+ }
+ } else
+#endif
+ {
+ if (copy_from_user(pd, (void *)pu, sizeof(*pd) * size_down)) {
+ pr_err("failed: copy_from_user");
+ return -EFAULT;
+ }
+ }
+ /* Reverse the power-up sequence in place to form the power-down order */
+ end = size_down - 1;
+ for (c = 0; c < size_down/2; c++) {
+ pd_tmp = pd[c];
+ pd[c] = pd[end];
+ pd[end] = pd_tmp;
+ end--;
+ }
+ return rc;
+}
+
+static int32_t msm_sensor_get_power_down_settings(void *setting,
+ struct msm_camera_sensor_slave_info *slave_info,
+ struct msm_camera_power_ctrl_t *power_info)
+{
+ int32_t rc = 0;
+ uint16_t size_down = 0;
+ uint16_t i = 0;
+ struct msm_sensor_power_setting *pd = NULL;
+
+ /* DOWN */
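+ /* If userspace did not supply a valid power-down size, reuse the
+ * power-up size; the entries are then either copied from the
+ * power_down_setting array or derived by reversing the power-up
+ * sequence.
+ */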
+ size_down = slave_info->power_setting_array.size_down;
+ if (!size_down || size_down > MAX_POWER_CONFIG)
+ size_down = slave_info->power_setting_array.size;
+ /* Validate size_down */
+ if (size_down > MAX_POWER_CONFIG) {
+ pr_err("failed: invalid size_down %d", size_down);
+ return -EINVAL;
+ }
+ /* Allocate memory for power down setting */
+ pd = kcalloc(size_down, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ if (slave_info->power_setting_array.power_down_setting) {
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ rc = msm_sensor_get_pw_settings_compat(
+ pd, slave_info->power_setting_array.
+ power_down_setting, size_down);
+ if (rc < 0) {
+ pr_err("failed");
+ kfree(pd);
+ return -EFAULT;
+ }
+ } else
+#endif
+ if (copy_from_user(pd, (void *)slave_info->power_setting_array.
+ power_down_setting, sizeof(*pd) * size_down)) {
+ pr_err("failed: copy_from_user");
+ kfree(pd);
+ return -EFAULT;
+ }
+ } else {
+ rc = msm_sensor_create_pd_settings(setting, pd, size_down,
+ slave_info->power_setting_array.power_setting);
+ if (rc < 0) {
+ pr_err("failed");
+ kfree(pd);
+ return -EFAULT;
+ }
+ }
+
+ /* Fill power down setting and power down setting size */
+ power_info->power_down_setting = pd;
+ power_info->power_down_setting_size = size_down;
+
+ /* Print power setting */
+ for (i = 0; i < size_down; i++) {
+ CDBG("DOWN seq_type %d seq_val %d config_val %ld delay %d",
+ pd[i].seq_type, pd[i].seq_val,
+ pd[i].config_val, pd[i].delay);
+ }
+ return rc;
+}
+
+static int32_t msm_sensor_get_power_up_settings(void *setting,
+ struct msm_camera_sensor_slave_info *slave_info,
+ struct msm_camera_power_ctrl_t *power_info)
+{
+ int32_t rc = 0;
+ uint16_t size = 0;
+ uint16_t i = 0;
+ struct msm_sensor_power_setting *pu = NULL;
+
+ size = slave_info->power_setting_array.size;
+
+ /* Validate size */
+ if ((size == 0) || (size > MAX_POWER_CONFIG)) {
+ pr_err("failed: invalid power_setting size_up = %d\n", size);
+ return -EINVAL;
+ }
+
+ /* Allocate memory for power up setting */
+ pu = kcalloc(size, sizeof(*pu), GFP_KERNEL);
+ if (!pu)
+ return -ENOMEM;
+
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ rc = msm_sensor_get_pw_settings_compat(pu,
+ slave_info->power_setting_array.
+ power_setting, size);
+ if (rc < 0) {
+ pr_err("failed");
+ kfree(pu);
+ return -EFAULT;
+ }
+ } else
+#endif
+ {
+ if (copy_from_user(pu,
+ (void *)slave_info->power_setting_array.power_setting,
+ sizeof(*pu) * size)) {
+ pr_err("failed: copy_from_user");
+ kfree(pu);
+ return -EFAULT;
+ }
+ }
+
+ /* Print power setting */
+ for (i = 0; i < size; i++) {
+ CDBG("UP seq_type %d seq_val %d config_val %ld delay %d",
+ pu[i].seq_type, pu[i].seq_val,
+ pu[i].config_val, pu[i].delay);
+ }
+
+ /* Fill power up setting and power up setting size */
+ power_info->power_setting = pu;
+ power_info->power_setting_size = size;
+
+ return rc;
+}
+
+static int32_t msm_sensor_get_power_settings(void *setting,
+ struct msm_camera_sensor_slave_info *slave_info,
+ struct msm_camera_power_ctrl_t *power_info)
+{
+ int32_t rc = 0;
+
+ rc = msm_sensor_get_power_up_settings(setting, slave_info, power_info);
+ if (rc < 0) {
+ pr_err("failed");
+ return -EINVAL;
+ }
+
+ rc = msm_sensor_get_power_down_settings(setting, slave_info,
+ power_info);
+ if (rc < 0) {
+ pr_err("failed");
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static void msm_sensor_fill_sensor_info(struct msm_sensor_ctrl_t *s_ctrl,
+ struct msm_sensor_info_t *sensor_info, char *entity_name)
+{
+ uint32_t i;
+
+ if (!s_ctrl || !sensor_info) {
+ pr_err("%s:failed\n", __func__);
+ return;
+ }
+
+ strlcpy(sensor_info->sensor_name, s_ctrl->sensordata->sensor_name,
+ MAX_SENSOR_NAME);
+
+ sensor_info->session_id = s_ctrl->sensordata->sensor_info->session_id;
+
+ s_ctrl->sensordata->sensor_info->subdev_id[SUB_MODULE_SENSOR] =
+ s_ctrl->sensordata->sensor_info->session_id;
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ sensor_info->subdev_id[i] =
+ s_ctrl->sensordata->sensor_info->subdev_id[i];
+ sensor_info->subdev_intf[i] =
+ s_ctrl->sensordata->sensor_info->subdev_intf[i];
+ }
+
+ sensor_info->is_mount_angle_valid =
+ s_ctrl->sensordata->sensor_info->is_mount_angle_valid;
+ sensor_info->sensor_mount_angle =
+ s_ctrl->sensordata->sensor_info->sensor_mount_angle;
+ sensor_info->modes_supported =
+ s_ctrl->sensordata->sensor_info->modes_supported;
+ sensor_info->position =
+ s_ctrl->sensordata->sensor_info->position;
+
+ strlcpy(entity_name, s_ctrl->msm_sd.sd.entity.name, MAX_SENSOR_NAME);
+}
+
+/* Exported probe entry point, called from msm_sensor_driver_cmd() */
+int32_t msm_sensor_driver_probe(void *setting,
+ struct msm_sensor_info_t *probed_info, char *entity_name)
+{
+ int32_t rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl = NULL;
+ struct msm_camera_cci_client *cci_client = NULL;
+ struct msm_camera_sensor_slave_info *slave_info = NULL;
+ struct msm_camera_slave_info *camera_info = NULL;
+
+ unsigned long mount_pos = 0;
+ uint32_t is_yuv;
+
+ /* Validate input parameters */
+ if (!setting) {
+ pr_err("failed: slave_info %pK", setting);
+ return -EINVAL;
+ }
+
+ /* Allocate memory for slave info */
+ slave_info = kzalloc(sizeof(*slave_info), GFP_KERNEL);
+ if (!slave_info)
+ return -ENOMEM;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ struct msm_camera_sensor_slave_info32 *slave_info32 =
+ kzalloc(sizeof(*slave_info32), GFP_KERNEL);
+ if (!slave_info32) {
+ pr_err("failed: no memory for slave_info32 %pK\n",
+ slave_info32);
+ rc = -ENOMEM;
+ goto free_slave_info;
+ }
+ if (copy_from_user((void *)slave_info32, setting,
+ sizeof(*slave_info32))) {
+ pr_err("failed: copy_from_user");
+ rc = -EFAULT;
+ kfree(slave_info32);
+ goto free_slave_info;
+ }
+
+ strlcpy(slave_info->actuator_name, slave_info32->actuator_name,
+ sizeof(slave_info->actuator_name));
+
+ strlcpy(slave_info->eeprom_name, slave_info32->eeprom_name,
+ sizeof(slave_info->eeprom_name));
+
+ strlcpy(slave_info->sensor_name, slave_info32->sensor_name,
+ sizeof(slave_info->sensor_name));
+
+ strlcpy(slave_info->ois_name, slave_info32->ois_name,
+ sizeof(slave_info->ois_name));
+
+ strlcpy(slave_info->flash_name, slave_info32->flash_name,
+ sizeof(slave_info->flash_name));
+
+ slave_info->addr_type = slave_info32->addr_type;
+ slave_info->camera_id = slave_info32->camera_id;
+
+ slave_info->i2c_freq_mode = slave_info32->i2c_freq_mode;
+ slave_info->sensor_id_info = slave_info32->sensor_id_info;
+
+ slave_info->slave_addr = slave_info32->slave_addr;
+ slave_info->power_setting_array.size =
+ slave_info32->power_setting_array.size;
+ slave_info->power_setting_array.size_down =
+ slave_info32->power_setting_array.size_down;
+ slave_info->power_setting_array.power_setting =
+ compat_ptr(slave_info32->
+ power_setting_array.power_setting);
+ slave_info->power_setting_array.power_down_setting =
+ compat_ptr(slave_info32->
+ power_setting_array.power_down_setting);
+ slave_info->sensor_init_params =
+ slave_info32->sensor_init_params;
+ slave_info->output_format =
+ slave_info32->output_format;
+ kfree(slave_info32);
+ } else
+#endif
+ {
+ if (copy_from_user(slave_info,
+ (void *)setting, sizeof(*slave_info))) {
+ pr_err("failed: copy_from_user");
+ rc = -EFAULT;
+ goto free_slave_info;
+ }
+ }
+
+ /* Print slave info */
+ CDBG("camera id %d Slave addr 0x%X addr_type %d\n",
+ slave_info->camera_id, slave_info->slave_addr,
+ slave_info->addr_type);
+ CDBG("sensor_id_reg_addr 0x%X sensor_id 0x%X sensor id mask %d",
+ slave_info->sensor_id_info.sensor_id_reg_addr,
+ slave_info->sensor_id_info.sensor_id,
+ slave_info->sensor_id_info.sensor_id_mask);
+ CDBG("power up size %d power down size %d\n",
+ slave_info->power_setting_array.size,
+ slave_info->power_setting_array.size_down);
+ CDBG("position %d",
+ slave_info->sensor_init_params.position);
+ CDBG("mount %d",
+ slave_info->sensor_init_params.sensor_mount_angle);
+
+ /* Validate camera id */
+ if (slave_info->camera_id >= MAX_CAMERAS) {
+ pr_err("failed: invalid camera id %d max %d",
+ slave_info->camera_id, MAX_CAMERAS);
+ rc = -EINVAL;
+ goto free_slave_info;
+ }
+
+ /* Extract s_ctrl from camera id */
+ s_ctrl = g_sctrl[slave_info->camera_id];
+ if (!s_ctrl) {
+ pr_err("failed: s_ctrl %pK for camera_id %d", s_ctrl,
+ slave_info->camera_id);
+ rc = -EINVAL;
+ goto free_slave_info;
+ }
+
+ CDBG("s_ctrl[%d] %pK", slave_info->camera_id, s_ctrl);
+
+ if (s_ctrl->is_probe_succeed == 1) {
+ /*
+ * A sensor has already been probed successfully on this camera
+ * slot. If it is the same sensor, just return its info;
+ * otherwise ignore this probe request.
+ */
+ if (slave_info->sensor_id_info.sensor_id ==
+ s_ctrl->sensordata->cam_slave_info->
+ sensor_id_info.sensor_id) {
+ pr_err("slot%d: sensor id%d already probed\n",
+ slave_info->camera_id,
+ s_ctrl->sensordata->cam_slave_info->
+ sensor_id_info.sensor_id);
+ msm_sensor_fill_sensor_info(s_ctrl,
+ probed_info, entity_name);
+ } else
+ pr_err("slot %d has some other sensor\n",
+ slave_info->camera_id);
+
+ rc = 0;
+ goto free_slave_info;
+ }
+
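+ /* No power settings and no slave address selects CSID
+ * test-generator mode; skip physical sensor setup and probing.
+ */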
+ if ((slave_info->power_setting_array.size == 0) &&
+ (slave_info->slave_addr == 0)) {
+ s_ctrl->is_csid_tg_mode = 1;
+ goto CSID_TG;
+ }
+
+ rc = msm_sensor_get_power_settings(setting, slave_info,
+ &s_ctrl->sensordata->power_info);
+ if (rc < 0) {
+ pr_err("failed");
+ goto free_slave_info;
+ }
+
+ camera_info = kzalloc(sizeof(struct msm_camera_slave_info), GFP_KERNEL);
+ if (!camera_info) {
+ rc = -ENOMEM;
+ goto free_slave_info;
+ }
+
+ s_ctrl->sensordata->slave_info = camera_info;
+
+ /* Fill sensor slave info */
+ camera_info->sensor_slave_addr = slave_info->slave_addr;
+ camera_info->sensor_id_reg_addr =
+ slave_info->sensor_id_info.sensor_id_reg_addr;
+ camera_info->sensor_id = slave_info->sensor_id_info.sensor_id;
+ camera_info->sensor_id_mask = slave_info->sensor_id_info.sensor_id_mask;
+
+ /* Fill CCI master, slave address and CCI default params */
+ if (!s_ctrl->sensor_i2c_client) {
+ pr_err("failed: sensor_i2c_client %pK",
+ s_ctrl->sensor_i2c_client);
+ rc = -EINVAL;
+ goto free_camera_info;
+ }
+ /* Fill sensor address type */
+ s_ctrl->sensor_i2c_client->addr_type = slave_info->addr_type;
+ if (s_ctrl->sensor_i2c_client->client)
+ s_ctrl->sensor_i2c_client->client->addr =
+ camera_info->sensor_slave_addr;
+
+ cci_client = s_ctrl->sensor_i2c_client->cci_client;
+ if (!cci_client) {
+ pr_err("failed: cci_client %pK", cci_client);
+ rc = -EINVAL;
+ goto free_camera_info;
+ }
+ cci_client->cci_i2c_master = s_ctrl->cci_i2c_master;
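+ /* CCI expects a 7-bit slave id, so drop the R/W bit from the
+ * 8-bit address supplied by userspace.
+ */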
+ cci_client->sid = slave_info->slave_addr >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->i2c_freq_mode = slave_info->i2c_freq_mode;
+
+ /* Parse and fill vreg params for powerup settings */
+ rc = msm_camera_fill_vreg_params(
+ s_ctrl->sensordata->power_info.cam_vreg,
+ s_ctrl->sensordata->power_info.num_vreg,
+ s_ctrl->sensordata->power_info.power_setting,
+ s_ctrl->sensordata->power_info.power_setting_size);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_get_dt_power_setting_data rc %d",
+ rc);
+ goto free_camera_info;
+ }
+
+ /* Parse and fill vreg params for powerdown settings*/
+ rc = msm_camera_fill_vreg_params(
+ s_ctrl->sensordata->power_info.cam_vreg,
+ s_ctrl->sensordata->power_info.num_vreg,
+ s_ctrl->sensordata->power_info.power_down_setting,
+ s_ctrl->sensordata->power_info.power_down_setting_size);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_fill_vreg_params for PDOWN rc %d",
+ rc);
+ goto free_camera_info;
+ }
+
+CSID_TG:
+ /* Update sensor, actuator and eeprom name in
+ * sensor control structure
+ */
+ s_ctrl->sensordata->sensor_name = slave_info->sensor_name;
+ s_ctrl->sensordata->eeprom_name = slave_info->eeprom_name;
+ s_ctrl->sensordata->actuator_name = slave_info->actuator_name;
+ s_ctrl->sensordata->ois_name = slave_info->ois_name;
+ /*
+ * Update eeprom subdevice id based on the supplied eeprom name
+ */
+ rc = msm_sensor_fill_eeprom_subdevid_by_name(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_camera_info;
+ }
+ /*
+ * Update actuator subdevice Id by input actuator name
+ */
+ rc = msm_sensor_fill_actuator_subdevid_by_name(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_camera_info;
+ }
+
+ rc = msm_sensor_fill_ois_subdevid_by_name(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_camera_info;
+ }
+
+ /* Power up and probe sensor */
+ rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s power up failed", slave_info->sensor_name);
+ goto free_camera_info;
+ }
+
+ pr_err("%s probe succeeded", slave_info->sensor_name);
+
+ /* Set probe succeeded flag to 1 so that no other camera shall be
+ * probed on this slot
+ */
+ s_ctrl->is_probe_succeed = 1;
+
+ /*
+ * Update the subdevice id of flash-src based on availability in kernel.
+ */
+ if (strlen(slave_info->flash_name) == 0) {
+ s_ctrl->sensordata->sensor_info->
+ subdev_id[SUB_MODULE_LED_FLASH] = -1;
+ }
+
+ /* Power down */
+ s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+
+ rc = msm_sensor_fill_slave_info_init_params(
+ slave_info,
+ s_ctrl->sensordata->sensor_info);
+ if (rc < 0) {
+ pr_err("%s Fill slave info failed", slave_info->sensor_name);
+ goto free_camera_info;
+ }
+ rc = msm_sensor_validate_slave_info(s_ctrl->sensordata->sensor_info);
+ if (rc < 0) {
+ pr_err("%s Validate slave info failed",
+ slave_info->sensor_name);
+ goto free_camera_info;
+ }
+ /* Update sensor mount angle and position in media entity flag */
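+ /* flags layout: bit 26 = secure, bit 25 = YUV, sensor position
+ * in bits 16+, (mount angle / 90) in bits 8+.
+ */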
+ is_yuv = (slave_info->output_format == MSM_SENSOR_YCBCR) ? 1 : 0;
+ mount_pos = ((s_ctrl->is_secure & 0x1) << 26) | is_yuv << 25 |
+ (s_ctrl->sensordata->sensor_info->position << 16) |
+ ((s_ctrl->sensordata->
+ sensor_info->sensor_mount_angle / 90) << 8);
+
+ s_ctrl->msm_sd.sd.entity.flags = mount_pos | MEDIA_ENT_FL_DEFAULT;
+
+ /* Save sensor info */
+ s_ctrl->sensordata->cam_slave_info = slave_info;
+
+ msm_sensor_fill_sensor_info(s_ctrl, probed_info, entity_name);
+
+ return rc;
+
+free_camera_info:
+ kfree(camera_info);
+free_slave_info:
+ kfree(slave_info);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ struct msm_camera_sensor_board_info *sensordata = NULL;
+ struct device_node *of_node = s_ctrl->of_node;
+ uint32_t cell_id;
+
+ s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL);
+ if (!s_ctrl->sensordata)
+ return -ENOMEM;
+
+ sensordata = s_ctrl->sensordata;
+
+ /*
+ * Read cell index - this cell index will be the camera slot where
+ * this camera will be mounted
+ */
+ rc = of_property_read_u32(of_node, "cell-index", &cell_id);
+ if (rc < 0) {
+ pr_err("failed: cell-index rc %d", rc);
+ goto FREE_SENSOR_DATA;
+ }
+ s_ctrl->id = cell_id;
+
+ /* Validate cell_id */
+ if (cell_id >= MAX_CAMERAS) {
+ pr_err("failed: invalid cell_id %d", cell_id);
+ rc = -EINVAL;
+ goto FREE_SENSOR_DATA;
+ }
+
+ /* Check whether g_sctrl is already filled for this cell_id */
+ if (g_sctrl[cell_id]) {
+ pr_err("failed: sctrl already filled for cell_id %d", cell_id);
+ rc = -EINVAL;
+ goto FREE_SENSOR_DATA;
+ }
+
+ /* Read subdev info */
+ rc = msm_sensor_get_sub_module_index(of_node, &sensordata->sensor_info);
+ if (rc < 0) {
+ pr_err("failed");
+ goto FREE_SENSOR_DATA;
+ }
+
+ /* Read vreg information */
+ rc = msm_camera_get_dt_vreg_data(of_node,
+ &sensordata->power_info.cam_vreg,
+ &sensordata->power_info.num_vreg);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_get_dt_vreg_data rc %d", rc);
+ goto FREE_SUB_MODULE_DATA;
+ }
+
+ /* Read gpio information */
+ rc = msm_sensor_driver_get_gpio_data
+ (&(sensordata->power_info.gpio_conf), of_node);
+ if (rc < 0) {
+ pr_err("failed: msm_sensor_driver_get_gpio_data rc %d", rc);
+ goto FREE_VREG_DATA;
+ }
+
+ /* Get custom mode */
+ rc = of_property_read_u32(of_node, "qcom,secure",
+ &s_ctrl->is_secure);
+ CDBG("qcom,secure = %d, rc %d", s_ctrl->is_secure, rc);
+ if (rc < 0) {
+ /* Set default to non-secure mode */
+ s_ctrl->is_secure = 0;
+ rc = 0;
+ }
+
+ /* Get CCI master */
+ rc = of_property_read_u32(of_node, "qcom,cci-master",
+ &s_ctrl->cci_i2c_master);
+ CDBG("qcom,cci-master %d, rc %d", s_ctrl->cci_i2c_master, rc);
+ if (rc < 0) {
+ /* Set default master 0 */
+ s_ctrl->cci_i2c_master = MASTER_0;
+ rc = 0;
+ }
+
+ /* Get mount angle */
+ if (of_property_read_u32(of_node, "qcom,mount-angle",
+ &sensordata->sensor_info->sensor_mount_angle) < 0) {
+ /* Invalidate mount angle flag */
+ sensordata->sensor_info->is_mount_angle_valid = 0;
+ sensordata->sensor_info->sensor_mount_angle = 0;
+ } else {
+ sensordata->sensor_info->is_mount_angle_valid = 1;
+ }
+ CDBG("%s qcom,mount-angle %d\n", __func__,
+ sensordata->sensor_info->sensor_mount_angle);
+ if (of_property_read_u32(of_node, "qcom,sensor-position",
+ &sensordata->sensor_info->position) < 0) {
+ CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+ sensordata->sensor_info->position = INVALID_CAMERA_B;
+ }
+ if (of_property_read_u32(of_node, "qcom,sensor-mode",
+ &sensordata->sensor_info->modes_supported) < 0) {
+ CDBG("%s:%d Invalid sensor mode supported\n",
+ __func__, __LINE__);
+ sensordata->sensor_info->modes_supported = CAMERA_MODE_INVALID;
+ }
+ /* Get vdd-cx regulator */
+ /* Optional property, don't return error if absent */
+ of_property_read_string(of_node, "qcom,vdd-cx-name",
+ &sensordata->misc_regulator);
+ CDBG("qcom,misc_regulator %s", sensordata->misc_regulator);
+
+ s_ctrl->set_mclk_23880000 = of_property_read_bool(of_node,
+ "qcom,mclk-23880000");
+
+ CDBG("%s qcom,mclk-23880000 = %d\n", __func__,
+ s_ctrl->set_mclk_23880000);
+
+ return rc;
+
+FREE_VREG_DATA:
+ kfree(sensordata->power_info.cam_vreg);
+FREE_SUB_MODULE_DATA:
+ kfree(sensordata->sensor_info);
+FREE_SENSOR_DATA:
+ kfree(sensordata);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_parse(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter");
+
+ /* Allocate memory for sensor_i2c_client */
+ s_ctrl->sensor_i2c_client = kzalloc(sizeof(*s_ctrl->sensor_i2c_client),
+ GFP_KERNEL);
+ if (!s_ctrl->sensor_i2c_client)
+ return -ENOMEM;
+
+ /* Allocate memory for mutex */
+ s_ctrl->msm_sensor_mutex = kzalloc(sizeof(*s_ctrl->msm_sensor_mutex),
+ GFP_KERNEL);
+ if (!s_ctrl->msm_sensor_mutex) {
+ rc = -ENOMEM;
+ goto FREE_SENSOR_I2C_CLIENT;
+ }
+
+ /* Parse dt information and store in sensor control structure */
+ rc = msm_sensor_driver_get_dt_data(s_ctrl);
+ if (rc < 0) {
+ pr_err("failed: rc %d", rc);
+ goto FREE_MUTEX;
+ }
+
+ /* Initialize mutex */
+ mutex_init(s_ctrl->msm_sensor_mutex);
+
+ /* Initialize v4l2 subdev info */
+ s_ctrl->sensor_v4l2_subdev_info = msm_sensor_driver_subdev_info;
+ s_ctrl->sensor_v4l2_subdev_info_size =
+ ARRAY_SIZE(msm_sensor_driver_subdev_info);
+
+ /* Initialize default parameters */
+ rc = msm_sensor_init_default_params(s_ctrl);
+ if (rc < 0) {
+ pr_err("failed: msm_sensor_init_default_params rc %d", rc);
+ goto FREE_DT_DATA;
+ }
+
+ /* Initialize sensor init */
+ mutex_init(&s_ctrl->s_init.imutex);
+ init_waitqueue_head(&s_ctrl->s_init.state_wait);
+
+ /* Store sensor control structure in static database */
+ g_sctrl[s_ctrl->id] = s_ctrl;
+ CDBG("g_sctrl[%d] %pK", s_ctrl->id, g_sctrl[s_ctrl->id]);
+
+ return rc;
+
+FREE_DT_DATA:
+ kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info);
+ kfree(s_ctrl->sensordata->power_info.gpio_conf->cam_gpio_req_tbl);
+ kfree(s_ctrl->sensordata->power_info.gpio_conf);
+ kfree(s_ctrl->sensordata->power_info.cam_vreg);
+ kfree(s_ctrl->sensordata);
+FREE_MUTEX:
+ kfree(s_ctrl->msm_sensor_mutex);
+FREE_SENSOR_I2C_CLIENT:
+ kfree(s_ctrl->sensor_i2c_client);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl = NULL;
+
+ /* Create sensor control structure */
+ s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL);
+ if (!s_ctrl)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, s_ctrl);
+
+ /* Initialize sensor device type */
+ s_ctrl->sensor_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ s_ctrl->of_node = pdev->dev.of_node;
+
+ /* fill in platform device */
+ s_ctrl->pdev = pdev;
+
+ rc = msm_sensor_driver_parse(s_ctrl);
+ if (rc < 0) {
+ pr_err("failed: msm_sensor_driver_parse rc %d", rc);
+ goto FREE_S_CTRL;
+ }
+
+ /* Get clocks information */
+ rc = msm_camera_get_clk_info(s_ctrl->pdev,
+ &s_ctrl->sensordata->power_info.clk_info,
+ &s_ctrl->sensordata->power_info.clk_ptr,
+ &s_ctrl->sensordata->power_info.clk_info_size);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_get_clk_info rc %d", rc);
+ goto FREE_S_CTRL;
+ }
+
+ /* Fill platform device id*/
+ pdev->id = s_ctrl->id;
+
+ /* Fill device in power info */
+ s_ctrl->sensordata->power_info.dev = &pdev->dev;
+
+ /* Create sensor nodes */
+ if (s_ctrl->sensor_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ rc = msm_sensor_driver_create_v4l_subdev(s_ctrl);
+ else
+ rc = msm_sensor_driver_create_i2c_v4l_subdev(s_ctrl);
+ if (rc < 0) {
+ pr_err("failed: camera create v4l2 rc %d", rc);
+ goto FREE_S_CTRL;
+ }
+
+ return rc;
+FREE_S_CTRL:
+ kfree(s_ctrl);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int32_t rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl;
+
+ CDBG("\n\nEnter: msm_sensor_driver_i2c_probe");
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("%s %s i2c_check_functionality failed\n",
+ __func__, client->name);
+ rc = -EFAULT;
+ return rc;
+ }
+
+ /* Create sensor control structure */
+ s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL);
+ if (!s_ctrl)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, s_ctrl);
+
+ /* Initialize sensor device type */
+ s_ctrl->sensor_device_type = MSM_CAMERA_I2C_DEVICE;
+ s_ctrl->of_node = client->dev.of_node;
+
+ rc = msm_sensor_driver_parse(s_ctrl);
+ if (rc < 0) {
+ pr_err("failed: msm_sensor_driver_parse rc %d", rc);
+ goto FREE_S_CTRL;
+ }
+
+ if (s_ctrl->sensor_i2c_client != NULL) {
+ s_ctrl->sensor_i2c_client->client = client;
+ s_ctrl->sensordata->power_info.dev = &client->dev;
+
+ /* Get clocks information */
+ rc = msm_camera_i2c_dev_get_clk_info(
+ &s_ctrl->sensor_i2c_client->client->dev,
+ &s_ctrl->sensordata->power_info.clk_info,
+ &s_ctrl->sensordata->power_info.clk_ptr,
+ &s_ctrl->sensordata->power_info.clk_info_size);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_i2c_dev_get_clk_info rc %d",
+ rc);
+ goto FREE_S_CTRL;
+ }
+ }
+ return rc;
+FREE_S_CTRL:
+ kfree(s_ctrl);
+ return rc;
+}
+
+static int msm_sensor_driver_i2c_remove(struct i2c_client *client)
+{
+ struct msm_sensor_ctrl_t *s_ctrl = i2c_get_clientdata(client);
+
+ pr_err("%s: sensor FREE\n", __func__);
+
+ if (!s_ctrl) {
+ pr_err("%s: sensor device is NULL\n", __func__);
+ return 0;
+ }
+
+ g_sctrl[s_ctrl->id] = NULL;
+ msm_sensor_free_sensor_data(s_ctrl);
+ kfree(s_ctrl->msm_sensor_mutex);
+ kfree(s_ctrl->sensor_i2c_client);
+ kfree(s_ctrl);
+
+ return 0;
+}
+
+static const struct i2c_device_id i2c_id[] = {
+ {SENSOR_DRIVER_I2C, (kernel_ulong_t)NULL},
+ { }
+};
+
+static struct i2c_driver msm_sensor_driver_i2c = {
+ .id_table = i2c_id,
+ .probe = msm_sensor_driver_i2c_probe,
+ .remove = msm_sensor_driver_i2c_remove,
+ .driver = {
+ .name = SENSOR_DRIVER_I2C,
+ },
+};
+
+static int __init msm_sensor_driver_init(void)
+{
+ int32_t rc = 0;
+
+ CDBG("%s Enter\n", __func__);
+ rc = platform_driver_register(&msm_sensor_platform_driver);
+ if (rc)
+ pr_err("%s platform_driver_register failed rc = %d",
+ __func__, rc);
+ rc = i2c_add_driver(&msm_sensor_driver_i2c);
+ if (rc)
+ pr_err("%s i2c_add_driver failed rc = %d", __func__, rc);
+
+ return rc;
+}
+
+static void __exit msm_sensor_driver_exit(void)
+{
+ CDBG("Enter");
+ platform_driver_unregister(&msm_sensor_platform_driver);
+ i2c_del_driver(&msm_sensor_driver_i2c);
+}
+
+module_init(msm_sensor_driver_init);
+module_exit(msm_sensor_driver_exit);
+MODULE_DESCRIPTION("msm_sensor_driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.h b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.h
new file mode 100644
index 000000000000..43c4507d49bc
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SENSOR_DRIVER_H
+#define MSM_SENSOR_DRIVER_H
+
+#include "msm_sensor.h"
+
+int32_t msm_sensor_driver_probe(void *setting,
+ struct msm_sensor_info_t *probed_info, char *entity_name);
+
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c b/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c
new file mode 100644
index 000000000000..c3943be78226
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c
@@ -0,0 +1,114 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "MSM-SENSOR-INIT %s:%d " fmt "\n", __func__, __LINE__
+
+/* Header files */
+#include "msm_sensor_driver.h"
+#include "msm_sensor.h"
+#include "msm_sd.h"
+
+/* Logging macro */
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static struct msm_sensor_init_t *s_init;
+
+static int msm_sensor_wait_for_probe_done(struct msm_sensor_init_t *s_init)
+{
+ int rc;
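+ /* wait up to 10 seconds (value in milliseconds) for probe done */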
+ int tm = 10000;
+
+ if (s_init->module_init_status == 1) {
+ CDBG("msm_cam_get_module_init_status -2\n");
+ return 0;
+ }
+ rc = wait_event_timeout(s_init->state_wait,
+ (s_init->module_init_status == 1), msecs_to_jiffies(tm));
+ if (rc == 0) {
+ pr_err("%s:%d wait timeout\n", __func__, __LINE__);
+ rc = -1;
+ }
+
+ return rc;
+}
+
+/* Exported sensor-init command handler */
+int32_t msm_sensor_driver_cmd(struct msm_sensor_init_t *s_init, void *arg)
+{
+ int32_t rc = 0;
+ struct sensor_init_cfg_data *cfg = (struct sensor_init_cfg_data *)arg;
+
+ /* Validate input parameters */
+ if (!s_init || !cfg) {
+ pr_err("failed: s_init %pK cfg %pK", s_init, cfg);
+ return -EINVAL;
+ }
+
+ pr_debug("%s : %d", __func__, cfg->cfgtype);
+ switch (cfg->cfgtype) {
+ case CFG_SINIT_PROBE:
+ mutex_lock(&s_init->imutex);
+ s_init->module_init_status = 0;
+ rc = msm_sensor_driver_probe(cfg->cfg.setting,
+ &cfg->probed_info,
+ cfg->entity_name);
+ mutex_unlock(&s_init->imutex);
+ if (rc < 0)
+ pr_err("%s failed (non-fatal) rc %d", __func__, rc);
+ break;
+
+ case CFG_SINIT_PROBE_DONE:
+ s_init->module_init_status = 1;
+ wake_up(&s_init->state_wait);
+ break;
+
+ case CFG_SINIT_PROBE_WAIT_DONE:
+ rc = msm_sensor_wait_for_probe_done(s_init);
+ break;
+
+ default:
+ pr_err("default");
+ break;
+ }
+
+ return rc;
+}
+
+static int __init msm_sensor_init_module(void)
+{
+ int ret = 0;
+
+ /* Allocate memory for msm_sensor_init control structure */
+ s_init = kzalloc(sizeof(struct msm_sensor_init_t), GFP_KERNEL);
+ if (!s_init)
+ return -ENOMEM;
+
+ CDBG("MSM_SENSOR_INIT_MODULE %pK", NULL);
+
+ /* Initialize mutex */
+ mutex_init(&s_init->imutex);
+
+ init_waitqueue_head(&s_init->state_wait);
+ return ret;
+}
+
+static void __exit msm_sensor_exit_module(void)
+{
+ mutex_destroy(&s_init->imutex);
+ kfree(s_init);
+}
+
+module_init(msm_sensor_init_module);
+module_exit(msm_sensor_exit_module);
+MODULE_DESCRIPTION("msm_sensor_init");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_init.h b/drivers/media/platform/msm/ais/sensor/msm_sensor_init.h
new file mode 100644
index 000000000000..925387dc975e
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_init.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SENSOR_INIT_H
+#define MSM_SENSOR_INIT_H
+
+struct msm_sensor_init_t {
+ struct mutex imutex;
+ struct msm_sd_subdev msm_sd;
+ int module_init_status;
+ wait_queue_head_t state_wait;
+};
+
+int32_t msm_sensor_driver_cmd(struct msm_sensor_init_t *s_init,
+ void *arg);
+
+#endif
diff --git a/drivers/media/platform/msm/ais/sensor/ois/Makefile b/drivers/media/platform/msm/ais/sensor/ois/Makefile
new file mode 100644
index 000000000000..9e08ea0f3c8d
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/ois/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+obj-$(CONFIG_MSM_AIS) += msm_ois.o
diff --git a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
new file mode 100644
index 000000000000..f3147b127438
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
@@ -0,0 +1,1015 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include "msm_sd.h"
+#include "msm_ois.h"
+#include "msm_cci.h"
+
+DEFINE_MSM_MUTEX(msm_ois_mutex);
+/* #define MSM_OIS_DEBUG */
+#undef CDBG
+#ifdef MSM_OIS_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+static struct v4l2_file_operations msm_ois_v4l2_subdev_fops;
+static int32_t msm_ois_power_up(struct msm_ois_ctrl_t *o_ctrl);
+static int32_t msm_ois_power_down(struct msm_ois_ctrl_t *o_ctrl);
+
+static struct i2c_driver msm_ois_i2c_driver;
+
+static int32_t msm_ois_download(struct msm_ois_ctrl_t *o_ctrl)
+{
+ uint16_t bytes_in_tx = 0;
+ uint16_t total_bytes = 0;
+ uint8_t *ptr = NULL;
+ int32_t rc = 0;
+ const struct firmware *fw = NULL;
+ const char *fw_name_prog = NULL;
+ const char *fw_name_coeff = NULL;
+ char name_prog[MAX_SENSOR_NAME] = {0};
+ char name_coeff[MAX_SENSOR_NAME] = {0};
+ struct device *dev = &(o_ctrl->pdev->dev);
+ enum msm_camera_i2c_reg_addr_type save_addr_type;
+
+ CDBG("Enter\n");
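+ /* Firmware download uses byte register addressing; save the current
+  * addr_type and restore it before returning
+  */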
+ save_addr_type = o_ctrl->i2c_client.addr_type;
+ o_ctrl->i2c_client.addr_type = MSM_CAMERA_I2C_BYTE_ADDR;
+
+ snprintf(name_coeff, MAX_SENSOR_NAME, "%s.coeff",
+ o_ctrl->oboard_info->ois_name);
+
+ snprintf(name_prog, MAX_SENSOR_NAME, "%s.prog",
+ o_ctrl->oboard_info->ois_name);
+
+ /* cast pointers as const pointers */
+ fw_name_prog = name_prog;
+ fw_name_coeff = name_coeff;
+
+ /* Load FW */
+ rc = request_firmware(&fw, fw_name_prog, dev);
+ if (rc) {
+ dev_err(dev, "Failed to locate %s\n", fw_name_prog);
+ o_ctrl->i2c_client.addr_type = save_addr_type;
+ return rc;
+ }
+
+ total_bytes = fw->size;
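+ /* Stream the .prog image to the OIS controller in chunks of at most
+  * 10 bytes per sequential I2C write
+  */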
+ for (ptr = (uint8_t *)fw->data; total_bytes;
+ total_bytes -= bytes_in_tx, ptr += bytes_in_tx) {
+ bytes_in_tx = (total_bytes > 10) ? 10 : total_bytes;
+ rc = o_ctrl->i2c_client.i2c_func_tbl->i2c_write_seq(
+ &o_ctrl->i2c_client, o_ctrl->oboard_info->opcode.prog,
+ ptr, bytes_in_tx);
+ if (rc < 0) {
+ pr_err("Failed: remaining bytes to be downloaded: %d",
+ bytes_in_tx);
+ /* abort download fw and return error*/
+ goto release_firmware;
+ }
+ }
+ release_firmware(fw);
+
+ rc = request_firmware(&fw, fw_name_coeff, dev);
+ if (rc) {
+ dev_err(dev, "Failed to locate %s\n", fw_name_coeff);
+ o_ctrl->i2c_client.addr_type = save_addr_type;
+ return rc;
+ }
+ total_bytes = fw->size;
+ for (ptr = (uint8_t *)fw->data; total_bytes;
+ total_bytes -= bytes_in_tx, ptr += bytes_in_tx) {
+ bytes_in_tx = (total_bytes > 10) ? 10 : total_bytes;
+ rc = o_ctrl->i2c_client.i2c_func_tbl->i2c_write_seq(
+ &o_ctrl->i2c_client, o_ctrl->oboard_info->opcode.coeff,
+ ptr, bytes_in_tx);
+ if (rc < 0) {
+ pr_err("Failed: remaining bytes to be downloaded: %d",
+ total_bytes);
+ /* abort the fw download */
+ break;
+ }
+ }
+release_firmware:
+ release_firmware(fw);
+ o_ctrl->i2c_client.addr_type = save_addr_type;
+
+ return rc;
+}
+
+static int32_t msm_ois_data_config(struct msm_ois_ctrl_t *o_ctrl,
+ struct msm_ois_slave_info *slave_info)
+{
+ int rc = 0;
+ struct msm_camera_cci_client *cci_client = NULL;
+
+ CDBG("Enter\n");
+ if (!slave_info) {
+ pr_err("failed : invalid slave_info ");
+ return -EINVAL;
+ }
+ /* fill ois slave info*/
+ if (strlcpy(o_ctrl->oboard_info->ois_name, slave_info->ois_name,
+ sizeof(o_ctrl->oboard_info->ois_name)) >=
+ sizeof(o_ctrl->oboard_info->ois_name)) {
+ pr_err("failed: ois_name truncated");
+ return -EINVAL;
+ }
+ memcpy(&(o_ctrl->oboard_info->opcode), &(slave_info->opcode),
+ sizeof(struct msm_ois_opcode));
+ o_ctrl->oboard_info->i2c_slaveaddr = slave_info->i2c_addr;
+
+ /* config cci_client*/
+ if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ cci_client = o_ctrl->i2c_client.cci_client;
+ cci_client->sid =
+ o_ctrl->oboard_info->i2c_slaveaddr >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->cci_i2c_master = o_ctrl->cci_master;
+ } else {
+ o_ctrl->i2c_client.client->addr =
+ o_ctrl->oboard_info->i2c_slaveaddr;
+ }
+ o_ctrl->i2c_client.addr_type = MSM_CAMERA_I2C_WORD_ADDR;
+
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_ois_write_settings(struct msm_ois_ctrl_t *o_ctrl,
+ uint16_t size, struct reg_settings_ois_t *settings)
+{
+ int32_t rc = -EFAULT;
+ int32_t i = 0;
+ struct msm_camera_i2c_seq_reg_array *reg_setting;
+
+ CDBG("Enter\n");
+
+ for (i = 0; i < size; i++) {
+ switch (settings[i].i2c_operation) {
+ case MSM_OIS_WRITE: {
+ switch (settings[i].data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ rc = o_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &o_ctrl->i2c_client,
+ settings[i].reg_addr,
+ settings[i].reg_data,
+ settings[i].data_type);
+ break;
+ case MSM_CAMERA_I2C_DWORD_DATA:
+ reg_setting =
+ kzalloc(sizeof(struct msm_camera_i2c_seq_reg_array),
+ GFP_KERNEL);
+ if (!reg_setting)
+ return -ENOMEM;
+
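+ /* Split the 32-bit value into four bytes, MSB first, and send them
+  * as a single sequential write
+  */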
+ reg_setting->reg_addr = settings[i].reg_addr;
+ reg_setting->reg_data[0] = (uint8_t)
+ ((settings[i].reg_data &
+ 0xFF000000) >> 24);
+ reg_setting->reg_data[1] = (uint8_t)
+ ((settings[i].reg_data &
+ 0x00FF0000) >> 16);
+ reg_setting->reg_data[2] = (uint8_t)
+ ((settings[i].reg_data &
+ 0x0000FF00) >> 8);
+ reg_setting->reg_data[3] = (uint8_t)
+ (settings[i].reg_data & 0x000000FF);
+ reg_setting->reg_data_size = 4;
+ rc = o_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_seq(&o_ctrl->i2c_client,
+ reg_setting->reg_addr,
+ reg_setting->reg_data,
+ reg_setting->reg_data_size);
+ kfree(reg_setting);
+ reg_setting = NULL;
+ if (rc < 0)
+ return rc;
+ break;
+
+ default:
+ pr_err("Unsupport data type: %d\n",
+ settings[i].data_type);
+ break;
+ }
+ if (settings[i].delay > 20)
+ msleep(settings[i].delay);
+ else if (settings[i].delay != 0)
+ usleep_range(settings[i].delay * 1000,
+ (settings[i].delay * 1000) + 1000);
+ }
+ break;
+
+ case MSM_OIS_POLL: {
+ switch (settings[i].data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+
+ rc = o_ctrl->i2c_client.i2c_func_tbl
+ ->i2c_poll(&o_ctrl->i2c_client,
+ settings[i].reg_addr,
+ settings[i].reg_data,
+ settings[i].data_type,
+ settings[i].delay);
+ break;
+
+ default:
+ pr_err("Unsupport data type: %d\n",
+ settings[i].data_type);
+ break;
+ }
+ }
+ }
+
+ if (rc < 0)
+ break;
+ }
+
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_ois_vreg_control(struct msm_ois_ctrl_t *o_ctrl,
+ int config)
+{
+ int rc = 0, i, cnt;
+ struct msm_ois_vreg *vreg_cfg;
+
+ vreg_cfg = &o_ctrl->vreg_cfg;
+ cnt = vreg_cfg->num_vreg;
+ if (!cnt)
+ return 0;
+
+ if (cnt >= MSM_OIS_MAX_VREGS) {
+ pr_err("%s failed %d cnt %d\n", __func__, __LINE__, cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ rc = msm_camera_config_single_vreg(&(o_ctrl->pdev->dev),
+ &vreg_cfg->cam_vreg[i],
+ (struct regulator **)&vreg_cfg->data[i],
+ config);
+ }
+ return rc;
+}
+
+static int32_t msm_ois_power_down(struct msm_ois_ctrl_t *o_ctrl)
+{
+ int32_t rc = 0;
+ enum msm_sensor_power_seq_gpio_t gpio;
+
+ CDBG("Enter\n");
+ if (o_ctrl->ois_state != OIS_DISABLE_STATE) {
+
+ rc = msm_ois_vreg_control(o_ctrl, 0);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+
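+ /* Drive every valid OIS GPIO low, move pinctrl to its suspend
+  * state and release the GPIO table
+  */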
+ for (gpio = SENSOR_GPIO_AF_PWDM; gpio < SENSOR_GPIO_MAX;
+ gpio++) {
+ if (o_ctrl->gconf &&
+ o_ctrl->gconf->gpio_num_info &&
+ o_ctrl->gconf->
+ gpio_num_info->valid[gpio] == 1) {
+ gpio_set_value_cansleep(
+ o_ctrl->gconf->gpio_num_info
+ ->gpio_num[gpio],
+ GPIOF_OUT_INIT_LOW);
+
+ if (o_ctrl->cam_pinctrl_status) {
+ rc = pinctrl_select_state(
+ o_ctrl->pinctrl_info.pinctrl,
+ o_ctrl->pinctrl_info.
+ gpio_state_suspend);
+ if (rc < 0)
+ pr_err("ERR:%s:%d cannot set pin to suspend state: %d",
+ __func__, __LINE__, rc);
+ devm_pinctrl_put(
+ o_ctrl->pinctrl_info.pinctrl);
+ }
+ o_ctrl->cam_pinctrl_status = 0;
+ rc = msm_camera_request_gpio_table(
+ o_ctrl->gconf->cam_gpio_req_tbl,
+ o_ctrl->gconf->cam_gpio_req_tbl_size,
+ 0);
+ if (rc < 0)
+ pr_err("ERR:%s:Failed in selecting state in ois power down: %d\n",
+ __func__, rc);
+ }
+ }
+
+ o_ctrl->i2c_tbl_index = 0;
+ o_ctrl->ois_state = OIS_OPS_INACTIVE;
+ }
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int msm_ois_init(struct msm_ois_ctrl_t *o_ctrl)
+{
+ int rc = 0;
+
+ CDBG("Enter\n");
+
+ if (!o_ctrl) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+
+ if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ rc = o_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &o_ctrl->i2c_client, MSM_CCI_INIT);
+ if (rc < 0)
+ pr_err("cci_init failed\n");
+ }
+ o_ctrl->ois_state = OIS_OPS_ACTIVE;
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl,
+ struct msm_ois_set_info_t *set_info)
+{
+ struct reg_settings_ois_t *settings = NULL;
+ int32_t rc = 0;
+ struct msm_camera_cci_client *cci_client = NULL;
+
+ CDBG("Enter\n");
+
+ if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ cci_client = o_ctrl->i2c_client.cci_client;
+ cci_client->sid =
+ set_info->ois_params.i2c_addr >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->cci_i2c_master = o_ctrl->cci_master;
+ cci_client->i2c_freq_mode = set_info->ois_params.i2c_freq_mode;
+ } else {
+ o_ctrl->i2c_client.client->addr =
+ set_info->ois_params.i2c_addr;
+ }
+ o_ctrl->i2c_client.addr_type = MSM_CAMERA_I2C_WORD_ADDR;
+
+
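+ /* Copy the user-supplied init settings into kernel memory and
+  * write them to the OIS
+  */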
+ if (set_info->ois_params.setting_size > 0 &&
+ set_info->ois_params.setting_size
+ < MAX_OIS_REG_SETTINGS) {
+ settings = kmalloc(
+ sizeof(struct reg_settings_ois_t) *
+ (set_info->ois_params.setting_size),
+ GFP_KERNEL);
+ if (settings == NULL) {
+ pr_err("Error allocating memory\n");
+ return -EFAULT;
+ }
+ if (copy_from_user(settings,
+ (void *)set_info->ois_params.settings,
+ set_info->ois_params.setting_size *
+ sizeof(struct reg_settings_ois_t))) {
+ kfree(settings);
+ pr_err("Error copying\n");
+ return -EFAULT;
+ }
+
+ rc = msm_ois_write_settings(o_ctrl,
+ set_info->ois_params.setting_size,
+ settings);
+ kfree(settings);
+ if (rc < 0) {
+ pr_err("Error\n");
+ return -EFAULT;
+ }
+ }
+
+ CDBG("Exit\n");
+
+ return rc;
+}
+
+
+static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl,
+ void __user *argp)
+{
+ struct msm_ois_cfg_data *cdata =
+ (struct msm_ois_cfg_data *)argp;
+ int32_t rc = 0;
+
+ mutex_lock(o_ctrl->ois_mutex);
+ CDBG("Enter\n");
+ CDBG("%s type %d\n", __func__, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CFG_OIS_INIT:
+ rc = msm_ois_init(o_ctrl);
+ if (rc < 0)
+ pr_err("msm_ois_init failed %d\n", rc);
+ break;
+ case CFG_OIS_POWERDOWN:
+ rc = msm_ois_power_down(o_ctrl);
+ if (rc < 0)
+ pr_err("msm_ois_power_down failed %d\n", rc);
+ break;
+ case CFG_OIS_POWERUP:
+ rc = msm_ois_power_up(o_ctrl);
+ if (rc < 0)
+ pr_err("Failed ois power up%d\n", rc);
+ break;
+ case CFG_OIS_CONTROL:
+ rc = msm_ois_control(o_ctrl, &cdata->cfg.set_info);
+ if (rc < 0)
+ pr_err("Failed ois control%d\n", rc);
+ break;
+ case CFG_OIS_I2C_WRITE_SEQ_TABLE: {
+ struct msm_camera_i2c_seq_reg_setting conf_array;
+ struct msm_camera_i2c_seq_reg_array *reg_setting = NULL;
+
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ memcpy(&conf_array,
+ (void *)cdata->cfg.settings,
+ sizeof(struct msm_camera_i2c_seq_reg_setting));
+ } else
+#endif
+ if (copy_from_user(&conf_array,
+ (void *)cdata->cfg.settings,
+ sizeof(struct msm_camera_i2c_seq_reg_setting))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_SEQ_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_seq_reg_array)),
+ GFP_KERNEL);
+ if (!reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_seq_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.reg_setting = reg_setting;
+ rc = o_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_seq_table(&o_ctrl->i2c_client,
+ &conf_array);
+ kfree(reg_setting);
+ break;
+ }
+ default:
+ break;
+ }
+ mutex_unlock(o_ctrl->ois_mutex);
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_ois_config_download(struct msm_ois_ctrl_t *o_ctrl,
+ void __user *argp)
+{
+ struct msm_ois_cfg_download_data *cdata =
+ (struct msm_ois_cfg_download_data *)argp;
+ int32_t rc = 0;
+
+ if (!o_ctrl || !cdata) {
+ pr_err("failed: Invalid data\n");
+ return -EINVAL;
+ }
+ mutex_lock(o_ctrl->ois_mutex);
+ CDBG("Enter\n");
+ CDBG("%s type %d\n", __func__, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CFG_OIS_DATA_CONFIG:
+ rc = msm_ois_data_config(o_ctrl, &cdata->slave_info);
+ if (rc < 0)
+ pr_err("Failed ois data config %d\n", rc);
+ break;
+ case CFG_OIS_DOWNLOAD:
+ rc = msm_ois_download(o_ctrl);
+ if (rc < 0)
+ pr_err("Failed ois download %d\n", rc);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(o_ctrl->ois_mutex);
+ CDBG("Exit\n");
+ return rc;
+}
+
+
+static int32_t msm_ois_get_subdev_id(struct msm_ois_ctrl_t *o_ctrl,
+ void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+
+ CDBG("Enter\n");
+ if (!subdev_id) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ *subdev_id = o_ctrl->pdev->id;
+ else
+ *subdev_id = o_ctrl->subdev_id;
+
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("Exit\n");
+ return 0;
+}
+
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq = msm_camera_cci_i2c_write_seq,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_poll = msm_camera_cci_i2c_poll,
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_qup_func_tbl = {
+ .i2c_read = msm_camera_qup_i2c_read,
+ .i2c_read_seq = msm_camera_qup_i2c_read_seq,
+ .i2c_write = msm_camera_qup_i2c_write,
+ .i2c_write_table = msm_camera_qup_i2c_write_table,
+ .i2c_write_seq = msm_camera_qup_i2c_write_seq,
+ .i2c_write_seq_table = msm_camera_qup_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_qup_i2c_write_table_w_microdelay,
+ .i2c_poll = msm_camera_qup_i2c_poll,
+};
+
+static int msm_ois_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh) {
+ int rc = 0;
+ struct msm_ois_ctrl_t *o_ctrl = v4l2_get_subdevdata(sd);
+
+ CDBG("Enter\n");
+ if (!o_ctrl) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ mutex_lock(o_ctrl->ois_mutex);
+ if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE &&
+ o_ctrl->ois_state != OIS_DISABLE_STATE) {
+ rc = o_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &o_ctrl->i2c_client, MSM_CCI_RELEASE);
+ if (rc < 0)
+ pr_err("cci_init failed\n");
+ }
+ o_ctrl->ois_state = OIS_DISABLE_STATE;
+ mutex_unlock(o_ctrl->ois_mutex);
+ CDBG("Exit\n");
+ return rc;
+}
+
+static const struct v4l2_subdev_internal_ops msm_ois_internal_ops = {
+ .close = msm_ois_close,
+};
+
+static long msm_ois_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc;
+ struct msm_ois_ctrl_t *o_ctrl = v4l2_get_subdevdata(sd);
+ void __user *argp = (void __user *)arg;
+
+ CDBG("Enter\n");
+ CDBG("%s:%d o_ctrl %pK argp %pK\n", __func__, __LINE__, o_ctrl, argp);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_ois_get_subdev_id(o_ctrl, argp);
+ case VIDIOC_MSM_OIS_CFG:
+ return msm_ois_config(o_ctrl, argp);
+ case VIDIOC_MSM_OIS_CFG_DOWNLOAD:
+ return msm_ois_config_download(o_ctrl, argp);
+ case MSM_SD_SHUTDOWN:
+ if (!o_ctrl->i2c_client.i2c_func_tbl) {
+ pr_err("o_ctrl->i2c_client.i2c_func_tbl NULL\n");
+ return -EINVAL;
+ }
+ rc = msm_ois_power_down(o_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d OIS Power down failed\n",
+ __func__, __LINE__);
+ }
+ return msm_ois_close(sd, NULL);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static int32_t msm_ois_power_up(struct msm_ois_ctrl_t *o_ctrl)
+{
+ int rc = 0;
+ enum msm_sensor_power_seq_gpio_t gpio;
+
+ CDBG("%s called\n", __func__);
+
+ rc = msm_ois_vreg_control(o_ctrl, 1);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+
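+ /* Request the GPIO table, switch pinctrl to its active state and
+  * drive each valid OIS GPIO high
+  */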
+ for (gpio = SENSOR_GPIO_AF_PWDM;
+ gpio < SENSOR_GPIO_MAX; gpio++) {
+ if (o_ctrl->gconf && o_ctrl->gconf->gpio_num_info &&
+ o_ctrl->gconf->gpio_num_info->valid[gpio] == 1) {
+ rc = msm_camera_request_gpio_table(
+ o_ctrl->gconf->cam_gpio_req_tbl,
+ o_ctrl->gconf->cam_gpio_req_tbl_size, 1);
+ if (rc < 0) {
+ pr_err("ERR:%s:Failed in selecting state for ois: %d\n",
+ __func__, rc);
+ return rc;
+ }
+ if (o_ctrl->cam_pinctrl_status) {
+ rc = pinctrl_select_state(
+ o_ctrl->pinctrl_info.pinctrl,
+ o_ctrl->pinctrl_info.gpio_state_active);
+ if (rc < 0)
+ pr_err("ERR:%s:%d cannot set pin to active state: %d",
+ __func__, __LINE__, rc);
+ }
+
+ gpio_set_value_cansleep(
+ o_ctrl->gconf->gpio_num_info->gpio_num[gpio],
+ 1);
+ }
+ }
+
+ o_ctrl->ois_state = OIS_ENABLE_STATE;
+ CDBG("Exit\n");
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_ois_subdev_core_ops = {
+ .ioctl = msm_ois_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_ois_subdev_ops = {
+ .core = &msm_ois_subdev_core_ops,
+};
+
+static const struct i2c_device_id msm_ois_i2c_id[] = {
+ {"qcom,ois", (kernel_ulong_t)NULL},
+ { }
+};
+
+static int32_t msm_ois_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ struct msm_ois_ctrl_t *ois_ctrl_t = NULL;
+
+ CDBG("Enter\n");
+
+ if (client == NULL) {
+ pr_err("msm_ois_i2c_probe: client is null\n");
+ return -EINVAL;
+ }
+
+ ois_ctrl_t = kzalloc(sizeof(struct msm_ois_ctrl_t),
+ GFP_KERNEL);
+ if (!ois_ctrl_t)
+ return -ENOMEM;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("i2c_check_functionality failed\n");
+ rc = -EINVAL;
+ goto probe_failure;
+ }
+
+ CDBG("client = 0x%pK\n", client);
+
+ rc = of_property_read_u32(client->dev.of_node, "cell-index",
+ &ois_ctrl_t->subdev_id);
+ CDBG("cell-index %d, rc %d\n", ois_ctrl_t->subdev_id, rc);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto probe_failure;
+ }
+
+ ois_ctrl_t->i2c_driver = &msm_ois_i2c_driver;
+ ois_ctrl_t->i2c_client.client = client;
+ /* Set device type as I2C */
+ ois_ctrl_t->ois_device_type = MSM_CAMERA_I2C_DEVICE;
+ ois_ctrl_t->i2c_client.i2c_func_tbl = &msm_sensor_qup_func_tbl;
+ ois_ctrl_t->ois_v4l2_subdev_ops = &msm_ois_subdev_ops;
+ ois_ctrl_t->ois_mutex = &msm_ois_mutex;
+
+ /* Assign name for sub device */
+ snprintf(ois_ctrl_t->msm_sd.sd.name, sizeof(ois_ctrl_t->msm_sd.sd.name),
+ "%s", ois_ctrl_t->i2c_driver->driver.name);
+
+ /* Initialize sub device */
+ v4l2_i2c_subdev_init(&ois_ctrl_t->msm_sd.sd,
+ ois_ctrl_t->i2c_client.client,
+ ois_ctrl_t->ois_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&ois_ctrl_t->msm_sd.sd, ois_ctrl_t);
+ ois_ctrl_t->msm_sd.sd.internal_ops = &msm_ois_internal_ops;
+ ois_ctrl_t->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&ois_ctrl_t->msm_sd.sd.entity, 0, NULL, 0);
+ ois_ctrl_t->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ois_ctrl_t->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_OIS;
+ ois_ctrl_t->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2;
+ msm_sd_register(&ois_ctrl_t->msm_sd);
+ ois_ctrl_t->ois_state = OIS_DISABLE_STATE;
+ pr_info("msm_ois_i2c_probe: succeeded\n");
+ CDBG("Exit\n");
+
+probe_failure:
+ kfree(ois_ctrl_t);
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_ois_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ long rc = 0;
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ struct msm_ois_cfg_data32 *u32;
+ struct msm_ois_cfg_data ois_data;
+ void *parg;
+ struct msm_camera_i2c_seq_reg_setting settings;
+ struct msm_camera_i2c_seq_reg_setting32 settings32;
+
+ if (!file || !arg) {
+ pr_err("%s:failed NULL parameter\n", __func__);
+ return -EINVAL;
+ }
+ vdev = video_devdata(file);
+ sd = vdev_to_v4l2_subdev(vdev);
+ u32 = (struct msm_ois_cfg_data32 *)arg;
+ parg = arg;
+
+ ois_data.cfgtype = u32->cfgtype;
+
+ switch (cmd) {
+ case VIDIOC_MSM_OIS_CFG32:
+ cmd = VIDIOC_MSM_OIS_CFG;
+
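+ /* Convert the 32-bit user-space structures to their native layout
+  * before forwarding the ioctl
+  */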
+ switch (u32->cfgtype) {
+ case CFG_OIS_CONTROL:
+ ois_data.cfg.set_info.ois_params.setting_size =
+ u32->cfg.set_info.ois_params.setting_size;
+ ois_data.cfg.set_info.ois_params.i2c_addr =
+ u32->cfg.set_info.ois_params.i2c_addr;
+ ois_data.cfg.set_info.ois_params.i2c_freq_mode =
+ u32->cfg.set_info.ois_params.i2c_freq_mode;
+ ois_data.cfg.set_info.ois_params.i2c_addr_type =
+ u32->cfg.set_info.ois_params.i2c_addr_type;
+ ois_data.cfg.set_info.ois_params.i2c_data_type =
+ u32->cfg.set_info.ois_params.i2c_data_type;
+ ois_data.cfg.set_info.ois_params.settings =
+ compat_ptr(u32->cfg.set_info.ois_params.
+ settings);
+ parg = &ois_data;
+ break;
+ case CFG_OIS_I2C_WRITE_SEQ_TABLE:
+ if (copy_from_user(&settings32,
+ (void *)compat_ptr(u32->cfg.settings),
+ sizeof(
+ struct msm_camera_i2c_seq_reg_setting32))) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ settings.addr_type = settings32.addr_type;
+ settings.delay = settings32.delay;
+ settings.size = settings32.size;
+ settings.reg_setting =
+ compat_ptr(settings32.reg_setting);
+
+ ois_data.cfgtype = u32->cfgtype;
+ ois_data.cfg.settings = &settings;
+ parg = &ois_data;
+ break;
+ default:
+ parg = &ois_data;
+ break;
+ }
+ }
+ rc = msm_ois_subdev_ioctl(sd, cmd, parg);
+
+ return rc;
+}
+
+static long msm_ois_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_ois_subdev_do_ioctl);
+}
+#endif
+
+static int32_t msm_ois_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_camera_cci_client *cci_client = NULL;
+ struct msm_ois_ctrl_t *msm_ois_t = NULL;
+ struct msm_ois_vreg *vreg_cfg;
+
+ CDBG("Enter\n");
+
+ if (!pdev->dev.of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ msm_ois_t = kzalloc(sizeof(struct msm_ois_ctrl_t),
+ GFP_KERNEL);
+ if (!msm_ois_t)
+ return -ENOMEM;
+
+ msm_ois_t->oboard_info = kzalloc(sizeof(
+ struct msm_ois_board_info), GFP_KERNEL);
+ if (!msm_ois_t->oboard_info) {
+ kfree(msm_ois_t);
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32((&pdev->dev)->of_node, "cell-index",
+ &pdev->id);
+ CDBG("cell-index %d, rc %d\n", pdev->id, rc);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto release_memory;
+ }
+
+ rc = of_property_read_u32((&pdev->dev)->of_node, "qcom,cci-master",
+ &msm_ois_t->cci_master);
+ CDBG("qcom,cci-master %d, rc %d\n", msm_ois_t->cci_master, rc);
+ if (rc < 0 || msm_ois_t->cci_master >= MASTER_MAX) {
+ pr_err("failed rc %d\n", rc);
+ goto release_memory;
+ }
+
+ if (of_find_property((&pdev->dev)->of_node,
+ "qcom,cam-vreg-name", NULL)) {
+ vreg_cfg = &msm_ois_t->vreg_cfg;
+ rc = msm_camera_get_dt_vreg_data((&pdev->dev)->of_node,
+ &vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto release_memory;
+ }
+ }
+
+ rc = msm_sensor_driver_get_gpio_data(&(msm_ois_t->gconf),
+ (&pdev->dev)->of_node);
+ if (-ENODEV == rc) {
+ pr_notice("No valid OIS GPIO data\n");
+ } else if (rc < 0) {
+ pr_err("Error OIS GPIO\n");
+ } else {
+ msm_ois_t->cam_pinctrl_status = 1;
+ rc = msm_camera_pinctrl_init(
+ &(msm_ois_t->pinctrl_info), &(pdev->dev));
+ if (rc < 0) {
+ pr_err("ERR: Error in reading OIS pinctrl\n");
+ msm_ois_t->cam_pinctrl_status = 0;
+ }
+ }
+
+ msm_ois_t->ois_v4l2_subdev_ops = &msm_ois_subdev_ops;
+ msm_ois_t->ois_mutex = &msm_ois_mutex;
+
+ /* Set platform device handle */
+ msm_ois_t->pdev = pdev;
+ /* Set device type as platform device */
+ msm_ois_t->ois_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ msm_ois_t->i2c_client.i2c_func_tbl = &msm_sensor_cci_func_tbl;
+ msm_ois_t->i2c_client.cci_client = kzalloc(sizeof(
+ struct msm_camera_cci_client), GFP_KERNEL);
+ if (!msm_ois_t->i2c_client.cci_client) {
+ kfree(msm_ois_t->vreg_cfg.cam_vreg);
+ rc = -ENOMEM;
+ goto release_memory;
+ }
+
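+ /* Hook the I2C client to the CCI master parsed from DT, then
+  * initialize and register the v4l2 subdev
+  */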
+ cci_client = msm_ois_t->i2c_client.cci_client;
+ cci_client->cci_subdev = msm_cci_get_subdev();
+ cci_client->cci_i2c_master = msm_ois_t->cci_master;
+ v4l2_subdev_init(&msm_ois_t->msm_sd.sd,
+ msm_ois_t->ois_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&msm_ois_t->msm_sd.sd, msm_ois_t);
+ msm_ois_t->msm_sd.sd.internal_ops = &msm_ois_internal_ops;
+ msm_ois_t->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(msm_ois_t->msm_sd.sd.name,
+ ARRAY_SIZE(msm_ois_t->msm_sd.sd.name), "msm_ois");
+ media_entity_init(&msm_ois_t->msm_sd.sd.entity, 0, NULL, 0);
+ msm_ois_t->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ msm_ois_t->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_OIS;
+ msm_ois_t->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2;
+ msm_sd_register(&msm_ois_t->msm_sd);
+ msm_ois_t->ois_state = OIS_DISABLE_STATE;
+ msm_cam_copy_v4l2_subdev_fops(&msm_ois_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_ois_v4l2_subdev_fops.compat_ioctl32 =
+ msm_ois_subdev_fops_ioctl;
+#endif
+ msm_ois_t->msm_sd.sd.devnode->fops =
+ &msm_ois_v4l2_subdev_fops;
+
+ CDBG("Exit\n");
+ return rc;
+release_memory:
+ kfree(msm_ois_t->oboard_info);
+ kfree(msm_ois_t->gconf);
+ kfree(msm_ois_t);
+ return rc;
+}
+
+static const struct of_device_id msm_ois_i2c_dt_match[] = {
+ {.compatible = "qcom,ois"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_ois_i2c_dt_match);
+
+static struct i2c_driver msm_ois_i2c_driver = {
+ .id_table = msm_ois_i2c_id,
+ .probe = msm_ois_i2c_probe,
+ .remove = __exit_p(msm_ois_i2c_remove),
+ .driver = {
+ .name = "qcom,ois",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ois_i2c_dt_match,
+ },
+};
+
+static const struct of_device_id msm_ois_dt_match[] = {
+ {.compatible = "qcom,ois", .data = NULL},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_ois_dt_match);
+
+static struct platform_driver msm_ois_platform_driver = {
+ .probe = msm_ois_platform_probe,
+ .driver = {
+ .name = "qcom,ois",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ois_dt_match,
+ },
+};
+
+static int __init msm_ois_init_module(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
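+ /* Try the platform (CCI) driver first; fall back to the plain I2C
+  * driver if that registration fails
+  */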
+ rc = platform_driver_register(&msm_ois_platform_driver);
+ if (!rc)
+ return rc;
+ CDBG("%s:%d rc %d\n", __func__, __LINE__, rc);
+ return i2c_add_driver(&msm_ois_i2c_driver);
+}
+
+static void __exit msm_ois_exit_module(void)
+{
+ platform_driver_unregister(&msm_ois_platform_driver);
+ i2c_del_driver(&msm_ois_i2c_driver);
+}
+
+module_init(msm_ois_init_module);
+module_exit(msm_ois_exit_module);
+MODULE_DESCRIPTION("MSM OIS");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.h b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.h
new file mode 100644
index 000000000000..4b269e5f29f8
--- /dev/null
+++ b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_OIS_H
+#define MSM_OIS_H
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <soc/qcom/ais.h>
+#include <media/v4l2-subdev.h>
+#include <media/ais/msm_ais.h>
+#include "msm_camera_i2c.h"
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+#define MSM_OIS_MAX_VREGS (10)
+
+struct msm_ois_ctrl_t;
+
+enum msm_ois_state_t {
+ OIS_ENABLE_STATE,
+ OIS_OPS_ACTIVE,
+ OIS_OPS_INACTIVE,
+ OIS_DISABLE_STATE,
+};
+
+struct msm_ois_vreg {
+ struct camera_vreg_t *cam_vreg;
+ void *data[MSM_OIS_MAX_VREGS];
+ int num_vreg;
+};
+
+struct msm_ois_board_info {
+ char ois_name[MAX_OIS_NAME_SIZE];
+ uint32_t i2c_slaveaddr;
+ struct msm_ois_opcode opcode;
+};
+
+struct msm_ois_ctrl_t {
+ struct i2c_driver *i2c_driver;
+ struct platform_driver *pdriver;
+ struct platform_device *pdev;
+ struct msm_camera_i2c_client i2c_client;
+ enum msm_camera_device_type_t ois_device_type;
+ struct msm_sd_subdev msm_sd;
+ struct mutex *ois_mutex;
+ enum msm_camera_i2c_data_type i2c_data_type;
+ struct v4l2_subdev sdev;
+ struct v4l2_subdev_ops *ois_v4l2_subdev_ops;
+ void *user_data;
+ uint16_t i2c_tbl_index;
+ enum cci_i2c_master_t cci_master;
+ uint32_t subdev_id;
+ enum msm_ois_state_t ois_state;
+ struct msm_ois_vreg vreg_cfg;
+ struct msm_camera_gpio_conf *gconf;
+ struct msm_pinctrl_info pinctrl_info;
+ uint8_t cam_pinctrl_status;
+ struct msm_ois_board_info *oboard_info;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 737433209c2b..23f936258660 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -638,6 +638,7 @@ int vfe_hw_probe(struct platform_device *pdev)
spin_lock_init(&vfe_dev->shared_data_lock);
spin_lock_init(&vfe_dev->reg_update_lock);
spin_lock_init(&req_history_lock);
+ spin_lock_init(&vfe_dev->completion_lock);
media_entity_init(&vfe_dev->subdev.sd.entity, 0, NULL, 0);
vfe_dev->subdev.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
vfe_dev->subdev.sd.entity.group_id = MSM_CAMERA_SUBDEV_VFE;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index f37e183e35de..d64cee834bea 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -795,6 +795,7 @@ struct vfe_device {
struct mutex core_mutex;
spinlock_t shared_data_lock;
spinlock_t reg_update_lock;
+ spinlock_t completion_lock;
/* Tasklet info */
atomic_t irq_cnt;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index b80ef1dc900b..03d1b3c22d61 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -437,9 +437,13 @@ void msm_vfe47_clear_status_reg(struct vfe_device *vfe_dev)
void msm_vfe47_process_reset_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1)
{
+ unsigned long flags;
+
if (irq_status0 & (1 << 31)) {
+ spin_lock_irqsave(&vfe_dev->completion_lock, flags);
complete(&vfe_dev->reset_complete);
vfe_dev->reset_pending = 0;
+ spin_unlock_irqrestore(&vfe_dev->completion_lock, flags);
}
}
@@ -750,8 +754,11 @@ long msm_vfe47_reset_hardware(struct vfe_device *vfe_dev,
{
long rc = 0;
uint32_t reset;
+ unsigned long flags;
+ spin_lock_irqsave(&vfe_dev->completion_lock, flags);
init_completion(&vfe_dev->reset_complete);
+ spin_unlock_irqrestore(&vfe_dev->completion_lock, flags);
if (blocking_call)
vfe_dev->reset_pending = 1;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 7027fedaf721..891e528f75f1 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -793,6 +793,21 @@ int32_t msm_sensor_driver_probe(void *setting,
}
}
+ if (strlen(slave_info->sensor_name) >= MAX_SENSOR_NAME ||
+ strlen(slave_info->eeprom_name) >= MAX_SENSOR_NAME ||
+ strlen(slave_info->actuator_name) >= MAX_SENSOR_NAME ||
+ strlen(slave_info->ois_name) >= MAX_SENSOR_NAME) {
+ pr_err("failed: name len greater than 32.\n");
+ pr_err("sensor name len:%zu, eeprom name len: %zu.\n",
+ strlen(slave_info->sensor_name),
+ strlen(slave_info->eeprom_name));
+ pr_err("actuator name len: %zu, ois name len:%zu.\n",
+ strlen(slave_info->actuator_name),
+ strlen(slave_info->ois_name));
+ rc = -EINVAL;
+ goto free_slave_info;
+ }
+
/* Print slave info */
CDBG("camera id %d Slave addr 0x%X addr_type %d\n",
slave_info->camera_id, slave_info->slave_addr,
@@ -847,9 +862,12 @@ int32_t msm_sensor_driver_probe(void *setting,
*/
if (slave_info->sensor_id_info.sensor_id ==
s_ctrl->sensordata->cam_slave_info->
- sensor_id_info.sensor_id) {
- pr_err("slot%d: sensor id%d already probed\n",
+ sensor_id_info.sensor_id &&
+ !(strcmp(slave_info->sensor_name,
+ s_ctrl->sensordata->cam_slave_info->sensor_name))) {
+ pr_err("slot%d: sensor name: %s sensor id%d already probed\n",
slave_info->camera_id,
+ slave_info->sensor_name,
s_ctrl->sensordata->cam_slave_info->
sensor_id_info.sensor_id);
msm_sensor_fill_sensor_info(s_ctrl,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 442e80e7100e..62980f345f60 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -515,6 +515,12 @@ static int sde_rotator_import_buffer(struct sde_layer_buffer *buffer,
if (!input)
dir = DMA_FROM_DEVICE;
+ if (buffer->plane_count > SDE_ROT_MAX_PLANES) {
+ SDEROT_ERR("buffer plane_count exceeds MAX_PLANE limit:%d\n",
+ buffer->plane_count);
+ return -EINVAL;
+ }
+
memset(planes, 0, sizeof(planes));
for (i = 0; i < buffer->plane_count; i++) {
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 34ec6529d8ae..8157e8641e60 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -1771,6 +1771,10 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
wb_cfg.fps = entry->perf->config.frame_rate;
wb_cfg.bw = entry->perf->bw;
wb_cfg.fmt = sde_get_format_params(item->output.format);
+ if (!wb_cfg.fmt) {
+ SDEROT_ERR("Output format is NULL\n");
+ return -EINVAL;
+ }
wb_cfg.dst_rect = &item->dst_rect;
wb_cfg.data = &entry->dst_buf;
sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 644203b65999..1ff2ca4cb91f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -970,7 +970,7 @@ int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b)
b->m.planes[i].m.userptr = buffer_info->uvaddr[i];
b->m.planes[i].reserved[0] = buffer_info->fd[i];
b->m.planes[i].reserved[1] = buffer_info->buff_off[i];
- if (!b->m.planes[i].m.userptr) {
+ if (!(inst->flags & VIDC_SECURE) && !b->m.planes[i].m.userptr) {
dprintk(VIDC_ERR,
"%s: Failed to find user virtual address, %#lx, %d, %d\n",
__func__, b->m.planes[i].m.userptr, b->type, i);
diff --git a/drivers/misc/memory_state_time.c b/drivers/misc/memory_state_time.c
index 34c797a06a31..ba94dcf09169 100644
--- a/drivers/misc/memory_state_time.c
+++ b/drivers/misc/memory_state_time.c
@@ -296,27 +296,31 @@ static int get_bw_buckets(struct device *dev)
struct device_node *node = dev->of_node;
of_property_read_u32(node, NUM_SOURCES, &num_sources);
- if (of_find_property(node, BW_TBL, &lenb)) {
- bandwidths = devm_kzalloc(dev,
- sizeof(*bandwidths) * num_sources, GFP_KERNEL);
- if (!bandwidths)
- return -ENOMEM;
- lenb /= sizeof(*bw_buckets);
- bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
- GFP_KERNEL);
- if (!bw_buckets) {
- devm_kfree(dev, bandwidths);
- return -ENOMEM;
- }
- ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
- lenb);
- if (ret < 0) {
- devm_kfree(dev, bandwidths);
- devm_kfree(dev, bw_buckets);
- pr_err("Unable to read bandwidth table from device tree.\n");
- return ret;
- }
+ if (!of_find_property(node, BW_TBL, &lenb)) {
+ pr_err("Missing %s property\n", BW_TBL);
+ return -ENODATA;
+ }
+
+ bandwidths = devm_kzalloc(dev,
+ sizeof(*bandwidths) * num_sources, GFP_KERNEL);
+ if (!bandwidths)
+ return -ENOMEM;
+ lenb /= sizeof(*bw_buckets);
+ bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
+ GFP_KERNEL);
+ if (!bw_buckets) {
+ devm_kfree(dev, bandwidths);
+ return -ENOMEM;
+ }
+ ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
+ lenb);
+ if (ret < 0) {
+ devm_kfree(dev, bandwidths);
+ devm_kfree(dev, bw_buckets);
+ pr_err("Unable to read bandwidth table from device tree.\n");
+ return ret;
}
+
curr_bw = 0;
num_buckets = lenb;
return 0;
@@ -332,22 +336,26 @@ static int freq_buckets_init(struct device *dev)
int ret, lenf;
struct device_node *node = dev->of_node;
- if (of_find_property(node, FREQ_TBL, &lenf)) {
- lenf /= sizeof(*freq_buckets);
- freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
- GFP_KERNEL);
- if (!freq_buckets)
- return -ENOMEM;
- pr_debug("freqs found len %d\n", lenf);
- ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
- lenf);
- if (ret < 0) {
- devm_kfree(dev, freq_buckets);
- pr_err("Unable to read frequency table from device tree.\n");
- return ret;
- }
- pr_debug("ret freq %d\n", ret);
+ if (!of_find_property(node, FREQ_TBL, &lenf)) {
+ pr_err("Missing %s property\n", FREQ_TBL);
+ return -ENODATA;
}
+
+ lenf /= sizeof(*freq_buckets);
+ freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
+ GFP_KERNEL);
+ if (!freq_buckets)
+ return -ENOMEM;
+ pr_debug("freqs found len %d\n", lenf);
+ ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
+ lenf);
+ if (ret < 0) {
+ devm_kfree(dev, freq_buckets);
+ pr_err("Unable to read frequency table from device tree.\n");
+ return ret;
+ }
+ pr_debug("ret freq %d\n", ret);
+
num_freqs = lenf;
curr_freq = freq_buckets[LOWEST_FREQ];
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e9f1a19dfe3f..69e51cc96303 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1707,6 +1707,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
/* We couldn't get a response from the card. Give up. */
if (err) {
+ if (card->err_in_sdr104)
+ return ERR_RETRY;
/* Check if the card is removed */
if (mmc_detect_card_removed(card->host))
return ERR_NOMEDIUM;
@@ -2197,7 +2199,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
brq->data.error == -ETIMEDOUT ||
brq->cmd.error == -EILSEQ ||
brq->cmd.error == -EIO ||
- brq->cmd.error == -ETIMEDOUT))
+ brq->cmd.error == -ETIMEDOUT ||
+ brq->sbc.error))
card->err_in_sdr104 = true;
/*
@@ -4695,10 +4698,6 @@ static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
static void mmc_blk_shutdown(struct mmc_card *card)
{
_mmc_blk_suspend(card, 1);
-
- /* send power off notification */
- if (mmc_card_mmc(card))
- mmc_send_pon(card);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c462eee4a5f7..63f7bf87843f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -464,6 +464,22 @@ out:
}
EXPORT_SYMBOL(mmc_clk_update_freq);
+void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+{
+ if (!host->card)
+ return;
+
+ if (host->sdr104_wa && mmc_card_sd(host->card) &&
+ (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+ !host->card->sdr104_blocked) {
+ pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+ mmc_hostname(host), __func__);
+ mmc_host_clear_sdr104(host);
+ mmc_hw_reset(host);
+ host->card->sdr104_blocked = true;
+ }
+}
+
static int mmc_devfreq_set_target(struct device *dev,
unsigned long *freq, u32 devfreq_flags)
{
@@ -510,6 +526,9 @@ static int mmc_devfreq_set_target(struct device *dev,
if (abort)
goto out;
+ if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+ goto rel_host;
+
/*
* In case we were able to claim host there is no need to
* defer the frequency change. It will be done now
@@ -518,15 +537,18 @@ static int mmc_devfreq_set_target(struct device *dev,
mmc_host_clk_hold(host);
err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
- if (err && err != -EAGAIN)
+ if (err && err != -EAGAIN) {
pr_err("%s: clock scale to %lu failed with error %d\n",
mmc_hostname(host), *freq, err);
- else
+ mmc_recovery_fallback_lower_speed(host);
+ } else {
pr_debug("%s: clock change to %lu finished successfully (%s)\n",
mmc_hostname(host), *freq, current->comm);
+ }
mmc_host_clk_release(host);
+rel_host:
mmc_release_host(host);
out:
return err;
@@ -547,6 +569,9 @@ void mmc_deferred_scaling(struct mmc_host *host)
if (!host->clk_scaling.enable)
return;
+ if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+ return;
+
spin_lock_bh(&host->clk_scaling.lock);
if (host->clk_scaling.clk_scaling_in_progress ||
@@ -567,13 +592,15 @@ void mmc_deferred_scaling(struct mmc_host *host)
err = mmc_clk_update_freq(host, target_freq,
host->clk_scaling.state);
- if (err && err != -EAGAIN)
+ if (err && err != -EAGAIN) {
pr_err("%s: failed on deferred scale clocks (%d)\n",
mmc_hostname(host), err);
- else
+ mmc_recovery_fallback_lower_speed(host);
+ } else {
pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
mmc_hostname(host),
target_freq, current->comm);
+ }
host->clk_scaling.clk_scaling_in_progress = false;
atomic_dec(&host->clk_scaling.devfreq_abort);
}
@@ -790,10 +817,15 @@ int mmc_resume_clk_scaling(struct mmc_host *host)
if (!mmc_can_scale_clk(host))
return 0;
+ /*
+ * If clock scaling has already been exited by the time resume is
+ * called (e.g. during mmc shutdown), it is not an error and should
+ * not fail the API calling this.
+ */
if (!host->clk_scaling.devfreq) {
- pr_err("%s: %s: no devfreq is assosiated with this device\n",
+ pr_warn("%s: %s: no devfreq is associated with this device\n",
mmc_hostname(host), __func__);
- return -EPERM;
+ return 0;
}
atomic_set(&host->clk_scaling.devfreq_abort, 0);
@@ -1469,8 +1501,13 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
}
}
if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card))
+ mmc_card_removed(host->card)) {
+ if (cmd->error && !cmd->retries &&
+ cmd->opcode != MMC_SEND_STATUS &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK)
+ mmc_recovery_fallback_lower_speed(host);
break;
+ }
mmc_retune_recheck(host);
@@ -2312,6 +2349,13 @@ void mmc_ungate_clock(struct mmc_host *host)
WARN_ON(host->ios.clock);
/* This call will also set host->clk_gated to false */
__mmc_set_clock(host, host->clk_old);
+ /*
+ * We have seen that host controller's clock tuning circuit may
+ * go out of sync if controller clocks are gated.
+ * To workaround this issue, we are triggering retuning of the
+ * tuning circuit after ungating the controller clocks.
+ */
+ mmc_retune_needed(host);
}
}
@@ -4038,12 +4082,18 @@ int _mmc_detect_card_removed(struct mmc_host *host)
}
if (ret) {
- mmc_card_set_removed(host->card);
- if (host->card->sdr104_blocked) {
- mmc_host_set_sdr104(host);
- host->card->sdr104_blocked = false;
+ if (host->ops->get_cd && host->ops->get_cd(host)) {
+ mmc_recovery_fallback_lower_speed(host);
+ ret = 0;
+ } else {
+ mmc_card_set_removed(host->card);
+ if (host->card->sdr104_blocked) {
+ mmc_host_set_sdr104(host);
+ host->card->sdr104_blocked = false;
+ }
+ pr_debug("%s: card remove detected\n",
+ mmc_hostname(host));
}
- pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
return ret;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 5eda4f4fb0fe..df60774b02af 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2933,6 +2933,22 @@ static int mmc_reset(struct mmc_host *host)
return ret;
}
+static int mmc_shutdown(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+
+ /*
+ * Exit clock scaling so that it doesn't kick in after
+ * power off notification is sent
+ */
+ if (host->caps2 & MMC_CAP2_CLK_SCALE)
+ mmc_exit_clk_scaling(card->host);
+ /* send power off notification */
+ if (mmc_card_mmc(card))
+ mmc_send_pon(card);
+ return 0;
+}
+
static const struct mmc_bus_ops mmc_ops = {
.remove = mmc_remove,
.detect = mmc_detect,
@@ -2943,6 +2959,7 @@ static const struct mmc_bus_ops mmc_ops = {
.alive = mmc_alive,
.change_bus_speed = mmc_change_bus_speed,
.reset = mmc_reset,
+ .shutdown = mmc_shutdown,
};
/*
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 07f5f239cb65..4744919440e0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2473,7 +2473,8 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+ bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+ sizeof(long),
GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index b5ab98ee1445..82753e7c7e7c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -211,7 +211,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
int ret;
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- struct ethhdr *eh = (struct ethhdr *)(skb->data);
+ struct ethhdr *eh;
brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
@@ -232,22 +232,13 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
goto done;
}
- /* Make sure there's enough room for any header */
- if (skb_headroom(skb) < drvr->hdrlen) {
- struct sk_buff *skb2;
-
- brcmf_dbg(INFO, "%s: insufficient headroom\n",
+ /* Make sure there's enough writable headroom */
+ ret = skb_cow_head(skb, drvr->hdrlen);
+ if (ret < 0) {
+ brcmf_err("%s: skb_cow_head failed\n",
brcmf_ifname(drvr, ifp->bssidx));
- drvr->bus_if->tx_realloc++;
- skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
dev_kfree_skb(skb);
- skb = skb2;
- if (skb == NULL) {
- brcmf_err("%s: skb_realloc_headroom failed\n",
- brcmf_ifname(drvr, ifp->bssidx));
- ret = -ENOMEM;
- goto done;
- }
+ goto done;
}
/* validate length for ether packet */
@@ -257,6 +248,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
goto done;
}
+ eh = (struct ethhdr *)(skb->data);
+
if (eh->h_proto == htons(ETH_P_PAE))
atomic_inc(&ifp->pend_8021x_cnt);
diff --git a/drivers/net/wireless/cnss/cnss_common.c b/drivers/net/wireless/cnss/cnss_common.c
index 7805882aa6fe..0b73f37afd94 100644
--- a/drivers/net/wireless/cnss/cnss_common.c
+++ b/drivers/net/wireless/cnss/cnss_common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,6 +68,8 @@ static struct cnss_dfs_nol_info {
u16 dfs_nol_info_len;
} dfs_nol_info;
+static enum cnss_cc_src cnss_cc_source = CNSS_SOURCE_CORE;
+
int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
{
struct cnss_unsafe_channel_list *unsafe_list;
@@ -491,6 +493,18 @@ int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
}
EXPORT_SYMBOL(cnss_get_fw_files_for_target);
+void cnss_set_cc_source(enum cnss_cc_src cc_source)
+{
+ cnss_cc_source = cc_source;
+}
+EXPORT_SYMBOL(cnss_set_cc_source);
+
+enum cnss_cc_src cnss_get_cc_source(void)
+{
+ return cnss_cc_source;
+}
+EXPORT_SYMBOL(cnss_get_cc_source);
+
const char *cnss_wlan_get_evicted_data_file(void)
{
return FW_FILES_QCA6174_FW_3_0.evicted_data;
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index aa498e0d2204..49f3e17c28ea 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -101,13 +101,6 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
{
struct txpd *local_tx_pd;
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
- unsigned int pad;
- int headroom = (priv->adapter->iface_type ==
- MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
-
- pad = ((void *)skb->data - sizeof(*local_tx_pd) -
- headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
- skb_push(skb, pad);
skb_push(skb, sizeof(*local_tx_pd));
@@ -121,12 +114,10 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
local_tx_pd->bss_num = priv->bss_num;
local_tx_pd->bss_type = priv->bss_type;
/* Always zero as the data is followed by struct txpd */
- local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) +
- pad);
+ local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
- sizeof(*local_tx_pd) -
- pad);
+ sizeof(*local_tx_pd));
if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
@@ -190,7 +181,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
ra_list_flags);
return -1;
}
- skb_reserve(skb_aggr, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ /* skb_aggr->data already 64 byte align, just reserve bus interface
+ * header and txpd.
+ */
+ skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 9824d8dd2b44..45d97b64ef84 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -115,7 +115,8 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
p += sprintf(p, "multicast_count=\"%d\"\n",
netdev_mc_count(netdev));
- p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
+ p += sprintf(p, "essid=\"%.*s\"\n", info.ssid.ssid_len,
+ info.ssid.ssid);
p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index d6c4f0f60839..6cfa2969b123 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -1098,8 +1098,6 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
encrypt_key.is_rx_seq_valid = true;
}
} else {
- if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
- return 0;
encrypt_key.key_disable = true;
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 8bb759d10074..1aebd49220b0 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -5857,7 +5857,7 @@ static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
}
ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
- PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+ PAGE_SIZE, IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
if (ret < 0) {
PCIE_ERR(dev,
"PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 03cb3ea2d2c0..b5679fb67591 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -368,6 +368,7 @@ config PHY_QCOM_UFS
config PHY_TUSB1210
tristate "TI TUSB1210 ULPI PHY module"
depends on USB_ULPI_BUS
+ depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
select GENERIC_PHY
help
Support for TI TUSB1210 USB ULPI PHY.
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index da014427852b..ce899ef9c531 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -80,6 +80,7 @@ static void *subsys_notify_handle;
u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */
static struct mutex ipa_to_apps_pipe_handle_guard;
+static struct mutex add_mux_channel_lock;
static int wwan_add_ul_flt_rule_to_ipa(void);
static int wwan_del_ul_flt_rule_to_ipa(void);
static void ipa_wwan_msg_free_cb(void*, u32, u32);
@@ -1528,9 +1529,11 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
rmnet_mux_val.mux_id);
return rc;
}
+ mutex_lock(&add_mux_channel_lock);
if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
IPAWANERR("Exceed mux_channel limit(%d)\n",
rmnet_index);
+ mutex_unlock(&add_mux_channel_lock);
return -EFAULT;
}
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
@@ -1559,6 +1562,7 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
IPAWANERR("device %s reg IPA failed\n",
extend_ioctl_data.u.
rmnet_mux_val.vchannel_name);
+ mutex_unlock(&add_mux_channel_lock);
return -ENODEV;
}
mux_channel[rmnet_index].mux_channel_set = true;
@@ -1571,6 +1575,7 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
mux_channel[rmnet_index].ul_flt_reg = false;
}
rmnet_index++;
+ mutex_unlock(&add_mux_channel_lock);
break;
case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
@@ -3177,6 +3182,7 @@ static int __init ipa_wwan_init(void)
atomic_set(&is_ssr, 0);
mutex_init(&ipa_to_apps_pipe_handle_guard);
+ mutex_init(&add_mux_channel_lock);
ipa_to_apps_hdl = -1;
ipa_qmi_init();
@@ -3195,6 +3201,7 @@ static void __exit ipa_wwan_cleanup(void)
int ret;
ipa_qmi_cleanup();
mutex_destroy(&ipa_to_apps_pipe_handle_guard);
+ mutex_destroy(&add_mux_channel_lock);
ret = subsys_notif_unregister_notifier(subsys_notify_handle,
&ssr_notifier);
if (ret)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 5a7a0e5000b9..a50cd0b807a2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -240,6 +240,9 @@ static void ipa_gsi_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa_gsi_release_resource_work,
ipa_gsi_release_resource);
+static void ipa3_post_init_wq(struct work_struct *work);
+static DECLARE_WORK(ipa3_post_init_work, ipa3_post_init_wq);
+
static struct ipa3_plat_drv_res ipa3_res = {0, };
struct msm_bus_scale_pdata *ipa3_bus_scale_table;
@@ -3951,6 +3954,15 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
struct gsi_per_props gsi_props;
struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
+ if (ipa3_ctx == NULL) {
+ IPADBG("IPA driver has not been initialized\n");
+ return -ENXIO;
+ }
+
+ /* Prevent subsequent calls from trying to load the FW again. */
+ if (ipa3_ctx->ipa_initialization_complete)
+ return 0;
+
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
memset(&gsi_props, 0, sizeof(gsi_props));
gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
@@ -4063,40 +4075,14 @@ fail_setup_apps_pipes:
else
sps_deregister_bam_device(ipa3_ctx->bam_handle);
fail_register_device:
- ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
- ipa_rm_exit();
- cdev_del(&ipa3_ctx->cdev);
- device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
- unregister_chrdev_region(ipa3_ctx->dev_num, 1);
- if (ipa3_ctx->pipe_mem_pool)
- gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
- ipa3_free_dma_task_for_gsi();
- ipa3_destroy_flt_tbl_idrs();
- idr_destroy(&ipa3_ctx->ipa_idr);
- kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
- kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
- kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
- kmem_cache_destroy(ipa3_ctx->hdr_cache);
- kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
- kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
- destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
- destroy_workqueue(ipa3_ctx->power_mgmt_wq);
- iounmap(ipa3_ctx->mmio);
- ipa3_disable_clks();
- msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
- if (ipa3_bus_scale_table) {
- msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
- ipa3_bus_scale_table = NULL;
- }
- kfree(ipa3_ctx->ctrl);
- kfree(ipa3_ctx);
- ipa3_ctx = NULL;
return result;
}
+static void ipa3_post_init_wq(struct work_struct *work)
+{
+ ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
+}
+
static int ipa3_trigger_fw_loading_mdms(void)
{
int result;
@@ -4193,8 +4179,10 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
if (result) {
IPAERR("FW loading process has failed\n");
return result;
- } else
- ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
+ } else {
+ queue_work(ipa3_ctx->transport_power_mgmt_wq,
+ &ipa3_post_init_work);
+ }
}
return count;
}
@@ -4673,20 +4661,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_device_create;
}
- cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
- ipa3_ctx->cdev.owner = THIS_MODULE;
- ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
-
- result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
- if (result) {
- IPAERR(":cdev_add err=%d\n", -result);
- result = -ENODEV;
- goto fail_cdev_add;
- }
- IPADBG("ipa cdev added successful. major:%d minor:%d\n",
- MAJOR(ipa3_ctx->dev_num),
- MINOR(ipa3_ctx->dev_num));
-
if (ipa3_create_nat_device()) {
IPAERR("unable to create nat device\n");
result = -ENODEV;
@@ -4746,21 +4720,45 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_ipa_init_interrupts;
}
}
+ } else {
+ /*
+ * For BAM (No other mode),
+ * we can just carry on with initialization
+ */
+ result = ipa3_post_init(resource_p, ipa_dev);
+ if (result) {
+ IPAERR("ipa3_post_init failed\n");
+ goto fail_ipa_post_init;
+ }
}
- /* For BAM (No other mode), we can just carry on with initialization */
- else
- return ipa3_post_init(resource_p, ipa_dev);
+ cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
+ ipa3_ctx->cdev.owner = THIS_MODULE;
+ ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
+
+ result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
+ if (result) {
+ IPAERR(":cdev_add err=%d\n", -result);
+ result = -ENODEV;
+ goto fail_cdev_add;
+ }
+ IPADBG("ipa cdev added successful. major:%d minor:%d\n",
+ MAJOR(ipa3_ctx->dev_num),
+ MINOR(ipa3_ctx->dev_num));
return 0;
+fail_cdev_add:
+fail_ipa_post_init:
+ if (ipa3_bus_scale_table) {
+ msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
+ ipa3_bus_scale_table = NULL;
+ }
fail_ipa_init_interrupts:
ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
fail_create_apps_resource:
ipa_rm_exit();
fail_ipa_rm_init:
fail_nat_dev_add:
- cdev_del(&ipa3_ctx->cdev);
-fail_cdev_add:
device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
fail_device_create:
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
@@ -4802,7 +4800,8 @@ fail_remap:
ipa3_active_clients_log_destroy();
fail_init_active_client:
fail_clk:
- msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+ msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
fail_bus_reg:
fail_init_mem_partition:
fail_bind:
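Editorial note: the ipa.c changes above make ipa3_post_init() tolerant of repeated invocations (a missing context returns -ENXIO, a completed init returns 0) and defer it to a workqueue after firmware load. A hedged plain-C sketch of the idempotent-guard idea follows, with illustrative names rather than the driver's real API.

/*
 * Sketch of the guard added to ipa3_post_init() above: return early when the
 * driver context is absent or when post-init already completed, so repeated
 * firmware-load triggers are harmless. Names are illustrative only.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ipa_ctx {
	bool initialization_complete;
};

static struct ipa_ctx *ipa_ctx_ptr;

static int post_init(void)
{
	if (ipa_ctx_ptr == NULL)
		return -ENXIO;          /* driver not initialized yet */

	if (ipa_ctx_ptr->initialization_complete)
		return 0;               /* a later call is a no-op */

	/* ... real post-init work would go here ... */
	ipa_ctx_ptr->initialization_complete = true;
	return 0;
}

int main(void)
{
	struct ipa_ctx ctx = { .initialization_complete = false };

	printf("before ctx set: %d\n", post_init());   /* -ENXIO */
	ipa_ctx_ptr = &ctx;
	printf("first call:     %d\n", post_init());   /* 0, runs init */
	printf("second call:    %d\n", post_init());   /* 0, skipped */
	return 0;
}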
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 1bd4f7fda1b7..03dbcbb059aa 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -141,6 +141,7 @@ struct rmnet_ipa3_context {
u32 apps_to_ipa3_hdl;
u32 ipa3_to_apps_hdl;
struct mutex pipe_handle_guard;
+ struct mutex add_mux_channel_lock;
};
static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -1644,10 +1645,13 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
rmnet_mux_val.mux_id);
return rc;
}
+ mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
if (rmnet_ipa3_ctx->rmnet_index
>= MAX_NUM_OF_MUX_CHANNEL) {
IPAWANERR("Exceed mux_channel limit(%d)\n",
rmnet_ipa3_ctx->rmnet_index);
+ mutex_unlock(&rmnet_ipa3_ctx->
+ add_mux_channel_lock);
return -EFAULT;
}
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
@@ -1681,6 +1685,8 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
IPAWANERR("device %s reg IPA failed\n",
extend_ioctl_data.u.
rmnet_mux_val.vchannel_name);
+ mutex_unlock(&rmnet_ipa3_ctx->
+ add_mux_channel_lock);
return -ENODEV;
}
mux_channel[rmnet_index].mux_channel_set = true;
@@ -1693,6 +1699,7 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
mux_channel[rmnet_index].ul_flt_reg = false;
}
rmnet_ipa3_ctx->rmnet_index++;
+ mutex_unlock(&rmnet_ipa3_ctx->add_mux_channel_lock);
break;
case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
rc = handle3_egress_format(dev, &extend_ioctl_data);
@@ -3301,6 +3308,7 @@ static int __init ipa3_wwan_init(void)
atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
+ mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock);
rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
@@ -3319,8 +3327,10 @@ static int __init ipa3_wwan_init(void)
static void __exit ipa3_wwan_cleanup(void)
{
int ret;
+
ipa3_qmi_cleanup();
mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
+ mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock);
ret = subsys_notif_unregister_notifier(
rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
if (ret)
diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h
index b4f3df4ec3d2..7b57c49f0d9e 100644
--- a/drivers/platform/msm/mhi/mhi.h
+++ b/drivers/platform/msm/mhi/mhi.h
@@ -82,12 +82,6 @@ struct bhie_vec_table {
struct bhi_ctxt_t {
void __iomem *bhi_base;
- void *unaligned_image_loc;
- dma_addr_t dma_handle;
- size_t alloc_size;
- void *image_loc;
- dma_addr_t phy_image_loc;
- size_t image_size;
dev_t bhi_dev;
struct cdev cdev;
struct device *dev;
diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c
index 3bc8205b5f0f..5b05270b1e66 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.c
+++ b/drivers/platform/msm/mhi/mhi_bhi.c
@@ -109,30 +109,29 @@ alloc_bhi_mem_info_error:
}
static int bhi_alloc_pbl_xfer(struct mhi_device_ctxt *mhi_dev_ctxt,
+ struct bhie_mem_info *const mem_info,
size_t size)
{
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
const phys_addr_t align_len = bhi_ctxt->alignment;
- size_t alloc_size = size + (align_len - 1);
struct device *dev = &mhi_dev_ctxt->plat_dev->dev;
- bhi_ctxt->unaligned_image_loc =
- dma_alloc_coherent(dev, alloc_size, &bhi_ctxt->dma_handle,
- GFP_KERNEL);
- if (bhi_ctxt->unaligned_image_loc == NULL)
+ mem_info->size = size;
+ mem_info->alloc_size = size + (align_len - 1);
+ mem_info->pre_aligned =
+ dma_alloc_coherent(dev, mem_info->alloc_size,
+ &mem_info->dma_handle, GFP_KERNEL);
+ if (mem_info->pre_aligned == NULL)
return -ENOMEM;
- bhi_ctxt->alloc_size = alloc_size;
- bhi_ctxt->phy_image_loc = (bhi_ctxt->dma_handle + (align_len - 1)) &
+ mem_info->phys_addr = (mem_info->dma_handle + (align_len - 1)) &
~(align_len - 1);
- bhi_ctxt->image_loc = bhi_ctxt->unaligned_image_loc +
- (bhi_ctxt->phy_image_loc - bhi_ctxt->dma_handle);
- bhi_ctxt->image_size = size;
-
+ mem_info->aligned = mem_info->pre_aligned + (mem_info->phys_addr -
+ mem_info->dma_handle);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"alloc_size:%lu image_size:%lu unal_addr:0x%llx0x al_addr:0x%llx\n",
- bhi_ctxt->alloc_size, bhi_ctxt->image_size,
- bhi_ctxt->dma_handle, bhi_ctxt->phy_image_loc);
+ mem_info->alloc_size, mem_info->size,
+ mem_info->dma_handle, mem_info->phys_addr);
return 0;
}
@@ -264,7 +263,8 @@ int bhi_rddm(struct mhi_device_ctxt *mhi_dev_ctxt, bool in_panic)
return -EINVAL;
}
-static int bhi_load_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
+static int bhi_load_firmware(struct mhi_device_ctxt *mhi_dev_ctxt,
+ const struct bhie_mem_info *const mem_info)
{
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
u32 pcie_word_val = 0;
@@ -278,28 +278,21 @@ static int bhi_load_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
read_unlock_bh(pm_xfer_lock);
return -EIO;
}
- pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
- mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
- BHI_IMGADDR_HIGH,
- 0xFFFFFFFF,
- 0,
- pcie_word_val);
+ pcie_word_val = HIGH_WORD(mem_info->phys_addr);
+ mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_IMGADDR_HIGH,
+ 0xFFFFFFFF, 0, pcie_word_val);
- pcie_word_val = LOW_WORD(bhi_ctxt->phy_image_loc);
+ pcie_word_val = LOW_WORD(mem_info->phys_addr);
+ mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_IMGADDR_LOW,
+ 0xFFFFFFFF, 0, pcie_word_val);
- mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
- BHI_IMGADDR_LOW,
- 0xFFFFFFFF,
- 0,
- pcie_word_val);
-
- pcie_word_val = bhi_ctxt->image_size;
+ pcie_word_val = mem_info->size;
mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_IMGSIZE,
- 0xFFFFFFFF, 0, pcie_word_val);
+ 0xFFFFFFFF, 0, pcie_word_val);
pcie_word_val = mhi_reg_read(bhi_ctxt->bhi_base, BHI_IMGTXDB);
mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
- BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
+ BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
read_unlock_bh(pm_xfer_lock);
timeout = jiffies + msecs_to_jiffies(bhi_ctxt->poll_timeout);
while (time_before(jiffies, timeout)) {
@@ -342,6 +335,7 @@ static ssize_t bhi_write(struct file *file,
int ret_val = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = file->private_data;
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct bhie_mem_info mem_info;
long timeout;
if (buf == NULL || 0 == count)
@@ -350,11 +344,11 @@ static ssize_t bhi_write(struct file *file,
if (count > BHI_MAX_IMAGE_SIZE)
return -ENOMEM;
- ret_val = bhi_alloc_pbl_xfer(mhi_dev_ctxt, count);
+ ret_val = bhi_alloc_pbl_xfer(mhi_dev_ctxt, &mem_info, count);
if (ret_val)
return -ENOMEM;
- if (copy_from_user(bhi_ctxt->image_loc, buf, count)) {
+ if (copy_from_user(mem_info.aligned, buf, count)) {
ret_val = -ENOMEM;
goto bhi_copy_error;
}
@@ -370,15 +364,13 @@ static ssize_t bhi_write(struct file *file,
goto bhi_copy_error;
}
- ret_val = bhi_load_firmware(mhi_dev_ctxt);
+ ret_val = bhi_load_firmware(mhi_dev_ctxt, &mem_info);
if (ret_val) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to load bhi image\n");
}
- dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
- bhi_ctxt->alloc_size,
- bhi_ctxt->unaligned_image_loc,
- bhi_ctxt->dma_handle);
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev, mem_info.alloc_size,
+ mem_info.pre_aligned, mem_info.dma_handle);
/* Regardless of failure set to RESET state */
ret_val = mhi_init_state_transition(mhi_dev_ctxt,
@@ -390,10 +382,8 @@ static ssize_t bhi_write(struct file *file,
return count;
bhi_copy_error:
- dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
- bhi_ctxt->alloc_size,
- bhi_ctxt->unaligned_image_loc,
- bhi_ctxt->dma_handle);
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev, mem_info.alloc_size,
+ mem_info.pre_aligned, mem_info.dma_handle);
return ret_val;
}
@@ -447,6 +437,7 @@ void bhi_firmware_download(struct work_struct *work)
{
struct mhi_device_ctxt *mhi_dev_ctxt;
struct bhi_ctxt_t *bhi_ctxt;
+ struct bhie_mem_info mem_info;
int ret;
long timeout;
@@ -459,7 +450,10 @@ void bhi_firmware_download(struct work_struct *work)
wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
- ret = bhi_load_firmware(mhi_dev_ctxt);
+ /* PBL image is the first segment in firmware vector table */
+ mem_info = *bhi_ctxt->fw_table.bhie_mem_info;
+ mem_info.size = bhi_ctxt->firmware_info.max_sbl_len;
+ ret = bhi_load_firmware(mhi_dev_ctxt, &mem_info);
if (ret) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to Load sbl firmware\n");
@@ -547,13 +541,6 @@ int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt)
image += to_copy;
}
- /*
- * Re-use BHI/E pointer for BHI since we guranteed BHI/E segment
- * is >= to SBL image.
- */
- bhi_ctxt->phy_image_loc = sg_dma_address(&fw_table->sg_list[1]);
- bhi_ctxt->image_size = fw_info->max_sbl_len;
-
fw_table->sequence++;
release_firmware(firmware);
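Editorial note: bhi_alloc_pbl_xfer() above now records the over-allocated buffer, the rounded-up bus address, and the matching CPU pointer in a caller-provided bhie_mem_info instead of the shared bhi_ctxt. A plain-C sketch of just the alignment arithmetic follows, with malloc() standing in for dma_alloc_coherent() and a loosely mirrored struct.

/*
 * Sketch of the alignment math used above: allocate size + (align - 1) bytes,
 * round the bus address up to the alignment, and advance the CPU pointer by
 * the same offset. The bus address is simulated by the heap pointer here.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mem_info {
	void *pre_aligned;      /* as returned by the allocator */
	void *aligned;          /* CPU pointer rounded up */
	uintptr_t dma_handle;   /* simulated bus address */
	uintptr_t phys_addr;    /* bus address rounded up */
	size_t size;
	size_t alloc_size;
};

static int alloc_aligned(struct mem_info *mi, size_t size, uintptr_t align)
{
	mi->size = size;
	mi->alloc_size = size + (align - 1);
	mi->pre_aligned = malloc(mi->alloc_size);
	if (mi->pre_aligned == NULL)
		return -1;

	mi->dma_handle = (uintptr_t)mi->pre_aligned;
	mi->phys_addr = (mi->dma_handle + (align - 1)) & ~(align - 1);
	mi->aligned = (char *)mi->pre_aligned +
		      (mi->phys_addr - mi->dma_handle);
	return 0;
}

int main(void)
{
	struct mem_info mi;

	if (alloc_aligned(&mi, 4096, 0x1000) == 0) {
		printf("pre=%p aligned=%p alloc=%zu\n",
		       mi.pre_aligned, mi.aligned, mi.alloc_size);
		free(mi.pre_aligned);
	}
	return 0;
}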
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index b1989f8741f5..c9c00d04c30a 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -1098,7 +1098,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
}
ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
- "qcom,keep_radio_on_during_sleep");
+ "qcom,keep-radio-on-during-sleep");
ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
@@ -1336,6 +1336,17 @@ static void msm_11ad_set_boost_affinity(struct msm11ad_ctx *ctx)
dev_warn(ctx->dev, "failed to set CPU boost affinity\n");
}
+static void msm_11ad_clear_boost_affinity(struct msm11ad_ctx *ctx)
+{
+ int rc;
+
+ irq_modify_status(ctx->pcidev->irq, IRQ_NO_BALANCING, 0);
+ rc = irq_set_affinity_hint(ctx->pcidev->irq, NULL);
+ if (rc)
+ dev_warn(ctx->dev,
+ "Failed clear affinity, rc=%d\n", rc);
+}
+
/* hooks for the wil6210 driver */
static int ops_bus_request(void *handle, u32 kbps /* KBytes/Sec */)
{
@@ -1385,8 +1396,7 @@ static int ops_bus_request(void *handle, u32 kbps /* KBytes/Sec */)
dev_err(ctx->dev,
"Failed disable boost rc=%d\n",
rc);
- irq_modify_status(ctx->pcidev->irq,
- IRQ_NO_BALANCING, 0);
+ msm_11ad_clear_boost_affinity(ctx);
dev_dbg(ctx->dev, "CPU boost disabled\n");
}
ctx->is_cpu_boosted = needs_boost;
@@ -1472,7 +1482,7 @@ static int ops_notify(void *handle, enum wil_platform_event evt)
return rc;
}
-bool ops_keep_radio_on_during_sleep(void *handle)
+static bool ops_keep_radio_on_during_sleep(void *handle)
{
struct msm11ad_ctx *ctx = (struct msm11ad_ctx *)handle;
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index c7c1b1567bf3..53c85773b377 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -39,7 +39,6 @@ struct msm_ext_disp {
struct list_head display_list;
struct mutex lock;
struct completion hpd_comp;
- u32 flags;
};
static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp,
@@ -288,7 +287,6 @@ static bool msm_ext_disp_validate_connect(struct msm_ext_disp *ext_disp,
if (ext_disp->current_disp != type)
return false;
end:
- ext_disp->flags |= flags;
ext_disp->current_disp = type;
return true;
}
@@ -304,13 +302,7 @@ static bool msm_ext_disp_validate_disconnect(struct msm_ext_disp *ext_disp,
if (ext_disp->current_disp != type)
return false;
- /* allow only an already connected type */
- if (ext_disp->flags & flags) {
- ext_disp->flags &= ~flags;
- return true;
- }
-
- return false;
+ return true;
}
static int msm_ext_disp_hpd(struct platform_device *pdev,
@@ -378,8 +370,7 @@ static int msm_ext_disp_hpd(struct platform_device *pdev,
msm_ext_disp_update_audio_ops(ext_disp, type, state, flags);
msm_ext_disp_process_display(ext_disp, type, state, flags);
- if (!ext_disp->flags)
- ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
+ ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
}
pr_debug("Hpd (%d) for display (%s)\n", state,
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
index f5746b9f4e83..f05d2773fe00 100644
--- a/drivers/power/bq24190_charger.c
+++ b/drivers/power/bq24190_charger.c
@@ -144,10 +144,7 @@
* so the first read after a fault returns the latched value and subsequent
* reads return the current value. In order to return the fault status
* to the user, have the interrupt handler save the reg's value and retrieve
- * it in the appropriate health/status routine. Each routine has its own
- * flag indicating whether it should use the value stored by the last run
- * of the interrupt handler or do an actual reg read. That way each routine
- * can report back whatever fault may have occured.
+ * it in the appropriate health/status routine.
*/
struct bq24190_dev_info {
struct i2c_client *client;
@@ -159,10 +156,6 @@ struct bq24190_dev_info {
unsigned int gpio_int;
unsigned int irq;
struct mutex f_reg_lock;
- bool first_time;
- bool charger_health_valid;
- bool battery_health_valid;
- bool battery_status_valid;
u8 f_reg;
u8 ss_reg;
u8 watchdog;
@@ -636,21 +629,11 @@ static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
union power_supply_propval *val)
{
u8 v;
- int health, ret;
+ int health;
mutex_lock(&bdi->f_reg_lock);
-
- if (bdi->charger_health_valid) {
- v = bdi->f_reg;
- bdi->charger_health_valid = false;
- mutex_unlock(&bdi->f_reg_lock);
- } else {
- mutex_unlock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &v);
- if (ret < 0)
- return ret;
- }
+ v = bdi->f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
if (v & BQ24190_REG_F_BOOST_FAULT_MASK) {
/*
@@ -937,18 +920,8 @@ static int bq24190_battery_get_status(struct bq24190_dev_info *bdi,
int status, ret;
mutex_lock(&bdi->f_reg_lock);
-
- if (bdi->battery_status_valid) {
- chrg_fault = bdi->f_reg;
- bdi->battery_status_valid = false;
- mutex_unlock(&bdi->f_reg_lock);
- } else {
- mutex_unlock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &chrg_fault);
- if (ret < 0)
- return ret;
- }
+ chrg_fault = bdi->f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
chrg_fault &= BQ24190_REG_F_CHRG_FAULT_MASK;
chrg_fault >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
@@ -996,21 +969,11 @@ static int bq24190_battery_get_health(struct bq24190_dev_info *bdi,
union power_supply_propval *val)
{
u8 v;
- int health, ret;
+ int health;
mutex_lock(&bdi->f_reg_lock);
-
- if (bdi->battery_health_valid) {
- v = bdi->f_reg;
- bdi->battery_health_valid = false;
- mutex_unlock(&bdi->f_reg_lock);
- } else {
- mutex_unlock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &v);
- if (ret < 0)
- return ret;
- }
+ v = bdi->f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
@@ -1197,9 +1160,12 @@ static const struct power_supply_desc bq24190_battery_desc = {
static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
{
struct bq24190_dev_info *bdi = data;
- bool alert_userspace = false;
+ const u8 battery_mask_ss = BQ24190_REG_SS_CHRG_STAT_MASK;
+ const u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK
+ | BQ24190_REG_F_NTC_FAULT_MASK;
+ bool alert_charger = false, alert_battery = false;
u8 ss_reg = 0, f_reg = 0;
- int ret;
+ int i, ret;
pm_runtime_get_sync(bdi->dev);
@@ -1209,6 +1175,32 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
goto out;
}
+ i = 0;
+ do {
+ ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
+ if (ret < 0) {
+ dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
+ goto out;
+ }
+ } while (f_reg && ++i < 2);
+
+ if (f_reg != bdi->f_reg) {
+ dev_info(bdi->dev,
+ "Fault: boost %d, charge %d, battery %d, ntc %d\n",
+ !!(f_reg & BQ24190_REG_F_BOOST_FAULT_MASK),
+ !!(f_reg & BQ24190_REG_F_CHRG_FAULT_MASK),
+ !!(f_reg & BQ24190_REG_F_BAT_FAULT_MASK),
+ !!(f_reg & BQ24190_REG_F_NTC_FAULT_MASK));
+
+ mutex_lock(&bdi->f_reg_lock);
+ if ((bdi->f_reg & battery_mask_f) != (f_reg & battery_mask_f))
+ alert_battery = true;
+ if ((bdi->f_reg & ~battery_mask_f) != (f_reg & ~battery_mask_f))
+ alert_charger = true;
+ bdi->f_reg = f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
+ }
+
if (ss_reg != bdi->ss_reg) {
/*
* The device is in host mode so when PG_STAT goes from 1->0
@@ -1225,47 +1217,17 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
ret);
}
+ if ((bdi->ss_reg & battery_mask_ss) != (ss_reg & battery_mask_ss))
+ alert_battery = true;
+ if ((bdi->ss_reg & ~battery_mask_ss) != (ss_reg & ~battery_mask_ss))
+ alert_charger = true;
bdi->ss_reg = ss_reg;
- alert_userspace = true;
- }
-
- mutex_lock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
- if (ret < 0) {
- mutex_unlock(&bdi->f_reg_lock);
- dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
- goto out;
}
- if (f_reg != bdi->f_reg) {
- bdi->f_reg = f_reg;
- bdi->charger_health_valid = true;
- bdi->battery_health_valid = true;
- bdi->battery_status_valid = true;
-
- alert_userspace = true;
- }
-
- mutex_unlock(&bdi->f_reg_lock);
-
- /*
- * Sometimes bq24190 gives a steady trickle of interrupts even
- * though the watchdog timer is turned off and neither the STATUS
- * nor FAULT registers have changed. Weed out these sprurious
- * interrupts so userspace isn't alerted for no reason.
- * In addition, the chip always generates an interrupt after
- * register reset so we should ignore that one (the very first
- * interrupt received).
- */
- if (alert_userspace) {
- if (!bdi->first_time) {
- power_supply_changed(bdi->charger);
- power_supply_changed(bdi->battery);
- } else {
- bdi->first_time = false;
- }
- }
+ if (alert_charger)
+ power_supply_changed(bdi->charger);
+ if (alert_battery)
+ power_supply_changed(bdi->battery);
out:
pm_runtime_put_sync(bdi->dev);
@@ -1300,6 +1262,10 @@ static int bq24190_hw_init(struct bq24190_dev_info *bdi)
goto out;
ret = bq24190_set_mode_host(bdi);
+ if (ret < 0)
+ goto out;
+
+ ret = bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
out:
pm_runtime_put_sync(bdi->dev);
return ret;
@@ -1375,10 +1341,8 @@ static int bq24190_probe(struct i2c_client *client,
bdi->model = id->driver_data;
strncpy(bdi->model_name, id->name, I2C_NAME_SIZE);
mutex_init(&bdi->f_reg_lock);
- bdi->first_time = true;
- bdi->charger_health_valid = false;
- bdi->battery_health_valid = false;
- bdi->battery_status_valid = false;
+ bdi->f_reg = 0;
+ bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
i2c_set_clientdata(client, bdi);
@@ -1392,22 +1356,13 @@ static int bq24190_probe(struct i2c_client *client,
return -EINVAL;
}
- ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
- bq24190_irq_handler_thread,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "bq24190-charger", bdi);
- if (ret < 0) {
- dev_err(dev, "Can't set up irq handler\n");
- goto out1;
- }
-
pm_runtime_enable(dev);
pm_runtime_resume(dev);
ret = bq24190_hw_init(bdi);
if (ret < 0) {
dev_err(dev, "Hardware init failed\n");
- goto out2;
+ goto out1;
}
charger_cfg.drv_data = bdi;
@@ -1418,7 +1373,7 @@ static int bq24190_probe(struct i2c_client *client,
if (IS_ERR(bdi->charger)) {
dev_err(dev, "Can't register charger\n");
ret = PTR_ERR(bdi->charger);
- goto out2;
+ goto out1;
}
battery_cfg.drv_data = bdi;
@@ -1427,24 +1382,34 @@ static int bq24190_probe(struct i2c_client *client,
if (IS_ERR(bdi->battery)) {
dev_err(dev, "Can't register battery\n");
ret = PTR_ERR(bdi->battery);
- goto out3;
+ goto out2;
}
ret = bq24190_sysfs_create_group(bdi);
if (ret) {
dev_err(dev, "Can't create sysfs entries\n");
+ goto out3;
+ }
+
+ ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
+ bq24190_irq_handler_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "bq24190-charger", bdi);
+ if (ret < 0) {
+ dev_err(dev, "Can't set up irq handler\n");
goto out4;
}
return 0;
out4:
- power_supply_unregister(bdi->battery);
+ bq24190_sysfs_remove_group(bdi);
out3:
- power_supply_unregister(bdi->charger);
+ power_supply_unregister(bdi->battery);
out2:
- pm_runtime_disable(dev);
+ power_supply_unregister(bdi->charger);
out1:
+ pm_runtime_disable(dev);
if (bdi->gpio_int)
gpio_free(bdi->gpio_int);
@@ -1488,12 +1453,13 @@ static int bq24190_pm_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
- bdi->charger_health_valid = false;
- bdi->battery_health_valid = false;
- bdi->battery_status_valid = false;
+ bdi->f_reg = 0;
+ bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
pm_runtime_get_sync(bdi->dev);
bq24190_register_reset(bdi);
+ bq24190_set_mode_host(bdi);
+ bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
pm_runtime_put_sync(bdi->dev);
/* Things may have changed while suspended so alert upper layer */
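Editorial note: the reworked bq24190 interrupt handler compares the old and new fault/status register values against a battery-specific mask and notifies only the power supply whose bits changed. The plain-C sketch below shows that mask-split decision; the mask values are illustrative rather than quoted from the datasheet.

/*
 * Sketch of the alert-splitting logic above: changes in battery-related bits
 * notify the battery supply, all other changes notify the charger supply.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BAT_FAULT_MASK   0x08	/* battery fault bit (illustrative) */
#define NTC_FAULT_MASK   0x07	/* thermistor fault bits (illustrative) */
#define BATTERY_MASK_F   (BAT_FAULT_MASK | NTC_FAULT_MASK)

static void handle_fault_change(uint8_t old_f, uint8_t new_f,
				bool *alert_battery, bool *alert_charger)
{
	if ((old_f & BATTERY_MASK_F) != (new_f & BATTERY_MASK_F))
		*alert_battery = true;
	if ((old_f & ~BATTERY_MASK_F) != (new_f & ~BATTERY_MASK_F))
		*alert_charger = true;
}

int main(void)
{
	bool alert_battery = false, alert_charger = false;

	/* only the battery fault bit flips: only the battery is notified */
	handle_fault_change(0x00, BAT_FAULT_MASK,
			    &alert_battery, &alert_charger);
	printf("battery=%d charger=%d\n", alert_battery, alert_charger);
	return 0;
}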
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 8d7322a325de..bd77e55bafc6 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -296,6 +296,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(ctm_current_max),
POWER_SUPPLY_ATTR(hw_current_max),
POWER_SUPPLY_ATTR(real_type),
+ POWER_SUPPLY_ATTR(pr_swap),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 98d75f586b67..8b4067b17103 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -38,6 +38,8 @@
#define PL_VOTER "PL_VOTER"
#define RESTRICT_CHG_VOTER "RESTRICT_CHG_VOTER"
#define ICL_CHANGE_VOTER "ICL_CHANGE_VOTER"
+#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
+#define USBIN_I_VOTER "USBIN_I_VOTER"
struct pl_data {
int pl_mode;
@@ -52,7 +54,8 @@ struct pl_data {
struct votable *pl_awake_votable;
struct votable *hvdcp_hw_inov_dis_votable;
struct votable *usb_icl_votable;
- struct work_struct status_change_work;
+ struct votable *pl_enable_votable_indirect;
+ struct delayed_work status_change_work;
struct work_struct pl_disable_forever_work;
struct delayed_work pl_taper_work;
struct power_supply *main_psy;
@@ -490,6 +493,7 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
}
#define ICL_STEP_UA 25000
+#define PL_DELAY_MS 3000
static int usb_icl_vote_callback(struct votable *votable, void *data,
int icl_ua, const char *client)
{
@@ -511,6 +515,21 @@ static int usb_icl_vote_callback(struct votable *votable, void *data,
*/
vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, true, 0);
+ /*
+ * if (ICL < 1400)
+ * disable parallel charger using USBIN_I_VOTER
+ * else
+ * do not re-enable here; rely on status_change_work (triggered
+ * by AICL completion, or scheduled from here) to vote
+ * USBIN_I_VOTER again once it has evaluated the settled
+ * current.
+ */
+ if (icl_ua <= 1400000)
+ vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ else
+ schedule_delayed_work(&chip->status_change_work,
+ msecs_to_jiffies(PL_DELAY_MS));
+
/* rerun AICL */
/* get the settled current */
rc = power_supply_get_property(chip->main_psy,
@@ -642,6 +661,16 @@ static int pl_disable_vote_callback(struct votable *votable,
return 0;
}
+static int pl_enable_indirect_vote_callback(struct votable *votable,
+ void *data, int pl_enable, const char *client)
+{
+ struct pl_data *chip = data;
+
+ vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, !pl_enable, 0);
+
+ return 0;
+}
+
static int pl_awake_vote_callback(struct votable *votable,
void *data, int awake, const char *client)
{
@@ -774,6 +803,42 @@ static void handle_settled_icl_change(struct pl_data *chip)
union power_supply_propval pval = {0, };
int new_total_settled_ua;
int rc;
+ int main_settled_ua;
+ int main_limited;
+ int total_current_ua;
+
+ total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+
+ /*
+ * call aicl split only when USBIN_USBIN and enabled
+ * and if aicl changed
+ */
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+ return;
+ }
+ main_settled_ua = pval.intval;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+ return;
+ }
+ main_limited = pval.intval;
+
+ if ((main_limited && (main_settled_ua + chip->pl_settled_ua) < 1400000)
+ || (main_settled_ua == 0)
+ || ((total_current_ua >= 0) &&
+ (total_current_ua <= 1400000)))
+ vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ else
+ vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
+
if (get_effective_result(chip->pl_disable_votable))
return;
@@ -782,17 +847,10 @@ static void handle_settled_icl_change(struct pl_data *chip)
|| chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
/*
* call aicl split only when USBIN_USBIN and enabled
- * and if aicl changed
+ * and if settled current has changed by more than 300mA
*/
- rc = power_supply_get_property(chip->main_psy,
- POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
- &pval);
- if (rc < 0) {
- pr_err("Couldn't get aicl settled value rc=%d\n", rc);
- return;
- }
- new_total_settled_ua = pval.intval + chip->pl_settled_ua;
+ new_total_settled_ua = main_settled_ua + chip->pl_settled_ua;
pl_dbg(chip, PR_PARALLEL,
"total_settled_ua=%d settled_ua=%d new_total_settled_ua=%d\n",
chip->total_settled_ua, pval.intval,
@@ -839,7 +897,7 @@ static void handle_parallel_in_taper(struct pl_data *chip)
static void status_change_work(struct work_struct *work)
{
struct pl_data *chip = container_of(work,
- struct pl_data, status_change_work);
+ struct pl_data, status_change_work.work);
if (!chip->main_psy && is_main_available(chip)) {
/*
@@ -877,7 +935,7 @@ static int pl_notifier_call(struct notifier_block *nb,
if ((strcmp(psy->desc->name, "parallel") == 0)
|| (strcmp(psy->desc->name, "battery") == 0)
|| (strcmp(psy->desc->name, "main") == 0))
- schedule_work(&chip->status_change_work);
+ schedule_delayed_work(&chip->status_change_work, 0);
return NOTIFY_OK;
}
@@ -898,7 +956,7 @@ static int pl_register_notifier(struct pl_data *chip)
static int pl_determine_initial_status(struct pl_data *chip)
{
- status_change_work(&chip->status_change_work);
+ status_change_work(&chip->status_change_work.work);
return 0;
}
@@ -967,7 +1025,18 @@ int qcom_batt_init(void)
goto destroy_votable;
}
- INIT_WORK(&chip->status_change_work, status_change_work);
+ chip->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
+ VOTE_SET_ANY,
+ pl_enable_indirect_vote_callback,
+ chip);
+ if (IS_ERR(chip->pl_enable_votable_indirect)) {
+ rc = PTR_ERR(chip->pl_enable_votable_indirect);
+ return rc;
+ }
+
+ vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+
+ INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
@@ -1000,6 +1069,7 @@ int qcom_batt_init(void)
unreg_notifier:
power_supply_unreg_notifier(&chip->nb);
destroy_votable:
+ destroy_votable(chip->pl_enable_votable_indirect);
destroy_votable(chip->pl_awake_votable);
destroy_votable(chip->pl_disable_votable);
destroy_votable(chip->fv_votable);
@@ -1019,11 +1089,12 @@ void qcom_batt_deinit(void)
if (chip == NULL)
return;
- cancel_work_sync(&chip->status_change_work);
+ cancel_delayed_work_sync(&chip->status_change_work);
cancel_delayed_work_sync(&chip->pl_taper_work);
cancel_work_sync(&chip->pl_disable_forever_work);
power_supply_unreg_notifier(&chip->nb);
+ destroy_votable(chip->pl_enable_votable_indirect);
destroy_votable(chip->pl_awake_votable);
destroy_votable(chip->pl_disable_votable);
destroy_votable(chip->fv_votable);
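Editorial note: battery.c now creates the PL_ENABLE_INDIRECT votable itself and mirrors its set-any result into a PL_INDIRECT_VOTER vote on the parallel-disable votable. The real create_votable()/vote() API is Qualcomm-specific; the user-space sketch below only illustrates the chaining idea with stand-in names.

/*
 * Sketch of the vote chaining added above: a set-any "enable indirect"
 * aggregate whose result toggles a PL_INDIRECT vote on the parallel-disable
 * aggregate. Illustration only, not the driver's votable implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_CLIENTS 4

static bool enable_votes[NUM_CLIENTS];   /* PL_ENABLE_INDIRECT clients */
static bool pl_disabled_by_indirect = true;

static bool set_any(const bool *votes, int n)
{
	for (int i = 0; i < n; i++)
		if (votes[i])
			return true;
	return false;
}

static void vote_enable_indirect(int client, bool enable)
{
	enable_votes[client] = enable;
	/* pl_enable_indirect_vote_callback(): disable PL when no one enables */
	pl_disabled_by_indirect = !set_any(enable_votes, NUM_CLIENTS);
}

int main(void)
{
	vote_enable_indirect(0, false);          /* USBIN_I_VOTER: ICL too low */
	printf("parallel disabled: %d\n", pl_disabled_by_indirect);

	vote_enable_indirect(0, true);           /* settled current high enough */
	printf("parallel disabled: %d\n", pl_disabled_by_indirect);
	return 0;
}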
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index d0f7a5e1e227..26648595c55c 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -3880,7 +3880,7 @@ static int fg_parse_ki_coefficients(struct fg_chip *chip)
}
#define DEFAULT_CUTOFF_VOLT_MV 3200
-#define DEFAULT_EMPTY_VOLT_MV 2800
+#define DEFAULT_EMPTY_VOLT_MV 2850
#define DEFAULT_RECHARGE_VOLT_MV 4250
#define DEFAULT_CHG_TERM_CURR_MA 100
#define DEFAULT_CHG_TERM_BASE_CURR_MA 75
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index b2597987292b..d3abfbfbbc43 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -305,9 +305,6 @@ static int smb2_parse_dt(struct smb2 *chip)
chip->dt.no_battery = of_property_read_bool(node,
"qcom,batteryless-platform");
- chg->external_vconn = of_property_read_bool(node,
- "qcom,external-vconn");
-
rc = of_property_read_u32(node,
"qcom,fcc-max-ua", &chg->batt_profile_fcc_ua);
if (rc < 0)
@@ -1046,7 +1043,8 @@ static int smb2_batt_get_prop(struct power_supply *psy,
rc = smblib_get_prop_batt_voltage_now(chg, val);
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- val->intval = get_client_vote(chg->fv_votable, DEFAULT_VOTER);
+ val->intval = get_client_vote(chg->fv_votable,
+ BATT_PROFILE_VOTER);
break;
case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
rc = smblib_get_prop_charge_qnovo_enable(chg, val);
@@ -1064,7 +1062,7 @@ static int smb2_batt_get_prop(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
val->intval = get_client_vote(chg->fcc_votable,
- DEFAULT_VOTER);
+ BATT_PROFILE_VOTER);
break;
case POWER_SUPPLY_PROP_TEMP:
rc = smblib_get_prop_batt_temp(chg, val);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index cc50e0f478d1..4c66bc500ab4 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -638,6 +638,7 @@ static void smblib_uusb_removal(struct smb_charger *chg)
/* reset both usbin current and voltage votes */
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+ vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
@@ -865,6 +866,7 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
goto enable_icl_changed_interrupt;
}
} else {
+ set_sdp_current(chg, 100000);
rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
if (rc < 0) {
smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
@@ -1047,16 +1049,6 @@ static int smblib_chg_disable_vote_callback(struct votable *votable, void *data,
return 0;
}
-static int smblib_pl_enable_indirect_vote_callback(struct votable *votable,
- void *data, int chg_enable, const char *client)
-{
- struct smb_charger *chg = data;
-
- vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, !chg_enable, 0);
-
- return 0;
-}
-
static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
void *data,
int hvdcp_enable, const char *client)
@@ -1213,36 +1205,13 @@ static int smblib_typec_irq_disable_vote_callback(struct votable *votable,
static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
- u8 otg_stat, val;
- int rc = 0, i;
-
- if (!chg->external_vconn) {
- /*
- * Hardware based OTG soft start should complete within 1ms, so
- * wait for 2ms in the worst case.
- */
- for (i = 0; i < MAX_OTG_SS_TRIES; ++i) {
- usleep_range(1000, 1100);
- rc = smblib_read(chg, OTG_STATUS_REG, &otg_stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read OTG status rc=%d\n",
- rc);
- return rc;
- }
-
- if (otg_stat & BOOST_SOFTSTART_DONE_BIT)
- break;
- }
-
- if (!(otg_stat & BOOST_SOFTSTART_DONE_BIT)) {
- smblib_err(chg, "Couldn't enable VCONN; OTG soft start failed\n");
- return -EAGAIN;
- }
- }
+ int rc = 0;
+ u8 val;
/*
- * VCONN_EN_ORIENTATION is overloaded with overriding the CC pin used
- * for Vconn, and it should be set with reverse polarity of CC_OUT.
+ * When enabling VCONN via the command register, the CC pin must be
+ * selected explicitly. VCONN is supplied on the inactive CC pin, hence
+ * the inverse of CC_ORIENTATION_BIT is used.
*/
smblib_dbg(chg, PR_OTG, "enabling VCONN\n");
val = chg->typec_status[3] &
@@ -1263,7 +1232,7 @@ int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int rc = 0;
- mutex_lock(&chg->otg_oc_lock);
+ mutex_lock(&chg->vconn_oc_lock);
if (chg->vconn_en)
goto unlock;
@@ -1272,7 +1241,7 @@ int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
chg->vconn_en = true;
unlock:
- mutex_unlock(&chg->otg_oc_lock);
+ mutex_unlock(&chg->vconn_oc_lock);
return rc;
}
@@ -1295,7 +1264,7 @@ int smblib_vconn_regulator_disable(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int rc = 0;
- mutex_lock(&chg->otg_oc_lock);
+ mutex_lock(&chg->vconn_oc_lock);
if (!chg->vconn_en)
goto unlock;
@@ -1304,7 +1273,7 @@ int smblib_vconn_regulator_disable(struct regulator_dev *rdev)
chg->vconn_en = false;
unlock:
- mutex_unlock(&chg->otg_oc_lock);
+ mutex_unlock(&chg->vconn_oc_lock);
return rc;
}
@@ -1313,9 +1282,9 @@ int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int ret;
- mutex_lock(&chg->otg_oc_lock);
+ mutex_lock(&chg->vconn_oc_lock);
ret = chg->vconn_en;
- mutex_unlock(&chg->otg_oc_lock);
+ mutex_unlock(&chg->vconn_oc_lock);
return ret;
}
@@ -1418,13 +1387,6 @@ static int _smblib_vbus_regulator_disable(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int rc;
- if (!chg->external_vconn && chg->vconn_en) {
- smblib_dbg(chg, PR_OTG, "Killing VCONN before disabling OTG\n");
- rc = _smblib_vconn_regulator_disable(rdev);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
- }
-
if (chg->wa_flags & OTG_WA) {
/* set OTG current limit to minimum value */
rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
@@ -1652,6 +1614,7 @@ int smblib_get_prop_batt_health(struct smb_charger *chg,
{
union power_supply_propval pval;
int rc;
+ int effective_fv_uv;
u8 stat;
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
@@ -1670,10 +1633,11 @@ int smblib_get_prop_batt_health(struct smb_charger *chg,
* If Vbatt is within 40mV above Vfloat, then don't
* treat it as overvoltage.
*/
- if (pval.intval >=
- get_effective_result(chg->fv_votable) + 40000) {
+ effective_fv_uv = get_effective_result(chg->fv_votable);
+ if (pval.intval >= effective_fv_uv + 40000) {
val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- smblib_err(chg, "battery over-voltage\n");
+ smblib_err(chg, "battery over-voltage vbat_fg = %duV, fv = %duV\n",
+ pval.intval, effective_fv_uv);
goto done;
}
}
@@ -1927,37 +1891,17 @@ int smblib_rerun_aicl(struct smb_charger *chg)
return rc;
smblib_dbg(chg, PR_MISC, "re-running AICL\n");
- switch (chg->smb_version) {
- case PMI8998_SUBTYPE:
- rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
- &settled_icl_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
- return rc;
- }
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+ &settled_icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+ return rc;
+ }
- vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
- max(settled_icl_ua - chg->param.usb_icl.step_u,
+ vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
+ max(settled_icl_ua - chg->param.usb_icl.step_u,
chg->param.usb_icl.step_u));
- vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
- break;
- case PM660_SUBTYPE:
- /*
- * Use restart_AICL instead of trigger_AICL as it runs the
- * complete AICL instead of starting from the last settled
- * value.
- */
- rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
- RESTART_AICL_BIT, RESTART_AICL_BIT);
- if (rc < 0)
- smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
- rc);
- break;
- default:
- smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
- chg->smb_version);
- return -EINVAL;
- }
+ vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
return 0;
}
@@ -1993,6 +1937,7 @@ static int smblib_dm_pulse(struct smb_charger *chg)
int smblib_dp_dm(struct smb_charger *chg, int val)
{
int target_icl_ua, rc = 0;
+ union power_supply_propval pval;
switch (val) {
case POWER_SUPPLY_DP_DM_DP_PULSE:
@@ -2010,10 +1955,35 @@ int smblib_dp_dm(struct smb_charger *chg, int val)
rc, chg->pulse_cnt);
break;
case POWER_SUPPLY_DP_DM_ICL_DOWN:
- chg->usb_icl_delta_ua -= 100000;
target_icl_ua = get_effective_result(chg->usb_icl_votable);
+ if (target_icl_ua < 0) {
+ /* no client vote, get the ICL from charger */
+ rc = power_supply_get_property(chg->usb_psy,
+ POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't get max current rc=%d\n",
+ rc);
+ return rc;
+ }
+ target_icl_ua = pval.intval;
+ }
+
+ /*
+ * If a client other than SW_QC3_VOTER has since voted on
+ * USB_ICL, reset the accumulated delta and restart the
+ * reduction from the new target.
+ */
+ if (target_icl_ua != get_client_vote(chg->usb_icl_votable,
+ SW_QC3_VOTER))
+ chg->usb_icl_delta_ua = 0;
+
+ chg->usb_icl_delta_ua += 100000;
vote(chg->usb_icl_votable, SW_QC3_VOTER, true,
- target_icl_ua + chg->usb_icl_delta_ua);
+ target_icl_ua - 100000);
+ smblib_dbg(chg, PR_PARALLEL, "ICL DOWN ICL=%d reduction=%d\n",
+ target_icl_ua, chg->usb_icl_delta_ua);
break;
case POWER_SUPPLY_DP_DM_ICL_UP:
default:
@@ -3628,6 +3598,7 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+ vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
/* reset hvdcp voters */
vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
@@ -4033,19 +4004,6 @@ static void smblib_otg_oc_exit(struct smb_charger *chg, bool success)
QUICKSTART_OTG_FASTROLESWAP_BIT, 0);
if (rc < 0)
smblib_err(chg, "Couldn't enable VBUS < 1V check rc=%d\n", rc);
-
- if (!chg->external_vconn && chg->vconn_en) {
- chg->vconn_attempts = 0;
- if (success) {
- rc = _smblib_vconn_regulator_enable(
- chg->vconn_vreg->rdev);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable VCONN rc=%d\n",
- rc);
- } else {
- chg->vconn_en = false;
- }
- }
}
#define MAX_OC_FALLING_TRIES 10
@@ -4134,7 +4092,7 @@ static void smblib_vconn_oc_work(struct work_struct *work)
if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
return;
- mutex_lock(&chg->otg_oc_lock);
+ mutex_lock(&chg->vconn_oc_lock);
rc = _smblib_vconn_regulator_disable(chg->vconn_vreg->rdev);
if (rc < 0) {
smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
@@ -4183,7 +4141,7 @@ static void smblib_vconn_oc_work(struct work_struct *work)
}
unlock:
- mutex_unlock(&chg->otg_oc_lock);
+ mutex_unlock(&chg->vconn_oc_lock);
}
static void smblib_otg_ss_done_work(struct work_struct *work)
@@ -4218,8 +4176,6 @@ static void smblib_icl_change_work(struct work_struct *work)
}
power_supply_changed(chg->usb_main_psy);
- vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
- settled_ua >= USB_WEAK_INPUT_UA, 0);
smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
}
@@ -4315,7 +4271,16 @@ static int smblib_create_votables(struct smb_charger *chg)
smblib_err(chg, "Couldn't find votable PL_DISABLE rc=%d\n", rc);
return rc;
}
- vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+
+ chg->pl_enable_votable_indirect = find_votable("PL_ENABLE_INDIRECT");
+ if (chg->pl_enable_votable_indirect == NULL) {
+ rc = -EINVAL;
+ smblib_err(chg,
+ "Couldn't find votable PL_ENABLE_INDIRECT rc=%d\n",
+ rc);
+ return rc;
+ }
+
vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
@@ -4365,14 +4330,6 @@ static int smblib_create_votables(struct smb_charger *chg)
return rc;
}
- chg->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
- VOTE_SET_ANY,
- smblib_pl_enable_indirect_vote_callback,
- chg);
- if (IS_ERR(chg->pl_enable_votable_indirect)) {
- rc = PTR_ERR(chg->pl_enable_votable_indirect);
- return rc;
- }
chg->hvdcp_disable_votable_indirect = create_votable(
"HVDCP_DISABLE_INDIRECT",
@@ -4448,8 +4405,6 @@ static void smblib_destroy_votables(struct smb_charger *chg)
destroy_votable(chg->awake_votable);
if (chg->chg_disable_votable)
destroy_votable(chg->chg_disable_votable);
- if (chg->pl_enable_votable_indirect)
- destroy_votable(chg->pl_enable_votable_indirect);
if (chg->apsd_disable_votable)
destroy_votable(chg->apsd_disable_votable);
if (chg->hvdcp_hw_inov_dis_votable)
@@ -4479,6 +4434,7 @@ int smblib_init(struct smb_charger *chg)
mutex_init(&chg->lock);
mutex_init(&chg->write_lock);
mutex_init(&chg->otg_oc_lock);
+ mutex_init(&chg->vconn_oc_lock);
INIT_WORK(&chg->bms_update_work, bms_update_work);
INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
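Editorial note: the POWER_SUPPLY_DP_DM_ICL_DOWN case above now falls back to the hardware maximum when no client has voted, resets the accumulated delta when another voter has moved the ICL, and then steps the SW_QC3_VOTER vote down by 100 mA. A plain-C sketch of that bookkeeping follows, with illustrative names and values in microamps.

/*
 * Sketch of the ICL step-down bookkeeping added to smblib_dp_dm() above:
 * keep a cumulative delta, reset it when another voter changed the effective
 * ICL away from our own vote, and step the vote down 100 mA at a time.
 */
#include <stdio.h>

#define ICL_STEP_UA 100000

static int usb_icl_delta_ua;
static int sw_qc3_vote_ua = -1;		/* -1: no vote yet */

static int icl_down(int effective_icl_ua, int hw_max_ua)
{
	int target_icl_ua = effective_icl_ua;

	if (target_icl_ua < 0)		/* no client vote at all */
		target_icl_ua = hw_max_ua;

	/* another voter moved the ICL: restart the reduction */
	if (target_icl_ua != sw_qc3_vote_ua)
		usb_icl_delta_ua = 0;

	usb_icl_delta_ua += ICL_STEP_UA;
	sw_qc3_vote_ua = target_icl_ua - ICL_STEP_UA;
	return sw_qc3_vote_ua;
}

int main(void)
{
	printf("vote after 1st step: %d uA\n", icl_down(-1, 3000000));
	printf("vote after 2nd step: %d uA\n",
	       icl_down(sw_qc3_vote_ua, 3000000));
	return 0;
}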
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 3fedff8897ee..41015d49b31e 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -36,9 +36,7 @@ enum print_reason {
#define PL_USBIN_USBIN_VOTER "PL_USBIN_USBIN_VOTER"
#define USB_PSY_VOTER "USB_PSY_VOTER"
#define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER"
-#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
#define PL_QNOVO_VOTER "PL_QNOVO_VOTER"
-#define USBIN_I_VOTER "USBIN_I_VOTER"
#define USBIN_V_VOTER "USBIN_V_VOTER"
#define CHG_STATE_VOTER "CHG_STATE_VOTER"
#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER"
@@ -65,6 +63,7 @@ enum print_reason {
#define QNOVO_VOTER "QNOVO_VOTER"
#define BATT_PROFILE_VOTER "BATT_PROFILE_VOTER"
#define OTG_DELAY_VOTER "OTG_DELAY_VOTER"
+#define USBIN_I_VOTER "USBIN_I_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -228,7 +227,6 @@ struct smb_charger {
struct smb_iio iio;
int *debug_mask;
enum smb_mode mode;
- bool external_vconn;
struct smb_chg_freq chg_freq;
int smb_version;
int otg_delay_ms;
@@ -238,6 +236,7 @@ struct smb_charger {
struct mutex write_lock;
struct mutex ps_change_lock;
struct mutex otg_oc_lock;
+ struct mutex vconn_oc_lock;
/* power supplies */
struct power_supply *batt_psy;
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index 858ddcc228df..72c697bdcd29 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/regmap.h>
#include <linux/module.h>
#include <linux/notifier.h>
@@ -37,6 +38,7 @@
#define REG_REVISION_2 0x01
#define REG_PERPH_TYPE 0x04
+#define REG_INT_RT_STS 0x10
#define QPNP_LAB_TYPE 0x24
#define QPNP_IBB_TYPE 0x20
@@ -76,8 +78,8 @@
/* LAB register bits definitions */
/* REG_LAB_STATUS1 */
-#define LAB_STATUS1_VREG_OK_MASK BIT(7)
-#define LAB_STATUS1_VREG_OK BIT(7)
+#define LAB_STATUS1_VREG_OK_BIT BIT(7)
+#define LAB_STATUS1_SC_DETECT_BIT BIT(6)
/* REG_LAB_SWIRE_PGM_CTL */
#define LAB_EN_SWIRE_PGM_VOUT BIT(7)
@@ -184,8 +186,8 @@
/* IBB register bits definition */
/* REG_IBB_STATUS1 */
-#define IBB_STATUS1_VREG_OK_MASK BIT(7)
-#define IBB_STATUS1_VREG_OK BIT(7)
+#define IBB_STATUS1_VREG_OK_BIT BIT(7)
+#define IBB_STATUS1_SC_DETECT_BIT BIT(6)
/* REG_IBB_VOLTAGE */
#define IBB_VOLTAGE_OVERRIDE_EN BIT(7)
@@ -553,6 +555,8 @@ struct lab_regulator {
struct mutex lab_mutex;
int lab_vreg_ok_irq;
+ int lab_sc_irq;
+
int curr_volt;
int min_volt;
@@ -569,6 +573,8 @@ struct ibb_regulator {
struct regulator_dev *rdev;
struct mutex ibb_mutex;
+ int ibb_sc_irq;
+
int curr_volt;
int min_volt;
@@ -599,6 +605,9 @@ struct qpnp_labibb {
struct mutex bus_mutex;
enum qpnp_labibb_mode mode;
struct work_struct lab_vreg_ok_work;
+ struct delayed_work sc_err_recovery_work;
+ struct hrtimer sc_err_check_timer;
+ int sc_err_count;
bool standalone;
bool ttw_en;
bool in_ttw_mode;
@@ -2153,7 +2162,7 @@ static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
return;
}
- if (val & LAB_STATUS1_VREG_OK) {
+ if (val & LAB_STATUS1_VREG_OK_BIT) {
raw_notifier_call_chain(&labibb_notifier,
LAB_VREG_OK, NULL);
break;
@@ -2186,6 +2195,74 @@ static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
}
}
+static int qpnp_lab_enable_standalone(struct qpnp_labibb *labibb)
+{
+ int rc;
+ u8 val;
+
+ val = LAB_ENABLE_CTL_EN;
+ rc = qpnp_labibb_write(labibb,
+ labibb->lab_base + REG_LAB_ENABLE_CTL, &val, 1);
+ if (rc < 0) {
+ pr_err("Write register %x failed rc = %d\n",
+ REG_LAB_ENABLE_CTL, rc);
+ return rc;
+ }
+
+ udelay(labibb->lab_vreg.soft_start);
+
+ rc = qpnp_labibb_read(labibb, labibb->lab_base +
+ REG_LAB_STATUS1, &val, 1);
+ if (rc < 0) {
+ pr_err("Read register %x failed rc = %d\n",
+ REG_LAB_STATUS1, rc);
+ return rc;
+ }
+
+ if (!(val & LAB_STATUS1_VREG_OK_BIT)) {
+ pr_err("Can't enable LAB standalone\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qpnp_ibb_enable_standalone(struct qpnp_labibb *labibb)
+{
+ int rc, delay, retries = 10;
+ u8 val;
+
+ rc = qpnp_ibb_set_mode(labibb, IBB_SW_CONTROL_EN);
+ if (rc < 0) {
+ pr_err("Unable to set IBB_MODULE_EN rc = %d\n", rc);
+ return rc;
+ }
+
+ delay = labibb->ibb_vreg.soft_start;
+ while (retries--) {
+ /* Wait for a small period before reading IBB_STATUS1 */
+ usleep_range(delay, delay + 100);
+
+ rc = qpnp_labibb_read(labibb, labibb->ibb_base +
+ REG_IBB_STATUS1, &val, 1);
+ if (rc < 0) {
+ pr_err("Read register %x failed rc = %d\n",
+ REG_IBB_STATUS1, rc);
+ return rc;
+ }
+
+ if (val & IBB_STATUS1_VREG_OK_BIT)
+ break;
+ }
+
+ if (!(val & IBB_STATUS1_VREG_OK_BIT)) {
+ pr_err("Can't enable IBB standalone\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
{
int rc;
@@ -2227,7 +2304,7 @@ static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
labibb->lab_vreg.soft_start, labibb->ibb_vreg.soft_start,
labibb->ibb_vreg.pwrup_dly, dly);
- if (!(val & LAB_STATUS1_VREG_OK)) {
+ if (!(val & LAB_STATUS1_VREG_OK_BIT)) {
pr_err("failed for LAB %x\n", val);
goto err_out;
}
@@ -2244,7 +2321,7 @@ static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
goto err_out;
}
- if (val & IBB_STATUS1_VREG_OK) {
+ if (val & IBB_STATUS1_VREG_OK_BIT) {
enabled = true;
break;
}
@@ -2315,7 +2392,7 @@ static int qpnp_labibb_regulator_disable(struct qpnp_labibb *labibb)
return rc;
}
- if (!(val & IBB_STATUS1_VREG_OK)) {
+ if (!(val & IBB_STATUS1_VREG_OK_BIT)) {
disabled = true;
break;
}
@@ -2344,8 +2421,6 @@ static int qpnp_labibb_regulator_disable(struct qpnp_labibb *labibb)
static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
{
int rc;
- u8 val;
-
struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
if (labibb->sc_detected) {
@@ -2362,34 +2437,14 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
}
if (!labibb->lab_vreg.vreg_enabled && !labibb->swire_control) {
-
if (!labibb->standalone)
return qpnp_labibb_regulator_enable(labibb);
- val = LAB_ENABLE_CTL_EN;
- rc = qpnp_labibb_write(labibb,
- labibb->lab_base + REG_LAB_ENABLE_CTL, &val, 1);
- if (rc < 0) {
- pr_err("qpnp_lab_regulator_enable write register %x failed rc = %d\n",
- REG_LAB_ENABLE_CTL, rc);
- return rc;
- }
-
- udelay(labibb->lab_vreg.soft_start);
-
- rc = qpnp_labibb_read(labibb, labibb->lab_base +
- REG_LAB_STATUS1, &val, 1);
- if (rc < 0) {
- pr_err("qpnp_lab_regulator_enable read register %x failed rc = %d\n",
- REG_LAB_STATUS1, rc);
+ rc = qpnp_lab_enable_standalone(labibb);
+ if (rc) {
+ pr_err("enable lab standalone failed, rc=%d\n", rc);
return rc;
}
-
- if ((val & LAB_STATUS1_VREG_OK_MASK) != LAB_STATUS1_VREG_OK) {
- pr_err("qpnp_lab_regulator_enable failed\n");
- return -EINVAL;
- }
-
labibb->lab_vreg.vreg_enabled = 1;
}
@@ -2434,6 +2489,163 @@ static int qpnp_lab_regulator_is_enabled(struct regulator_dev *rdev)
return labibb->lab_vreg.vreg_enabled;
}
+static int qpnp_labibb_force_enable(struct qpnp_labibb *labibb)
+{
+ int rc;
+
+ if (labibb->skip_2nd_swire_cmd) {
+ rc = qpnp_ibb_ps_config(labibb, false);
+ if (rc < 0) {
+ pr_err("Failed to disable IBB PS rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (!labibb->swire_control) {
+ if (!labibb->standalone)
+ return qpnp_labibb_regulator_enable(labibb);
+
+ rc = qpnp_ibb_enable_standalone(labibb);
+ if (rc < 0) {
+ pr_err("enable ibb standalone failed, rc=%d\n", rc);
+ return rc;
+ }
+ labibb->ibb_vreg.vreg_enabled = 1;
+
+ rc = qpnp_lab_enable_standalone(labibb);
+ if (rc < 0) {
+ pr_err("enable lab standalone failed, rc=%d\n", rc);
+ return rc;
+ }
+ labibb->lab_vreg.vreg_enabled = 1;
+ }
+
+ return 0;
+}
+
+#define SC_ERR_RECOVERY_DELAY_MS 250
+#define SC_ERR_COUNT_INTERVAL_SEC 1
+#define POLLING_SCP_DONE_COUNT 2
+#define POLLING_SCP_DONE_INTERVAL_MS 5
+static irqreturn_t labibb_sc_err_handler(int irq, void *_labibb)
+{
+ int rc;
+ u16 reg;
+ u8 sc_err_mask, val;
+ char *str;
+ struct qpnp_labibb *labibb = (struct qpnp_labibb *)_labibb;
+ bool in_sc_err, lab_en, ibb_en, scp_done = false;
+ int count;
+
+ if (irq == labibb->lab_vreg.lab_sc_irq) {
+ reg = labibb->lab_base + REG_LAB_STATUS1;
+ sc_err_mask = LAB_STATUS1_SC_DETECT_BIT;
+ str = "LAB";
+ } else if (irq == labibb->ibb_vreg.ibb_sc_irq) {
+ reg = labibb->ibb_base + REG_IBB_STATUS1;
+ sc_err_mask = IBB_STATUS1_SC_DETECT_BIT;
+ str = "IBB";
+ } else {
+ return IRQ_HANDLED;
+ }
+
+ rc = qpnp_labibb_read(labibb, reg, &val, 1);
+ if (rc < 0) {
+ pr_err("Read 0x%x failed, rc=%d\n", reg, rc);
+ return IRQ_HANDLED;
+ }
+ pr_debug("%s SC error triggered! %s_STATUS1 = %d\n", str, str, val);
+
+ in_sc_err = !!(val & sc_err_mask);
+
+ /*
+ * An SC fault triggers PBS to disable the regulators for
+ * protection, which clears the SC_DETECT status so the fault
+ * can no longer be read here directly. If the LAB/IBB
+ * regulators are enabled in the driver but disabled in
+ * hardware, an SC fault has occurred and its SCP handling
+ * has already been completed by PBS.
+ */
+ if (!in_sc_err) {
+ count = POLLING_SCP_DONE_COUNT;
+ do {
+ reg = labibb->lab_base + REG_LAB_ENABLE_CTL;
+ rc = qpnp_labibb_read(labibb, reg, &val, 1);
+ if (rc < 0) {
+ pr_err("Read 0x%x failed, rc=%d\n", reg, rc);
+ return IRQ_HANDLED;
+ }
+ lab_en = !!(val & LAB_ENABLE_CTL_EN);
+
+ reg = labibb->ibb_base + REG_IBB_ENABLE_CTL;
+ rc = qpnp_labibb_read(labibb, reg, &val, 1);
+ if (rc < 0) {
+ pr_err("Read 0x%x failed, rc=%d\n", reg, rc);
+ return IRQ_HANDLED;
+ }
+ ibb_en = !!(val & IBB_ENABLE_CTL_MODULE_EN);
+ if (lab_en || ibb_en)
+ msleep(POLLING_SCP_DONE_INTERVAL_MS);
+ else
+ break;
+ } while ((lab_en || ibb_en) && count--);
+
+ if (labibb->lab_vreg.vreg_enabled
+ && labibb->ibb_vreg.vreg_enabled
+ && !lab_en && !ibb_en) {
+ pr_debug("LAB/IBB has been disabled by SCP\n");
+ scp_done = true;
+ }
+ }
+
+ if (in_sc_err || scp_done) {
+ if (hrtimer_active(&labibb->sc_err_check_timer) ||
+ hrtimer_callback_running(&labibb->sc_err_check_timer)) {
+ labibb->sc_err_count++;
+ } else {
+ labibb->sc_err_count = 1;
+ hrtimer_start(&labibb->sc_err_check_timer,
+ ktime_set(SC_ERR_COUNT_INTERVAL_SEC, 0),
+ HRTIMER_MODE_REL);
+ }
+ schedule_delayed_work(&labibb->sc_err_recovery_work,
+ msecs_to_jiffies(SC_ERR_RECOVERY_DELAY_MS));
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define SC_FAULT_COUNT_MAX 4
+static enum hrtimer_restart labibb_check_sc_err_count(struct hrtimer *timer)
+{
+ struct qpnp_labibb *labibb = container_of(timer,
+ struct qpnp_labibb, sc_err_check_timer);
+ /*
+ * If the SC fault triggers more than 4 times in 1 second,
+ * disable the IRQs and leave them disabled.
+ */
+ if (labibb->sc_err_count > SC_FAULT_COUNT_MAX) {
+ disable_irq(labibb->lab_vreg.lab_sc_irq);
+ disable_irq(labibb->ibb_vreg.ibb_sc_irq);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static void labibb_sc_err_recovery_work(struct work_struct *work)
+{
+ struct qpnp_labibb *labibb = container_of(work, struct qpnp_labibb,
+ sc_err_recovery_work.work);
+ int rc;
+
+ labibb->ibb_vreg.vreg_enabled = 0;
+ labibb->lab_vreg.vreg_enabled = 0;
+ rc = qpnp_labibb_force_enable(labibb);
+ if (rc < 0)
+ pr_err("force enable labibb failed, rc=%d\n", rc);
+
+}
+
static int qpnp_lab_regulator_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned int *selector)
{
@@ -2495,7 +2707,7 @@ static int qpnp_skip_swire_command(struct qpnp_labibb *labibb)
pr_err("Failed to read ibb_status1 reg rc=%d\n", rc);
return rc;
}
- if ((reg & IBB_STATUS1_VREG_OK_MASK) == IBB_STATUS1_VREG_OK)
+ if (reg & IBB_STATUS1_VREG_OK_BIT)
break;
/* poll delay */
@@ -2829,6 +3041,18 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
}
}
+ if (labibb->lab_vreg.lab_sc_irq != -EINVAL) {
+ rc = devm_request_threaded_irq(labibb->dev,
+ labibb->lab_vreg.lab_sc_irq, NULL,
+ labibb_sc_err_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "lab-sc-err", labibb);
+ if (rc) {
+ pr_err("Failed to register 'lab-sc-err' irq rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
rc = qpnp_labibb_read(labibb, labibb->lab_base + REG_LAB_MODULE_RDY,
&val, 1);
if (rc < 0) {
@@ -3287,8 +3511,7 @@ static int qpnp_ibb_dt_init(struct qpnp_labibb *labibb,
static int qpnp_ibb_regulator_enable(struct regulator_dev *rdev)
{
- int rc, delay, retries = 10;
- u8 val;
+ int rc = 0;
struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
if (labibb->sc_detected) {
@@ -3297,40 +3520,17 @@ static int qpnp_ibb_regulator_enable(struct regulator_dev *rdev)
}
if (!labibb->ibb_vreg.vreg_enabled && !labibb->swire_control) {
-
if (!labibb->standalone)
return qpnp_labibb_regulator_enable(labibb);
- rc = qpnp_ibb_set_mode(labibb, IBB_SW_CONTROL_EN);
+ rc = qpnp_ibb_enable_standalone(labibb);
if (rc < 0) {
- pr_err("Unable to set IBB_MODULE_EN rc = %d\n", rc);
+ pr_err("enable ibb standalone failed, rc=%d\n", rc);
return rc;
}
-
- delay = labibb->ibb_vreg.soft_start;
- while (retries--) {
- /* Wait for a small period before reading IBB_STATUS1 */
- usleep_range(delay, delay + 100);
-
- rc = qpnp_labibb_read(labibb, labibb->ibb_base +
- REG_IBB_STATUS1, &val, 1);
- if (rc < 0) {
- pr_err("qpnp_ibb_regulator_enable read register %x failed rc = %d\n",
- REG_IBB_STATUS1, rc);
- return rc;
- }
-
- if (val & IBB_STATUS1_VREG_OK)
- break;
- }
-
- if (!(val & IBB_STATUS1_VREG_OK)) {
- pr_err("qpnp_ibb_regulator_enable failed\n");
- return -EINVAL;
- }
-
labibb->ibb_vreg.vreg_enabled = 1;
}
+
return 0;
}
@@ -3379,7 +3579,6 @@ static int qpnp_ibb_regulator_set_voltage(struct regulator_dev *rdev,
return rc;
}
-
static int qpnp_ibb_regulator_get_voltage(struct regulator_dev *rdev)
{
struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
@@ -3601,6 +3800,19 @@ static int register_qpnp_ibb_regulator(struct qpnp_labibb *labibb,
labibb->ibb_vreg.pwrdn_dly = 0;
}
+ if (labibb->ibb_vreg.ibb_sc_irq != -EINVAL) {
+ rc = devm_request_threaded_irq(labibb->dev,
+ labibb->ibb_vreg.ibb_sc_irq, NULL,
+ labibb_sc_err_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "ibb-sc-err", labibb);
+ if (rc) {
+ pr_err("Failed to register 'ibb-sc-err' irq rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
rc = qpnp_labibb_read(labibb, labibb->ibb_base + REG_IBB_MODULE_RDY,
&val, 1);
if (rc < 0) {
@@ -3674,15 +3886,39 @@ static int register_qpnp_ibb_regulator(struct qpnp_labibb *labibb,
static int qpnp_lab_register_irq(struct device_node *child,
struct qpnp_labibb *labibb)
{
+ int rc = 0;
+
if (is_lab_vreg_ok_irq_available(labibb)) {
- labibb->lab_vreg.lab_vreg_ok_irq =
- of_irq_get_byname(child, "lab-vreg-ok");
- if (labibb->lab_vreg.lab_vreg_ok_irq < 0) {
+ rc = of_irq_get_byname(child, "lab-vreg-ok");
+ if (rc < 0) {
pr_err("Invalid lab-vreg-ok irq\n");
- return -EINVAL;
+ return rc;
}
+ labibb->lab_vreg.lab_vreg_ok_irq = rc;
}
+ labibb->lab_vreg.lab_sc_irq = -EINVAL;
+ rc = of_irq_get_byname(child, "lab-sc-err");
+ if (rc < 0)
+ pr_debug("Unable to get lab-sc-err, rc = %d\n", rc);
+ else
+ labibb->lab_vreg.lab_sc_irq = rc;
+
+ return 0;
+}
+
+static int qpnp_ibb_register_irq(struct device_node *child,
+ struct qpnp_labibb *labibb)
+{
+ int rc;
+
+ labibb->ibb_vreg.ibb_sc_irq = -EINVAL;
+ rc = of_irq_get_byname(child, "ibb-sc-err");
+ if (rc < 0)
+ pr_debug("Unable to get ibb-sc-err, rc = %d\n", rc);
+ else
+ labibb->ibb_vreg.ibb_sc_irq = rc;
+
return 0;
}
@@ -3882,6 +4118,7 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
case QPNP_IBB_TYPE:
labibb->ibb_base = base;
labibb->ibb_dig_major = revision;
+ qpnp_ibb_register_irq(child, labibb);
rc = register_qpnp_ibb_regulator(labibb, child);
if (rc < 0)
goto fail_registration;
@@ -3905,6 +4142,11 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
}
INIT_WORK(&labibb->lab_vreg_ok_work, qpnp_lab_vreg_notifier_work);
+ INIT_DELAYED_WORK(&labibb->sc_err_recovery_work,
+ labibb_sc_err_recovery_work);
+ hrtimer_init(&labibb->sc_err_check_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ labibb->sc_err_check_timer.function = labibb_check_sc_err_count;
dev_set_drvdata(&pdev->dev, labibb);
pr_info("LAB/IBB registered successfully, lab_vreg enable=%d ibb_vreg enable=%d swire_control=%d\n",
labibb->lab_vreg.vreg_enabled,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 64eed87d34a8..433c5e3d5733 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1637,7 +1637,7 @@ config ATARI_SCSI_RESET_BOOT
config MAC_SCSI
tristate "Macintosh NCR5380 SCSI"
- depends on MAC && SCSI=y
+ depends on MAC && SCSI
select SCSI_SPI_ATTRS
help
This is the NCR 5380 SCSI controller included on most of the 68030
diff --git a/drivers/soc/qcom/common_log.c b/drivers/soc/qcom/common_log.c
index f001e820b797..1e8744b41e4c 100644
--- a/drivers/soc/qcom/common_log.c
+++ b/drivers/soc/qcom/common_log.c
@@ -17,6 +17,7 @@
#include <linux/kallsyms.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
+#include <linux/async.h>
#include <soc/qcom/memory_dump.h>
#include <soc/qcom/minidump.h>
#include <asm/sections.h>
@@ -255,7 +256,7 @@ static void __init register_kernel_sections(void)
}
}
-static int __init msm_common_log_init(void)
+static void __init async_common_log_init(void *data, async_cookie_t cookie)
{
register_kernel_sections();
common_log_register_log_buf();
@@ -263,6 +264,12 @@ static int __init msm_common_log_init(void)
register_pmic_dump();
register_vsense_dump();
register_rpm_dump();
+}
+
+static int __init msm_common_log_init(void)
+{
+ /* Initialize asynchronously to reduce boot time */
+ async_schedule(async_common_log_init, NULL);
return 0;
}
late_initcall(msm_common_log_init);
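
The hunk above moves the registration work off the synchronous init path with the kernel's async infrastructure. A hedged, generic sketch of the same idiom (all names are illustrative, not from this driver); work scheduled from an initcall is synchronized before init memory is freed, so __init callbacks are safe here:

	#include <linux/async.h>
	#include <linux/init.h>

	static void __init my_slow_setup(void *data, async_cookie_t cookie)
	{
		/* long-running registration runs off the boot-critical path */
		do_expensive_registration();	/* hypothetical helper */
	}

	static int __init my_driver_init(void)
	{
		async_schedule(my_slow_setup, NULL);	/* returns immediately */
		return 0;
	}
	late_initcall(my_driver_init);
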
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 72f5829d1eb6..561b9074f2ee 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -232,7 +232,9 @@ static DEFINE_MUTEX(edge_list_lock_lhd0);
* @req_rate_kBps: Current QoS request by the channel.
* @tx_intent_cnt: Intent count to transmit soon in future.
* @tx_cnt: Packets to be picked by tx scheduler.
- */
+ * @rt_vote_on: Number of times RT vote on is called.
+ * @rt_vote_off: Number of times RT vote off is called.
+ */
struct channel_ctx {
struct rwref_lock ch_state_lhb2;
struct list_head port_list_node;
@@ -311,6 +313,9 @@ struct channel_ctx {
unsigned long req_rate_kBps;
uint32_t tx_intent_cnt;
uint32_t tx_cnt;
+
+ uint32_t rt_vote_on;
+ uint32_t rt_vote_off;
uint32_t magic_number;
};
@@ -2418,6 +2423,25 @@ static int dummy_power_unvote(struct glink_transport_if *if_ptr)
}
/**
+ * dummy_rx_rt_vote() - Dummy RX Realtime thread vote
+ * @if_ptr: The transport to transmit on.
+ *
+ */
+static int dummy_rx_rt_vote(struct glink_transport_if *if_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_rx_rt_unvote() - Dummy RX Realtime thread unvote
+ * @if_ptr: The transport to transmit on.
+ */
+static int dummy_rx_rt_unvote(struct glink_transport_if *if_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* notif_if_up_all_xprts() - Check and notify existing transport state if up
* @notif_info: Data structure containing transport information to be notified.
*
@@ -3543,6 +3567,61 @@ unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
}
EXPORT_SYMBOL(glink_qos_get_ramp_time);
+
+/**
+ * glink_start_rx_rt() - Vote for RT thread priority on RX.
+ * @handle:	Channel handle for which transactions are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_start_rx_rt(void *handle)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
+
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ glink_put_ch_ctx(ctx, false);
+ return -EBUSY;
+ }
+ ret = ctx->transport_ptr->ops->rx_rt_vote(ctx->transport_ptr->ops);
+ ctx->rt_vote_on++;
+ GLINK_INFO_CH(ctx, "%s: Voting RX Realtime Thread %d", __func__, ret);
+ glink_put_ch_ctx(ctx, false);
+ return ret;
+}
+
+/**
+ * glink_end_rx_rt() - Remove the vote for RT thread priority on RX.
+ * @handle:	Channel handle for which transactions are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_end_rx_rt(void *handle)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
+
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ glink_put_ch_ctx(ctx, false);
+ return -EBUSY;
+ }
+ ret = ctx->transport_ptr->ops->rx_rt_unvote(ctx->transport_ptr->ops);
+ ctx->rt_vote_off++;
+ GLINK_INFO_CH(ctx, "%s: Unvoting RX Realtime Thread %d", __func__, ret);
+ glink_put_ch_ctx(ctx, false);
+ return ret;
+}
+
/**
* glink_rpm_rx_poll() - Poll and receive any available events
* @handle: Channel handle in which this operation is performed.
@@ -3950,6 +4029,10 @@ int glink_core_register_transport(struct glink_transport_if *if_ptr,
if_ptr->power_vote = dummy_power_vote;
if (!if_ptr->power_unvote)
if_ptr->power_unvote = dummy_power_unvote;
+ if (!if_ptr->rx_rt_vote)
+ if_ptr->rx_rt_vote = dummy_rx_rt_vote;
+ if (!if_ptr->rx_rt_unvote)
+ if_ptr->rx_rt_unvote = dummy_rx_rt_unvote;
xprt_ptr->capabilities = 0;
xprt_ptr->ops = if_ptr;
spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
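
A hedged client-side sketch (not part of this patch) of how the two new exports might be used: a consumer expecting a latency-sensitive burst brackets its receive loop with glink_start_rx_rt()/glink_end_rx_rt(). Both calls need a fully opened channel and may return -EOPNOTSUPP when the transport only registers the dummy handlers; process_pending_rx() is a hypothetical consumer loop:

	static void drain_rx_burst(void *handle)
	{
		/* best effort: the transport may not implement the RT hooks */
		if (glink_start_rx_rt(handle))
			pr_debug("RX RT vote not applied\n");

		process_pending_rx(handle);	/* hypothetical consumer */

		if (glink_end_rx_rt(handle))
			pr_debug("RX RT unvote not applied\n");
	}
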
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 3f969234b705..168db46084df 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -182,6 +182,8 @@ struct mailbox_config_info {
* @deferred_cmds: List of deferred commands that need to be
* processed in process context.
* @deferred_cmds_cnt: Number of deferred commands in queue.
+ * @rt_vote_lock: Serialize access to RT rx votes
+ * @rt_votes: Vote count for RT rx thread priority
* @num_pw_states: Size of @ramp_time_us.
* @ramp_time_us: Array of ramp times in microseconds where array
* index position represents a power state.
@@ -221,6 +223,8 @@ struct edge_info {
spinlock_t rx_lock;
struct list_head deferred_cmds;
uint32_t deferred_cmds_cnt;
+ spinlock_t rt_vote_lock;
+ uint32_t rt_votes;
uint32_t num_pw_states;
unsigned long *ramp_time_us;
struct mailbox_config_info *mailbox;
@@ -2093,6 +2097,52 @@ static int power_unvote(struct glink_transport_if *if_ptr)
}
/**
+ * rx_rt_vote() - Increment an RX thread RT vote
+ * @if_ptr:	The transport interface on which the RT vote is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int rx_rt_vote(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+ struct sched_param param = { .sched_priority = 1 };
+ int ret = 0;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->rt_vote_lock, flags);
+ if (!einfo->rt_votes)
+ ret = sched_setscheduler_nocheck(einfo->task, SCHED_FIFO,
+ &param);
+ einfo->rt_votes++;
+ spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
+ return ret;
+}
+
+/**
+ * rx_rt_unvote() - Remove an RX thread RT vote
+ * @if_ptr:	The transport interface on which the RT vote is removed.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int rx_rt_unvote(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+ struct sched_param param = { .sched_priority = 0 };
+ int ret = 0;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->rt_vote_lock, flags);
+ einfo->rt_votes--;
+ if (!einfo->rt_votes)
+ ret = sched_setscheduler_nocheck(einfo->task, SCHED_NORMAL,
+ &param);
+ spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
+ return ret;
+}
+
+/**
* negotiate_features_v1() - determine what features of a version can be used
* @if_ptr: The transport for which features are negotiated for.
* @version: The version negotiated.
@@ -2137,6 +2187,8 @@ static void init_xprt_if(struct edge_info *einfo)
einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
einfo->xprt_if.power_vote = power_vote;
einfo->xprt_if.power_unvote = power_unvote;
+ einfo->xprt_if.rx_rt_vote = rx_rt_vote;
+ einfo->xprt_if.rx_rt_unvote = rx_rt_unvote;
}
/**
@@ -2310,6 +2362,8 @@ static int glink_smem_native_probe(struct platform_device *pdev)
init_srcu_struct(&einfo->use_ref);
spin_lock_init(&einfo->rx_lock);
INIT_LIST_HEAD(&einfo->deferred_cmds);
+ spin_lock_init(&einfo->rt_vote_lock);
+ einfo->rt_votes = 0;
mutex_lock(&probe_lock);
if (edge_infos[einfo->remote_proc_id]) {
diff --git a/drivers/soc/qcom/glink_xprt_if.h b/drivers/soc/qcom/glink_xprt_if.h
index f4d5a3b303db..47c15807e379 100644
--- a/drivers/soc/qcom/glink_xprt_if.h
+++ b/drivers/soc/qcom/glink_xprt_if.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -141,6 +141,8 @@ struct glink_transport_if {
struct glink_transport_if *if_ptr, uint32_t state);
int (*power_vote)(struct glink_transport_if *if_ptr, uint32_t state);
int (*power_unvote)(struct glink_transport_if *if_ptr);
+ int (*rx_rt_vote)(struct glink_transport_if *if_ptr);
+ int (*rx_rt_unvote)(struct glink_transport_if *if_ptr);
/*
* Keep data pointers at the end of the structure after all function
* pointer to allow for in-place initialization.
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 8c242bc7a702..43d954a0f7c7 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -366,6 +366,8 @@ static struct icnss_priv {
bool bypass_s1_smmu;
} *penv;
+static enum cnss_cc_src cnss_cc_source = CNSS_SOURCE_CORE;
+
#ifdef CONFIG_ICNSS_DEBUG
static void icnss_ignore_qmi_timeout(bool ignore)
{
@@ -939,6 +941,18 @@ static int icnss_hw_power_off(struct icnss_priv *priv)
return ret;
}
+void cnss_set_cc_source(enum cnss_cc_src cc_source)
+{
+ cnss_cc_source = cc_source;
+}
+EXPORT_SYMBOL(cnss_set_cc_source);
+
+enum cnss_cc_src cnss_get_cc_source(void)
+{
+ return cnss_cc_source;
+}
+EXPORT_SYMBOL(cnss_get_cc_source);
+
int icnss_power_on(struct device *dev)
{
struct icnss_priv *priv = dev_get_drvdata(dev);
@@ -3971,6 +3985,9 @@ static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
return -EINVAL;
+ kfree(priv->diag_reg_read_buf);
+ priv->diag_reg_read_buf = NULL;
+
reg_buf = kzalloc(data_len, GFP_KERNEL);
if (!reg_buf)
return -ENOMEM;
@@ -4004,12 +4021,13 @@ static const struct file_operations icnss_regread_fops = {
.llseek = seq_lseek,
};
+#ifdef CONFIG_ICNSS_DEBUG
static int icnss_debugfs_create(struct icnss_priv *priv)
{
int ret = 0;
struct dentry *root_dentry;
- root_dentry = debugfs_create_dir("icnss", 0);
+ root_dentry = debugfs_create_dir("icnss", NULL);
if (IS_ERR(root_dentry)) {
ret = PTR_ERR(root_dentry);
@@ -4019,19 +4037,40 @@ static int icnss_debugfs_create(struct icnss_priv *priv)
priv->root_dentry = root_dentry;
- debugfs_create_file("fw_debug", 0644, root_dentry, priv,
+ debugfs_create_file("fw_debug", 0600, root_dentry, priv,
&icnss_fw_debug_fops);
- debugfs_create_file("stats", 0644, root_dentry, priv,
+ debugfs_create_file("stats", 0600, root_dentry, priv,
&icnss_stats_fops);
debugfs_create_file("reg_read", 0600, root_dentry, priv,
&icnss_regread_fops);
- debugfs_create_file("reg_write", 0644, root_dentry, priv,
+ debugfs_create_file("reg_write", 0600, root_dentry, priv,
&icnss_regwrite_fops);
out:
return ret;
}
+#else
+static int icnss_debugfs_create(struct icnss_priv *priv)
+{
+ int ret = 0;
+ struct dentry *root_dentry;
+
+ root_dentry = debugfs_create_dir("icnss", NULL);
+
+ if (IS_ERR(root_dentry)) {
+ ret = PTR_ERR(root_dentry);
+ icnss_pr_err("Unable to create debugfs %d\n", ret);
+ return ret;
+ }
+
+ priv->root_dentry = root_dentry;
+
+ debugfs_create_file("stats", 0600, root_dentry, priv,
+ &icnss_stats_fops);
+ return 0;
+}
+#endif
static void icnss_debugfs_destroy(struct icnss_priv *priv)
{
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
index 88ba18653cf5..8db3a62a1263 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, 2014-2015, The Linux Foundation. All rights
+/* Copyright (c) 2010-2012, 2014-2015, 2017 The Linux Foundation. All rights
* reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -38,6 +38,7 @@
static struct dentry *clients;
static struct dentry *dir;
static DEFINE_MUTEX(msm_bus_dbg_fablist_lock);
+static DEFINE_RT_MUTEX(msm_bus_dbg_cllist_lock);
struct msm_bus_dbg_state {
uint32_t cl;
uint8_t enable;
@@ -289,7 +290,9 @@ static ssize_t client_data_read(struct file *file, char __user *buf,
struct msm_bus_cldata *cldata = NULL;
const struct msm_bus_client_handle *handle = file->private_data;
int found = 0;
+ ssize_t ret;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if ((cldata->clid == cl) ||
(cldata->handle && (cldata->handle == handle))) {
@@ -298,12 +301,17 @@ static ssize_t client_data_read(struct file *file, char __user *buf,
}
}
- if (!found)
+ if (!found) {
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return 0;
+ }
bsize = cldata->size;
- return simple_read_from_buffer(buf, count, ppos,
+ ret = simple_read_from_buffer(buf, count, ppos,
cldata->buffer, bsize);
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+ return ret;
}
static int client_data_open(struct inode *inode, struct file *file)
@@ -339,7 +347,9 @@ int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
return -ENOMEM;
}
cldata->handle = pdata;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_add_tail(&cldata->list, &cl_list);
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return 0;
}
@@ -352,6 +362,7 @@ int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
bool found = false;
char *buf = NULL;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->handle == pdata) {
found = true;
@@ -359,12 +370,15 @@ int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
}
}
- if (!found)
+ if (!found) {
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return -ENOENT;
+ }
if (cldata->file == NULL) {
if (pdata->name == NULL) {
MSM_BUS_DBG("Client doesn't have a name\n");
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return -EINVAL;
}
cldata->file = debugfs_create_file(pdata->name, S_IRUGO,
@@ -393,6 +407,7 @@ int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", ib);
i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
cldata->size = i;
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
pdata->name, pdata->mas, pdata->slv, ab, ib);
@@ -404,6 +419,7 @@ void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata)
{
struct msm_bus_cldata *cldata = NULL;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->handle == pdata) {
debugfs_remove(cldata->file);
@@ -412,6 +428,7 @@ void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata)
break;
}
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
}
static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata,
@@ -429,7 +446,9 @@ static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata,
cldata->clid = clid;
cldata->file = file;
cldata->size = 0;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_add_tail(&cldata->list, &cl_list);
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return 0;
}
@@ -437,6 +456,7 @@ static void msm_bus_dbg_free_client(uint32_t clid)
{
struct msm_bus_cldata *cldata = NULL;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->clid == clid) {
debugfs_remove(cldata->file);
@@ -445,6 +465,7 @@ static void msm_bus_dbg_free_client(uint32_t clid)
break;
}
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
}
static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
@@ -456,6 +477,7 @@ static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
struct timespec ts;
int found = 0;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->clid == clid) {
found = 1;
@@ -463,11 +485,14 @@ static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
}
}
- if (!found)
+ if (!found) {
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return -ENOENT;
+ }
if (cldata->file == NULL) {
if (pdata->name == NULL) {
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
MSM_BUS_DBG("Client doesn't have a name\n");
return -EINVAL;
}
@@ -515,19 +540,9 @@ static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
cldata->index = index;
cldata->size = i;
- return i;
-}
-
-static int msm_bus_dbg_update_request(struct msm_bus_cldata *cldata, int index)
-{
- int ret = 0;
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
- if ((index < 0) || (index > cldata->pdata->num_usecases)) {
- MSM_BUS_DBG("Invalid index!\n");
- return -EINVAL;
- }
- ret = msm_bus_scale_client_update_request(cldata->clid, index);
- return ret;
+ return i;
}
static ssize_t msm_bus_dbg_update_request_write(struct file *file,
@@ -539,19 +554,26 @@ static ssize_t msm_bus_dbg_update_request_write(struct file *file,
char *chid;
char *buf = kmalloc((sizeof(char) * (cnt + 1)), GFP_KERNEL);
int found = 0;
+ uint32_t clid;
+ ssize_t res = cnt;
if (!buf || IS_ERR(buf)) {
MSM_BUS_ERR("Memory allocation for buffer failed\n");
return -ENOMEM;
}
- if (cnt == 0)
- return 0;
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
+ if (cnt == 0) {
+ res = 0;
+ goto out;
+ }
+ if (copy_from_user(buf, ubuf, cnt)) {
+ res = -EFAULT;
+ goto out;
+ }
buf[cnt] = '\0';
chid = buf;
MSM_BUS_DBG("buffer: %s\n size: %zu\n", buf, sizeof(ubuf));
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (strnstr(chid, cldata->pdata->name, cnt)) {
found = 1;
@@ -562,21 +584,35 @@ static ssize_t msm_bus_dbg_update_request_write(struct file *file,
if (ret) {
MSM_BUS_DBG("Index conversion"
" failed\n");
- return -EFAULT;
+ rt_mutex_unlock(
+ &msm_bus_dbg_cllist_lock);
+ res = -EFAULT;
+ goto out;
}
} else {
MSM_BUS_DBG("Error parsing input. Index not"
" found\n");
found = 0;
}
+ if ((index < 0) ||
+ (index > cldata->pdata->num_usecases)) {
+ MSM_BUS_DBG("Invalid index!\n");
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+ res = -EINVAL;
+ goto out;
+ }
+ clid = cldata->clid;
break;
}
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
if (found)
- msm_bus_dbg_update_request(cldata, index);
+ msm_bus_scale_client_update_request(clid, index);
+
+out:
kfree(buf);
- return cnt;
+ return res;
}
/**
@@ -599,8 +635,10 @@ static ssize_t fabric_data_read(struct file *file, char __user *buf,
break;
}
}
- if (!found)
+ if (!found) {
+ mutex_unlock(&msm_bus_dbg_fablist_lock);
return -ENOENT;
+ }
bsize = fablist->size;
ret = simple_read_from_buffer(buf, count, ppos,
fablist->buffer, bsize);
@@ -689,8 +727,10 @@ static int msm_bus_dbg_fill_fab_buffer(const char *fabname,
break;
}
}
- if (!found)
+ if (!found) {
+ mutex_unlock(&msm_bus_dbg_fablist_lock);
return -ENOENT;
+ }
if (fablist->file == NULL) {
MSM_BUS_DBG("Fabric dbg entry does not exist\n");
@@ -741,6 +781,8 @@ static ssize_t msm_bus_dbg_dump_clients_read(struct file *file,
"\nDumping curent client votes to trace log\n");
if (*ppos)
goto exit_dump_clients_read;
+
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (IS_ERR_OR_NULL(cldata->pdata))
continue;
@@ -756,6 +798,7 @@ static ssize_t msm_bus_dbg_dump_clients_read(struct file *file,
cldata->pdata->active_only);
}
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
exit_dump_clients_read:
return simple_read_from_buffer(buf, count, ppos, msg, cnt);
}
@@ -880,6 +923,7 @@ static int __init msm_bus_debugfs_init(void)
goto err;
}
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->pdata) {
if (cldata->pdata->name == NULL) {
@@ -899,6 +943,7 @@ static int __init msm_bus_debugfs_init(void)
&client_data_fops);
}
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
if (debugfs_create_file("dump_clients", S_IRUGO | S_IWUSR,
clients, NULL, &msm_bus_dbg_dump_clients_fops) == NULL)
@@ -911,6 +956,7 @@ static int __init msm_bus_debugfs_init(void)
if (fablist->file == NULL) {
MSM_BUS_DBG("Cannot create files for commit data\n");
kfree(rules_buf);
+ mutex_unlock(&msm_bus_dbg_fablist_lock);
goto err;
}
}
@@ -930,10 +976,14 @@ static void __exit msm_bus_dbg_teardown(void)
struct msm_bus_cldata *cldata = NULL, *cldata_temp;
debugfs_remove_recursive(dir);
+
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) {
list_del(&cldata->list);
kfree(cldata);
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
mutex_lock(&msm_bus_dbg_fablist_lock);
list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) {
list_del(&fablist->list);
diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c
index 7f03ce4518a6..1046af031838 100644
--- a/drivers/soc/qcom/msm_performance.c
+++ b/drivers/soc/qcom/msm_performance.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -309,7 +309,7 @@ static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
static int get_managed_cpus(char *buf, const struct kernel_param *kp)
{
int i, cnt = 0, total_cnt = 0;
- char tmp[MAX_LENGTH_CPU_STRING];
+ char tmp[MAX_LENGTH_CPU_STRING] = "";
if (!clusters_inited)
return cnt;
@@ -347,7 +347,7 @@ device_param_cb(managed_cpus, &param_ops_managed_cpus, NULL, 0644);
static int get_managed_online_cpus(char *buf, const struct kernel_param *kp)
{
int i, cnt = 0, total_cnt = 0;
- char tmp[MAX_LENGTH_CPU_STRING];
+ char tmp[MAX_LENGTH_CPU_STRING] = "";
struct cpumask tmp_mask;
struct cluster *i_cl;
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 3415338a1294..ed8006cacc08 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -66,6 +66,7 @@ static int proxy_timeout_ms = -1;
module_param(proxy_timeout_ms, int, S_IRUGO | S_IWUSR);
static bool disable_timeouts;
+static const char firmware_error_msg[] = "firmware_error\n";
/**
* struct pil_mdt - Representation of <name>.mdt file in memory
* @hdr: ELF32 header
@@ -673,12 +674,14 @@ static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
if (ret < 0) {
pil_err(desc, "Failed to locate blob %s or blob is too big(rc:%d)\n",
fw_name, ret);
+ subsys_set_error(desc->subsys_dev, firmware_error_msg);
return ret;
}
if (ret != seg->filesz) {
pil_err(desc, "Blob size %u doesn't match %lu\n",
ret, seg->filesz);
+ subsys_set_error(desc->subsys_dev, firmware_error_msg);
return -EPERM;
}
ret = 0;
@@ -707,9 +710,11 @@ static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
if (desc->ops->verify_blob) {
ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
- if (ret)
+ if (ret) {
pil_err(desc, "Blob%u failed verification(rc:%d)\n",
num, ret);
+ subsys_set_error(desc->subsys_dev, firmware_error_msg);
+ }
}
return ret;
@@ -790,6 +795,7 @@ int pil_boot(struct pil_desc *desc)
if (fw->size < sizeof(*ehdr)) {
pil_err(desc, "Not big enough to be an elf header\n");
+ subsys_set_error(desc->subsys_dev, firmware_error_msg);
ret = -EIO;
goto release_fw;
}
@@ -799,18 +805,21 @@ int pil_boot(struct pil_desc *desc)
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
pil_err(desc, "Not an elf header\n");
+ subsys_set_error(desc->subsys_dev, firmware_error_msg);
ret = -EIO;
goto release_fw;
}
if (ehdr->e_phnum == 0) {
pil_err(desc, "No loadable segments\n");
+ subsys_set_error(desc->subsys_dev, firmware_error_msg);
ret = -EIO;
goto release_fw;
}
if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
sizeof(struct elf32_hdr) > fw->size) {
pil_err(desc, "Program headers not within mdt\n");
+ subsys_set_error(desc->subsys_dev, firmware_error_msg);
ret = -EIO;
goto release_fw;
}
@@ -830,6 +839,7 @@ int pil_boot(struct pil_desc *desc)
ret = desc->ops->init_image(desc, fw->data, fw->size);
if (ret) {
pil_err(desc, "Initializing image failed(rc:%d)\n", ret);
+ subsys_set_error(desc->subsys_dev, firmware_error_msg);
goto err_boot;
}
@@ -887,6 +897,7 @@ int pil_boot(struct pil_desc *desc)
ret = desc->ops->auth_and_reset(desc);
if (ret) {
pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
+ subsys_set_error(desc->subsys_dev, firmware_error_msg);
goto err_auth_and_reset;
}
pil_info(desc, "Brought out of reset\n");
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index 9521cf726069..0cd2aeae1edd 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -44,6 +44,7 @@ struct pil_desc {
const char *name;
const char *fw_name;
struct device *dev;
+ struct subsys_device *subsys_dev;
const struct pil_reset_ops *ops;
struct module *owner;
unsigned long proxy_timeout;
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 793edc5b67ed..bbcff5923c53 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -222,6 +222,7 @@ static int pil_subsys_init(struct modem_data *drv,
goto err_subsys;
}
+ drv->q6->desc.subsys_dev = drv->subsys;
drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
if (!drv->ramdump_dev) {
pr_err("%s: Unable to create a modem ramdump device.\n",
diff --git a/drivers/soc/qcom/qbt1000.c b/drivers/soc/qcom/qbt1000.c
index 6e7d34ac9163..d14e82415c5a 100644
--- a/drivers/soc/qcom/qbt1000.c
+++ b/drivers/soc/qcom/qbt1000.c
@@ -145,18 +145,17 @@ static int get_cmd_rsp_buffers(struct qseecom_handle *hdl,
uint32_t *rsp_len)
{
/* 64 bytes alignment for QSEECOM */
- *cmd_len = ALIGN(*cmd_len, 64);
- *rsp_len = ALIGN(*rsp_len, 64);
+ uint64_t aligned_cmd_len = ALIGN((uint64_t)*cmd_len, 64);
+ uint64_t aligned_rsp_len = ALIGN((uint64_t)*rsp_len, 64);
- if (((uint64_t)*rsp_len + (uint64_t)*cmd_len)
- > (uint64_t)g_app_buf_size) {
- pr_err("buffer too small to hold cmd=%d and rsp=%d\n",
- *cmd_len, *rsp_len);
+ if ((aligned_rsp_len + aligned_cmd_len) > (uint64_t)g_app_buf_size)
return -ENOMEM;
- }
*cmd = hdl->sbuf;
+ *cmd_len = aligned_cmd_len;
*rsp = hdl->sbuf + *cmd_len;
+ *rsp_len = aligned_rsp_len;
+
return 0;
}
diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c
index a275537d4e08..2da8731c5753 100644
--- a/drivers/soc/qcom/qdsp6v2/apr.c
+++ b/drivers/soc/qcom/qdsp6v2/apr.c
@@ -745,13 +745,14 @@ int apr_deregister(void *handle)
if (!handle)
return -EINVAL;
+ mutex_lock(&svc->m_lock);
if (!svc->svc_cnt) {
pr_err("%s: svc already deregistered. svc = %pK\n",
__func__, svc);
+ mutex_unlock(&svc->m_lock);
return -EINVAL;
}
- mutex_lock(&svc->m_lock);
dest_id = svc->dest_id;
client_id = svc->client_id;
clnt = &client[dest_id][client_id];
diff --git a/drivers/soc/qcom/rpm_log.c b/drivers/soc/qcom/rpm_log.c
index deea77c6b0a9..66844565dde0 100644
--- a/drivers/soc/qcom/rpm_log.c
+++ b/drivers/soc/qcom/rpm_log.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, 2013-2014, The Linux Foundation.
+/* Copyright (c) 2010-2011, 2013-2014, 2017, The Linux Foundation.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -345,8 +345,9 @@ static int msm_rpm_log_probe(struct platform_device *pdev)
/* Remap the rpm-log pointer */
phys_ptr = ioremap_nocache(offset->start, SZ_4);
if (!phys_ptr) {
- pr_err("%s: Failed to ioremap address: %x\n",
- __func__, offset_addr);
+ pr_err("%s: Failed to ioremap address: %pa\n",
+ __func__, &offset->start);
+ kfree(pdata);
return -ENODEV;
}
offset_addr = readl_relaxed(phys_ptr);
diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c
index dacca0efe231..8f3094853ba3 100644
--- a/drivers/soc/qcom/rpm_stats.c
+++ b/drivers/soc/qcom/rpm_stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -462,8 +462,10 @@ static int msm_rpmstats_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"phys_addr_base");
- if (!res)
+ if (!res) {
+ kfree(pdata);
return -EINVAL;
+ }
offset = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"offset_addr");
@@ -471,8 +473,9 @@ static int msm_rpmstats_probe(struct platform_device *pdev)
/* Remap the rpm-stats pointer */
phys_ptr = ioremap_nocache(offset->start, SZ_4);
if (!phys_ptr) {
- pr_err("%s: Failed to ioremap address: %x\n",
- __func__, offset_addr);
+ pr_err("%s: Failed to ioremap address: %pa\n",
+ __func__, &offset->start);
+ kfree(pdata);
return -ENODEV;
}
offset_addr = readl_relaxed(phys_ptr);
diff --git a/drivers/soc/qcom/smp2p_test.c b/drivers/soc/qcom/smp2p_test.c
index 397a547be423..e81bada0137a 100644
--- a/drivers/soc/qcom/smp2p_test.c
+++ b/drivers/soc/qcom/smp2p_test.c
@@ -1,6 +1,6 @@
/* drivers/soc/qcom/smp2p_test.c
*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <soc/qcom/subsystem_restart.h>
#include "smp2p_private.h"
#include "smp2p_test_common.h"
@@ -1239,12 +1240,15 @@ static void smp2p_ut_remote_ssr_ack(struct seq_file *s)
}
static struct dentry *dent;
+static DEFINE_MUTEX(show_lock);
static int debugfs_show(struct seq_file *s, void *data)
{
void (*show)(struct seq_file *) = s->private;
+ mutex_lock(&show_lock);
show(s);
+ mutex_unlock(&show_lock);
return 0;
}
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index c2af34926b37..0e1e4ae975b0 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -121,7 +121,7 @@
/*
* After both sides get CONNECTED,
- * there is a race between once side queueing rx buffer and the other side
+ * there is a race between one side queuing an rx buffer and the other side
* trying to call glink_tx() , this race is only on the 1st tx.
* do tx retry with some delay to allow the other side to queue rx buffer.
*/
@@ -138,7 +138,7 @@
/*
* ACK timeout from remote side for TX data.
- * Normally, it takes few msec for SPSS to responde with ACK for TX data.
+ * Normally, it takes a few msec for SPSS to respond with ACK for TX data.
* However, due to SPSS HW issue, the SPSS might disable interrupts
* for a very long time.
*/
@@ -364,7 +364,7 @@ static void spcom_link_state_notif_cb(struct glink_link_state_cb_info *cb_info,
const char *ch_name = "sp_kernel";
if (!cb_info) {
- pr_err("invalid NULL cb_info.\n");
+ pr_err("invalid NULL cb_info param\n");
return;
}
@@ -721,9 +721,6 @@ static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec)
pr_err("channel [%s] already in use.\n", name);
goto exit_err;
}
- ch->ref_count++;
- ch->pid = current_pid();
- ch->txn_id = INITIAL_TXN_ID;
pr_debug("ch [%s] opened by PID [%d], count [%d]\n",
name, ch->pid, ch->ref_count);
@@ -748,7 +745,12 @@ static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec)
} else {
pr_debug("glink_open [%s] ok.\n", name);
}
+
+ /* init channel context after successful open */
ch->glink_handle = handle;
+ ch->ref_count++;
+ ch->pid = current_pid();
+ ch->txn_id = INITIAL_TXN_ID;
pr_debug("Wait for connection on channel [%s] timeout_msec [%d].\n",
name, timeout_msec);
@@ -1480,6 +1482,7 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
int ret = 0;
struct spcom_user_create_channel_command *cmd = cmd_buf;
const char *ch_name;
+ const size_t maxlen = sizeof(cmd->ch_name);
if (cmd_size != sizeof(*cmd)) {
pr_err("cmd_size [%d] , expected [%d].\n",
@@ -1488,6 +1491,10 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
}
ch_name = cmd->ch_name;
+ if (strnlen(cmd->ch_name, maxlen) == maxlen) {
+ pr_err("channel name is not NULL terminated\n");
+ return -EINVAL;
+ }
pr_debug("ch_name [%s].\n", ch_name);
@@ -1626,7 +1633,7 @@ static int modify_ion_addr(void *buf,
/* Get ION handle from fd */
handle = ion_import_dma_buf(spcom_dev->ion_client, fd);
- if (handle == NULL) {
+ if (IS_ERR_OR_NULL(handle)) {
pr_err("fail to get ion handle.\n");
return -EINVAL;
}
@@ -1787,7 +1794,7 @@ static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
/* Get ION handle from fd - this increments the ref count */
ion_handle = ion_import_dma_buf(spcom_dev->ion_client, fd);
- if (ion_handle == NULL) {
+ if (IS_ERR_OR_NULL(ion_handle)) {
pr_err("fail to get ion handle.\n");
return -EINVAL;
}
@@ -1865,6 +1872,8 @@ static int spcom_unlock_ion_buf(struct spcom_channel *ch, int fd)
} else {
/* unlock specific ION buf */
for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
+ if (ch->ion_handle_table[i] == NULL)
+ continue;
if (ch->ion_fd_table[i] == fd) {
pr_debug("unlocked ion buf #%d fd [%d].\n",
i, ch->ion_fd_table[i]);
@@ -1932,9 +1941,9 @@ static int spcom_handle_write(struct spcom_channel *ch,
int swap_id;
char cmd_name[5] = {0}; /* debug only */
- /* opcode field is the minimum length of cmd */
- if (buf_size < sizeof(cmd->cmd_id)) {
- pr_err("Invalid argument user buffer size %d.\n", buf_size);
+ /* Minimal command should have command-id and argument */
+ if (buf_size < sizeof(struct spcom_user_command)) {
+ pr_err("Command buffer size [%d] too small\n", buf_size);
return -EINVAL;
}
@@ -2024,7 +2033,7 @@ static int spcom_handle_read_req_resp(struct spcom_channel *ch,
/* Check param validity */
if (size > SPCOM_MAX_RESPONSE_SIZE) {
- pr_err("ch [%s] inavlid size [%d].\n",
+ pr_err("ch [%s] invalid size [%d].\n",
ch->name, size);
return -EINVAL;
}
@@ -2152,6 +2161,10 @@ static int spcom_device_open(struct inode *inode, struct file *filp)
struct spcom_channel *ch;
const char *name = file_to_filename(filp);
+	/* silence error messages until the spss link is up */
+ if (!spcom_is_sp_subsystem_link_up())
+ return -ENODEV;
+
pr_debug("Open file [%s].\n", name);
if (strcmp(name, DEVICE_NAME) == 0) {
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 51f4ec79db10..6afe2fb8cd75 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -158,6 +158,7 @@ struct subsys_device {
struct work_struct work;
struct wakeup_source ssr_wlock;
char wlname[64];
+ char error_buf[64];
struct work_struct device_restart_work;
struct subsys_tracking track;
@@ -323,6 +324,12 @@ static void subsys_set_state(struct subsys_device *subsys,
spin_unlock_irqrestore(&subsys->track.s_lock, flags);
}
+static ssize_t error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->error_buf);
+}
+
/**
* subsytem_default_online() - Mark a subsystem as online by default
* @dev: subsystem to mark as online
@@ -341,6 +348,7 @@ static struct device_attribute subsys_attrs[] = {
__ATTR_RO(name),
__ATTR_RO(state),
__ATTR_RO(crash_count),
+ __ATTR_RO(error),
__ATTR(restart_level, 0644, restart_level_show, restart_level_store),
__ATTR(firmware_name, 0644, firmware_name_show, firmware_name_store),
__ATTR(system_debug, 0644, system_debug_show, system_debug_store),
@@ -1174,6 +1182,12 @@ enum crash_status subsys_get_crash_status(struct subsys_device *dev)
return dev->crashed;
}
+void subsys_set_error(struct subsys_device *dev, const char *error_msg)
+{
+ snprintf(dev->error_buf, sizeof(dev->error_buf), "%s", error_msg);
+ sysfs_notify(&dev->dev.kobj, NULL, "error");
+}
+
static struct subsys_device *desc_to_subsys(struct device *d)
{
struct subsys_device *device, *subsys_dev = 0;
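
A hedged userspace sketch of consuming the new read-only "error" attribute: subsys_set_error() stores the message and calls sysfs_notify(), so a monitor can arm the node with an initial read and then poll() for POLLPRI before re-reading from offset 0. The sysfs path below is an assumption, for illustration only:

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* path is illustrative; the node sits under the subsys device */
		int fd = open("/sys/bus/msm_subsys/devices/subsys0/error", O_RDONLY);
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
		char msg[64];
		ssize_t n;

		if (fd < 0)
			return 1;

		pread(fd, msg, sizeof(msg) - 1, 0);	/* arm the notification */
		while (poll(&pfd, 1, -1) > 0) {
			n = pread(fd, msg, sizeof(msg) - 1, 0);
			if (n > 0) {
				msg[n] = '\0';
				printf("subsystem error: %s", msg);
			}
		}
		close(fd);
		return 0;
	}
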
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index d1802bcba0fb..d3130cfd6433 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -577,20 +577,53 @@ static void __pmic_arb_chained_irq(struct spmi_pmic_arb *pa, bool show)
int last = pa->max_apid >> 5;
u32 status, enable;
int i, id, apid;
+ /* status based dispatch */
+ bool acc_valid = false;
+ u32 irq_status = 0;
for (i = first; i <= last; ++i) {
status = readl_relaxed(pa->acc_status +
pa->ver_ops->owner_acc_status(pa->ee, i));
+ if (status)
+ acc_valid = true;
+
while (status) {
id = ffs(status) - 1;
status &= ~BIT(id);
apid = id + i * 32;
+ if (apid < pa->min_apid || apid > pa->max_apid) {
+ WARN_ONCE(true, "spurious spmi irq received for apid=%d\n",
+ apid);
+ continue;
+ }
enable = readl_relaxed(pa->intr +
pa->ver_ops->acc_enable(apid));
if (enable & SPMI_PIC_ACC_ENABLE_BIT)
periph_interrupt(pa, apid, show);
}
}
+
+	/* ACC_STATUS is empty but an IRQ fired; check IRQ_STATUS */
+ if (!acc_valid) {
+ for (i = pa->min_apid; i <= pa->max_apid; i++) {
+ /* skip if APPS is not irq owner */
+ if (pa->apid_data[i].irq_owner != pa->ee)
+ continue;
+
+ irq_status = readl_relaxed(pa->intr +
+ pa->ver_ops->irq_status(i));
+ if (irq_status) {
+ enable = readl_relaxed(pa->intr +
+ pa->ver_ops->acc_enable(i));
+ if (enable & SPMI_PIC_ACC_ENABLE_BIT) {
+ dev_dbg(&pa->spmic->dev,
+ "Dispatching IRQ for apid=%d status=%x\n",
+ i, irq_status);
+ periph_interrupt(pa, i, show);
+ }
+ }
+ }
+ }
}
static void pmic_arb_chained_irq(struct irq_desc *desc)
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 49ed6de1a95e..07fc21797f0f 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -699,7 +699,7 @@ static void user_ion_free_nolock(struct ion_client *client,
WARN(1, "%s: invalid handle passed to free.\n", __func__);
return;
}
- if (!handle->user_ref_count > 0) {
+ if (handle->user_ref_count == 0) {
WARN(1, "%s: User does not have access!\n", __func__);
return;
}
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 4e6c16af40fc..91ff8fb0cc3a 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -3181,7 +3181,7 @@ static const struct {
};
/*-------------------------------------------------------------------------*/
-static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
+static void nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
{
int i;
@@ -3211,7 +3211,7 @@ static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
/*-------------------------------------------------------------------------*/
/* platform_driver */
-static int __init nbu2ss_drv_contest_init(
+static int nbu2ss_drv_contest_init(
struct platform_device *pdev,
struct nbu2ss_udc *udc)
{
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index a9c1e0bafa62..e35fbece3d2f 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -232,7 +232,7 @@ static int p80211_convert_to_ether(wlandevice_t *wlandev, struct sk_buff *skb)
struct p80211_hdr_a3 *hdr;
hdr = (struct p80211_hdr_a3 *) skb->data;
- if (p80211_rx_typedrop(wlandev, hdr->fc))
+ if (p80211_rx_typedrop(wlandev, le16_to_cpu(hdr->fc)))
return CONV_TO_ETHER_SKIPPED;
/* perform mcast filtering: allow my local address through but reject
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index a2c0734c76e2..e8dd296fb25b 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1235,7 +1235,8 @@ static int omap8250_probe(struct platform_device *pdev)
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
err:
- pm_runtime_put(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -1244,6 +1245,7 @@ static int omap8250_remove(struct platform_device *pdev)
{
struct omap8250_priv *priv = platform_get_drvdata(pdev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
serial8250_unregister_port(priv->line);
@@ -1343,6 +1345,10 @@ static int omap8250_runtime_suspend(struct device *dev)
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up;
+ /* In case runtime-pm tries this before we are setup */
+ if (!priv)
+ return 0;
+
up = serial8250_get_port(priv->line);
/*
* When using 'no_console_suspend', the console UART must not be
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 41d7cf6d63ba..858c30814497 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -428,9 +428,6 @@ int hw_port_test_set(struct ci_hdrc *ci, u8 mode);
u8 hw_port_test_get(struct ci_hdrc *ci);
-int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask,
- u32 value, unsigned int timeout_ms);
-
void ci_platform_configure(struct ci_hdrc *ci);
#endif /* __DRIVERS_USB_CHIPIDEA_CI_H */
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index ba4a2a1eb3ff..939c6ad71068 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -518,38 +518,6 @@ int hw_device_reset(struct ci_hdrc *ci)
return 0;
}
-/**
- * hw_wait_reg: wait the register value
- *
- * Sometimes, it needs to wait register value before going on.
- * Eg, when switch to device mode, the vbus value should be lower
- * than OTGSC_BSV before connects to host.
- *
- * @ci: the controller
- * @reg: register index
- * @mask: mast bit
- * @value: the bit value to wait
- * @timeout_ms: timeout in millisecond
- *
- * This function returns an error code if timeout
- */
-int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask,
- u32 value, unsigned int timeout_ms)
-{
- unsigned long elapse = jiffies + msecs_to_jiffies(timeout_ms);
-
- while (hw_read(ci, reg, mask) != value) {
- if (time_after(jiffies, elapse)) {
- dev_err(ci->dev, "timeout waiting for %08x in %d\n",
- mask, reg);
- return -ETIMEDOUT;
- }
- msleep(20);
- }
-
- return 0;
-}
-
static irqreturn_t ci_irq(int irq, void *data)
{
struct ci_hdrc *ci = data;
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 03b6743461d1..0cf149edddd8 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -44,12 +44,15 @@ u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask)
else
val &= ~OTGSC_BSVIS;
- cable->changed = false;
-
if (cable->state)
val |= OTGSC_BSV;
else
val &= ~OTGSC_BSV;
+
+ if (cable->enabled)
+ val |= OTGSC_BSVIE;
+ else
+ val &= ~OTGSC_BSVIE;
}
cable = &ci->platdata->id_extcon;
@@ -59,15 +62,18 @@ u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask)
else
val &= ~OTGSC_IDIS;
- cable->changed = false;
-
if (cable->state)
val |= OTGSC_ID;
else
val &= ~OTGSC_ID;
+
+ if (cable->enabled)
+ val |= OTGSC_IDIE;
+ else
+ val &= ~OTGSC_IDIE;
}
- return val;
+ return val & mask;
}
/**
@@ -77,6 +83,36 @@ u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask)
*/
void hw_write_otgsc(struct ci_hdrc *ci, u32 mask, u32 data)
{
+ struct ci_hdrc_cable *cable;
+
+ cable = &ci->platdata->vbus_extcon;
+ if (!IS_ERR(cable->edev)) {
+ if (data & mask & OTGSC_BSVIS)
+ cable->changed = false;
+
+ /* Don't enable vbus interrupt if using external notifier */
+ if (data & mask & OTGSC_BSVIE) {
+ cable->enabled = true;
+ data &= ~OTGSC_BSVIE;
+ } else if (mask & OTGSC_BSVIE) {
+ cable->enabled = false;
+ }
+ }
+
+ cable = &ci->platdata->id_extcon;
+ if (!IS_ERR(cable->edev)) {
+ if (data & mask & OTGSC_IDIS)
+ cable->changed = false;
+
+ /* Don't enable id interrupt if using external notifier */
+ if (data & mask & OTGSC_IDIE) {
+ cable->enabled = true;
+ data &= ~OTGSC_IDIE;
+ } else if (mask & OTGSC_IDIE) {
+ cable->enabled = false;
+ }
+ }
+
hw_write(ci, OP_OTGSC, mask | OTGSC_INT_STATUS_BITS, data);
}
@@ -104,7 +140,31 @@ void ci_handle_vbus_change(struct ci_hdrc *ci)
usb_gadget_vbus_disconnect(&ci->gadget);
}
-#define CI_VBUS_STABLE_TIMEOUT_MS 5000
+/**
+ * When we switch to device mode, the vbus value should be lower
+ * than OTGSC_BSV before connecting to host.
+ *
+ * @ci: the controller
+ *
+ * This function returns an error code on timeout
+ */
+static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
+{
+ unsigned long elapse = jiffies + msecs_to_jiffies(5000);
+ u32 mask = OTGSC_BSV;
+
+ while (hw_read_otgsc(ci, mask)) {
+ if (time_after(jiffies, elapse)) {
+ dev_err(ci->dev, "timeout waiting for %08x in OTGSC\n",
+ mask);
+ return -ETIMEDOUT;
+ }
+ msleep(20);
+ }
+
+ return 0;
+}
+
static void ci_handle_id_switch(struct ci_hdrc *ci)
{
enum ci_role role = ci_otg_role(ci);
@@ -116,9 +176,11 @@ static void ci_handle_id_switch(struct ci_hdrc *ci)
ci_role_stop(ci);
if (role == CI_ROLE_GADGET)
- /* wait vbus lower than OTGSC_BSV */
- hw_wait_reg(ci, OP_OTGSC, OTGSC_BSV, 0,
- CI_VBUS_STABLE_TIMEOUT_MS);
+ /*
+		 * wait for vbus to drop below OTGSC_BSV before
+		 * connecting to host
+ */
+ hw_wait_vbus_lower_bsv(ci);
ci_role_start(ci, role);
}
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 453eee734b23..1b4fb562ce4b 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -840,6 +840,7 @@ struct dwc3_scratchpad_array {
* @irq: irq number
* @bh: tasklet which handles the interrupt
* @irq_cnt: total irq count
+ * @last_irq_cnt: last irq count
* @bh_completion_time: time taken for taklet completion
* @bh_handled_evt_cnt: no. of events handled by tasklet per interrupt
* @bh_dbg_index: index for capturing bh_completion_time and bh_handled_evt_cnt
@@ -1028,6 +1029,7 @@ struct dwc3 {
/* IRQ timing statistics */
int irq;
unsigned long irq_cnt;
+ unsigned long last_irq_cnt;
unsigned long ep_cmd_timeout_cnt;
unsigned bh_completion_time[MAX_INTR_STATS];
unsigned bh_handled_evt_cnt[MAX_INTR_STATS];
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 20ac60d6b6a8..940d163788a8 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -401,7 +401,7 @@ static ssize_t dwc3_mode_write(struct file *file,
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 mode = 0;
- char buf[32];
+ char buf[32] = {};
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -481,7 +481,7 @@ static ssize_t dwc3_testmode_write(struct file *file,
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 testmode = 0;
- char buf[32];
+ char buf[32] = {};
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -588,7 +588,7 @@ static ssize_t dwc3_link_state_write(struct file *file,
struct dwc3 *dwc = s->private;
unsigned long flags;
enum dwc3_link_state state = 0;
- char buf[32];
+ char buf[32] = {};
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -629,12 +629,10 @@ static ssize_t dwc3_store_ep_num(struct file *file, const char __user *ubuf,
{
struct seq_file *s = file->private_data;
struct dwc3 *dwc = s->private;
- char kbuf[10];
+ char kbuf[10] = {};
unsigned int num, dir, temp;
unsigned long flags;
- memset(kbuf, 0, 10);
-
if (copy_from_user(kbuf, ubuf, count > 10 ? 10 : count))
return -EFAULT;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index ad9d6cc4e23f..4ed46d9ca279 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -3321,16 +3321,19 @@ static void msm_dwc3_perf_vote_work(struct work_struct *w)
struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
perf_vote_work.work);
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
- static unsigned long last_irq_cnt;
bool in_perf_mode = false;
+ int latency = mdwc->pm_qos_latency;
+
+ if (!latency)
+ return;
- if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
+ if (dwc->irq_cnt - dwc->last_irq_cnt >= PM_QOS_THRESHOLD)
in_perf_mode = true;
pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
- __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));
+ __func__, in_perf_mode, (dwc->irq_cnt - dwc->last_irq_cnt));
- last_irq_cnt = dwc->irq_cnt;
+ dwc->last_irq_cnt = dwc->irq_cnt;
msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
schedule_delayed_work(&mdwc->perf_vote_work,
msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index a844ea4d06db..eeccae8bfc1b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1654,9 +1654,6 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
DBG(cdev,
"Config HS device with LPM(L1)\n");
}
- } else if (!disable_l1_for_hs) {
- cdev->desc.bcdUSB = cpu_to_le16(0x0210);
- DBG(cdev, "Config HS device with LPM(L1)\n");
}
value = min(w_length, (u16) sizeof cdev->desc);
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index 51ab794ef6f9..ab7441fcf66f 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -369,15 +369,22 @@ static void audio_send(struct audio_dev *audio)
s64 msecs;
s64 frames;
ktime_t now;
+ unsigned long flags;
+ spin_lock_irqsave(&audio->lock, flags);
/* audio->substream will be null if we have been closed */
- if (!audio->substream)
+ if (!audio->substream) {
+ spin_unlock_irqrestore(&audio->lock, flags);
return;
+ }
/* audio->buffer_pos will be null if we have been stopped */
- if (!audio->buffer_pos)
+ if (!audio->buffer_pos) {
+ spin_unlock_irqrestore(&audio->lock, flags);
return;
+ }
runtime = audio->substream->runtime;
+ spin_unlock_irqrestore(&audio->lock, flags);
/* compute number of frames to send */
now = ktime_get();
@@ -400,8 +407,21 @@ static void audio_send(struct audio_dev *audio)
while (frames > 0) {
req = audio_req_get(audio);
- if (!req)
+ spin_lock_irqsave(&audio->lock, flags);
+ /* audio->substream will be null if we have been closed */
+ if (!audio->substream) {
+ spin_unlock_irqrestore(&audio->lock, flags);
+ return;
+ }
+ /* audio->buffer_pos will be null if we have been stopped */
+ if (!audio->buffer_pos) {
+ spin_unlock_irqrestore(&audio->lock, flags);
+ return;
+ }
+ if (!req) {
+ spin_unlock_irqrestore(&audio->lock, flags);
break;
+ }
length = frames_to_bytes(runtime, frames);
if (length > IN_EP_MAX_PACKET_SIZE)
@@ -427,6 +447,7 @@ static void audio_send(struct audio_dev *audio)
}
req->length = length;
+ spin_unlock_irqrestore(&audio->lock, flags);
ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
if (ret < 0) {
pr_err("usb_ep_queue failed ret: %d\n", ret);
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index 45c39d3c4225..434af820e827 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -1245,16 +1245,19 @@ usb_function *rndis_qc_bind_config_vendor(struct usb_function_instance *fi,
rndis->func.resume = rndis_qc_resume;
rndis->func.free_func = rndis_qc_free;
- _rndis_qc = rndis;
-
status = rndis_ipa_init(&rndis_ipa_params);
if (status) {
pr_err("%s: failed to init rndis_ipa\n", __func__);
- kfree(rndis);
- return ERR_PTR(status);
+ goto fail;
}
+ _rndis_qc = rndis;
+
return &rndis->func;
+fail:
+ kfree(rndis);
+ _rndis_qc = NULL;
+ return ERR_PTR(status);
}
static struct usb_function *qcrndis_alloc(struct usb_function_instance *fi)
@@ -1264,74 +1267,116 @@ static struct usb_function *qcrndis_alloc(struct usb_function_instance *fi)
static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
{
+ int ret = 0;
+ unsigned long flags;
pr_info("Open rndis QC driver\n");
+ spin_lock_irqsave(&rndis_lock, flags);
if (!_rndis_qc) {
pr_err("rndis_qc_dev not created yet\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
if (rndis_qc_lock(&_rndis_qc->open_excl)) {
pr_err("Already opened\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto fail;
}
fp->private_data = _rndis_qc;
- pr_info("rndis QC file opened\n");
+fail:
+ spin_unlock_irqrestore(&rndis_lock, flags);
- return 0;
+ if (!ret)
+ pr_info("rndis QC file opened\n");
+
+ return ret;
}
static int rndis_qc_release_dev(struct inode *ip, struct file *fp)
{
- struct f_rndis_qc *rndis = fp->private_data;
-
+ unsigned long flags;
pr_info("Close rndis QC file\n");
- rndis_qc_unlock(&rndis->open_excl);
+ spin_lock_irqsave(&rndis_lock, flags);
+
+ if (!_rndis_qc) {
+ pr_err("rndis_qc_dev not present\n");
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ return -ENODEV;
+ }
+ rndis_qc_unlock(&_rndis_qc->open_excl);
+ spin_unlock_irqrestore(&rndis_lock, flags);
return 0;
}
static long rndis_qc_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
{
- struct f_rndis_qc *rndis = fp->private_data;
+ u8 qc_max_pkt_per_xfer = 0;
+ u32 qc_max_pkt_size = 0;
int ret = 0;
+ unsigned long flags;
- pr_info("Received command %d\n", cmd);
+ spin_lock_irqsave(&rndis_lock, flags);
+ if (!_rndis_qc) {
+ pr_err("rndis_qc_dev not present\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ qc_max_pkt_per_xfer = _rndis_qc->ul_max_pkt_per_xfer;
+ qc_max_pkt_size = _rndis_qc->max_pkt_size;
+
+ if (rndis_qc_lock(&_rndis_qc->ioctl_excl)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ spin_unlock_irqrestore(&rndis_lock, flags);
- if (rndis_qc_lock(&rndis->ioctl_excl))
- return -EBUSY;
+ pr_info("Received command %d\n", cmd);
switch (cmd) {
case RNDIS_QC_GET_MAX_PKT_PER_XFER:
ret = copy_to_user((void __user *)arg,
- &rndis->ul_max_pkt_per_xfer,
- sizeof(rndis->ul_max_pkt_per_xfer));
+ &qc_max_pkt_per_xfer,
+ sizeof(qc_max_pkt_per_xfer));
if (ret) {
pr_err("copying to user space failed\n");
ret = -EFAULT;
}
pr_info("Sent UL max packets per xfer %d\n",
- rndis->ul_max_pkt_per_xfer);
+ qc_max_pkt_per_xfer);
break;
case RNDIS_QC_GET_MAX_PKT_SIZE:
ret = copy_to_user((void __user *)arg,
- &rndis->max_pkt_size,
- sizeof(rndis->max_pkt_size));
+ &qc_max_pkt_size,
+ sizeof(qc_max_pkt_size));
if (ret) {
pr_err("copying to user space failed\n");
ret = -EFAULT;
}
pr_debug("Sent max packet size %d\n",
- rndis->max_pkt_size);
+ qc_max_pkt_size);
break;
default:
pr_err("Unsupported IOCTL\n");
ret = -EINVAL;
}
- rndis_qc_unlock(&rndis->ioctl_excl);
+ spin_lock_irqsave(&rndis_lock, flags);
+ if (!_rndis_qc) {
+ pr_err("rndis_qc_dev not present\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ rndis_qc_unlock(&_rndis_qc->ioctl_excl);
+
+fail:
+ spin_unlock_irqrestore(&rndis_lock, flags);
return ret;
}
@@ -1385,11 +1430,11 @@ static int qcrndis_set_inst_name(struct usb_function_instance *fi,
return -ENOMEM;
}
+ spin_lock_init(&rndis_lock);
opts->rndis = rndis;
ret = misc_register(&rndis_qc_device);
if (ret)
pr_err("rndis QC driver failed to register\n");
- spin_lock_init(&rndis_lock);
ret = ipa_data_setup(USB_IPA_FUNC_RNDIS);
if (ret) {
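The rndis hunks above guard every access to the global _rndis_qc pointer with rndis_lock and, in the ioctl path, take a snapshot of the needed fields before dropping the lock so that copy_to_user() never runs under a spinlock. A small hedged sketch of that snapshot-then-copy pattern follows; my_lock, g_dev and my_get_max_pkt_size() are hypothetical names.

/* Sketch: snapshot device state under the spinlock, drop the lock, then call
 * copy_to_user(), which can fault and sleep. Names are placeholders. */
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(my_lock);

struct my_dev {
	u32 max_pkt_size;
};

static struct my_dev *g_dev;		/* may be cleared on unbind */

static long my_get_max_pkt_size(void __user *arg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&my_lock, flags);
	if (!g_dev) {
		spin_unlock_irqrestore(&my_lock, flags);
		return -ENODEV;
	}
	val = g_dev->max_pkt_size;	/* snapshot while the pointer is valid */
	spin_unlock_irqrestore(&my_lock, flags);

	/* no spinlock may be held across a user-space copy */
	return copy_to_user(arg, &val, sizeof(val)) ? -EFAULT : 0;
}
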
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index df538fd10aa4..46f5354c534d 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -77,10 +77,12 @@ static int exynos_ehci_get_phy(struct device *dev,
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
return ret;
} else if (ret != -ENOSYS && ret != -ENODEV) {
dev_err(dev,
"Error retrieving usb2 phy: %d\n", ret);
+ of_node_put(child);
return ret;
}
}
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 2cd105be7319..6865b919403f 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -66,10 +66,12 @@ static int exynos_ohci_get_phy(struct device *dev,
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
return ret;
} else if (ret != -ENOSYS && ret != -ENODEV) {
dev_err(dev,
"Error retrieving usb2 phy: %d\n", ret);
+ of_node_put(child);
return ret;
}
}
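Both Exynos hunks above add of_node_put() on the early-return paths of a child-node loop: for_each_available_child_of_node() takes a reference on each child it hands out, and leaving the loop early without dropping that reference leaks the node. A short illustrative sketch, with probe_one_child() as a hypothetical helper:

/* Sketch: drop the iterator's reference on any early exit from the loop. */
#include <linux/of.h>

int probe_one_child(struct device_node *child);

static int probe_children(struct device_node *parent)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(parent, child) {
		ret = probe_one_child(child);
		if (ret) {
			of_node_put(child);	/* reference held by the iterator */
			return ret;
		}
	}

	return 0;
}
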
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 055c6203577a..7d893c5815e2 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -1539,6 +1539,11 @@ static void usbpd_sm(struct work_struct *w)
if (pd->current_state == PE_UNKNOWN)
goto sm_done;
+ if (pd->vconn_enabled) {
+ regulator_disable(pd->vconn);
+ pd->vconn_enabled = false;
+ }
+
usbpd_info(&pd->dev, "USB Type-C disconnect\n");
if (pd->pd_phy_opened) {
@@ -1574,11 +1579,6 @@ static void usbpd_sm(struct work_struct *w)
pd->vbus_enabled = false;
}
- if (pd->vconn_enabled) {
- regulator_disable(pd->vconn);
- pd->vconn_enabled = false;
- }
-
if (pd->current_dr == DR_UFP)
stop_usb_peripheral(pd);
else if (pd->current_dr == DR_DFP)
@@ -1927,6 +1927,9 @@ static void usbpd_sm(struct work_struct *w)
val.intval = 0;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+
+ pd_phy_close();
+ pd->pd_phy_opened = false;
}
break;
@@ -2519,6 +2522,11 @@ static int usbpd_dr_set_property(struct dual_role_phy_instance *dual_role,
case DUAL_ROLE_PROP_MODE:
usbpd_dbg(&pd->dev, "Setting mode to %d\n", *val);
+ if (pd->current_state == PE_UNKNOWN) {
+ usbpd_warn(&pd->dev, "No active connection. Don't allow MODE change\n");
+ return -EAGAIN;
+ }
+
/*
* Forces disconnect on CC and re-establishes connection.
* This does not use PD-based PR/DR swap
@@ -3179,7 +3187,7 @@ struct usbpd *usbpd_create(struct device *parent)
if (ret)
goto free_pd;
- pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE);
+ pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE | WQ_HIGHPRI);
if (!pd->wq) {
ret = -ENOMEM;
goto del_pd;
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index 63fad28fa721..588af94db6cd 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -76,8 +76,8 @@
#define USB_PDPHY_TRIM_3 0xF3
/* VDD regulator */
-#define VDD_PDPHY_VOL_MIN 3088000 /* uV */
-#define VDD_PDPHY_VOL_MAX 3088000 /* uV */
+#define VDD_PDPHY_VOL_MIN 2800000 /* uV */
+#define VDD_PDPHY_VOL_MAX 3300000 /* uV */
#define VDD_PDPHY_HPM_LOAD 3000 /* uA */
struct usb_pdphy {
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 5df091a5454b..de4f93afdc97 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -76,6 +76,7 @@ MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
struct qusb_phy {
struct usb_phy phy;
+ struct mutex lock;
void __iomem *base;
void __iomem *efuse_reg;
void __iomem *tcsr_clamp_dig_n;
@@ -100,7 +101,7 @@ struct qusb_phy {
int efuse_bit_pos;
int efuse_num_of_bits;
- bool power_enabled;
+ int power_enabled_ref;
bool clocks_enabled;
bool cable_connected;
bool suspended;
@@ -160,35 +161,47 @@ static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
return ret;
}
-static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
- bool toggle_vdd)
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
{
int ret = 0;
- dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n",
- __func__, on ? "on" : "off", qphy->power_enabled);
+ mutex_lock(&qphy->lock);
- if (toggle_vdd && qphy->power_enabled == on) {
- dev_dbg(qphy->phy.dev, "PHYs' regulators are already ON.\n");
- return 0;
- }
+ dev_dbg(qphy->phy.dev,
+ "%s:req to turn %s regulators. power_enabled_ref:%d\n",
+ __func__, on ? "on" : "off", qphy->power_enabled_ref);
- if (!on)
- goto disable_vdda33;
+ if (on && ++qphy->power_enabled_ref > 1) {
+ dev_dbg(qphy->phy.dev, "PHYs' regulators are already on\n");
+ goto done;
+ }
- if (toggle_vdd) {
- ret = qusb_phy_config_vdd(qphy, true);
- if (ret) {
- dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
- ret);
- goto err_vdd;
+ if (!on) {
+ if (on == qphy->power_enabled_ref) {
+ dev_dbg(qphy->phy.dev,
+ "PHYs' regulators are already off\n");
+ goto done;
}
- ret = regulator_enable(qphy->vdd);
- if (ret) {
- dev_err(qphy->phy.dev, "Unable to enable VDD\n");
- goto unconfig_vdd;
- }
+ qphy->power_enabled_ref--;
+ if (!qphy->power_enabled_ref)
+ goto disable_vdda33;
+
+ dev_dbg(qphy->phy.dev, "Skip turning off PHYs' regulators\n");
+ goto done;
+ }
+
+ ret = qusb_phy_config_vdd(qphy, true);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+ ret);
+ goto err_vdd;
+ }
+
+ ret = regulator_enable(qphy->vdd);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+ goto unconfig_vdd;
}
ret = regulator_set_load(qphy->vdda12, QUSB2PHY_1P2_HPM_LOAD);
@@ -251,10 +264,9 @@ static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
goto unset_vdd33;
}
- if (toggle_vdd)
- qphy->power_enabled = true;
-
pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+
+ mutex_unlock(&qphy->lock);
return ret;
disable_vdda33:
@@ -304,22 +316,24 @@ put_vdda12_lpm:
dev_err(qphy->phy.dev, "Unable to set LPM of vdda12\n");
disable_vdd:
- if (toggle_vdd) {
- ret = regulator_disable(qphy->vdd);
- if (ret)
- dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
- ret);
+ ret = regulator_disable(qphy->vdd);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+ ret);
unconfig_vdd:
- ret = qusb_phy_config_vdd(qphy, false);
- if (ret)
- dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
- ret);
- }
+ ret = qusb_phy_config_vdd(qphy, false);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
+ ret);
err_vdd:
- if (toggle_vdd)
- qphy->power_enabled = false;
dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+
+ /* in case of error in turning on regulators */
+ if (qphy->power_enabled_ref)
+ qphy->power_enabled_ref--;
+done:
+ mutex_unlock(&qphy->lock);
return ret;
}
@@ -335,7 +349,7 @@ static int qusb_phy_update_dpdm(struct usb_phy *phy, int value)
case POWER_SUPPLY_DP_DM_DPF_DMF:
dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPF_DMF\n");
if (!qphy->rm_pulldown) {
- ret = qusb_phy_enable_power(qphy, true, false);
+ ret = qusb_phy_enable_power(qphy, true);
if (ret >= 0) {
qphy->rm_pulldown = true;
dev_dbg(phy->dev, "DP_DM_F: rm_pulldown:%d\n",
@@ -348,7 +362,7 @@ static int qusb_phy_update_dpdm(struct usb_phy *phy, int value)
case POWER_SUPPLY_DP_DM_DPR_DMR:
dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPR_DMR\n");
if (qphy->rm_pulldown) {
- ret = qusb_phy_enable_power(qphy, false, false);
+ ret = qusb_phy_enable_power(qphy, false);
if (ret >= 0) {
qphy->rm_pulldown = false;
dev_dbg(phy->dev, "DP_DM_R: rm_pulldown:%d\n",
@@ -452,10 +466,6 @@ static int qusb_phy_init(struct usb_phy *phy)
dev_dbg(phy->dev, "%s\n", __func__);
- ret = qusb_phy_enable_power(qphy, true, true);
- if (ret)
- return ret;
-
/* bump up vdda33 voltage to operating level*/
ret = regulator_set_voltage(qphy->vdda33, qphy->vdda33_levels[1],
qphy->vdda33_levels[2]);
@@ -683,7 +693,7 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
wmb();
qusb_phy_enable_clocks(qphy, false);
- qusb_phy_enable_power(qphy, false, true);
+ qusb_phy_enable_power(qphy, false);
}
qphy->suspended = true;
} else {
@@ -715,7 +725,7 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
*/
wmb();
- qusb_phy_enable_power(qphy, true, true);
+ qusb_phy_enable_power(qphy, true);
ret = reset_control_assert(qphy->phy_reset);
if (ret)
dev_err(phy->dev, "%s: phy_reset assert failed\n",
@@ -1063,6 +1073,8 @@ static int qusb_phy_probe(struct platform_device *pdev)
return PTR_ERR(qphy->vdda12);
}
+ mutex_init(&qphy->lock);
+
platform_set_drvdata(pdev, qphy);
qphy->phy.label = "msm-qusb-phy-v2";
@@ -1100,7 +1112,7 @@ static int qusb_phy_remove(struct platform_device *pdev)
qphy->clocks_enabled = false;
}
- qusb_phy_enable_power(qphy, false, true);
+ qusb_phy_enable_power(qphy, false);
return 0;
}
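The qusb-v2 hunks above replace the old power_enabled flag with a mutex-protected reference count, so nested "on" requests enable the supplies only once and only the last "off" actually turns them off. The sketch below shows the counting scheme in isolation; my_phy and my_regulators_on/off() are hypothetical stand-ins for the driver's regulator sequence.

/* Sketch of a reference-counted power enable, serialized by a mutex. */
#include <linux/mutex.h>
#include <linux/types.h>

struct my_phy {
	struct mutex lock;
	int power_ref;
};

int my_regulators_on(struct my_phy *phy);
void my_regulators_off(struct my_phy *phy);

static int my_phy_power(struct my_phy *phy, bool on)
{
	int ret = 0;

	mutex_lock(&phy->lock);
	if (on) {
		if (phy->power_ref++ == 0)
			ret = my_regulators_on(phy);	/* first user powers up */
		if (ret)
			phy->power_ref--;		/* undo the count on failure */
	} else if (phy->power_ref > 0) {
		if (--phy->power_ref == 0)
			my_regulators_off(phy);		/* last user powers down */
	}
	mutex_unlock(&phy->lock);

	return ret;
}
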
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 5867c6c204c9..6a2529ec1511 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -752,6 +752,9 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
writel_relaxed(0x00,
qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+ /* Disable PHY */
+ writel_relaxed(POWER_DOWN,
+ qphy->base + QUSB2PHY_PORT_POWERDOWN);
/* Make sure that above write is completed */
wmb();
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 7812052dc700..754fc3e41005 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -373,23 +373,29 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
dev_dbg(&port->dev,
"%s - usb_serial_generic_open failed: %d\n",
__func__, result);
- goto err_out;
+ goto err_free;
}
/* remove any data still left: also clears error state */
ark3116_read_reg(serial, UART_RX, buf);
/* read modem status */
- priv->msr = ark3116_read_reg(serial, UART_MSR, buf);
+ result = ark3116_read_reg(serial, UART_MSR, buf);
+ if (result < 0)
+ goto err_close;
+ priv->msr = *buf;
+
/* read line status */
- priv->lsr = ark3116_read_reg(serial, UART_LSR, buf);
+ result = ark3116_read_reg(serial, UART_LSR, buf);
+ if (result < 0)
+ goto err_close;
+ priv->lsr = *buf;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "submit irq_in urb failed %d\n",
result);
- ark3116_close(port);
- goto err_out;
+ goto err_close;
}
/* activate interrupts */
@@ -402,8 +408,15 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
if (tty)
ark3116_set_termios(tty, port, NULL);
-err_out:
kfree(buf);
+
+ return 0;
+
+err_close:
+ usb_serial_generic_close(port);
+err_free:
+ kfree(buf);
+
return result;
}
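The ark3116_open() hunk above restructures the error handling into goto labels that unwind only what has already succeeded, in reverse order, and free the temporary buffer on both the success and the failure path. A compact sketch of that structure, with hypothetical helpers:

/* Sketch: each failure jumps to the label that undoes the completed steps. */
#include <linux/slab.h>
#include <linux/errno.h>

int device_start(void);
void device_stop(void);
int device_kick(char *buf);

static int my_open(void)
{
	char *buf;
	int ret;

	buf = kzalloc(32, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = device_start();
	if (ret)
		goto err_free;

	ret = device_kick(buf);
	if (ret)
		goto err_stop;

	kfree(buf);
	return 0;

err_stop:
	device_stop();		/* undo device_start() */
err_free:
	kfree(buf);
	return ret;
}
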
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index e0b1fe2f60e1..be93b9ff2d98 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1399,25 +1399,30 @@ static int digi_read_inb_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct digi_port *priv = usb_get_serial_port_data(port);
- int opcode = ((unsigned char *)urb->transfer_buffer)[0];
- int len = ((unsigned char *)urb->transfer_buffer)[1];
- int port_status = ((unsigned char *)urb->transfer_buffer)[2];
- unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
+ unsigned char *buf = urb->transfer_buffer;
+ int opcode;
+ int len;
+ int port_status;
+ unsigned char *data;
int flag, throttled;
- int status = urb->status;
-
- /* do not process callbacks on closed ports */
- /* but do continue the read chain */
- if (urb->status == -ENOENT)
- return 0;
/* short/multiple packet check */
+ if (urb->actual_length < 2) {
+ dev_warn(&port->dev, "short packet received\n");
+ return -1;
+ }
+
+ opcode = buf[0];
+ len = buf[1];
+
if (urb->actual_length != len + 2) {
- dev_err(&port->dev, "%s: INCOMPLETE OR MULTIPLE PACKET, "
- "status=%d, port=%d, opcode=%d, len=%d, "
- "actual_length=%d, status=%d\n", __func__, status,
- priv->dp_port_num, opcode, len, urb->actual_length,
- port_status);
+ dev_err(&port->dev, "malformed packet received: port=%d, opcode=%d, len=%d, actual_length=%u\n",
+ priv->dp_port_num, opcode, len, urb->actual_length);
+ return -1;
+ }
+
+ if (opcode == DIGI_CMD_RECEIVE_DATA && len < 1) {
+ dev_err(&port->dev, "malformed data packet received\n");
return -1;
}
@@ -1431,6 +1436,9 @@ static int digi_read_inb_callback(struct urb *urb)
/* receive data */
if (opcode == DIGI_CMD_RECEIVE_DATA) {
+ port_status = buf[2];
+ data = &buf[3];
+
/* get flag from port_status */
flag = 0;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 19a98116c2ab..b3a21fcbbaf9 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1439,10 +1439,13 @@ static int read_latency_timer(struct usb_serial_port *port)
FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
0, priv->interface,
buf, 1, WDR_TIMEOUT);
- if (rv < 0)
+ if (rv < 1) {
dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
- else
+ if (rv >= 0)
+ rv = -EIO;
+ } else {
priv->latency = buf[0];
+ }
kfree(buf);
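This ftdi_sio hunk, and the serial driver hunks that follow (io_edgeport, mct_u232, quatech2, ssu100, ti_usb_3410_5052), all apply the same rule: usb_control_msg() can return fewer bytes than requested without reporting an error, so a short read must be mapped to -EIO rather than treated as success. A hedged sketch of that helper shape:

/* Sketch: treat a short control transfer as an I/O error.
 * The buffer must be DMA-capable (heap-allocated), as the io_edgeport
 * change below also enforces. read_reg() is a hypothetical name. */
#include <linux/usb.h>
#include <linux/errno.h>

static int read_reg(struct usb_device *udev, u8 request, void *buf, u16 len)
{
	int ret;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, 0, buf, len, 1000);
	if (ret < len)
		return ret < 0 ? ret : -EIO;	/* short transfer is an error */

	return 0;
}
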
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index b63a6c3899c5..749e1b674145 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -492,20 +492,24 @@ static int get_epic_descriptor(struct edgeport_serial *ep)
int result;
struct usb_serial *serial = ep->serial;
struct edgeport_product_info *product_info = &ep->product_info;
- struct edge_compatibility_descriptor *epic = &ep->epic_descriptor;
+ struct edge_compatibility_descriptor *epic;
struct edge_compatibility_bits *bits;
struct device *dev = &serial->dev->dev;
ep->is_epic = 0;
+
+ epic = kmalloc(sizeof(*epic), GFP_KERNEL);
+ if (!epic)
+ return -ENOMEM;
+
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQUEST_ION_GET_EPIC_DESC,
0xC0, 0x00, 0x00,
- &ep->epic_descriptor,
- sizeof(struct edge_compatibility_descriptor),
+ epic, sizeof(*epic),
300);
-
- if (result > 0) {
+ if (result == sizeof(*epic)) {
ep->is_epic = 1;
+ memcpy(&ep->epic_descriptor, epic, sizeof(*epic));
memset(product_info, 0, sizeof(struct edgeport_product_info));
product_info->NumPorts = epic->NumPorts;
@@ -534,8 +538,16 @@ static int get_epic_descriptor(struct edgeport_serial *ep)
dev_dbg(dev, " IOSPWriteLCR : %s\n", bits->IOSPWriteLCR ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPSetBaudRate : %s\n", bits->IOSPSetBaudRate ? "TRUE": "FALSE");
dev_dbg(dev, " TrueEdgeport : %s\n", bits->TrueEdgeport ? "TRUE": "FALSE");
+
+ result = 0;
+ } else if (result >= 0) {
+ dev_warn(&serial->interface->dev, "short epic descriptor received: %d\n",
+ result);
+ result = -EIO;
}
+ kfree(epic);
+
return result;
}
@@ -2097,8 +2109,7 @@ static int rom_write(struct usb_serial *serial, __u16 extAddr, __u16 addr,
* rom_read
* reads a number of bytes from the Edgeport device starting at the given
* address.
- * If successful returns the number of bytes read, otherwise it returns
- * a negative error number of the problem.
+ * Returns zero on success or a negative error number.
****************************************************************************/
static int rom_read(struct usb_serial *serial, __u16 extAddr,
__u16 addr, __u16 length, __u8 *data)
@@ -2123,12 +2134,17 @@ static int rom_read(struct usb_serial *serial, __u16 extAddr,
USB_REQUEST_ION_READ_ROM,
0xC0, addr, extAddr, transfer_buffer,
current_length, 300);
- if (result < 0)
+ if (result < current_length) {
+ if (result >= 0)
+ result = -EIO;
break;
+ }
memcpy(data, transfer_buffer, current_length);
length -= current_length;
addr += current_length;
data += current_length;
+
+ result = 0;
}
kfree(transfer_buffer);
@@ -2585,9 +2601,10 @@ static void get_manufacturing_desc(struct edgeport_serial *edge_serial)
EDGE_MANUF_DESC_LEN,
(__u8 *)(&edge_serial->manuf_descriptor));
- if (response < 1)
- dev_err(dev, "error in getting manufacturer descriptor\n");
- else {
+ if (response < 0) {
+ dev_err(dev, "error in getting manufacturer descriptor: %d\n",
+ response);
+ } else {
char string[30];
dev_dbg(dev, "**Manufacturer Descriptor\n");
dev_dbg(dev, " RomSize: %dK\n",
@@ -2644,9 +2661,10 @@ static void get_boot_desc(struct edgeport_serial *edge_serial)
EDGE_BOOT_DESC_LEN,
(__u8 *)(&edge_serial->boot_descriptor));
- if (response < 1)
- dev_err(dev, "error in getting boot descriptor\n");
- else {
+ if (response < 0) {
+ dev_err(dev, "error in getting boot descriptor: %d\n",
+ response);
+ } else {
dev_dbg(dev, "**Boot Descriptor:\n");
dev_dbg(dev, " BootCodeLength: %d\n",
le16_to_cpu(edge_serial->boot_descriptor.BootCodeLength));
@@ -2789,7 +2807,7 @@ static int edge_startup(struct usb_serial *serial)
dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
/* Read the epic descriptor */
- if (get_epic_descriptor(edge_serial) <= 0) {
+ if (get_epic_descriptor(edge_serial) < 0) {
/* memcpy descriptor to Supports structures */
memcpy(&edge_serial->epic_descriptor.Supports, descriptor,
sizeof(struct edge_compatibility_bits));
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 930be98d59b3..6b0942428917 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -139,6 +139,7 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
+ unsigned int len = urb->actual_length;
int retval;
int status = urb->status;
struct keyspan_pda_private *priv;
@@ -159,18 +160,26 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
goto exit;
}
+ if (len < 1) {
+ dev_warn(&port->dev, "short message received\n");
+ goto exit;
+ }
+
/* see if the message is data or a status interrupt */
switch (data[0]) {
case 0:
/* rest of message is rx data */
- if (urb->actual_length) {
- tty_insert_flip_string(&port->port, data + 1,
- urb->actual_length - 1);
- tty_flip_buffer_push(&port->port);
- }
+ if (len < 2)
+ break;
+ tty_insert_flip_string(&port->port, data + 1, len - 1);
+ tty_flip_buffer_push(&port->port);
break;
case 1:
/* status interrupt */
+ if (len < 3) {
+ dev_warn(&port->dev, "short interrupt message received\n");
+ break;
+ }
dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]);
switch (data[1]) {
case 1: /* modemline change */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 89726f702202..9bf82c262c5b 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -322,8 +322,12 @@ static int mct_u232_get_modem_stat(struct usb_serial_port *port,
MCT_U232_GET_REQUEST_TYPE,
0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
WDR_TIMEOUT);
- if (rc < 0) {
+ if (rc < MCT_U232_GET_MODEM_STAT_SIZE) {
dev_err(&port->dev, "Get MODEM STATus failed (error = %d)\n", rc);
+
+ if (rc >= 0)
+ rc = -EIO;
+
*msr = 0;
} else {
*msr = buf[0];
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index a3ed07c58754..af0c87276299 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -188,22 +188,22 @@ static inline int qt2_setdevice(struct usb_device *dev, u8 *data)
}
-static inline int qt2_getdevice(struct usb_device *dev, u8 *data)
-{
- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- QT_SET_GET_DEVICE, 0xc0, 0, 0,
- data, 3, QT2_USB_TIMEOUT);
-}
-
static inline int qt2_getregister(struct usb_device *dev,
u8 uart,
u8 reg,
u8 *data)
{
- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- QT_SET_GET_REGISTER, 0xc0, reg,
- uart, data, sizeof(*data), QT2_USB_TIMEOUT);
+ int ret;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_SET_GET_REGISTER, 0xc0, reg,
+ uart, data, sizeof(*data), QT2_USB_TIMEOUT);
+ if (ret < sizeof(*data)) {
+ if (ret >= 0)
+ ret = -EIO;
+ }
+ return ret;
}
static inline int qt2_setregister(struct usb_device *dev,
@@ -372,9 +372,11 @@ static int qt2_open(struct tty_struct *tty, struct usb_serial_port *port)
0xc0, 0,
device_port, data, 2, QT2_USB_TIMEOUT);
- if (status < 0) {
+ if (status < 2) {
dev_err(&port->dev, "%s - open port failed %i\n", __func__,
status);
+ if (status >= 0)
+ status = -EIO;
kfree(data);
return status;
}
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 70a098de429f..886e1294b120 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -80,9 +80,17 @@ static inline int ssu100_setdevice(struct usb_device *dev, u8 *data)
static inline int ssu100_getdevice(struct usb_device *dev, u8 *data)
{
- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- QT_SET_GET_DEVICE, 0xc0, 0, 0,
- data, 3, 300);
+ int ret;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_SET_GET_DEVICE, 0xc0, 0, 0,
+ data, 3, 300);
+ if (ret < 3) {
+ if (ret >= 0)
+ ret = -EIO;
+ }
+
+ return ret;
}
static inline int ssu100_getregister(struct usb_device *dev,
@@ -90,10 +98,17 @@ static inline int ssu100_getregister(struct usb_device *dev,
unsigned short reg,
u8 *data)
{
- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- QT_SET_GET_REGISTER, 0xc0, reg,
- uart, data, sizeof(*data), 300);
+ int ret;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_SET_GET_REGISTER, 0xc0, reg,
+ uart, data, sizeof(*data), 300);
+ if (ret < sizeof(*data)) {
+ if (ret >= 0)
+ ret = -EIO;
+ }
+ return ret;
}
@@ -289,8 +304,10 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
QT_OPEN_CLOSE_CHANNEL,
QT_TRANSFER_IN, 0x01,
0, data, 2, 300);
- if (result < 0) {
+ if (result < 2) {
dev_dbg(&port->dev, "%s - open failed %i\n", __func__, result);
+ if (result >= 0)
+ result = -EIO;
kfree(data);
return result;
}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 535fcfafc097..fe7f5ace6064 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1352,13 +1352,10 @@ static int ti_command_out_sync(struct ti_device *tdev, __u8 command,
(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT),
value, moduleid, data, size, 1000);
- if (status == size)
- status = 0;
-
- if (status > 0)
- status = -ECOMM;
+ if (status < 0)
+ return status;
- return status;
+ return 0;
}
@@ -1374,8 +1371,7 @@ static int ti_command_in_sync(struct ti_device *tdev, __u8 command,
if (status == size)
status = 0;
-
- if (status > 0)
+ else if (status >= 0)
status = -ECOMM;
return status;
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 37346c40d81d..a74bf6f60774 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -3045,6 +3045,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
rc = mdss_dp_on(pdata);
break;
case MDSS_EVENT_PANEL_ON:
+ if (!dp->power_on) {
+ pr_err("DP Controller not powered on\n");
+ break;
+ }
mdss_dp_update_hdcp_info(dp);
if (dp_is_hdcp_enabled(dp)) {
@@ -3065,6 +3069,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
complete_all(&dp->notification_comp);
break;
case MDSS_EVENT_BLANK:
+ if (!dp->power_on) {
+ pr_err("DP Controller not powered on\n");
+ break;
+ }
if (dp_is_hdcp_enabled(dp)) {
dp->hdcp_status = HDCP_STATE_INACTIVE;
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index c7cac996e5c0..797c8b4a2eb1 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -1815,6 +1815,18 @@ static int mdss_dsi_post_panel_on(struct mdss_panel_data *pdata)
return 0;
}
+irqreturn_t test_hw_vsync_handler(int irq, void *data)
+{
+ struct mdss_panel_data *pdata = (struct mdss_panel_data *)data;
+
+ pr_debug("HW VSYNC\n");
+ MDSS_XLOG(0xaaa, irq);
+ complete_all(&pdata->te_done);
+ if (pdata->next)
+ complete_all(&pdata->next->te_done);
+ return IRQ_HANDLED;
+}
+
int mdss_dsi_cont_splash_on(struct mdss_panel_data *pdata)
{
int ret = 0;
@@ -3296,6 +3308,8 @@ static int mdss_dsi_ctrl_probe(struct platform_device *pdev)
struct device_node *dsi_pan_node = NULL;
const char *ctrl_name;
struct mdss_util_intf *util;
+ static int te_irq_registered;
+ struct mdss_panel_data *pdata;
if (!pdev || !pdev->dev.of_node) {
pr_err("%s: pdev not found for DSI controller\n", __func__);
@@ -3421,6 +3435,23 @@ static int mdss_dsi_ctrl_probe(struct platform_device *pdev)
disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
}
+ pdata = &ctrl_pdata->panel_data;
+ init_completion(&pdata->te_done);
+ if (pdata->panel_info.type == MIPI_CMD_PANEL) {
+ if (!te_irq_registered) {
+ rc = devm_request_irq(&pdev->dev,
+ gpio_to_irq(pdata->panel_te_gpio),
+ test_hw_vsync_handler, IRQF_TRIGGER_FALLING,
+ "VSYNC_GPIO", &ctrl_pdata->panel_data);
+ if (rc) {
+ pr_err("%s: TE request_irq failed\n", __func__);
+ goto error_shadow_clk_deinit;
+ }
+ te_irq_registered = 1;
+ disable_irq_nosync(gpio_to_irq(pdata->panel_te_gpio));
+ }
+ }
+
rc = mdss_dsi_get_bridge_chip_params(pinfo, ctrl_pdata, pdev);
if (rc) {
pr_err("%s: Failed to get bridge params\n", __func__);
@@ -4241,6 +4272,8 @@ static int mdss_dsi_parse_gpio_params(struct platform_device *ctrl_pdev,
* If disp_en_gpio has been set previously (disp_en_gpio > 0)
* while parsing the panel node, then do not override it
*/
+ struct mdss_panel_data *pdata = &ctrl_pdata->panel_data;
+
if (ctrl_pdata->disp_en_gpio <= 0) {
ctrl_pdata->disp_en_gpio = of_get_named_gpio(
ctrl_pdev->dev.of_node,
@@ -4257,6 +4290,7 @@ static int mdss_dsi_parse_gpio_params(struct platform_device *ctrl_pdev,
if (!gpio_is_valid(ctrl_pdata->disp_te_gpio))
pr_err("%s:%d, TE gpio not specified\n",
__func__, __LINE__);
+ pdata->panel_te_gpio = ctrl_pdata->disp_te_gpio;
ctrl_pdata->bklt_en_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
"qcom,platform-bklight-en-gpio", 0);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index 1a471155072b..f0fb791a7b8d 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -165,7 +165,14 @@ void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
MDSS_XLOG(ctrl->ndx, enable, ctrl->mdp_busy, current->pid,
client);
- if (enable == 0) {
+ /*
+ * ensure that before going into ecg or turning
+ * off the clocks, cmd_mdp_busy is not true. During a
+ * race condition, clocks are turned off and so the
+ * isr for cmd_mdp_busy does not get cleared in hw.
+ */
+ if (enable == MDSS_DSI_CLK_OFF ||
+ enable == MDSS_DSI_CLK_EARLY_GATE) {
/* need wait before disable */
mutex_lock(&ctrl->cmd_mutex);
mdss_dsi_cmd_mdp_busy(ctrl);
@@ -2558,15 +2565,8 @@ void mdss_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl)
if (!ctrl->mdp_busy)
rc = 1;
spin_unlock_irqrestore(&ctrl->mdp_lock, flags);
- if (!rc) {
- if (mdss_dsi_mdp_busy_tout_check(ctrl)) {
- pr_err("%s: timeout error\n", __func__);
- MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
- "dsi0_phy", "dsi1_ctrl", "dsi1_phy",
- "vbif", "vbif_nrt", "dbg_bus",
- "vbif_dbg_bus", "dsi_dbg_bus", "panic");
- }
- }
+ if (!rc && mdss_dsi_mdp_busy_tout_check(ctrl))
+ pr_err("%s: timeout error\n", __func__);
}
pr_debug("%s: done pid=%d\n", __func__, current->pid);
MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, current->pid, XLOG_FUNC_EXIT);
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 4eca9cb39223..40a79c4af38e 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -3421,6 +3421,10 @@ int mdss_fb_atomic_commit(struct fb_info *info,
MSMFB_ATOMIC_COMMIT, true, false);
if (mfd->panel.type == WRITEBACK_PANEL) {
output_layer = commit_v1->output_layer;
+ if (!output_layer) {
+ pr_err("Output layer is null\n");
+ goto end;
+ }
wb_change = !mdss_fb_is_wb_config_same(mfd,
commit_v1->output_layer);
if (wb_change) {
@@ -4630,6 +4634,7 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
struct mdp_output_layer __user *output_layer_user;
struct mdp_destination_scaler_data *ds_data = NULL;
struct mdp_destination_scaler_data __user *ds_data_user;
+ struct msm_fb_data_type *mfd;
ret = copy_from_user(&commit, argp, sizeof(struct mdp_layer_commit));
if (ret) {
@@ -4637,6 +4642,16 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
return ret;
}
+ mfd = (struct msm_fb_data_type *)info->par;
+ if (!mfd)
+ return -EINVAL;
+
+ if (mfd->panel_info->panel_dead) {
+ pr_debug("early commit return\n");
+ MDSS_XLOG(mfd->panel_info->panel_dead);
+ return 0;
+ }
+
output_layer_user = commit.commit_v1.output_layer;
if (output_layer_user) {
buffer_size = sizeof(struct mdp_output_layer);
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 8fa229aaa174..07592fa26a49 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -3211,8 +3211,7 @@ static int hdmi_tx_power_on(struct hdmi_tx_ctrl *hdmi_ctrl)
hdmi_tx_core_on(hdmi_ctrl);
- if (hdmi_ctrl->panel.infoframe &&
- !hdmi_tx_is_encryption_set(hdmi_ctrl) &&
+ if (!hdmi_tx_is_encryption_set(hdmi_ctrl) &&
hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
hdmi_tx_config_avmute(hdmi_ctrl, false);
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index fd2c2cdb3820..cd403f19c088 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -1785,7 +1785,7 @@ void mdss_mdp_ctl_notifier_register(struct mdss_mdp_ctl *ctl,
void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl,
struct notifier_block *notifier);
u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl);
-u32 apply_comp_ratio_factor(u32 quota, struct mdss_mdp_format_params *fmt,
+u64 apply_comp_ratio_factor(u64 quota, struct mdss_mdp_format_params *fmt,
struct mult_factor *factor);
int mdss_mdp_scan_pipes(void);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index c062de3c1e59..929eeb270f32 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -815,7 +815,7 @@ static inline bool validate_comp_ratio(struct mult_factor *factor)
return factor->numer && factor->denom;
}
-u32 apply_comp_ratio_factor(u32 quota,
+u64 apply_comp_ratio_factor(u64 quota,
struct mdss_mdp_format_params *fmt,
struct mult_factor *factor)
{
@@ -2736,10 +2736,7 @@ int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
if (!clk_period)
return -EINVAL;
- time_of_line = (pinfo->lcdc.h_back_porch +
- pinfo->lcdc.h_front_porch +
- pinfo->lcdc.h_pulse_width +
- pinfo->xres) * clk_period;
+ time_of_line = mdss_panel_get_htotal(pinfo, true) * clk_period;
time_of_line /= 1000; /* in nano second */
if (!time_of_line)
@@ -2747,10 +2744,7 @@ int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
current_line = ctl->ops.read_line_cnt_fnc(ctl);
- total_line = pinfo->lcdc.v_back_porch +
- pinfo->lcdc.v_front_porch +
- pinfo->lcdc.v_pulse_width +
- pinfo->yres;
+ total_line = mdss_panel_get_vtotal(pinfo);
if (current_line >= total_line)
time_to_vsync = time_of_line * total_line;
@@ -5735,15 +5729,6 @@ static void mdss_mdp_force_border_color(struct mdss_mdp_ctl *ctl)
ctl->mixer_right->params_changed++;
}
-static bool mdss_mdp_handle_backlight_extn(struct mdss_mdp_ctl *ctl)
-{
- if (ctl->intf_type == MDSS_INTF_DSI && !ctl->is_video_mode &&
- ctl->mfd->bl_extn_level >= 0)
- return true;
- else
- return false;
-}
-
int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
struct mdss_mdp_commit_cb *commit_cb)
{
@@ -5910,15 +5895,6 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
if (ctl->ops.wait_pingpong && !mdata->serialize_wait4pp)
mdss_mdp_display_wait4pingpong(ctl, false);
- /*
- * If backlight needs to change, wait for 1 vsync before setting
- * PCC and kickoff
- */
- if (mdss_mdp_handle_backlight_extn(ctl) &&
- ctl->ops.wait_for_vsync_fnc) {
- ret = ctl->ops.wait_for_vsync_fnc(ctl);
- }
-
/* Moved pp programming to post ping pong */
ATRACE_BEGIN("postproc_programming_deferred");
if (!ctl->is_video_mode && ctl->mfd &&
@@ -6077,10 +6053,10 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
pr_warn("ctl %d error displaying frame\n", ctl->num);
/* update backlight in commit */
- if (mdss_mdp_handle_backlight_extn(ctl)) {
- if (ctl->mfd && !IS_CALIB_MODE_BL(ctl->mfd) &&
- (!ctl->mfd->ext_bl_ctrl ||
- !ctl->mfd->bl_level)) {
+ if (ctl->intf_type == MDSS_INTF_DSI && !ctl->is_video_mode &&
+ ctl->mfd && ctl->mfd->bl_extn_level >= 0) {
+ if (!IS_CALIB_MODE_BL(ctl->mfd) && (!ctl->mfd->ext_bl_ctrl ||
+ !ctl->mfd->bl_level)) {
mutex_lock(&ctl->mfd->bl_lock);
mdss_fb_set_backlight(ctl->mfd,
ctl->mfd->bl_extn_level);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index bdbec0b4e721..2e017fe5ec02 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -21,6 +21,7 @@
#include "mdss_debug.h"
#include "mdss_mdp_trace.h"
#include "mdss_dsi_clk.h"
+#include <linux/interrupt.h>
#define MAX_RECOVERY_TRIALS 10
#define MAX_SESSIONS 2
@@ -2090,7 +2091,7 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
struct mdss_mdp_cmd_ctx *ctx;
struct mdss_panel_data *pdata;
unsigned long flags;
- int rc = 0;
+ int rc = 0, te_irq;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
if (!ctx) {
@@ -2144,7 +2145,21 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
__func__,
ctl->num, rc, ctx->pp_timeout_report_cnt,
atomic_read(&ctx->koff_cnt));
- if (ctx->pp_timeout_report_cnt == 0) {
+
+ /* enable TE irq to check if it is coming from the panel */
+ te_irq = gpio_to_irq(pdata->panel_te_gpio);
+ enable_irq(te_irq);
+
+ /* wait for 20ms to ensure we are getting the next TE */
+ usleep_range(20000, 20010);
+
+ reinit_completion(&pdata->te_done);
+ rc = wait_for_completion_timeout(&pdata->te_done, KOFF_TIMEOUT);
+
+ if (!rc) {
+ MDSS_XLOG(0xbac);
+ mdss_fb_report_panel_dead(ctl->mfd);
+ } else if (ctx->pp_timeout_report_cnt == 0) {
MDSS_XLOG(0xbad);
MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
@@ -2158,6 +2173,10 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
"dsi_dbg_bus", "panic");
mdss_fb_report_panel_dead(ctl->mfd);
}
+
+ /* disable te irq */
+ disable_irq_nosync(te_irq);
+
ctx->pp_timeout_report_cnt++;
rc = -EPERM;
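The wait4pingpong hunk above turns a ping-pong timeout into a panel health check: enable the TE interrupt, wait a bounded time for the completion the TE handler signals, and report the panel dead only if no TE arrives. The sketch below isolates that decision; the 100 ms budget is a placeholder for whatever jiffies timeout the caller uses.

/* Sketch: bounded wait for a TE completion to tell a dead panel from a
 * stalled pipeline. Names and the timeout value are placeholders. */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static bool panel_still_alive(int te_irq, struct completion *te_done)
{
	unsigned long left;

	enable_irq(te_irq);
	reinit_completion(te_done);
	left = wait_for_completion_timeout(te_done, msecs_to_jiffies(100));
	disable_irq_nosync(te_irq);

	return left != 0;	/* zero means the wait timed out: no TE seen */
}
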
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 87fff44af389..3c679877705d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -953,13 +953,6 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
pipe->dst.y += mixer->ctl->border_y_off;
}
- if (mfd->panel_orientation & MDP_FLIP_LR)
- pipe->dst.x = pipe->mixer_left->width
- - pipe->dst.x - pipe->dst.w;
- if (mfd->panel_orientation & MDP_FLIP_UD)
- pipe->dst.y = pipe->mixer_left->height
- - pipe->dst.y - pipe->dst.h;
-
pipe->horz_deci = req->horz_deci;
pipe->vert_deci = req->vert_deci;
@@ -2966,6 +2959,16 @@ static int mdss_mdp_overlay_get_fb_pipe(struct msm_fb_data_type *mfd,
req->src_rect = right_rect;
}
+ if (mfd->panel_orientation == MDP_ROT_180) {
+ if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
+ req->src_rect.x = 0;
+ req->dst_rect.x = mixer->width;
+ } else {
+ req->src_rect.x = (split_lm) ? mixer->width : 0;
+ req->dst_rect.x = 0;
+ }
+ }
+
req->z_order = MDSS_MDP_STAGE_BASE;
if (rotate_180)
req->flags |= (MDP_FLIP_LR | MDP_FLIP_UD);
@@ -3505,7 +3508,8 @@ static ssize_t dynamic_fps_sysfs_wta_dfps(struct device *dev,
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct dynamic_fps_data data = {0};
- if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
+ if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl) ||
+ mdss_panel_is_power_off(mfd->panel_power_state)) {
pr_debug("panel is off\n");
return count;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index 724913f376a7..e147405fd455 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -2414,9 +2414,9 @@ bool mdss_mdp_is_amortizable_pipe(struct mdss_mdp_pipe *pipe,
(mixer->type == MDSS_MDP_MIXER_TYPE_INTF)))
return false;
- /* do not apply for sdm660 & sdm630 in command mode */
- if ((IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_320) ||
- IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_330))
+ /* do not apply for msm8998, sdm660 & sdm630 in command mode */
+ if (MDSS_GET_MAJOR(mdata->mdp_rev) ==
+ MDSS_GET_MAJOR(MDSS_MDP_HW_REV_300)
&& !mixer->ctl->is_video_mode)
return false;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 2e85072c4cf7..50c26047185c 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -2567,6 +2567,8 @@ static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer,
ad_hw->base + MDSS_MDP_REG_AD_TFILT_CTRL);
writel_relaxed(ad->cfg.mode | MDSS_AD_AUTO_TRIGGER,
ad_hw->base + MDSS_MDP_REG_AD_MODE_SEL);
+ ad->last_str = 0xFF & readl_relaxed(ad_hw->base +
+ MDSS_MDP_REG_AD_STR_OUT);
}
pp_ad_bypass_config(ad, ctl, ad_hw->num, &ad_bypass);
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index fa1df94976f9..37b0ca7aa44b 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -997,6 +997,9 @@ struct mdss_panel_data {
* are still on; panel will recover after unblank
*/
bool panel_disable_mode;
+
+ int panel_te_gpio;
+ struct completion te_done;
};
struct mdss_panel_debugfs_info {
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba.c b/drivers/video/fbdev/msm/msm_dba/msm_dba.c
index 7a5c9d9d873a..cc6512a4af9b 100644
--- a/drivers/video/fbdev/msm/msm_dba/msm_dba.c
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -80,6 +80,11 @@ void *msm_dba_register_client(struct msm_dba_reg_info *info,
if (rc) {
pr_err("%s: Client register failed (%s, %d)\n",
__func__, info->chip_name, info->instance_id);
+ /* remove the client from list before freeing */
+ mutex_lock_nested(&device->dev_mutex,
+ SINGLE_DEPTH_NESTING);
+ list_del(&client->list);
+ mutex_unlock(&device->dev_mutex);
kfree(client);
mutex_unlock(&register_mutex);
return ERR_PTR(rc);
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
index 690d74fa5271..80dbe83972d7 100644
--- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -2693,7 +2693,8 @@ int mdss_dsi_pre_clkon_cb(void *priv,
for (i = DSI_CORE_PM; i < DSI_MAX_PM; i++) {
if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) &&
(!pdata->panel_info.cont_splash_enabled) &&
- (!sdata->power_data[i].vreg_config->disabled))
+ (!sdata->power_data[i].vreg_config
+ ->lp_disable_allowed))
continue;
rc = msm_dss_enable_vreg(
sdata->power_data[i].vreg_config,
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 198aea66fe71..e5733bb537c9 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1098,7 +1098,6 @@ int revalidate_disk(struct gendisk *disk)
if (disk->fops->revalidate_disk)
ret = disk->fops->revalidate_disk(disk);
- blk_integrity_revalidate(disk);
bdev = bdget_disk(disk, 0);
if (!bdev)
return ret;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index d2e75fe2e072..ba5e702b19b4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1059,6 +1059,13 @@ static int sanity_check_raw_super(struct super_block *sb,
return 1;
}
+ if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid segment count (%u)",
+ le32_to_cpu(raw_super->segment_count));
+ return 1;
+ }
+
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
if (sanity_check_area_boundary(sb, raw_super))
return 1;
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index f15cb11ca8fd..4f09eebd7d95 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -618,11 +618,8 @@ static int sdcardfs_permission(struct vfsmount *mnt, struct inode *inode, int ma
struct inode tmp;
struct inode *top = grab_top(SDCARDFS_I(inode));
- if (!top) {
- release_top(SDCARDFS_I(inode));
- WARN(1, "Top value was null!\n");
+ if (!top)
return -EINVAL;
- }
/*
* Permission check on sdcardfs inode.
@@ -696,10 +693,8 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
inode = d_inode(dentry);
top = grab_top(SDCARDFS_I(inode));
- if (!top) {
- release_top(SDCARDFS_I(inode));
+ if (!top)
return -EINVAL;
- }
/*
* Permission check on sdcardfs inode.
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index a3393e959c63..8a9c9c7adca2 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -192,11 +192,18 @@ static struct inode *sdcardfs_alloc_inode(struct super_block *sb)
return &i->vfs_inode;
}
-static void sdcardfs_destroy_inode(struct inode *inode)
+static void i_callback(struct rcu_head *head)
{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+
kmem_cache_free(sdcardfs_inode_cachep, SDCARDFS_I(inode));
}
+static void sdcardfs_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, i_callback);
+}
+
/* sdcardfs inode cache constructor */
static void init_once(void *obj)
{
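The sdcardfs hunk above defers freeing the inode's private area through call_rcu(), so RCU-protected lookups (for example lock-free path walking) that may still be examining the inode cannot observe its memory being reused within a grace period. A hedged sketch of the shape of that change, with my_cachep and MY_I() as hypothetical names:

/* Sketch: free the per-inode data only after an RCU grace period. */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

extern struct kmem_cache *my_cachep;
void *MY_I(struct inode *inode);

static void my_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(my_cachep, MY_I(inode));
}

static void my_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, my_i_callback);	/* deferred, not immediate */
}
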
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 25c6324a0dd0..3d6e6ce44c5c 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -284,6 +284,12 @@ struct f2fs_nat_block {
#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
/*
+ * F2FS uses 4 bytes to represent block address. As a result, supported size of
+ * disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments.
+ */
+#define F2FS_MAX_SEGMENT ((16 * 1024 * 1024) / 2)
+
+/*
* Note that f2fs_sit_entry->vblocks has the following bit-field information.
* [15:10] : allocation type such as CURSEG_XXXX_TYPE
* [9:0] : valid block count
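The F2FS_MAX_SEGMENT value added above can be sanity-checked with a few lines of user-space arithmetic, assuming F2FS's 4 KiB block size and 2 MiB segment size: 32-bit block addresses give 2^32 * 4 KiB = 16 TiB of addressable space, which is (16 * 1024 * 1024) / 2 = 8388608 segments.

/* Runnable user-space check of the constant, under the block/segment size
 * assumptions stated above. */
#include <stdio.h>

int main(void)
{
	unsigned long long bytes = (1ULL << 32) * 4096;		/* 16 TiB */
	unsigned long long segs  = bytes / (2ULL << 20);	/* 2 MiB segments */

	printf("max bytes    = %llu\n", bytes);
	printf("max segments = %llu (macro: %d)\n", segs, (16 * 1024 * 1024) / 2);
	return 0;
}
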
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 847cc1d91634..5012fcdb4c9e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -742,11 +742,9 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
#if defined(CONFIG_BLK_DEV_INTEGRITY)
extern void blk_integrity_add(struct gendisk *);
extern void blk_integrity_del(struct gendisk *);
-extern void blk_integrity_revalidate(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline void blk_integrity_add(struct gendisk *disk) { }
static inline void blk_integrity_del(struct gendisk *disk) { }
-static inline void blk_integrity_revalidate(struct gendisk *disk) { }
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#else /* CONFIG_BLOCK */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 74921a39edee..a72ce396bb7d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -91,12 +91,6 @@ enum hrtimer_restart {
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
- * @start_pid: timer statistics field to store the pid of the task which
- * started the timer
- * @start_site: timer statistics field to store the site where the timer
- * was started
- * @start_comm: timer statistics field to store the name of the process which
- * started the timer
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
@@ -107,11 +101,6 @@ struct hrtimer {
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
-#ifdef CONFIG_TIMER_STATS
- int start_pid;
- void *start_site;
- char start_comm[16];
-#endif
};
/**
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 2a1a6fec179f..0065ffc9322b 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -223,6 +223,7 @@ extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
bool lock_needed, bool is_cmdq_dcmd);
+extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host);
/**
* mmc_claim_host - exclusively claim a host
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
index ca60fbdec2f3..1c671552ec37 100644
--- a/include/linux/netfilter/xt_qtaguid.h
+++ b/include/linux/netfilter/xt_qtaguid.h
@@ -10,4 +10,5 @@
#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
#define xt_qtaguid_match_info xt_owner_match_info
+int qtaguid_untag(struct socket *sock, bool kernel);
#endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index aa8edf9928eb..5cc13e9fbd8f 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -248,6 +248,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
POWER_SUPPLY_PROP_HW_CURRENT_MAX,
POWER_SUPPLY_PROP_REAL_TYPE,
+ POWER_SUPPLY_PROP_PR_SWAP,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 1239c6ef949e..5ea2c58406de 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -21,11 +21,6 @@ struct timer_list {
u32 flags;
int slack;
-#ifdef CONFIG_TIMER_STATS
- int start_pid;
- void *start_site;
- char start_comm[16];
-#endif
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
@@ -188,46 +183,6 @@ extern bool check_pending_deferrable_timers(int cpu);
/* To be used from cpusets, only */
extern void timer_quiesce_cpu(void *cpup);
-/*
- * Timer-statistics info:
- */
-#ifdef CONFIG_TIMER_STATS
-
-extern int timer_stats_active;
-
-extern void init_timer_stats(void);
-
-extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm, u32 flags);
-
-extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
- void *addr);
-
-static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
-{
- if (likely(!timer_stats_active))
- return;
- __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
-}
-
-static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
-{
- timer->start_site = NULL;
-}
-#else
-static inline void init_timer_stats(void)
-{
-}
-
-static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
-{
-}
-
-static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
-{
-}
-#endif
-
extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 5dd75fa47dd8..f9be467d6695 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -14,6 +14,7 @@ struct ci_hdrc;
* struct ci_hdrc_cable - structure for external connector cable state tracking
* @state: current state of the line
* @changed: set to true when extcon event happen
+ * @enabled: set to true if we've enabled the vbus or id interrupt
* @edev: device which generate events
* @ci: driver state of the chipidea device
* @nb: hold event notification callback
@@ -22,6 +23,7 @@ struct ci_hdrc;
struct ci_hdrc_cable {
bool state;
bool changed;
+ bool enabled;
struct extcon_dev *edev;
struct ci_hdrc *ci;
struct notifier_block nb;
diff --git a/include/media/ais/msm_ais.h b/include/media/ais/msm_ais.h
new file mode 100644
index 000000000000..93957a6e467b
--- /dev/null
+++ b/include/media/ais/msm_ais.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MSM_AIS_CAMERA_H
+#define __LINUX_MSM_AIS_CAMERA_H
+
+#include <uapi/media/ais/msm_ais.h>
+
+#ifdef CONFIG_COMPAT
+#define MSM_CAM_V4L2_IOCTL_NOTIFY32 \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 30, struct v4l2_event32)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_META32 \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 31, struct v4l2_event32)
+
+#define MSM_CAM_V4L2_IOCTL_CMD_ACK32 \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 32, struct v4l2_event32)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR32 \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 33, struct v4l2_event32)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG32 \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 34, struct v4l2_event32)
+
+#endif
+
+#endif
+
diff --git a/include/media/ais/msm_ais_buf_mgr.h b/include/media/ais/msm_ais_buf_mgr.h
new file mode 100644
index 000000000000..c2b9ff0f1b7c
--- /dev/null
+++ b/include/media/ais/msm_ais_buf_mgr.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MEDIA_MSM_AIS_BUF_MGR_H__
+#define __MEDIA_MSM_AIS_BUF_MGR_H__
+
+#include <uapi/media/ais/msm_ais_buf_mgr.h>
+#include <linux/compat.h>
+
+struct v4l2_subdev *msm_buf_mngr_get_subdev(void);
+
+#ifdef CONFIG_COMPAT
+
+struct msm_buf_mngr_info32_t {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t frame_id;
+ struct compat_timeval timestamp;
+ uint32_t index;
+ uint32_t reserved;
+ enum msm_camera_buf_mngr_buf_type type;
+ struct msm_camera_user_buf_cont_t user_buf;
+};
+
+#define VIDIOC_MSM_BUF_MNGR_GET_BUF32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 33, struct msm_buf_mngr_info32_t)
+
+#define VIDIOC_MSM_BUF_MNGR_PUT_BUF32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 34, struct msm_buf_mngr_info32_t)
+
+#define VIDIOC_MSM_BUF_MNGR_BUF_DONE32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 35, struct msm_buf_mngr_info32_t)
+
+#define VIDIOC_MSM_BUF_MNGR_FLUSH32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 39, struct msm_buf_mngr_info32_t)
+
+#endif
+
+#endif
+
diff --git a/include/media/ais/msm_ais_isp.h b/include/media/ais/msm_ais_isp.h
new file mode 100644
index 000000000000..9ebb25438f50
--- /dev/null
+++ b/include/media/ais/msm_ais_isp.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_AIS_ISP__
+#define __MSM_AIS_ISP__
+
+#include <uapi/media/ais/msm_ais_isp.h>
+
+#ifdef CONFIG_COMPAT
+struct msm_isp_event_data32 {
+ struct compat_timeval timestamp;
+ struct compat_timeval mono_timestamp;
+ uint32_t frame_id;
+ union {
+ struct msm_isp_stats_event stats;
+ struct msm_isp_buf_event buf_done;
+ struct msm_isp_fetch_eng_event fetch_done;
+ struct msm_isp_error_info error_info;
+ struct msm_isp_output_info output_info;
+ struct msm_isp_sof_info sof_info;
+ } u;
+};
+#endif
+
+#endif
+
diff --git a/include/media/ais/msm_ais_sensor.h b/include/media/ais/msm_ais_sensor.h
new file mode 100644
index 000000000000..982e8489709f
--- /dev/null
+++ b/include/media/ais/msm_ais_sensor.h
@@ -0,0 +1,294 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MSM_AIS_SENSOR_H
+#define __LINUX_MSM_AIS_SENSOR_H
+
+#include <uapi/media/ais/msm_ais_sensor.h>
+#include <uapi/media/ais/msm_ais_sensor_sdk.h>
+
+#include <linux/compat.h>
+
+#ifdef CONFIG_COMPAT
+
+struct msm_sensor_power_setting32 {
+ enum msm_sensor_power_seq_type_t seq_type;
+ uint16_t seq_val;
+ compat_uint_t config_val;
+ uint16_t delay;
+ compat_uptr_t data[10];
+};
+
+struct msm_sensor_power_setting_array32 {
+ struct msm_sensor_power_setting32 power_setting_a[MAX_POWER_CONFIG];
+ compat_uptr_t power_setting;
+ uint16_t size;
+ struct msm_sensor_power_setting32
+ power_down_setting_a[MAX_POWER_CONFIG];
+ compat_uptr_t power_down_setting;
+ uint16_t size_down;
+};
+
+struct msm_camera_sensor_slave_info32 {
+ char sensor_name[32];
+ char eeprom_name[32];
+ char actuator_name[32];
+ char ois_name[32];
+ char flash_name[32];
+ enum msm_sensor_camera_id_t camera_id;
+ uint16_t slave_addr;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ struct msm_sensor_id_info_t sensor_id_info;
+ struct msm_sensor_power_setting_array32 power_setting_array;
+ uint8_t is_init_params_valid;
+ struct msm_sensor_init_params sensor_init_params;
+ enum msm_sensor_output_format_t output_format;
+};
+
+struct msm_camera_csid_lut_params32 {
+ uint8_t num_cid;
+ struct msm_camera_csid_vc_cfg vc_cfg_a[MAX_CID];
+ compat_uptr_t vc_cfg[MAX_CID];
+};
+
+struct msm_camera_csid_params32 {
+ uint8_t lane_cnt;
+ uint16_t lane_assign;
+ uint8_t phy_sel;
+ uint32_t csi_clk;
+ struct msm_camera_csid_lut_params32 lut_params;
+ uint8_t csi_3p_sel;
+};
+
+struct msm_camera_csi2_params32 {
+ struct msm_camera_csid_params32 csid_params;
+ struct msm_camera_csiphy_params csiphy_params;
+ uint8_t csi_clk_scale_enable;
+};
+
+struct csid_cfg_data32 {
+ enum csid_cfg_type_t cfgtype;
+ union {
+ uint32_t csid_version;
+ compat_uptr_t csid_params;
+ compat_uptr_t csid_testmode_params;
+ uint32_t csid_cidmask;
+ } cfg;
+};
+
+struct msm_ir_led_cfg_data_t32 {
+ enum msm_ir_led_cfg_type_t cfg_type;
+ int32_t pwm_duty_on_ns;
+ int32_t pwm_period_ns;
+};
+
+struct msm_ir_cut_cfg_data_t32 {
+ enum msm_ir_cut_cfg_type_t cfg_type;
+};
+
+struct eeprom_read_t32 {
+ compat_uptr_t dbuffer;
+ uint32_t num_bytes;
+};
+
+struct eeprom_write_t32 {
+ compat_uptr_t dbuffer;
+ uint32_t num_bytes;
+};
+
+struct msm_eeprom_info_t32 {
+ compat_uptr_t power_setting_array;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ compat_uptr_t mem_map_array;
+};
+
+struct msm_eeprom_cfg_data32 {
+ enum eeprom_cfg_type_t cfgtype;
+ uint8_t is_supported;
+ union {
+ char eeprom_name[MAX_SENSOR_NAME];
+ struct eeprom_get_t get_data;
+ struct eeprom_read_t32 read_data;
+ struct eeprom_write_t32 write_data;
+ struct msm_eeprom_info_t32 eeprom_info;
+ } cfg;
+};
+
+struct msm_camera_i2c_seq_reg_setting32 {
+ compat_uptr_t reg_setting;
+ uint16_t size;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ uint16_t delay;
+};
+
+struct msm_camera_i2c_reg_setting32 {
+ compat_uptr_t reg_setting;
+ uint16_t size;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ enum msm_camera_i2c_data_type data_type;
+ uint16_t delay;
+};
+
+struct msm_camera_i2c_array_write_config32 {
+ struct msm_camera_i2c_reg_setting32 conf_array;
+ uint16_t slave_addr;
+};
+
+struct msm_actuator_tuning_params_t32 {
+ int16_t initial_code;
+ uint16_t pwd_step;
+ uint16_t region_size;
+ uint32_t total_steps;
+ compat_uptr_t region_params;
+};
+
+struct msm_actuator_params_t32 {
+ enum actuator_type act_type;
+ uint8_t reg_tbl_size;
+ uint16_t data_size;
+ uint16_t init_setting_size;
+ uint32_t i2c_addr;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ enum msm_camera_i2c_reg_addr_type i2c_addr_type;
+ enum msm_camera_i2c_data_type i2c_data_type;
+ compat_uptr_t reg_tbl_params;
+ compat_uptr_t init_settings;
+ struct park_lens_data_t park_lens;
+};
+
+struct msm_actuator_set_info_t32 {
+ struct msm_actuator_params_t32 actuator_params;
+ struct msm_actuator_tuning_params_t32 af_tuning_params;
+};
+
+struct sensor_init_cfg_data32 {
+ enum msm_sensor_init_cfg_type_t cfgtype;
+ struct msm_sensor_info_t probed_info;
+ char entity_name[MAX_SENSOR_NAME];
+ union {
+ compat_uptr_t setting;
+ } cfg;
+};
+
+struct msm_actuator_move_params_t32 {
+ int8_t dir;
+ int8_t sign_dir;
+ int16_t dest_step_pos;
+ int32_t num_steps;
+ uint16_t curr_lens_pos;
+ compat_uptr_t ringing_params;
+};
+
+struct msm_actuator_cfg_data32 {
+ int cfgtype;
+ uint8_t is_af_supported;
+ union {
+ struct msm_actuator_move_params_t32 move;
+ struct msm_actuator_set_info_t32 set_info;
+ struct msm_actuator_get_info_t get_info;
+ struct msm_actuator_set_position_t setpos;
+ enum af_camera_name cam_name;
+ } cfg;
+};
+
+struct csiphy_cfg_data32 {
+ enum csiphy_cfg_type_t cfgtype;
+ union {
+ compat_uptr_t csiphy_params;
+ compat_uptr_t csi_lane_params;
+ } cfg;
+};
+
+struct sensorb_cfg_data32 {
+ int cfgtype;
+ union {
+ struct msm_sensor_info_t sensor_info;
+ struct msm_sensor_init_params sensor_init_params;
+ compat_uptr_t setting;
+ struct msm_sensor_i2c_sync_params sensor_i2c_sync_params;
+ } cfg;
+};
+
+struct msm_ois_params_t32 {
+ uint16_t data_size;
+ uint16_t setting_size;
+ uint32_t i2c_addr;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ enum msm_camera_i2c_reg_addr_type i2c_addr_type;
+ enum msm_camera_i2c_data_type i2c_data_type;
+ compat_uptr_t settings;
+};
+
+struct msm_ois_set_info_t32 {
+ struct msm_ois_params_t32 ois_params;
+};
+
+struct msm_ois_cfg_data32 {
+ int cfgtype;
+ union {
+ struct msm_ois_set_info_t32 set_info;
+ compat_uptr_t settings;
+ } cfg;
+};
+
+struct msm_flash_init_info_t32 {
+ enum msm_flash_driver_type flash_driver_type;
+ uint32_t slave_addr;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ compat_uptr_t power_setting_array;
+ compat_uptr_t settings;
+};
+
+struct msm_flash_cfg_data_t32 {
+ enum msm_flash_cfg_type_t cfg_type;
+ int32_t flash_current[MAX_LED_TRIGGERS];
+ int32_t flash_duration[MAX_LED_TRIGGERS];
+ union {
+ compat_uptr_t flash_init_info;
+ compat_uptr_t settings;
+ } cfg;
+};
+
+#define VIDIOC_MSM_ACTUATOR_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct msm_actuator_cfg_data32)
+
+#define VIDIOC_MSM_SENSOR_INIT_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 10, struct sensor_init_cfg_data32)
+
+#define VIDIOC_MSM_CSIPHY_IO_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 4, struct csiphy_cfg_data32)
+
+#define VIDIOC_MSM_SENSOR_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct sensorb_cfg_data32)
+
+#define VIDIOC_MSM_EEPROM_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 8, struct msm_eeprom_cfg_data32)
+
+#define VIDIOC_MSM_OIS_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 11, struct msm_ois_cfg_data32)
+
+#define VIDIOC_MSM_CSID_IO_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct csid_cfg_data32)
+
+#define VIDIOC_MSM_FLASH_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct msm_flash_cfg_data_t32)
+
+#define VIDIOC_MSM_IR_LED_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 14, struct msm_ir_led_cfg_data_t32)
+
+#define VIDIOC_MSM_IR_CUT_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_cut_cfg_data_t32)
+#endif
+
+#endif /* __LINUX_MSM_AIS_SENSOR_H */
+
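[Editor's note — illustrative sketch, not part of the patch.] The structures above mirror their native counterparts but shrink every user-space pointer to a compat_uptr_t so that a 32-bit process and a 64-bit kernel agree on the layout. The usual conversion idiom in a compat handler is sketched below; the native struct sensor_init_cfg_data and the handler name are assumptions for illustration, only the 32-bit layout comes from this header.

	/*
	 * Sketch only: widening a 32-bit ioctl payload in a compat handler.
	 * Assumes a native 'struct sensor_init_cfg_data' with the same members
	 * but a real user pointer in cfg.setting (not shown in this patch).
	 */
	static long example_init_cfg_compat(void __user *arg)
	{
		struct sensor_init_cfg_data32 cfg32;
		struct sensor_init_cfg_data cfg;

		if (copy_from_user(&cfg32, arg, sizeof(cfg32)))
			return -EFAULT;

		cfg.cfgtype = cfg32.cfgtype;
		cfg.probed_info = cfg32.probed_info;
		memcpy(cfg.entity_name, cfg32.entity_name, sizeof(cfg.entity_name));
		/* compat_uptr_t holds a 32-bit user pointer; compat_ptr() widens it */
		cfg.cfg.setting = compat_ptr(cfg32.cfg.setting);

		/* ... continue on the same path as the native ioctl ... */
		return 0;
	}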
diff --git a/include/media/cec-edid.h b/include/media/cec-edid.h
new file mode 100644
index 000000000000..bdf731ecba1a
--- /dev/null
+++ b/include/media/cec-edid.h
@@ -0,0 +1,104 @@
+/*
+ * cec-edid - HDMI Consumer Electronics Control & EDID helpers
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MEDIA_CEC_EDID_H
+#define _MEDIA_CEC_EDID_H
+
+#include <linux/types.h>
+
+#define CEC_PHYS_ADDR_INVALID 0xffff
+#define cec_phys_addr_exp(pa) \
+ ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
+
+/**
+ * cec_get_edid_phys_addr() - find and return the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @offset: If not %NULL then the location of the physical address
+ * bytes in the EDID will be returned here. This is set to 0
+ * if there is no physical address found.
+ *
+ * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
+ */
+u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset);
+
+/**
+ * cec_set_edid_phys_addr() - find and set the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @phys_addr: the new physical address
+ *
+ * This function finds the location of the physical address in the EDID
+ * and fills in the given physical address and updates the checksum
+ * at the end of the EDID block. It does nothing if the EDID doesn't
+ * contain a physical address.
+ */
+void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
+
+/**
+ * cec_phys_addr_for_input() - calculate the PA for an input
+ *
+ * @phys_addr: the physical address of the parent
+ * @input: the number of the input port, must be between 1 and 15
+ *
+ * This function calculates a new physical address based on the input
+ * port number. For example:
+ *
+ * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
+ *
+ * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
+ *
+ * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
+ *
+ * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
+ *
+ * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
+ */
+u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
+
+/**
+ * cec_phys_addr_validate() - validate a physical address from an EDID
+ *
+ * @phys_addr: the physical address to validate
+ * @parent: if not %NULL, then this is filled with the parent's PA.
+ * @port: if not %NULL, then this is filled with the input port.
+ *
+ * This validates a physical address as read from an EDID. If the
+ * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
+ * then it will return -EINVAL.
+ *
+ * The parent PA is passed into %parent and the input port is passed into
+ * %port. For example:
+ *
+ * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
+ *
+ * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
+ *
+ * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
+ *
+ * PA = f.f.f.f: has parent f.f.f.f and input port 0.
+ *
+ * Return: 0 if the PA is valid, -EINVAL if not.
+ */
+int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
+
+#endif /* _MEDIA_CEC_EDID_H */
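[Editor's note — illustrative sketch, not part of the patch.] As a sketch of how these helpers fit together: read the sink's physical address out of its EDID, validate it, then derive the address a source on one of its input ports should claim. Only the helper semantics documented above are assumed.

	/* Sketch only: derive the PA for a device on input port 2 of this sink. */
	static u16 example_pa_for_input2(const u8 *edid, unsigned int size)
	{
		u16 pa = cec_get_edid_phys_addr(edid, size, NULL);
		u16 parent, port;

		if (pa == CEC_PHYS_ADDR_INVALID ||
		    cec_phys_addr_validate(pa, &parent, &port))
			return CEC_PHYS_ADDR_INVALID;

		/* e.g. 3.0.0.0 (0x3000) with input 2 becomes 3.2.0.0 (0x3200) */
		return cec_phys_addr_for_input(pa, 2);
	}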
diff --git a/include/media/cec.h b/include/media/cec.h
new file mode 100644
index 000000000000..96a0aa770d61
--- /dev/null
+++ b/include/media/cec.h
@@ -0,0 +1,239 @@
+/*
+ * cec - HDMI Consumer Electronics Control support header
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MEDIA_CEC_H
+#define _MEDIA_CEC_H
+
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/kthread.h>
+#include <linux/timer.h>
+#include <linux/cec-funcs.h>
+#include <media/rc-core.h>
+#include <media/cec-edid.h>
+
+/**
+ * struct cec_devnode - cec device node
+ * @dev: cec device
+ * @cdev: cec character device
+ * @minor: device node minor number
+ * @registered: the device was correctly registered
+ * @unregistered: the device was unregistered
+ * @lock: lock to control access to the filehandle list
+ * @fhs: the list of open filehandles (cec_fh)
+ *
+ * This structure represents a cec-related device node.
+ *
+ * The parent device (dev.parent) is a physical device. It must be set by the
+ * core or device drivers before registering the node.
+ */
+struct cec_devnode {
+ /* sysfs */
+ struct device dev;
+ struct cdev cdev;
+
+ /* device info */
+ int minor;
+ bool registered;
+ bool unregistered;
+ struct list_head fhs;
+ struct mutex lock;
+};
+
+struct cec_adapter;
+struct cec_data;
+
+struct cec_data {
+ struct list_head list;
+ struct list_head xfer_list;
+ struct cec_adapter *adap;
+ struct cec_msg msg;
+ struct cec_fh *fh;
+ struct delayed_work work;
+ struct completion c;
+ u8 attempts;
+ bool new_initiator;
+ bool blocking;
+ bool completed;
+};
+
+struct cec_msg_entry {
+ struct list_head list;
+ struct cec_msg msg;
+};
+
+#define CEC_NUM_EVENTS CEC_EVENT_LOST_MSGS
+
+struct cec_fh {
+ struct list_head list;
+ struct list_head xfer_list;
+ struct cec_adapter *adap;
+ u8 mode_initiator;
+ u8 mode_follower;
+
+ /* Events */
+ wait_queue_head_t wait;
+ unsigned int pending_events;
+ struct cec_event events[CEC_NUM_EVENTS];
+ struct mutex lock;
+ struct list_head msgs; /* queued messages */
+ unsigned int queued_msgs;
+};
+
+#define CEC_SIGNAL_FREE_TIME_RETRY 3
+#define CEC_SIGNAL_FREE_TIME_NEW_INITIATOR 5
+#define CEC_SIGNAL_FREE_TIME_NEXT_XFER 7
+
+/* The nominal data bit period is 2.4 ms */
+#define CEC_FREE_TIME_TO_USEC(ft) ((ft) * 2400)
+
+struct cec_adap_ops {
+ /* Low-level callbacks */
+ int (*adap_enable)(struct cec_adapter *adap, bool enable);
+ int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
+ int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
+ int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
+ void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
+
+ /* High-level CEC message callback */
+ int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
+};
+
+/*
+ * The minimum message length you can receive (excepting poll messages) is 2.
+ * With a transfer rate of at most 36 bytes per second this makes 18 messages
+ * per second worst case.
+ *
+ * We queue at most 3 seconds worth of received messages. The CEC specification
+ * requires that messages are replied to within a second, so 3 seconds should
+ * give more than enough margin. Since most messages are actually more than 2
+ * bytes, this is in practice a lot more than 3 seconds.
+ */
+#define CEC_MAX_MSG_RX_QUEUE_SZ (18 * 3)
+
+/*
+ * The transmit queue is limited to 1 second worth of messages (worst case).
+ * Messages can be transmitted by userspace and kernel space. But for both it
+ * makes no sense to have a lot of messages queued up. One second seems
+ * reasonable.
+ */
+#define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1)
+
+struct cec_adapter {
+ struct module *owner;
+ char name[32];
+ struct cec_devnode devnode;
+ struct mutex lock;
+ struct rc_dev *rc;
+
+ struct list_head transmit_queue;
+ unsigned int transmit_queue_sz;
+ struct list_head wait_queue;
+ struct cec_data *transmitting;
+
+ struct task_struct *kthread_config;
+ struct completion config_completion;
+
+ struct task_struct *kthread;
+ wait_queue_head_t kthread_waitq;
+ wait_queue_head_t waitq;
+
+ const struct cec_adap_ops *ops;
+ void *priv;
+ u32 capabilities;
+ u8 available_log_addrs;
+
+ u16 phys_addr;
+ bool is_configuring;
+ bool is_configured;
+ u32 monitor_all_cnt;
+ u32 follower_cnt;
+ struct cec_fh *cec_follower;
+ struct cec_fh *cec_initiator;
+ bool passthrough;
+ struct cec_log_addrs log_addrs;
+
+ struct dentry *cec_dir;
+ struct dentry *status_file;
+
+ u16 phys_addrs[15];
+ u32 sequence;
+
+ char input_name[32];
+ char input_phys[32];
+ char input_drv[32];
+};
+
+static inline bool cec_has_log_addr(const struct cec_adapter *adap, u8 log_addr)
+{
+ return adap->log_addrs.log_addr_mask & (1 << log_addr);
+}
+
+static inline bool cec_is_sink(const struct cec_adapter *adap)
+{
+ return adap->phys_addr == 0;
+}
+
+#if IS_ENABLED(CONFIG_MEDIA_CEC_SUPPORT)
+struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
+ void *priv, const char *name, u32 caps, u8 available_las);
+int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
+void cec_unregister_adapter(struct cec_adapter *adap);
+void cec_delete_adapter(struct cec_adapter *adap);
+
+int cec_s_log_addrs(struct cec_adapter *adap, struct cec_log_addrs *log_addrs,
+ bool block);
+void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr,
+ bool block);
+int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
+ bool block);
+
+/* Called by the adapter */
+void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
+ u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt);
+void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg);
+
+#else
+
+static inline int cec_register_adapter(struct cec_adapter *adap,
+ struct device *parent)
+{
+ return 0;
+}
+
+static inline void cec_unregister_adapter(struct cec_adapter *adap)
+{
+}
+
+static inline void cec_delete_adapter(struct cec_adapter *adap)
+{
+}
+
+static inline void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr,
+ bool block)
+{
+}
+
+#endif
+
+#endif /* _MEDIA_CEC_H */
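[Editor's note — hedged skeleton, not part of the patch.] To make the registration flow concrete, here is the minimal shape of a driver built on the declarations above. The my_* callbacks are placeholders, the CEC_CAP_* flags come from the UAPI header (not shown in this hunk), and the ERR_PTR convention on allocation failure is assumed.

	static int my_adap_enable(struct cec_adapter *adap, bool enable) { return 0; }
	static int my_adap_log_addr(struct cec_adapter *adap, u8 logical_addr) { return 0; }
	static int my_adap_transmit(struct cec_adapter *adap, u8 attempts,
				    u32 signal_free_time, struct cec_msg *msg) { return 0; }

	static const struct cec_adap_ops my_cec_ops = {
		.adap_enable   = my_adap_enable,
		.adap_log_addr = my_adap_log_addr,
		.adap_transmit = my_adap_transmit,
	};

	static int my_probe(struct platform_device *pdev)
	{
		struct cec_adapter *adap;
		int ret;

		/* caps and available logical addresses are illustrative values */
		adap = cec_allocate_adapter(&my_cec_ops, NULL, "my-cec",
					    CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS, 1);
		if (IS_ERR(adap))
			return PTR_ERR(adap);

		ret = cec_register_adapter(adap, &pdev->dev);
		if (ret)
			cec_delete_adapter(adap);
		return ret;
	}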
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index d540657819ef..9cba4907695c 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -20,6 +20,8 @@
#define ADDRCONF_TIMER_FUZZ (HZ / 4)
#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
+#define ADDRCONF_NOTIFY_PRIORITY 0
+
#include <linux/in.h>
#include <linux/in6.h>
diff --git a/include/net/cnss.h b/include/net/cnss.h
index be58e32e6c7a..d6f27759af17 100644
--- a/include/net/cnss.h
+++ b/include/net/cnss.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -187,6 +187,12 @@ extern int cnss_pcie_set_wlan_mac_address(const u8 *in, uint32_t len);
extern u8 *cnss_get_wlan_mac_address(struct device *dev, uint32_t *num);
extern int cnss_sdio_set_wlan_mac_address(const u8 *in, uint32_t len);
+enum cnss_cc_src {
+ CNSS_SOURCE_CORE,
+ CNSS_SOURCE_11D,
+ CNSS_SOURCE_USER
+};
+
enum {
CNSS_RESET_SOC = 0,
CNSS_RESET_SUBSYS_COUPLED,
@@ -250,4 +256,6 @@ extern u8 *cnss_common_get_wlan_mac_address(struct device *dev, uint32_t *num);
extern int cnss_power_up(struct device *dev);
extern int cnss_power_down(struct device *dev);
extern int cnss_sdio_configure_spdt(bool state);
+extern void cnss_set_cc_source(enum cnss_cc_src cc_source);
+extern enum cnss_cc_src cnss_get_cc_source(void);
#endif /* _NET_CNSS_H_ */
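[Editor's note — illustrative only.] A small sketch of how a WLAN driver might use the new country-code source accessors; the surrounding driver logic is assumed.

	/* Sketch only: record and query where the regulatory country code came from. */
	static void wlan_note_cc_from_userspace(void)
	{
		cnss_set_cc_source(CNSS_SOURCE_USER);
	}

	static bool wlan_cc_came_from_11d(void)
	{
		return cnss_get_cc_source() == CNSS_SOURCE_11D;
	}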
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index af0e8c081191..814a13d22df6 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -77,6 +77,7 @@ static inline struct dst_entry *ip6_route_output(struct net *net,
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
int flags);
+void ip6_route_init_special_entries(void);
int ip6_route_init(void);
void ip6_route_cleanup(void);
diff --git a/include/soc/qcom/ais.h b/include/soc/qcom/ais.h
new file mode 100644
index 000000000000..a1486269dfd5
--- /dev/null
+++ b/include/soc/qcom/ais.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __AIS_H__
+#define __AIS_H__
+
+#include <media/ais/msm_ais_sensor.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+
+
+enum msm_camera_device_type_t {
+ MSM_CAMERA_I2C_DEVICE,
+ MSM_CAMERA_PLATFORM_DEVICE,
+ MSM_CAMERA_SPI_DEVICE,
+};
+
+enum msm_bus_perf_setting {
+ S_INIT,
+ S_PREVIEW,
+ S_VIDEO,
+ S_CAPTURE,
+ S_ZSL,
+ S_STEREO_VIDEO,
+ S_STEREO_CAPTURE,
+ S_DEFAULT,
+ S_LIVESHOT,
+ S_DUAL,
+ S_EXIT
+};
+
+struct msm_camera_slave_info {
+ uint16_t sensor_slave_addr;
+ uint16_t sensor_id_reg_addr;
+ uint16_t sensor_id;
+ uint16_t sensor_id_mask;
+};
+
+struct msm_cam_clk_info {
+ const char *clk_name;
+ long clk_rate;
+ uint32_t delay;
+};
+
+struct msm_pinctrl_info {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+ bool use_pinctrl;
+};
+
+struct msm_cam_clk_setting {
+ struct msm_cam_clk_info *clk_info;
+ uint16_t num_clk_info;
+ uint8_t enable;
+};
+
+struct v4l2_subdev_info {
+ u32 code;
+ enum v4l2_colorspace colorspace;
+ uint16_t fmt;
+ uint16_t order;
+};
+
+struct msm_camera_gpio_num_info {
+ uint16_t gpio_num[SENSOR_GPIO_MAX];
+ uint8_t valid[SENSOR_GPIO_MAX];
+};
+
+struct msm_camera_gpio_conf {
+ void *cam_gpiomux_conf_tbl;
+ uint8_t cam_gpiomux_conf_tbl_size;
+ struct gpio *cam_gpio_common_tbl;
+ uint8_t cam_gpio_common_tbl_size;
+ struct gpio *cam_gpio_req_tbl;
+ uint8_t cam_gpio_req_tbl_size;
+ uint32_t gpio_no_mux;
+ uint32_t *camera_off_table;
+ uint8_t camera_off_table_size;
+ uint32_t *camera_on_table;
+ uint8_t camera_on_table_size;
+ struct msm_camera_gpio_num_info *gpio_num_info;
+};
+
+struct msm_camera_power_ctrl_t {
+ struct device *dev;
+ struct msm_sensor_power_setting *power_setting;
+ uint16_t power_setting_size;
+ struct msm_sensor_power_setting *power_down_setting;
+ uint16_t power_down_setting_size;
+ struct msm_camera_gpio_conf *gpio_conf;
+ struct camera_vreg_t *cam_vreg;
+ int num_vreg;
+ struct msm_camera_i2c_conf *i2c_conf;
+ struct clk **clk_ptr;
+ struct msm_cam_clk_info *clk_info;
+ struct msm_pinctrl_info pinctrl_info;
+ uint8_t cam_pinctrl_status;
+ size_t clk_info_size;
+};
+
+enum msm_camera_actuator_name {
+ MSM_ACTUATOR_MAIN_CAM_0,
+ MSM_ACTUATOR_MAIN_CAM_1,
+ MSM_ACTUATOR_MAIN_CAM_2,
+ MSM_ACTUATOR_MAIN_CAM_3,
+ MSM_ACTUATOR_MAIN_CAM_4,
+ MSM_ACTUATOR_MAIN_CAM_5,
+ MSM_ACTUATOR_WEB_CAM_0,
+ MSM_ACTUATOR_WEB_CAM_1,
+ MSM_ACTUATOR_WEB_CAM_2,
+};
+
+struct msm_actuator_info {
+ struct i2c_board_info const *board_info;
+ enum msm_camera_actuator_name cam_name;
+ int bus_id;
+ int vcm_pwd;
+ int vcm_enable;
+};
+
+enum msm_camera_i2c_mux_mode {
+ MODE_R,
+ MODE_L,
+ MODE_DUAL
+};
+
+struct msm_camera_i2c_conf {
+ uint8_t use_i2c_mux;
+ struct platform_device *mux_dev;
+ enum msm_camera_i2c_mux_mode i2c_mux_mode;
+};
+
+struct msm_camera_sensor_board_info {
+ const char *sensor_name;
+ const char *eeprom_name;
+ const char *actuator_name;
+ const char *ois_name;
+ struct msm_camera_slave_info *slave_info;
+ struct msm_camera_csi_lane_params *csi_lane_params;
+ struct msm_camera_sensor_strobe_flash_data *strobe_flash_data;
+ struct msm_actuator_info *actuator_info;
+ struct msm_sensor_info_t *sensor_info;
+ const char *misc_regulator;
+ struct msm_camera_power_ctrl_t power_info;
+ struct msm_camera_sensor_slave_info *cam_slave_info;
+};
+
+enum msm_camera_i2c_cmd_type {
+ MSM_CAMERA_I2C_CMD_WRITE,
+ MSM_CAMERA_I2C_CMD_POLL,
+};
+
+struct msm_camera_i2c_reg_conf {
+ uint16_t reg_addr;
+ uint16_t reg_data;
+ enum msm_camera_i2c_data_type dt;
+ enum msm_camera_i2c_cmd_type cmd_type;
+ int16_t mask;
+};
+
+struct msm_camera_i2c_conf_array {
+ struct msm_camera_i2c_reg_conf *conf;
+ uint16_t size;
+ uint16_t delay;
+ enum msm_camera_i2c_data_type data_type;
+};
+
+struct eeprom_map_t {
+ uint32_t valid_size;
+ uint32_t addr;
+ uint32_t addr_t;
+ uint32_t data;
+ uint32_t data_t;
+ uint32_t delay;
+};
+
+struct eeprom_slave_add_t {
+ uint32_t addr;
+};
+
+struct msm_eeprom_memory_map_t {
+ struct eeprom_map_t page;
+ struct eeprom_map_t pageen;
+ struct eeprom_map_t poll;
+ struct eeprom_map_t mem;
+ struct eeprom_slave_add_t saddr;
+};
+
+struct msm_eeprom_memory_block_t {
+ struct msm_eeprom_memory_map_t *map;
+ uint32_t num_map; /* number of map blocks */
+ uint8_t *mapdata;
+ uint32_t num_data; /* size of total mapdata */
+};
+
+struct msm_eeprom_cmm_t {
+ uint32_t cmm_support;
+ uint32_t cmm_compression;
+ uint32_t cmm_offset;
+ uint32_t cmm_size;
+};
+
+struct msm_eeprom_board_info {
+ const char *eeprom_name;
+ uint16_t i2c_slaveaddr;
+ struct msm_camera_power_ctrl_t power_info;
+ struct msm_eeprom_cmm_t cmm_data;
+ enum i2c_freq_mode_t i2c_freq_mode;
+};
+
+#endif
diff --git a/include/soc/qcom/glink.h b/include/soc/qcom/glink.h
index fedca64ec9a2..cecd0c01d69a 100644
--- a/include/soc/qcom/glink.h
+++ b/include/soc/qcom/glink.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -339,6 +339,22 @@ int glink_qos_start(void *handle);
*/
unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size);
+/**
+ * glink_start_rx_rt() - Vote for RT thread priority on RX.
+ * @handle: Channel handle for which transactions are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_start_rx_rt(void *handle);
+
+/**
+ * glink_end_rx_rt() - Remove the RT thread priority vote on RX.
+ * @handle: Channel handle for which transactions are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_end_rx_rt(void *handle);
+
#else /* CONFIG_MSM_GLINK */
static inline void *glink_open(const struct glink_open_config *cfg_ptr)
{
@@ -427,5 +443,16 @@ static inline unsigned long glink_qos_get_ramp_time(void *handle,
{
return 0;
}
+
+static inline int glink_start_rx_rt(void *handle)
+{
+ return -ENODEV;
+}
+
+static inline int glink_end_rx_rt(void *handle)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_MSM_GLINK */
#endif /* _SOC_QCOM_GLINK_H_ */
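[Editor's note — hedged sketch, not part of the patch.] The two new votes are designed to pair up around a latency-critical receive window; 'handle' is a channel handle obtained from glink_open(), and the surrounding driver logic is assumed.

	/* Sketch only: bracket a latency-sensitive RX burst with the new votes. */
	static void example_low_latency_rx(void *handle)
	{
		if (glink_start_rx_rt(handle))
			pr_warn("glink: RX RT priority vote failed\n");

		/* ... receive latency-critical traffic on this channel ... */

		glink_end_rx_rt(handle);
	}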
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 7ef984afc442..b434da092b8e 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -23,6 +23,12 @@ enum icnss_uevent {
ICNSS_UEVENT_FW_DOWN,
};
+enum cnss_cc_src {
+ CNSS_SOURCE_CORE,
+ CNSS_SOURCE_11D,
+ CNSS_SOURCE_USER
+};
+
struct icnss_uevent_fw_down_data {
bool crashed;
};
@@ -144,5 +150,6 @@ extern bool icnss_is_fw_ready(void);
extern int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len);
extern u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num);
extern int icnss_trigger_recovery(struct device *dev);
-
+extern void cnss_set_cc_source(enum cnss_cc_src cc_source);
+extern enum cnss_cc_src cnss_get_cc_source(void);
#endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
index b08cc7ded26e..9a4d013b363c 100644
--- a/include/soc/qcom/subsystem_restart.h
+++ b/include/soc/qcom/subsystem_restart.h
@@ -133,6 +133,7 @@ extern void subsys_default_online(struct subsys_device *dev);
extern void subsys_set_crash_status(struct subsys_device *dev,
enum crash_status crashed);
extern enum crash_status subsys_get_crash_status(struct subsys_device *dev);
+extern void subsys_set_error(struct subsys_device *dev, const char *error_msg);
void notify_proxy_vote(struct device *device);
void notify_proxy_unvote(struct device *device);
void complete_err_ready(struct subsys_device *subsys);
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 084232a1d06e..1f19ff2210f8 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -4006,6 +4006,32 @@ struct asm_generic_compressed_fmt_blk_t {
} __packed;
+
+/* Command to send sample rate & channels for IEC61937 (compressed) or IEC60958
+ * (pcm) streams. Both audio standards use the same format and are used for
+ * HDMI or SPDIF.
+ */
+#define ASM_DATA_CMD_IEC_60958_MEDIA_FMT 0x0001321E
+
+struct asm_iec_compressed_fmt_blk_t {
+ struct apr_hdr hdr;
+
+ /*
+ * Nominal sampling rate of the incoming bitstream.
+ * Supported values: 8000, 11025, 16000, 22050, 24000, 32000,
+ * 44100, 48000, 88200, 96000, 176400, 192000,
+ * 352800, 384000
+ */
+ uint32_t sampling_rate;
+
+ /*
+ * Number of channels of the incoming bitstream.
+ * Supported values: 1,2,3,4,5,6,7,8
+ */
+ uint32_t num_channels;
+
+} __packed;
+
struct asm_multi_channel_pcm_fmt_blk_v2 {
struct apr_hdr hdr;
struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
@@ -5071,6 +5097,11 @@ struct asm_amrwbplus_fmt_blk_v2 {
#define ASM_MEDIA_FMT_APE 0x00012F32
#define ASM_MEDIA_FMT_DSD 0x00012F3E
#define ASM_MEDIA_FMT_TRUEHD 0x00013215
+/* 0x0 is used for the format ID since the ADSP dynamically determines the
+ * format encapsulated in the IEC61937 (compressed) or IEC60958
+ * (pcm) packets.
+ */
+#define ASM_MEDIA_FMT_IEC 0x00000000
/* Media format ID for adaptive transform acoustic coding. This
* ID is used by the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED command
@@ -10544,6 +10575,7 @@ enum {
COMPRESSED_PASSTHROUGH_DSD,
LISTEN,
COMPRESSED_PASSTHROUGH_GEN,
+ COMPRESSED_PASSTHROUGH_IEC61937
};
#define AUDPROC_MODULE_ID_COMPRESSED_MUTE 0x00010770
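[Editor's note — hedged sketch, not part of the patch.] Filling the new IEC media-format block looks roughly like the snippet below; the APR packing details (hdr fields, stream handle) are driver specific and elided, and only the two payload fields come from the struct definition above. The q6asm-v2.h hunk further down adds q6asm_media_format_block_iec(), which is presumably the wrapper that sends this command.

	static void example_fill_iec_fmt_blk(struct asm_iec_compressed_fmt_blk_t *fmt)
	{
		/* hdr.opcode would carry ASM_DATA_CMD_IEC_60958_MEDIA_FMT */
		fmt->sampling_rate = 48000;	/* one of the rates listed above */
		fmt->num_channels  = 2;		/* 1..8 per the comment above */
	}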
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 42dd677610d9..d0dffbd15923 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -56,6 +56,7 @@
#define FORMAT_APTX 0x001e
#define FORMAT_GEN_COMPR 0x001f
#define FORMAT_TRUEHD 0x0020
+#define FORMAT_IEC61937 0x0021
#define ENCDEC_SBCBITRATE 0x0001
#define ENCDEC_IMMEDIATE_DECODE 0x0002
@@ -318,6 +319,10 @@ int q6asm_open_read_write_v2(struct audio_client *ac, uint32_t rd_format,
int q6asm_open_loopback_v2(struct audio_client *ac,
uint16_t bits_per_sample);
+int q6asm_open_transcode_loopback(struct audio_client *ac,
+ uint16_t bits_per_sample, uint32_t source_format,
+ uint32_t sink_format);
+
int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
uint32_t lsw_ts, uint32_t flags);
int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
@@ -509,6 +514,10 @@ int q6asm_media_format_block_gen_compr(
bool use_default_chmap, char *channel_map,
uint16_t bits_per_sample);
+int q6asm_media_format_block_iec(
+ struct audio_client *ac,
+ uint32_t rate, uint32_t channels);
+
int q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac,
uint32_t rate, uint32_t channels,
bool use_default_chmap,
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 48c49741d77f..7aa6496c7608 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -221,8 +221,8 @@ struct drm_msm_gem_submit {
};
struct drm_msm_gem_submit_profile_buffer {
- __s64 queue_time; /* out, Ringbuffer queue time (seconds) */
- __s64 submit_time; /* out, Ringbuffer submission time (seconds) */
+ __s64 queue_time; /* out, Ringbuffer queue time (nsecs) */
+ __s64 submit_time; /* out, Ringbuffer submission time (nsecs) */
__u64 ticks_queued; /* out, GPU ticks at ringbuffer submission */
__u64 ticks_submitted; /* out, GPU ticks before cmdstream execution*/
__u64 ticks_retired; /* out, GPU ticks after cmdstream execution */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 06f2ca2b0a95..3d912dd57c08 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -86,6 +86,8 @@ header-y += capi.h
header-y += cciss_defs.h
header-y += cciss_ioctl.h
header-y += cdrom.h
+header-y += cec.h
+header-y += cec-funcs.h
header-y += cgroupstats.h
header-y += chio.h
header-y += cm4000_cs.h
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
new file mode 100644
index 000000000000..14be2c6f20fd
--- /dev/null
+++ b/include/uapi/linux/cec-funcs.h
@@ -0,0 +1,1969 @@
+/*
+ * cec - HDMI Consumer Electronics Control message functions
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CEC_UAPI_FUNCS_H
+#define _CEC_UAPI_FUNCS_H
+
+#include <linux/cec.h>
+
+/* One Touch Play Feature */
+static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr)
+{
+ msg->len = 4;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_ACTIVE_SOURCE;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_active_source(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_image_view_on(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_IMAGE_VIEW_ON;
+}
+
+static inline void cec_msg_text_view_on(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_TEXT_VIEW_ON;
+}
+
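[Editor's note — illustrative sketch, not part of the patch.] A short example of how the One Touch Play helpers above combine with cec_transmit_msg() from media/cec.h; the CEC core is assumed to fill in the initiator half of msg[0] at transmit time.

	/* Sketch only: claim the active source role for our physical address. */
	static void example_announce_active_source(struct cec_adapter *adap, u16 pa)
	{
		struct cec_msg msg = {};

		cec_msg_active_source(&msg, pa);	/* 4-byte broadcast message */
		cec_transmit_msg(adap, &msg, false);	/* non-blocking transmit */
	}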
+
+/* Routing Control Feature */
+static inline void cec_msg_inactive_source(struct cec_msg *msg,
+ __u16 phys_addr)
+{
+ msg->len = 4;
+ msg->msg[1] = CEC_MSG_INACTIVE_SOURCE;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_inactive_source(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_request_active_source(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE;
+ msg->reply = reply ? CEC_MSG_ACTIVE_SOURCE : 0;
+}
+
+static inline void cec_msg_routing_information(struct cec_msg *msg,
+ __u16 phys_addr)
+{
+ msg->len = 4;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_ROUTING_INFORMATION;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_routing_information(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_routing_change(struct cec_msg *msg,
+ bool reply,
+ __u16 orig_phys_addr,
+ __u16 new_phys_addr)
+{
+ msg->len = 6;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_ROUTING_CHANGE;
+ msg->msg[2] = orig_phys_addr >> 8;
+ msg->msg[3] = orig_phys_addr & 0xff;
+ msg->msg[4] = new_phys_addr >> 8;
+ msg->msg[5] = new_phys_addr & 0xff;
+ msg->reply = reply ? CEC_MSG_ROUTING_INFORMATION : 0;
+}
+
+static inline void cec_ops_routing_change(const struct cec_msg *msg,
+ __u16 *orig_phys_addr,
+ __u16 *new_phys_addr)
+{
+ *orig_phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *new_phys_addr = (msg->msg[4] << 8) | msg->msg[5];
+}
+
+static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr)
+{
+ msg->len = 4;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_SET_STREAM_PATH;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_set_stream_path(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+
+/* Standby Feature */
+static inline void cec_msg_standby(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_STANDBY;
+}
+
+
+/* One Touch Record Feature */
+static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_RECORD_OFF;
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
+}
+
+struct cec_op_arib_data {
+ __u16 transport_id;
+ __u16 service_id;
+ __u16 orig_network_id;
+};
+
+struct cec_op_atsc_data {
+ __u16 transport_id;
+ __u16 program_number;
+};
+
+struct cec_op_dvb_data {
+ __u16 transport_id;
+ __u16 service_id;
+ __u16 orig_network_id;
+};
+
+struct cec_op_channel_data {
+ __u8 channel_number_fmt;
+ __u16 major;
+ __u16 minor;
+};
+
+struct cec_op_digital_service_id {
+ __u8 service_id_method;
+ __u8 dig_bcast_system;
+ union {
+ struct cec_op_arib_data arib;
+ struct cec_op_atsc_data atsc;
+ struct cec_op_dvb_data dvb;
+ struct cec_op_channel_data channel;
+ };
+};
+
+struct cec_op_record_src {
+ __u8 type;
+ union {
+ struct cec_op_digital_service_id digital;
+ struct {
+ __u8 ana_bcast_type;
+ __u16 ana_freq;
+ __u8 bcast_system;
+ } analog;
+ struct {
+ __u8 plug;
+ } ext_plug;
+ struct {
+ __u16 phys_addr;
+ } ext_phys_addr;
+ };
+};
+
+static inline void cec_set_digital_service_id(__u8 *msg,
+ const struct cec_op_digital_service_id *digital)
+{
+ *msg++ = (digital->service_id_method << 7) | digital->dig_bcast_system;
+ if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
+ *msg++ = (digital->channel.channel_number_fmt << 2) |
+ (digital->channel.major >> 8);
+ *msg++ = digital->channel.major & 0xff;
+ *msg++ = digital->channel.minor >> 8;
+ *msg++ = digital->channel.minor & 0xff;
+ *msg++ = 0;
+ *msg++ = 0;
+ return;
+ }
+ switch (digital->dig_bcast_system) {
+ case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN:
+ case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE:
+ case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT:
+ case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T:
+ *msg++ = digital->atsc.transport_id >> 8;
+ *msg++ = digital->atsc.transport_id & 0xff;
+ *msg++ = digital->atsc.program_number >> 8;
+ *msg++ = digital->atsc.program_number & 0xff;
+ *msg++ = 0;
+ *msg++ = 0;
+ break;
+ default:
+ *msg++ = digital->dvb.transport_id >> 8;
+ *msg++ = digital->dvb.transport_id & 0xff;
+ *msg++ = digital->dvb.service_id >> 8;
+ *msg++ = digital->dvb.service_id & 0xff;
+ *msg++ = digital->dvb.orig_network_id >> 8;
+ *msg++ = digital->dvb.orig_network_id & 0xff;
+ break;
+ }
+}
+
+static inline void cec_get_digital_service_id(const __u8 *msg,
+ struct cec_op_digital_service_id *digital)
+{
+ digital->service_id_method = msg[0] >> 7;
+ digital->dig_bcast_system = msg[0] & 0x7f;
+ if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
+ digital->channel.channel_number_fmt = msg[1] >> 2;
+ digital->channel.major = ((msg[1] & 3) << 6) | msg[2];
+ digital->channel.minor = (msg[3] << 8) | msg[4];
+ return;
+ }
+ digital->dvb.transport_id = (msg[1] << 8) | msg[2];
+ digital->dvb.service_id = (msg[3] << 8) | msg[4];
+ digital->dvb.orig_network_id = (msg[5] << 8) | msg[6];
+}
+
+static inline void cec_msg_record_on_own(struct cec_msg *msg)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_RECORD_ON;
+ msg->msg[2] = CEC_OP_RECORD_SRC_OWN;
+}
+
+static inline void cec_msg_record_on_digital(struct cec_msg *msg,
+ const struct cec_op_digital_service_id *digital)
+{
+ msg->len = 10;
+ msg->msg[1] = CEC_MSG_RECORD_ON;
+ msg->msg[2] = CEC_OP_RECORD_SRC_DIGITAL;
+ cec_set_digital_service_id(msg->msg + 3, digital);
+}
+
+static inline void cec_msg_record_on_analog(struct cec_msg *msg,
+ __u8 ana_bcast_type,
+ __u16 ana_freq,
+ __u8 bcast_system)
+{
+ msg->len = 7;
+ msg->msg[1] = CEC_MSG_RECORD_ON;
+ msg->msg[2] = CEC_OP_RECORD_SRC_ANALOG;
+ msg->msg[3] = ana_bcast_type;
+ msg->msg[4] = ana_freq >> 8;
+ msg->msg[5] = ana_freq & 0xff;
+ msg->msg[6] = bcast_system;
+}
+
+static inline void cec_msg_record_on_plug(struct cec_msg *msg,
+ __u8 plug)
+{
+ msg->len = 4;
+ msg->msg[1] = CEC_MSG_RECORD_ON;
+ msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PLUG;
+ msg->msg[3] = plug;
+}
+
+static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
+ __u16 phys_addr)
+{
+ msg->len = 5;
+ msg->msg[1] = CEC_MSG_RECORD_ON;
+ msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PHYS_ADDR;
+ msg->msg[3] = phys_addr >> 8;
+ msg->msg[4] = phys_addr & 0xff;
+}
+
+static inline void cec_msg_record_on(struct cec_msg *msg,
+ bool reply,
+ const struct cec_op_record_src *rec_src)
+{
+ switch (rec_src->type) {
+ case CEC_OP_RECORD_SRC_OWN:
+ cec_msg_record_on_own(msg);
+ break;
+ case CEC_OP_RECORD_SRC_DIGITAL:
+ cec_msg_record_on_digital(msg, &rec_src->digital);
+ break;
+ case CEC_OP_RECORD_SRC_ANALOG:
+ cec_msg_record_on_analog(msg,
+ rec_src->analog.ana_bcast_type,
+ rec_src->analog.ana_freq,
+ rec_src->analog.bcast_system);
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PLUG:
+ cec_msg_record_on_plug(msg, rec_src->ext_plug.plug);
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+ cec_msg_record_on_phys_addr(msg,
+ rec_src->ext_phys_addr.phys_addr);
+ break;
+ }
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
+}
+
+static inline void cec_ops_record_on(const struct cec_msg *msg,
+ struct cec_op_record_src *rec_src)
+{
+ rec_src->type = msg->msg[2];
+ switch (rec_src->type) {
+ case CEC_OP_RECORD_SRC_OWN:
+ break;
+ case CEC_OP_RECORD_SRC_DIGITAL:
+ cec_get_digital_service_id(msg->msg + 3, &rec_src->digital);
+ break;
+ case CEC_OP_RECORD_SRC_ANALOG:
+ rec_src->analog.ana_bcast_type = msg->msg[3];
+ rec_src->analog.ana_freq =
+ (msg->msg[4] << 8) | msg->msg[5];
+ rec_src->analog.bcast_system = msg->msg[6];
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PLUG:
+ rec_src->ext_plug.plug = msg->msg[3];
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+ rec_src->ext_phys_addr.phys_addr =
+ (msg->msg[3] << 8) | msg->msg[4];
+ break;
+ }
+}
+
+static inline void cec_msg_record_status(struct cec_msg *msg, __u8 rec_status)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_RECORD_STATUS;
+ msg->msg[2] = rec_status;
+}
+
+static inline void cec_ops_record_status(const struct cec_msg *msg,
+ __u8 *rec_status)
+{
+ *rec_status = msg->msg[2];
+}
+
+static inline void cec_msg_record_tv_screen(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN;
+ msg->reply = reply ? CEC_MSG_RECORD_ON : 0;
+}
+
+
+/* Timer Programming Feature */
+static inline void cec_msg_timer_status(struct cec_msg *msg,
+ __u8 timer_overlap_warning,
+ __u8 media_info,
+ __u8 prog_info,
+ __u8 prog_error,
+ __u8 duration_hr,
+ __u8 duration_min)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_TIMER_STATUS;
+ msg->msg[2] = (timer_overlap_warning << 7) |
+ (media_info << 5) |
+ (prog_info ? 0x10 : 0) |
+ (prog_info ? prog_info : prog_error);
+ if (prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE ||
+ prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE ||
+ prog_error == CEC_OP_PROG_ERROR_DUPLICATE) {
+ msg->len += 2;
+ msg->msg[3] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[4] = ((duration_min / 10) << 4) | (duration_min % 10);
+ }
+}
+
+static inline void cec_ops_timer_status(const struct cec_msg *msg,
+ __u8 *timer_overlap_warning,
+ __u8 *media_info,
+ __u8 *prog_info,
+ __u8 *prog_error,
+ __u8 *duration_hr,
+ __u8 *duration_min)
+{
+ *timer_overlap_warning = msg->msg[2] >> 7;
+ *media_info = (msg->msg[2] >> 5) & 3;
+ if (msg->msg[2] & 0x10) {
+ *prog_info = msg->msg[2] & 0xf;
+ *prog_error = 0;
+ } else {
+ *prog_info = 0;
+ *prog_error = msg->msg[2] & 0xf;
+ }
+ if (*prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE ||
+ *prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE ||
+ *prog_error == CEC_OP_PROG_ERROR_DUPLICATE) {
+ *duration_hr = (msg->msg[3] >> 4) * 10 + (msg->msg[3] & 0xf);
+ *duration_min = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ } else {
+ *duration_hr = *duration_min = 0;
+ }
+}
+
+static inline void cec_msg_timer_cleared_status(struct cec_msg *msg,
+ __u8 timer_cleared_status)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_TIMER_CLEARED_STATUS;
+ msg->msg[2] = timer_cleared_status;
+}
+
+static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg,
+ __u8 *timer_cleared_status)
+{
+ *timer_cleared_status = msg->msg[2];
+}
+
+static inline void cec_msg_clear_analogue_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ __u8 ana_bcast_type,
+ __u16 ana_freq,
+ __u8 bcast_system)
+{
+ msg->len = 13;
+ msg->msg[1] = CEC_MSG_CLEAR_ANALOGUE_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ msg->msg[9] = ana_bcast_type;
+ msg->msg[10] = ana_freq >> 8;
+ msg->msg[11] = ana_freq & 0xff;
+ msg->msg[12] = bcast_system;
+ msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+}
+
+static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ __u8 *ana_bcast_type,
+ __u16 *ana_freq,
+ __u8 *bcast_system)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ *ana_bcast_type = msg->msg[9];
+ *ana_freq = (msg->msg[10] << 8) | msg->msg[11];
+ *bcast_system = msg->msg[12];
+}
+
+static inline void cec_msg_clear_digital_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ const struct cec_op_digital_service_id *digital)
+{
+ msg->len = 16;
+ msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+ msg->msg[1] = CEC_MSG_CLEAR_DIGITAL_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ cec_set_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ struct cec_op_digital_service_id *digital)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ cec_get_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_msg_clear_ext_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ __u8 ext_src_spec,
+ __u8 plug,
+ __u16 phys_addr)
+{
+ msg->len = 13;
+ msg->msg[1] = CEC_MSG_CLEAR_EXT_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ msg->msg[9] = ext_src_spec;
+ msg->msg[10] = plug;
+ msg->msg[11] = phys_addr >> 8;
+ msg->msg[12] = phys_addr & 0xff;
+ msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+}
+
+static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ __u8 *ext_src_spec,
+ __u8 *plug,
+ __u16 *phys_addr)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ *ext_src_spec = msg->msg[9];
+ *plug = msg->msg[10];
+ *phys_addr = (msg->msg[11] << 8) | msg->msg[12];
+}
+
+static inline void cec_msg_set_analogue_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ __u8 ana_bcast_type,
+ __u16 ana_freq,
+ __u8 bcast_system)
+{
+ msg->len = 13;
+ msg->msg[1] = CEC_MSG_SET_ANALOGUE_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ msg->msg[9] = ana_bcast_type;
+ msg->msg[10] = ana_freq >> 8;
+ msg->msg[11] = ana_freq & 0xff;
+ msg->msg[12] = bcast_system;
+ msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+}
+
+static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ __u8 *ana_bcast_type,
+ __u16 *ana_freq,
+ __u8 *bcast_system)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ *ana_bcast_type = msg->msg[9];
+ *ana_freq = (msg->msg[10] << 8) | msg->msg[11];
+ *bcast_system = msg->msg[12];
+}
+
+static inline void cec_msg_set_digital_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ const struct cec_op_digital_service_id *digital)
+{
+ msg->len = 16;
+ msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+ msg->msg[1] = CEC_MSG_SET_DIGITAL_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ cec_set_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_ops_set_digital_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ struct cec_op_digital_service_id *digital)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ cec_get_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_msg_set_ext_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ __u8 ext_src_spec,
+ __u8 plug,
+ __u16 phys_addr)
+{
+ msg->len = 13;
+ msg->msg[1] = CEC_MSG_SET_EXT_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ msg->msg[9] = ext_src_spec;
+ msg->msg[10] = plug;
+ msg->msg[11] = phys_addr >> 8;
+ msg->msg[12] = phys_addr & 0xff;
+ msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+}
+
+static inline void cec_ops_set_ext_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ __u8 *ext_src_spec,
+ __u8 *plug,
+ __u16 *phys_addr)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ *ext_src_spec = msg->msg[9];
+ *plug = msg->msg[10];
+ *phys_addr = (msg->msg[11] << 8) | msg->msg[12];
+}
+
+static inline void cec_msg_set_timer_program_title(struct cec_msg *msg,
+ const char *prog_title)
+{
+ unsigned int len = strlen(prog_title);
+
+ if (len > 14)
+ len = 14;
+ msg->len = 2 + len;
+ msg->msg[1] = CEC_MSG_SET_TIMER_PROGRAM_TITLE;
+ memcpy(msg->msg + 2, prog_title, len);
+}
+
+static inline void cec_ops_set_timer_program_title(const struct cec_msg *msg,
+ char *prog_title)
+{
+ unsigned int len = msg->len > 2 ? msg->len - 2 : 0;
+
+ if (len > 14)
+ len = 14;
+ memcpy(prog_title, msg->msg + 2, len);
+ prog_title[len] = '\0';
+}
+
+/* System Information Feature */
+static inline void cec_msg_cec_version(struct cec_msg *msg, __u8 cec_version)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_CEC_VERSION;
+ msg->msg[2] = cec_version;
+}
+
+static inline void cec_ops_cec_version(const struct cec_msg *msg,
+ __u8 *cec_version)
+{
+ *cec_version = msg->msg[2];
+}
+
+static inline void cec_msg_get_cec_version(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GET_CEC_VERSION;
+ msg->reply = reply ? CEC_MSG_CEC_VERSION : 0;
+}
+
+static inline void cec_msg_report_physical_addr(struct cec_msg *msg,
+ __u16 phys_addr, __u8 prim_devtype)
+{
+ msg->len = 5;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+ msg->msg[4] = prim_devtype;
+}
+
+static inline void cec_ops_report_physical_addr(const struct cec_msg *msg,
+ __u16 *phys_addr, __u8 *prim_devtype)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *prim_devtype = msg->msg[4];
+}
+
+static inline void cec_msg_give_physical_addr(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR;
+ msg->reply = reply ? CEC_MSG_REPORT_PHYSICAL_ADDR : 0;
+}
+
+static inline void cec_msg_set_menu_language(struct cec_msg *msg,
+ const char *language)
+{
+ msg->len = 5;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE;
+ memcpy(msg->msg + 2, language, 3);
+}
+
+static inline void cec_ops_set_menu_language(const struct cec_msg *msg,
+ char *language)
+{
+ memcpy(language, msg->msg + 2, 3);
+ language[3] = '\0';
+}
+
+static inline void cec_msg_get_menu_language(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE;
+ msg->reply = reply ? CEC_MSG_SET_MENU_LANGUAGE : 0;
+}
+
+/*
+ * Assumes a single RC Profile byte and a single Device Features byte,
+ * i.e. no extended features are supported by this helper function.
+ *
+ * As of CEC 2.0 no extended features are defined, should those be added
+ * in the future, then this function needs to be adapted or a new function
+ * should be added.
+ */
+static inline void cec_msg_report_features(struct cec_msg *msg,
+ __u8 cec_version, __u8 all_device_types,
+ __u8 rc_profile, __u8 dev_features)
+{
+ msg->len = 6;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+ msg->msg[2] = cec_version;
+ msg->msg[3] = all_device_types;
+ msg->msg[4] = rc_profile;
+ msg->msg[5] = dev_features;
+}
+
+static inline void cec_ops_report_features(const struct cec_msg *msg,
+ __u8 *cec_version, __u8 *all_device_types,
+ const __u8 **rc_profile, const __u8 **dev_features)
+{
+ const __u8 *p = &msg->msg[4];
+
+ *cec_version = msg->msg[2];
+ *all_device_types = msg->msg[3];
+ *rc_profile = p;
+ while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT))
+ p++;
+ if (!(*p & CEC_OP_FEAT_EXT)) {
+ *dev_features = p + 1;
+ while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT))
+ p++;
+ }
+ if (*p & CEC_OP_FEAT_EXT)
+ *rc_profile = *dev_features = NULL;
+}
+
+static inline void cec_msg_give_features(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_FEATURES;
+ msg->reply = reply ? CEC_MSG_REPORT_FEATURES : 0;
+}
+
+/* Deck Control Feature */
+static inline void cec_msg_deck_control(struct cec_msg *msg,
+ __u8 deck_control_mode)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_DECK_CONTROL;
+ msg->msg[2] = deck_control_mode;
+}
+
+static inline void cec_ops_deck_control(const struct cec_msg *msg,
+ __u8 *deck_control_mode)
+{
+ *deck_control_mode = msg->msg[2];
+}
+
+static inline void cec_msg_deck_status(struct cec_msg *msg,
+ __u8 deck_info)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_DECK_STATUS;
+ msg->msg[2] = deck_info;
+}
+
+static inline void cec_ops_deck_status(const struct cec_msg *msg,
+ __u8 *deck_info)
+{
+ *deck_info = msg->msg[2];
+}
+
+static inline void cec_msg_give_deck_status(struct cec_msg *msg,
+ bool reply,
+ __u8 status_req)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
+ msg->msg[2] = status_req;
+ msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
+}
+
+static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
+ __u8 *status_req)
+{
+ *status_req = msg->msg[2];
+}
+
+static inline void cec_msg_play(struct cec_msg *msg,
+ __u8 play_mode)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_PLAY;
+ msg->msg[2] = play_mode;
+}
+
+static inline void cec_ops_play(const struct cec_msg *msg,
+ __u8 *play_mode)
+{
+ *play_mode = msg->msg[2];
+}
+
+
+/* Tuner Control Feature */
+struct cec_op_tuner_device_info {
+ __u8 rec_flag;
+ __u8 tuner_display_info;
+ bool is_analog;
+ union {
+ struct cec_op_digital_service_id digital;
+ struct {
+ __u8 ana_bcast_type;
+ __u16 ana_freq;
+ __u8 bcast_system;
+ } analog;
+ };
+};
+
+static inline void cec_msg_tuner_device_status_analog(struct cec_msg *msg,
+ __u8 rec_flag,
+ __u8 tuner_display_info,
+ __u8 ana_bcast_type,
+ __u16 ana_freq,
+ __u8 bcast_system)
+{
+ msg->len = 7;
+ msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
+ msg->msg[2] = (rec_flag << 7) | tuner_display_info;
+ msg->msg[3] = ana_bcast_type;
+ msg->msg[4] = ana_freq >> 8;
+ msg->msg[5] = ana_freq & 0xff;
+ msg->msg[6] = bcast_system;
+}
+
+static inline void cec_msg_tuner_device_status_digital(struct cec_msg *msg,
+ __u8 rec_flag, __u8 tuner_display_info,
+ const struct cec_op_digital_service_id *digital)
+{
+ msg->len = 10;
+ msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
+ msg->msg[2] = (rec_flag << 7) | tuner_display_info;
+ cec_set_digital_service_id(msg->msg + 3, digital);
+}
+
+static inline void cec_msg_tuner_device_status(struct cec_msg *msg,
+ const struct cec_op_tuner_device_info *tuner_dev_info)
+{
+ if (tuner_dev_info->is_analog)
+ cec_msg_tuner_device_status_analog(msg,
+ tuner_dev_info->rec_flag,
+ tuner_dev_info->tuner_display_info,
+ tuner_dev_info->analog.ana_bcast_type,
+ tuner_dev_info->analog.ana_freq,
+ tuner_dev_info->analog.bcast_system);
+ else
+ cec_msg_tuner_device_status_digital(msg,
+ tuner_dev_info->rec_flag,
+ tuner_dev_info->tuner_display_info,
+ &tuner_dev_info->digital);
+}
+
+static inline void cec_ops_tuner_device_status(const struct cec_msg *msg,
+ struct cec_op_tuner_device_info *tuner_dev_info)
+{
+ tuner_dev_info->is_analog = msg->len < 10;
+ tuner_dev_info->rec_flag = msg->msg[2] >> 7;
+ tuner_dev_info->tuner_display_info = msg->msg[2] & 0x7f;
+ if (tuner_dev_info->is_analog) {
+ tuner_dev_info->analog.ana_bcast_type = msg->msg[3];
+ tuner_dev_info->analog.ana_freq = (msg->msg[4] << 8) | msg->msg[5];
+ tuner_dev_info->analog.bcast_system = msg->msg[6];
+ return;
+ }
+ cec_get_digital_service_id(msg->msg + 3, &tuner_dev_info->digital);
+}
+
+static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
+ bool reply,
+ __u8 status_req)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
+ msg->msg[2] = status_req;
+ msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
+}
+
+static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
+ __u8 *status_req)
+{
+ *status_req = msg->msg[2];
+}
+
+static inline void cec_msg_select_analogue_service(struct cec_msg *msg,
+ __u8 ana_bcast_type,
+ __u16 ana_freq,
+ __u8 bcast_system)
+{
+ msg->len = 6;
+ msg->msg[1] = CEC_MSG_SELECT_ANALOGUE_SERVICE;
+ msg->msg[2] = ana_bcast_type;
+ msg->msg[3] = ana_freq >> 8;
+ msg->msg[4] = ana_freq & 0xff;
+ msg->msg[5] = bcast_system;
+}
+
+static inline void cec_ops_select_analogue_service(const struct cec_msg *msg,
+ __u8 *ana_bcast_type,
+ __u16 *ana_freq,
+ __u8 *bcast_system)
+{
+ *ana_bcast_type = msg->msg[2];
+ *ana_freq = (msg->msg[3] << 8) | msg->msg[4];
+ *bcast_system = msg->msg[5];
+}
+
+static inline void cec_msg_select_digital_service(struct cec_msg *msg,
+ const struct cec_op_digital_service_id *digital)
+{
+ msg->len = 9;
+ msg->msg[1] = CEC_MSG_SELECT_DIGITAL_SERVICE;
+ cec_set_digital_service_id(msg->msg + 2, digital);
+}
+
+static inline void cec_ops_select_digital_service(const struct cec_msg *msg,
+ struct cec_op_digital_service_id *digital)
+{
+ cec_get_digital_service_id(msg->msg + 2, digital);
+}
+
+static inline void cec_msg_tuner_step_decrement(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_TUNER_STEP_DECREMENT;
+}
+
+static inline void cec_msg_tuner_step_increment(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_TUNER_STEP_INCREMENT;
+}
+
+
+/* Vendor Specific Commands Feature */
+static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id)
+{
+ msg->len = 5;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID;
+ msg->msg[2] = vendor_id >> 16;
+ msg->msg[3] = (vendor_id >> 8) & 0xff;
+ msg->msg[4] = vendor_id & 0xff;
+}
+
+static inline void cec_ops_device_vendor_id(const struct cec_msg *msg,
+ __u32 *vendor_id)
+{
+ *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+}
+
+static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID;
+ msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
+}
+
+static inline void cec_msg_vendor_command(struct cec_msg *msg,
+ __u8 size, const __u8 *vendor_cmd)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
+ memcpy(msg->msg + 2, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *vendor_cmd = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
+ __u32 vendor_id, __u8 size,
+ const __u8 *vendor_cmd)
+{
+ if (size > 11)
+ size = 11;
+ msg->len = 5 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+ msg->msg[2] = vendor_id >> 16;
+ msg->msg[3] = (vendor_id >> 8) & 0xff;
+ msg->msg[4] = vendor_id & 0xff;
+ memcpy(msg->msg + 5, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
+ __u32 *vendor_id, __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 5;
+
+ if (*size > 11)
+ *size = 11;
+ *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+ *vendor_cmd = msg->msg + 5;
+}
+
+static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
+ __u8 size,
+ const __u8 *rc_code)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
+ memcpy(msg->msg + 2, rc_code, size);
+}
+
+static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **rc_code)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *rc_code = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_UP;
+}
+
+
+/* OSD Display Feature */
+static inline void cec_msg_set_osd_string(struct cec_msg *msg,
+ __u8 disp_ctl,
+ const char *osd)
+{
+ unsigned int len = strlen(osd);
+
+ if (len > 13)
+ len = 13;
+ msg->len = 3 + len;
+ msg->msg[1] = CEC_MSG_SET_OSD_STRING;
+ msg->msg[2] = disp_ctl;
+ memcpy(msg->msg + 3, osd, len);
+}
+
+static inline void cec_ops_set_osd_string(const struct cec_msg *msg,
+ __u8 *disp_ctl,
+ char *osd)
+{
+ unsigned int len = msg->len > 3 ? msg->len - 3 : 0;
+
+ *disp_ctl = msg->msg[2];
+ if (len > 13)
+ len = 13;
+ memcpy(osd, msg->msg + 3, len);
+ osd[len] = '\0';
+}
+
+
+/* Device OSD Transfer Feature */
+static inline void cec_msg_set_osd_name(struct cec_msg *msg, const char *name)
+{
+ unsigned int len = strlen(name);
+
+ if (len > 14)
+ len = 14;
+ msg->len = 2 + len;
+ msg->msg[1] = CEC_MSG_SET_OSD_NAME;
+ memcpy(msg->msg + 2, name, len);
+}
+
+static inline void cec_ops_set_osd_name(const struct cec_msg *msg,
+ char *name)
+{
+ unsigned int len = msg->len > 2 ? msg->len - 2 : 0;
+
+ if (len > 14)
+ len = 14;
+ memcpy(name, msg->msg + 2, len);
+ name[len] = '\0';
+}
+
+static inline void cec_msg_give_osd_name(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_OSD_NAME;
+ msg->reply = reply ? CEC_MSG_SET_OSD_NAME : 0;
+}
+
+
+/* Device Menu Control Feature */
+static inline void cec_msg_menu_status(struct cec_msg *msg,
+ __u8 menu_state)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_MENU_STATUS;
+ msg->msg[2] = menu_state;
+}
+
+static inline void cec_ops_menu_status(const struct cec_msg *msg,
+ __u8 *menu_state)
+{
+ *menu_state = msg->msg[2];
+}
+
+static inline void cec_msg_menu_request(struct cec_msg *msg,
+ bool reply,
+ __u8 menu_req)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_MENU_REQUEST;
+ msg->msg[2] = menu_req;
+ msg->reply = reply ? CEC_MSG_MENU_STATUS : 0;
+}
+
+static inline void cec_ops_menu_request(const struct cec_msg *msg,
+ __u8 *menu_req)
+{
+ *menu_req = msg->msg[2];
+}
+
+struct cec_op_ui_command {
+ __u8 ui_cmd;
+ bool has_opt_arg;
+ union {
+ struct cec_op_channel_data channel_identifier;
+ __u8 ui_broadcast_type;
+ __u8 ui_sound_presentation_control;
+ __u8 play_mode;
+ __u8 ui_function_media;
+ __u8 ui_function_select_av_input;
+ __u8 ui_function_select_audio_input;
+ };
+};
+
+static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
+ const struct cec_op_ui_command *ui_cmd)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_USER_CONTROL_PRESSED;
+ msg->msg[2] = ui_cmd->ui_cmd;
+ if (!ui_cmd->has_opt_arg)
+ return;
+ switch (ui_cmd->ui_cmd) {
+ case 0x56:
+ case 0x57:
+ case 0x60:
+ case 0x68:
+ case 0x69:
+ case 0x6a:
+ /* The optional operand is one byte for all these ui commands */
+ msg->len++;
+ msg->msg[3] = ui_cmd->play_mode;
+ break;
+ case 0x67:
+ msg->len += 4;
+ msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
+ (ui_cmd->channel_identifier.major >> 8);
+ msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
+ msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
+ msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
+ break;
+ }
+}
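+
+/*
+ * Usage sketch (illustrative only): ui command 0x67 is the one that carries
+ * a channel identifier operand, per the switch above. Building such a
+ * message could look roughly like this; the channel value is a placeholder:
+ *
+ * struct cec_op_ui_command ui_cmd = {
+ * .ui_cmd = 0x67,
+ * .has_opt_arg = true,
+ * .channel_identifier = {
+ * .channel_number_fmt = CEC_OP_CHANNEL_NUMBER_FMT_1_PART,
+ * .minor = 5,
+ * },
+ * };
+ *
+ * cec_msg_user_control_pressed(&msg, &ui_cmd);
+ *
+ * A matching <User Control Released> message would normally be sent
+ * afterwards in a separate cec_msg.
+ */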
+
+static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
+ struct cec_op_ui_command *ui_cmd)
+{
+ ui_cmd->ui_cmd = msg->msg[2];
+ ui_cmd->has_opt_arg = false;
+ if (msg->len == 3)
+ return;
+ switch (ui_cmd->ui_cmd) {
+ case 0x56:
+ case 0x57:
+ case 0x60:
+ case 0x68:
+ case 0x69:
+ case 0x6a:
+ /* The optional operand is one byte for all these ui commands */
+ ui_cmd->play_mode = msg->msg[3];
+ ui_cmd->has_opt_arg = true;
+ break;
+ case 0x67:
+ if (msg->len < 7)
+ break;
+ ui_cmd->has_opt_arg = true;
+ ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2;
+ ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 8) | msg->msg[4];
+ ui_cmd->channel_identifier.minor = (msg->msg[5] << 8) | msg->msg[6];
+ break;
+ }
+}
+
+static inline void cec_msg_user_control_released(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_USER_CONTROL_RELEASED;
+}
+
+/* Remote Control Passthrough Feature */
+
+/* Power Status Feature */
+static inline void cec_msg_report_power_status(struct cec_msg *msg,
+ __u8 pwr_state)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_REPORT_POWER_STATUS;
+ msg->msg[2] = pwr_state;
+}
+
+static inline void cec_ops_report_power_status(const struct cec_msg *msg,
+ __u8 *pwr_state)
+{
+ *pwr_state = msg->msg[2];
+}
+
+static inline void cec_msg_give_device_power_status(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;
+ msg->reply = reply ? CEC_MSG_REPORT_POWER_STATUS : 0;
+}
+
+/* General Protocol Messages */
+static inline void cec_msg_feature_abort(struct cec_msg *msg,
+ __u8 abort_msg, __u8 reason)
+{
+ msg->len = 4;
+ msg->msg[1] = CEC_MSG_FEATURE_ABORT;
+ msg->msg[2] = abort_msg;
+ msg->msg[3] = reason;
+}
+
+static inline void cec_ops_feature_abort(const struct cec_msg *msg,
+ __u8 *abort_msg, __u8 *reason)
+{
+ *abort_msg = msg->msg[2];
+ *reason = msg->msg[3];
+}
+
+/* This changes the current message into a feature abort message */
+static inline void cec_msg_reply_feature_abort(struct cec_msg *msg, __u8 reason)
+{
+ cec_msg_set_reply_to(msg, msg);
+ msg->len = 4;
+ msg->msg[2] = msg->msg[1];
+ msg->msg[3] = reason;
+ msg->msg[1] = CEC_MSG_FEATURE_ABORT;
+}
+
+static inline void cec_msg_abort(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_ABORT;
+}
+
+
+/* System Audio Control Feature */
+static inline void cec_msg_report_audio_status(struct cec_msg *msg,
+ __u8 aud_mute_status,
+ __u8 aud_vol_status)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_REPORT_AUDIO_STATUS;
+ msg->msg[2] = (aud_mute_status << 7) | (aud_vol_status & 0x7f);
+}
+
+static inline void cec_ops_report_audio_status(const struct cec_msg *msg,
+ __u8 *aud_mute_status,
+ __u8 *aud_vol_status)
+{
+ *aud_mute_status = msg->msg[2] >> 7;
+ *aud_vol_status = msg->msg[2] & 0x7f;
+}
+
+static inline void cec_msg_give_audio_status(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS;
+ msg->reply = reply ? CEC_MSG_REPORT_AUDIO_STATUS : 0;
+}
+
+static inline void cec_msg_set_system_audio_mode(struct cec_msg *msg,
+ __u8 sys_aud_status)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_SET_SYSTEM_AUDIO_MODE;
+ msg->msg[2] = sys_aud_status;
+}
+
+static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg,
+ __u8 *sys_aud_status)
+{
+ *sys_aud_status = msg->msg[2];
+}
+
+static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg,
+ bool reply,
+ __u16 phys_addr)
+{
+ msg->len = phys_addr == 0xffff ? 2 : 4;
+ msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+ msg->reply = reply ? CEC_MSG_SET_SYSTEM_AUDIO_MODE : 0;
+}
+
+static inline void cec_ops_system_audio_mode_request(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ if (msg->len < 4)
+ *phys_addr = 0xffff;
+ else
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_system_audio_mode_status(struct cec_msg *msg,
+ __u8 sys_aud_status)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_STATUS;
+ msg->msg[2] = sys_aud_status;
+}
+
+static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg,
+ __u8 *sys_aud_status)
+{
+ *sys_aud_status = msg->msg[2];
+}
+
+static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS;
+ msg->reply = reply ? CEC_MSG_SYSTEM_AUDIO_MODE_STATUS : 0;
+}
+
+static inline void cec_msg_report_short_audio_descriptor(struct cec_msg *msg,
+ __u8 num_descriptors,
+ const __u32 *descriptors)
+{
+ unsigned int i;
+
+ if (num_descriptors > 4)
+ num_descriptors = 4;
+ msg->len = 2 + num_descriptors * 3;
+ msg->msg[1] = CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR;
+ for (i = 0; i < num_descriptors; i++) {
+ msg->msg[2 + i * 3] = (descriptors[i] >> 16) & 0xff;
+ msg->msg[3 + i * 3] = (descriptors[i] >> 8) & 0xff;
+ msg->msg[4 + i * 3] = descriptors[i] & 0xff;
+ }
+}
+
+static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *msg,
+ __u8 *num_descriptors,
+ __u32 *descriptors)
+{
+ unsigned int i;
+
+ *num_descriptors = (msg->len - 2) / 3;
+ if (*num_descriptors > 4)
+ *num_descriptors = 4;
+ for (i = 0; i < *num_descriptors; i++)
+ descriptors[i] = (msg->msg[2 + i * 3] << 16) |
+ (msg->msg[3 + i * 3] << 8) |
+ msg->msg[4 + i * 3];
+}
+
+static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg,
+ bool reply,
+ __u8 num_descriptors,
+ const __u8 *audio_format_id,
+ const __u8 *audio_format_code)
+{
+ unsigned int i;
+
+ if (num_descriptors > 4)
+ num_descriptors = 4;
+ msg->len = 2 + num_descriptors;
+ msg->msg[1] = CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR;
+ msg->reply = reply ? CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR : 0;
+ for (i = 0; i < num_descriptors; i++)
+ msg->msg[2 + i] = (audio_format_id[i] << 6) |
+ (audio_format_code[i] & 0x3f);
+}
+
+static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *msg,
+ __u8 *num_descriptors,
+ __u8 *audio_format_id,
+ __u8 *audio_format_code)
+{
+ unsigned int i;
+
+ *num_descriptors = msg->len - 2;
+ if (*num_descriptors > 4)
+ *num_descriptors = 4;
+ for (i = 0; i < *num_descriptors; i++) {
+ audio_format_id[i] = msg->msg[2 + i] >> 6;
+ audio_format_code[i] = msg->msg[2 + i] & 0x3f;
+ }
+}
+
+
+/* Audio Rate Control Feature */
+static inline void cec_msg_set_audio_rate(struct cec_msg *msg,
+ __u8 audio_rate)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_SET_AUDIO_RATE;
+ msg->msg[2] = audio_rate;
+}
+
+static inline void cec_ops_set_audio_rate(const struct cec_msg *msg,
+ __u8 *audio_rate)
+{
+ *audio_rate = msg->msg[2];
+}
+
+
+/* Audio Return Channel Control Feature */
+static inline void cec_msg_report_arc_initiated(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_REPORT_ARC_INITIATED;
+}
+
+static inline void cec_msg_initiate_arc(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_INITIATE_ARC;
+ msg->reply = reply ? CEC_MSG_REPORT_ARC_INITIATED : 0;
+}
+
+static inline void cec_msg_request_arc_initiation(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION;
+ msg->reply = reply ? CEC_MSG_INITIATE_ARC : 0;
+}
+
+static inline void cec_msg_report_arc_terminated(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_REPORT_ARC_TERMINATED;
+}
+
+static inline void cec_msg_terminate_arc(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_TERMINATE_ARC;
+ msg->reply = reply ? CEC_MSG_REPORT_ARC_TERMINATED : 0;
+}
+
+static inline void cec_msg_request_arc_termination(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION;
+ msg->reply = reply ? CEC_MSG_TERMINATE_ARC : 0;
+}
+
+
+/* Dynamic Audio Lipsync Feature */
+/* Only for CEC 2.0 and up */
+static inline void cec_msg_report_current_latency(struct cec_msg *msg,
+ __u16 phys_addr,
+ __u8 video_latency,
+ __u8 low_latency_mode,
+ __u8 audio_out_compensated,
+ __u8 audio_out_delay)
+{
+ msg->len = 6;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+ msg->msg[4] = video_latency;
+ msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
+ if (audio_out_compensated == 3)
+ msg->msg[msg->len++] = audio_out_delay;
+}
+
+static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u8 *video_latency,
+ __u8 *low_latency_mode,
+ __u8 *audio_out_compensated,
+ __u8 *audio_out_delay)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *video_latency = msg->msg[4];
+ *low_latency_mode = (msg->msg[5] >> 2) & 1;
+ *audio_out_compensated = msg->msg[5] & 3;
+ if (*audio_out_compensated == 3 && msg->len >= 7)
+ *audio_out_delay = msg->msg[6];
+ else
+ *audio_out_delay = 0;
+}
+
+static inline void cec_msg_request_current_latency(struct cec_msg *msg,
+ bool reply,
+ __u16 phys_addr)
+{
+ msg->len = 4;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+ msg->reply = reply ? CEC_MSG_REPORT_CURRENT_LATENCY : 0;
+}
+
+static inline void cec_ops_request_current_latency(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+
+/* Capability Discovery and Control Feature */
+static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg,
+ __u16 phys_addr1,
+ __u16 phys_addr2)
+{
+ msg->len = 9;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE;
+ msg->msg[5] = phys_addr1 >> 8;
+ msg->msg[6] = phys_addr1 & 0xff;
+ msg->msg[7] = phys_addr2 >> 8;
+ msg->msg[8] = phys_addr2 & 0xff;
+}
+
+static inline void cec_ops_cdc_hec_inquire_state(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u16 *phys_addr1,
+ __u16 *phys_addr2)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+ *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+}
+
+static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg,
+ __u16 target_phys_addr,
+ __u8 hec_func_state,
+ __u8 host_func_state,
+ __u8 enc_func_state,
+ __u8 cdc_errcode,
+ __u8 has_field,
+ __u16 hec_field)
+{
+ msg->len = has_field ? 10 : 8;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE;
+ msg->msg[5] = target_phys_addr >> 8;
+ msg->msg[6] = target_phys_addr & 0xff;
+ msg->msg[7] = (hec_func_state << 6) |
+ (host_func_state << 4) |
+ (enc_func_state << 2) |
+ cdc_errcode;
+ if (has_field) {
+ msg->msg[8] = hec_field >> 8;
+ msg->msg[9] = hec_field & 0xff;
+ }
+}
+
+static inline void cec_ops_cdc_hec_report_state(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u16 *target_phys_addr,
+ __u8 *hec_func_state,
+ __u8 *host_func_state,
+ __u8 *enc_func_state,
+ __u8 *cdc_errcode,
+ __u8 *has_field,
+ __u16 *hec_field)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *target_phys_addr = (msg->msg[5] << 8) | msg->msg[6];
+ *hec_func_state = msg->msg[7] >> 6;
+ *host_func_state = (msg->msg[7] >> 4) & 3;
+ *enc_func_state = (msg->msg[7] >> 2) & 3;
+ *cdc_errcode = msg->msg[7] & 3;
+ *has_field = msg->len >= 10;
+ *hec_field = *has_field ? ((msg->msg[8] << 8) | msg->msg[9]) : 0;
+}
+
+static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg,
+ __u16 phys_addr1,
+ __u16 phys_addr2,
+ __u8 hec_set_state,
+ __u16 phys_addr3,
+ __u16 phys_addr4,
+ __u16 phys_addr5)
+{
+ msg->len = 10;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE;
+ msg->msg[5] = phys_addr1 >> 8;
+ msg->msg[6] = phys_addr1 & 0xff;
+ msg->msg[7] = phys_addr2 >> 8;
+ msg->msg[8] = phys_addr2 & 0xff;
+ msg->msg[9] = hec_set_state;
+ if (phys_addr3 != CEC_PHYS_ADDR_INVALID) {
+ msg->msg[msg->len++] = phys_addr3 >> 8;
+ msg->msg[msg->len++] = phys_addr3 & 0xff;
+ if (phys_addr4 != CEC_PHYS_ADDR_INVALID) {
+ msg->msg[msg->len++] = phys_addr4 >> 8;
+ msg->msg[msg->len++] = phys_addr4 & 0xff;
+ if (phys_addr5 != CEC_PHYS_ADDR_INVALID) {
+ msg->msg[msg->len++] = phys_addr5 >> 8;
+ msg->msg[msg->len++] = phys_addr5 & 0xff;
+ }
+ }
+ }
+}
+
+static inline void cec_ops_cdc_hec_set_state(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u16 *phys_addr1,
+ __u16 *phys_addr2,
+ __u8 *hec_set_state,
+ __u16 *phys_addr3,
+ __u16 *phys_addr4,
+ __u16 *phys_addr5)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+ *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+ *hec_set_state = msg->msg[9];
+ *phys_addr3 = *phys_addr4 = *phys_addr5 = CEC_PHYS_ADDR_INVALID;
+ if (msg->len >= 12)
+ *phys_addr3 = (msg->msg[10] << 8) | msg->msg[11];
+ if (msg->len >= 14)
+ *phys_addr4 = (msg->msg[12] << 8) | msg->msg[13];
+ if (msg->len >= 16)
+ *phys_addr5 = (msg->msg[14] << 8) | msg->msg[15];
+}
+
+static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg,
+ __u16 phys_addr1,
+ __u8 hec_set_state)
+{
+ msg->len = 8;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT;
+ msg->msg[5] = phys_addr1 >> 8;
+ msg->msg[6] = phys_addr1 & 0xff;
+ msg->msg[7] = hec_set_state;
+}
+
+static inline void cec_ops_cdc_hec_set_state_adjacent(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u16 *phys_addr1,
+ __u8 *hec_set_state)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+ *hec_set_state = msg->msg[7];
+}
+
+static inline void cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg,
+ __u16 phys_addr1,
+ __u16 phys_addr2,
+ __u16 phys_addr3)
+{
+ msg->len = 11;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION;
+ msg->msg[5] = phys_addr1 >> 8;
+ msg->msg[6] = phys_addr1 & 0xff;
+ msg->msg[7] = phys_addr2 >> 8;
+ msg->msg[8] = phys_addr2 & 0xff;
+ msg->msg[9] = phys_addr3 >> 8;
+ msg->msg[10] = phys_addr3 & 0xff;
+}
+
+static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u16 *phys_addr1,
+ __u16 *phys_addr2,
+ __u16 *phys_addr3)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+ *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+ *phys_addr3 = (msg->msg[9] << 8) | msg->msg[10];
+}
+
+static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg)
+{
+ msg->len = 5;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE;
+}
+
+static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg)
+{
+ msg->len = 5;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER;
+}
+
+static inline void cec_ops_cdc_hec_discover(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg,
+ __u8 input_port,
+ __u8 hpd_state)
+{
+ msg->len = 6;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE;
+ msg->msg[5] = (input_port << 4) | hpd_state;
+}
+
+static inline void cec_ops_cdc_hpd_set_state(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u8 *input_port,
+ __u8 *hpd_state)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *input_port = msg->msg[5] >> 4;
+ *hpd_state = msg->msg[5] & 0xf;
+}
+
+static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg,
+ __u8 hpd_state,
+ __u8 hpd_error)
+{
+ msg->len = 6;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE;
+ msg->msg[5] = (hpd_state << 4) | hpd_error;
+}
+
+static inline void cec_ops_cdc_hpd_report_state(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u8 *hpd_state,
+ __u8 *hpd_error)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *hpd_state = msg->msg[5] >> 4;
+ *hpd_error = msg->msg[5] & 0xf;
+}
+
+#endif
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
new file mode 100644
index 000000000000..f4ec0af67707
--- /dev/null
+++ b/include/uapi/linux/cec.h
@@ -0,0 +1,1065 @@
+/*
+ * cec - HDMI Consumer Electronics Control public header
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CEC_UAPI_H
+#define _CEC_UAPI_H
+
+#include <linux/types.h>
+
+#define CEC_MAX_MSG_SIZE 16
+
+/**
+ * struct cec_msg - CEC message structure.
+ * @tx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the
+ * driver when the message transmission has finished.
+ * @rx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the
+ * driver when the message was received.
+ * @len: Length in bytes of the message.
+ * @timeout: The timeout (in ms) that is used to timeout CEC_RECEIVE.
+ * Set to 0 if you want to wait forever. This timeout can also be
+ * used with CEC_TRANSMIT as the timeout for waiting for a reply.
+ * If 0, then it will use a 1 second timeout instead of waiting
+ * forever as is done with CEC_RECEIVE.
+ * @sequence: The framework assigns a sequence number to messages that are
+ * sent. This can be used to track replies to previously sent
+ * messages.
+ * @flags: Set to 0.
+ * @msg: The message payload.
+ * @reply: This field is ignored with CEC_RECEIVE and is only used by
+ * CEC_TRANSMIT. If non-zero, then wait for a reply with this
+ * opcode. Set to CEC_MSG_FEATURE_ABORT if you want to wait for
+ * a possible ABORT reply. If there was an error when sending the
+ * msg or FeatureAbort was returned, then reply is set to 0.
+ * If reply is non-zero upon return, then len/msg are set to
+ * the received message.
+ * If reply is zero upon return and status has the
+ * CEC_TX_STATUS_FEATURE_ABORT bit set, then len/msg are set to
+ * the received feature abort message.
+ * If reply is zero upon return and status has the
+ * CEC_TX_STATUS_MAX_RETRIES bit set, then no reply was seen at
+ * all. If reply is non-zero for CEC_TRANSMIT and the message is a
+ * broadcast, then -EINVAL is returned.
+ * If reply is non-zero, then timeout is set to 1000 (the required
+ * maximum response time).
+ * @rx_status: The message receive status bits. Set by the driver.
+ * @tx_status: The message transmit status bits. Set by the driver.
+ * @tx_arb_lost_cnt: The number of 'Arbitration Lost' events. Set by the driver.
+ * @tx_nack_cnt: The number of 'Not Acknowledged' events. Set by the driver.
+ * @tx_low_drive_cnt: The number of 'Low Drive Detected' events. Set by the
+ * driver.
+ * @tx_error_cnt: The number of 'Error' events. Set by the driver.
+ */
+struct cec_msg {
+ __u64 tx_ts;
+ __u64 rx_ts;
+ __u32 len;
+ __u32 timeout;
+ __u32 sequence;
+ __u32 flags;
+ __u8 msg[CEC_MAX_MSG_SIZE];
+ __u8 reply;
+ __u8 rx_status;
+ __u8 tx_status;
+ __u8 tx_arb_lost_cnt;
+ __u8 tx_nack_cnt;
+ __u8 tx_low_drive_cnt;
+ __u8 tx_error_cnt;
+};
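+
+/*
+ * Usage sketch (illustrative only): asking the TV for its power status and
+ * waiting for the reply in a single CEC_TRANSMIT call could look roughly
+ * like this, using the cec-funcs.h helpers above and assuming fd is an
+ * open CEC device node (e.g. /dev/cec0):
+ *
+ * struct cec_msg msg;
+ * __u8 pwr;
+ *
+ * cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
+ * cec_msg_give_device_power_status(&msg, true);
+ * if (!ioctl(fd, CEC_TRANSMIT, &msg) && cec_msg_status_is_ok(&msg))
+ * cec_ops_report_power_status(&msg, &pwr);
+ */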
+
+/**
+ * cec_msg_initiator - return the initiator's logical address.
+ * @msg: the message structure
+ */
+static inline __u8 cec_msg_initiator(const struct cec_msg *msg)
+{
+ return msg->msg[0] >> 4;
+}
+
+/**
+ * cec_msg_destination - return the destination's logical address.
+ * @msg: the message structure
+ */
+static inline __u8 cec_msg_destination(const struct cec_msg *msg)
+{
+ return msg->msg[0] & 0xf;
+}
+
+/**
+ * cec_msg_opcode - return the opcode of the message, -1 for poll
+ * @msg: the message structure
+ */
+static inline int cec_msg_opcode(const struct cec_msg *msg)
+{
+ return msg->len > 1 ? msg->msg[1] : -1;
+}
+
+/**
+ * cec_msg_is_broadcast - return true if this is a broadcast message.
+ * @msg: the message structure
+ */
+static inline bool cec_msg_is_broadcast(const struct cec_msg *msg)
+{
+ return (msg->msg[0] & 0xf) == 0xf;
+}
+
+/**
+ * cec_msg_init - initialize the message structure.
+ * @msg: the message structure
+ * @initiator: the logical address of the initiator
+ * @destination: the logical address of the destination (0xf for broadcast)
+ *
+ * The whole structure is zeroed, the len field is set to 1 (i.e. a poll
+ * message) and the initiator and destination are filled in.
+ */
+static inline void cec_msg_init(struct cec_msg *msg,
+ __u8 initiator, __u8 destination)
+{
+ memset(msg, 0, sizeof(*msg));
+ msg->msg[0] = (initiator << 4) | destination;
+ msg->len = 1;
+}
+
+/**
+ * cec_msg_set_reply_to - fill in destination/initiator in a reply message.
+ * @msg: the message structure for the reply
+ * @orig: the original message structure
+ *
+ * Set the msg destination to the orig initiator and the msg initiator to the
+ * orig destination. Note that msg and orig may be the same pointer, in which
+ * case the change is done in place.
+ */
+static inline void cec_msg_set_reply_to(struct cec_msg *msg,
+ struct cec_msg *orig)
+{
+ /* The destination becomes the initiator and vice versa */
+ msg->msg[0] = (cec_msg_destination(orig) << 4) |
+ cec_msg_initiator(orig);
+ msg->reply = msg->timeout = 0;
+}
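+
+/*
+ * Usage sketch (illustrative only): a follower that just received a
+ * <Give OSD Name> request in rx_msg could build its reply like this;
+ * rx_msg, the OSD name and the actual transmit step are assumed to be
+ * handled by the caller:
+ *
+ * struct cec_msg reply = {};
+ *
+ * cec_msg_set_reply_to(&reply, &rx_msg);
+ * cec_msg_set_osd_name(&reply, "Player");
+ */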
+
+/* cec_msg flags field */
+#define CEC_MSG_FL_REPLY_TO_FOLLOWERS (1 << 0)
+
+/* cec_msg tx/rx_status field */
+#define CEC_TX_STATUS_OK (1 << 0)
+#define CEC_TX_STATUS_ARB_LOST (1 << 1)
+#define CEC_TX_STATUS_NACK (1 << 2)
+#define CEC_TX_STATUS_LOW_DRIVE (1 << 3)
+#define CEC_TX_STATUS_ERROR (1 << 4)
+#define CEC_TX_STATUS_MAX_RETRIES (1 << 5)
+
+#define CEC_RX_STATUS_OK (1 << 0)
+#define CEC_RX_STATUS_TIMEOUT (1 << 1)
+#define CEC_RX_STATUS_FEATURE_ABORT (1 << 2)
+
+static inline bool cec_msg_status_is_ok(const struct cec_msg *msg)
+{
+ if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK))
+ return false;
+ if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK))
+ return false;
+ if (!msg->tx_status && !msg->rx_status)
+ return false;
+ return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT);
+}
+
+#define CEC_LOG_ADDR_INVALID 0xff
+#define CEC_PHYS_ADDR_INVALID 0xffff
+
+/*
+ * The maximum number of logical addresses one device can be assigned to.
+ * The CEC 2.0 spec allows for only 2 logical addresses at the moment. The
+ * Analog Devices CEC hardware supports 3. So let's go wild and go for 4.
+ */
+#define CEC_MAX_LOG_ADDRS 4
+
+/* The logical addresses defined by CEC 2.0 */
+#define CEC_LOG_ADDR_TV 0
+#define CEC_LOG_ADDR_RECORD_1 1
+#define CEC_LOG_ADDR_RECORD_2 2
+#define CEC_LOG_ADDR_TUNER_1 3
+#define CEC_LOG_ADDR_PLAYBACK_1 4
+#define CEC_LOG_ADDR_AUDIOSYSTEM 5
+#define CEC_LOG_ADDR_TUNER_2 6
+#define CEC_LOG_ADDR_TUNER_3 7
+#define CEC_LOG_ADDR_PLAYBACK_2 8
+#define CEC_LOG_ADDR_RECORD_3 9
+#define CEC_LOG_ADDR_TUNER_4 10
+#define CEC_LOG_ADDR_PLAYBACK_3 11
+#define CEC_LOG_ADDR_BACKUP_1 12
+#define CEC_LOG_ADDR_BACKUP_2 13
+#define CEC_LOG_ADDR_SPECIFIC 14
+#define CEC_LOG_ADDR_UNREGISTERED 15 /* as initiator address */
+#define CEC_LOG_ADDR_BROADCAST 15 /* as destination address */
+
+/* The logical address types that the CEC device wants to claim */
+#define CEC_LOG_ADDR_TYPE_TV 0
+#define CEC_LOG_ADDR_TYPE_RECORD 1
+#define CEC_LOG_ADDR_TYPE_TUNER 2
+#define CEC_LOG_ADDR_TYPE_PLAYBACK 3
+#define CEC_LOG_ADDR_TYPE_AUDIOSYSTEM 4
+#define CEC_LOG_ADDR_TYPE_SPECIFIC 5
+#define CEC_LOG_ADDR_TYPE_UNREGISTERED 6
+/*
+ * Switches should use UNREGISTERED.
+ * Processors should use SPECIFIC.
+ */
+
+#define CEC_LOG_ADDR_MASK_TV (1 << CEC_LOG_ADDR_TV)
+#define CEC_LOG_ADDR_MASK_RECORD ((1 << CEC_LOG_ADDR_RECORD_1) | \
+ (1 << CEC_LOG_ADDR_RECORD_2) | \
+ (1 << CEC_LOG_ADDR_RECORD_3))
+#define CEC_LOG_ADDR_MASK_TUNER ((1 << CEC_LOG_ADDR_TUNER_1) | \
+ (1 << CEC_LOG_ADDR_TUNER_2) | \
+ (1 << CEC_LOG_ADDR_TUNER_3) | \
+ (1 << CEC_LOG_ADDR_TUNER_4))
+#define CEC_LOG_ADDR_MASK_PLAYBACK ((1 << CEC_LOG_ADDR_PLAYBACK_1) | \
+ (1 << CEC_LOG_ADDR_PLAYBACK_2) | \
+ (1 << CEC_LOG_ADDR_PLAYBACK_3))
+#define CEC_LOG_ADDR_MASK_AUDIOSYSTEM (1 << CEC_LOG_ADDR_AUDIOSYSTEM)
+#define CEC_LOG_ADDR_MASK_BACKUP ((1 << CEC_LOG_ADDR_BACKUP_1) | \
+ (1 << CEC_LOG_ADDR_BACKUP_2))
+#define CEC_LOG_ADDR_MASK_SPECIFIC (1 << CEC_LOG_ADDR_SPECIFIC)
+#define CEC_LOG_ADDR_MASK_UNREGISTERED (1 << CEC_LOG_ADDR_UNREGISTERED)
+
+static inline bool cec_has_tv(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_TV;
+}
+
+static inline bool cec_has_record(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD;
+}
+
+static inline bool cec_has_tuner(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER;
+}
+
+static inline bool cec_has_playback(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK;
+}
+
+static inline bool cec_has_audiosystem(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
+}
+
+static inline bool cec_has_backup(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP;
+}
+
+static inline bool cec_has_specific(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC;
+}
+
+static inline bool cec_is_unregistered(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED;
+}
+
+static inline bool cec_is_unconfigured(__u16 log_addr_mask)
+{
+ return log_addr_mask == 0;
+}
+
+/*
+ * Use this if there is no vendor ID (CEC_G_VENDOR_ID) or if the vendor ID
+ * should be disabled (CEC_S_VENDOR_ID)
+ */
+#define CEC_VENDOR_ID_NONE 0xffffffff
+
+/* The message handling modes */
+/* Modes for initiator */
+#define CEC_MODE_NO_INITIATOR (0x0 << 0)
+#define CEC_MODE_INITIATOR (0x1 << 0)
+#define CEC_MODE_EXCL_INITIATOR (0x2 << 0)
+#define CEC_MODE_INITIATOR_MSK 0x0f
+
+/* Modes for follower */
+#define CEC_MODE_NO_FOLLOWER (0x0 << 4)
+#define CEC_MODE_FOLLOWER (0x1 << 4)
+#define CEC_MODE_EXCL_FOLLOWER (0x2 << 4)
+#define CEC_MODE_EXCL_FOLLOWER_PASSTHRU (0x3 << 4)
+#define CEC_MODE_MONITOR (0xe << 4)
+#define CEC_MODE_MONITOR_ALL (0xf << 4)
+#define CEC_MODE_FOLLOWER_MSK 0xf0
+
+/* Userspace has to configure the physical address */
+#define CEC_CAP_PHYS_ADDR (1 << 0)
+/* Userspace has to configure the logical addresses */
+#define CEC_CAP_LOG_ADDRS (1 << 1)
+/* Userspace can transmit messages (and thus become follower as well) */
+#define CEC_CAP_TRANSMIT (1 << 2)
+/*
+ * Passthrough all messages instead of processing them.
+ */
+#define CEC_CAP_PASSTHROUGH (1 << 3)
+/* Supports remote control */
+#define CEC_CAP_RC (1 << 4)
+/* Hardware can monitor all messages, not just directed and broadcast. */
+#define CEC_CAP_MONITOR_ALL (1 << 5)
+
+/**
+ * struct cec_caps - CEC capabilities structure.
+ * @driver: name of the CEC device driver.
+ * @name: name of the CEC device. @driver + @name must be unique.
+ * @available_log_addrs: number of available logical addresses.
+ * @capabilities: capabilities of the CEC adapter.
+ * @version: version of the CEC adapter framework.
+ */
+struct cec_caps {
+ char driver[32];
+ char name[32];
+ __u32 available_log_addrs;
+ __u32 capabilities;
+ __u32 version;
+};
+
+/**
+ * struct cec_log_addrs - CEC logical addresses structure.
+ * @log_addr: the claimed logical addresses. Set by the driver.
+ * @log_addr_mask: current logical address mask. Set by the driver.
+ * @cec_version: the CEC version that the adapter should implement. Set by the
+ * caller.
+ * @num_log_addrs: how many logical addresses should be claimed. Set by the
+ * caller.
+ * @vendor_id: the vendor ID of the device. Set by the caller.
+ * @flags: flags.
+ * @osd_name: the OSD name of the device. Set by the caller.
+ * @primary_device_type: the primary device type for each logical address.
+ * Set by the caller.
+ * @log_addr_type: the logical address types. Set by the caller.
+ * @all_device_types: CEC 2.0: all device types represented by the logical
+ * address. Set by the caller.
+ * @features: CEC 2.0: The logical address features. Set by the caller.
+ */
+struct cec_log_addrs {
+ __u8 log_addr[CEC_MAX_LOG_ADDRS];
+ __u16 log_addr_mask;
+ __u8 cec_version;
+ __u8 num_log_addrs;
+ __u32 vendor_id;
+ __u32 flags;
+ char osd_name[15];
+ __u8 primary_device_type[CEC_MAX_LOG_ADDRS];
+ __u8 log_addr_type[CEC_MAX_LOG_ADDRS];
+
+ /* CEC 2.0 */
+ __u8 all_device_types[CEC_MAX_LOG_ADDRS];
+ __u8 features[CEC_MAX_LOG_ADDRS][12];
+};
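+
+/*
+ * Usage sketch (illustrative only): claiming a single playback logical
+ * address could look roughly like this; the OSD name and device types are
+ * example values and fd is assumed to be an open CEC device node:
+ *
+ * struct cec_log_addrs laddrs = {};
+ *
+ * laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
+ * laddrs.num_log_addrs = 1;
+ * laddrs.vendor_id = CEC_VENDOR_ID_NONE;
+ * strcpy(laddrs.osd_name, "Player");
+ * laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
+ * laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+ * laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
+ * ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
+ */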
+
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0)
+/* Passthrough RC messages to the input subsystem */
+#define CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU (1 << 1)
+/* CDC-Only device: supports only CDC messages */
+#define CEC_LOG_ADDRS_FL_CDC_ONLY (1 << 2)
+
+/* Events */
+
+/* Event that occurs when the adapter state changes */
+#define CEC_EVENT_STATE_CHANGE 1
+/*
+ * This event is sent when messages are lost because the application
+ * didn't empty the message queue in time
+ */
+#define CEC_EVENT_LOST_MSGS 2
+
+#define CEC_EVENT_FL_INITIAL_STATE (1 << 0)
+
+/**
+ * struct cec_event_state_change - used when the CEC adapter changes state.
+ * @phys_addr: the current physical address
+ * @log_addr_mask: the current logical address mask
+ */
+struct cec_event_state_change {
+ __u16 phys_addr;
+ __u16 log_addr_mask;
+};
+
+/**
+ * struct cec_event_lost_msgs - tells you how many messages were lost.
+ * @lost_msgs: how many messages were lost.
+ */
+struct cec_event_lost_msgs {
+ __u32 lost_msgs;
+};
+
+/**
+ * struct cec_event - CEC event structure
+ * @ts: the timestamp of when the event was sent.
+ * @event: the event.
+ * @flags: event flags.
+ * @state_change: the event payload for CEC_EVENT_STATE_CHANGE.
+ * @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS.
+ * @raw: array to pad the union.
+ */
+struct cec_event {
+ __u64 ts;
+ __u32 event;
+ __u32 flags;
+ union {
+ struct cec_event_state_change state_change;
+ struct cec_event_lost_msgs lost_msgs;
+ __u32 raw[16];
+ };
+};
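+
+/*
+ * Usage sketch (illustrative only): dequeuing an event and reacting when
+ * the adapter gains a valid physical address; fd is assumed to be an open
+ * CEC device node and handle_connect() is a placeholder for application
+ * code:
+ *
+ * struct cec_event ev;
+ *
+ * if (!ioctl(fd, CEC_DQEVENT, &ev) &&
+ * ev.event == CEC_EVENT_STATE_CHANGE &&
+ * ev.state_change.phys_addr != CEC_PHYS_ADDR_INVALID)
+ * handle_connect(ev.state_change.phys_addr);
+ */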
+
+/* ioctls */
+
+/* Adapter capabilities */
+#define CEC_ADAP_G_CAPS _IOWR('a', 0, struct cec_caps)
+
+/*
+ * phys_addr is either 0 (if this is the CEC root device)
+ * or a valid physical address obtained from the sink's EDID
+ * as read by this CEC device (if this is a source device)
+ * or a physical address obtained and modified from a sink
+ * EDID and used for a sink CEC device.
+ * If nothing is connected, then phys_addr is 0xffff.
+ * See HDMI 1.4b, section 8.7 (Physical Address).
+ *
+ * The CEC_ADAP_S_PHYS_ADDR ioctl may not be available if that is handled
+ * internally.
+ */
+#define CEC_ADAP_G_PHYS_ADDR _IOR('a', 1, __u16)
+#define CEC_ADAP_S_PHYS_ADDR _IOW('a', 2, __u16)
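+
+/*
+ * Usage sketch (illustrative only): a source device whose physical address
+ * was read from the sink's EDID (the placeholder 1.0.0.0 here) could hand
+ * it to the framework like this, assuming fd is an open CEC device node:
+ *
+ * __u16 pa = 0x1000;
+ *
+ * ioctl(fd, CEC_ADAP_S_PHYS_ADDR, &pa);
+ */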
+
+/*
+ * Configure the CEC adapter. It sets the device type and which
+ * logical types it will try to claim. It will return which
+ * logical addresses it could actually claim.
+ * An error is returned if the adapter is disabled or if there
+ * is no physical address assigned.
+ */
+
+#define CEC_ADAP_G_LOG_ADDRS _IOR('a', 3, struct cec_log_addrs)
+#define CEC_ADAP_S_LOG_ADDRS _IOWR('a', 4, struct cec_log_addrs)
+
+/* Transmit/receive a CEC command */
+#define CEC_TRANSMIT _IOWR('a', 5, struct cec_msg)
+#define CEC_RECEIVE _IOWR('a', 6, struct cec_msg)
+
+/* Dequeue CEC events */
+#define CEC_DQEVENT _IOWR('a', 7, struct cec_event)
+
+/*
+ * Get and set the message handling mode for this filehandle.
+ */
+#define CEC_G_MODE _IOR('a', 8, __u32)
+#define CEC_S_MODE _IOW('a', 9, __u32)
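+
+/*
+ * Usage sketch (illustrative only): a process that wants to transmit
+ * messages and also receive directed traffic could select its mode like
+ * this, assuming fd is an open CEC device node:
+ *
+ * __u32 mode = CEC_MODE_INITIATOR | CEC_MODE_FOLLOWER;
+ *
+ * ioctl(fd, CEC_S_MODE, &mode);
+ */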
+
+/*
+ * The remainder of this header defines all CEC messages and operands.
+ * The format matters since the cec-ctl utility parses it to generate
+ * code for implementing all these messages.
+ *
+ * Comments ending with 'Feature' group messages for each feature.
+ * If messages are part of multiple features, then the "Has also"
+ * comment is used to list the previously defined messages that are
+ * supported by the feature.
+ *
+ * Before operands are defined a comment is added that gives the
+ * name of the operand and in brackets the variable name of the
+ * corresponding argument in the cec-funcs.h function.
+ */
+
+/* Messages */
+
+/* One Touch Play Feature */
+#define CEC_MSG_ACTIVE_SOURCE 0x82
+#define CEC_MSG_IMAGE_VIEW_ON 0x04
+#define CEC_MSG_TEXT_VIEW_ON 0x0d
+
+
+/* Routing Control Feature */
+
+/*
+ * Has also:
+ * CEC_MSG_ACTIVE_SOURCE
+ */
+
+#define CEC_MSG_INACTIVE_SOURCE 0x9d
+#define CEC_MSG_REQUEST_ACTIVE_SOURCE 0x85
+#define CEC_MSG_ROUTING_CHANGE 0x80
+#define CEC_MSG_ROUTING_INFORMATION 0x81
+#define CEC_MSG_SET_STREAM_PATH 0x86
+
+
+/* Standby Feature */
+#define CEC_MSG_STANDBY 0x36
+
+
+/* One Touch Record Feature */
+#define CEC_MSG_RECORD_OFF 0x0b
+#define CEC_MSG_RECORD_ON 0x09
+/* Record Source Type Operand (rec_src_type) */
+#define CEC_OP_RECORD_SRC_OWN 1
+#define CEC_OP_RECORD_SRC_DIGITAL 2
+#define CEC_OP_RECORD_SRC_ANALOG 3
+#define CEC_OP_RECORD_SRC_EXT_PLUG 4
+#define CEC_OP_RECORD_SRC_EXT_PHYS_ADDR 5
+/* Service Identification Method Operand (service_id_method) */
+#define CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID 0
+#define CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL 1
+/* Digital Service Broadcast System Operand (dig_bcast_system) */
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_GEN 0x00
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN 0x01
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_GEN 0x02
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_BS 0x08
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_CS 0x09
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_T 0x0a
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE 0x10
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT 0x11
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T 0x12
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_C 0x18
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S 0x19
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S2 0x1a
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T 0x1b
+/* Analogue Broadcast Type Operand (ana_bcast_type) */
+#define CEC_OP_ANA_BCAST_TYPE_CABLE 0
+#define CEC_OP_ANA_BCAST_TYPE_SATELLITE 1
+#define CEC_OP_ANA_BCAST_TYPE_TERRESTRIAL 2
+/* Broadcast System Operand (bcast_system) */
+#define CEC_OP_BCAST_SYSTEM_PAL_BG 0x00
+#define CEC_OP_BCAST_SYSTEM_SECAM_LQ 0x01 /* SECAM L' */
+#define CEC_OP_BCAST_SYSTEM_PAL_M 0x02
+#define CEC_OP_BCAST_SYSTEM_NTSC_M 0x03
+#define CEC_OP_BCAST_SYSTEM_PAL_I 0x04
+#define CEC_OP_BCAST_SYSTEM_SECAM_DK 0x05
+#define CEC_OP_BCAST_SYSTEM_SECAM_BG 0x06
+#define CEC_OP_BCAST_SYSTEM_SECAM_L 0x07
+#define CEC_OP_BCAST_SYSTEM_PAL_DK 0x08
+#define CEC_OP_BCAST_SYSTEM_OTHER 0x1f
+/* Channel Number Format Operand (channel_number_fmt) */
+#define CEC_OP_CHANNEL_NUMBER_FMT_1_PART 0x01
+#define CEC_OP_CHANNEL_NUMBER_FMT_2_PART 0x02
+
+#define CEC_MSG_RECORD_STATUS 0x0a
+/* Record Status Operand (rec_status) */
+#define CEC_OP_RECORD_STATUS_CUR_SRC 0x01
+#define CEC_OP_RECORD_STATUS_DIG_SERVICE 0x02
+#define CEC_OP_RECORD_STATUS_ANA_SERVICE 0x03
+#define CEC_OP_RECORD_STATUS_EXT_INPUT 0x04
+#define CEC_OP_RECORD_STATUS_NO_DIG_SERVICE 0x05
+#define CEC_OP_RECORD_STATUS_NO_ANA_SERVICE 0x06
+#define CEC_OP_RECORD_STATUS_NO_SERVICE 0x07
+#define CEC_OP_RECORD_STATUS_INVALID_EXT_PLUG 0x09
+#define CEC_OP_RECORD_STATUS_INVALID_EXT_PHYS_ADDR 0x0a
+#define CEC_OP_RECORD_STATUS_UNSUP_CA 0x0b
+#define CEC_OP_RECORD_STATUS_NO_CA_ENTITLEMENTS 0x0c
+#define CEC_OP_RECORD_STATUS_CANT_COPY_SRC 0x0d
+#define CEC_OP_RECORD_STATUS_NO_MORE_COPIES 0x0e
+#define CEC_OP_RECORD_STATUS_NO_MEDIA 0x10
+#define CEC_OP_RECORD_STATUS_PLAYING 0x11
+#define CEC_OP_RECORD_STATUS_ALREADY_RECORDING 0x12
+#define CEC_OP_RECORD_STATUS_MEDIA_PROT 0x13
+#define CEC_OP_RECORD_STATUS_NO_SIGNAL 0x14
+#define CEC_OP_RECORD_STATUS_MEDIA_PROBLEM 0x15
+#define CEC_OP_RECORD_STATUS_NO_SPACE 0x16
+#define CEC_OP_RECORD_STATUS_PARENTAL_LOCK 0x17
+#define CEC_OP_RECORD_STATUS_TERMINATED_OK 0x1a
+#define CEC_OP_RECORD_STATUS_ALREADY_TERM 0x1b
+#define CEC_OP_RECORD_STATUS_OTHER 0x1f
+
+#define CEC_MSG_RECORD_TV_SCREEN 0x0f
+
+
+/* Timer Programming Feature */
+#define CEC_MSG_CLEAR_ANALOGUE_TIMER 0x33
+/* Recording Sequence Operand (recording_seq) */
+#define CEC_OP_REC_SEQ_SUNDAY 0x01
+#define CEC_OP_REC_SEQ_MONDAY 0x02
+#define CEC_OP_REC_SEQ_TUESDAY 0x04
+#define CEC_OP_REC_SEQ_WEDNESDAY 0x08
+#define CEC_OP_REC_SEQ_THURSDAY 0x10
+#define CEC_OP_REC_SEQ_FRIDAY 0x20
+#define CEC_OP_REC_SEQ_SATERDAY 0x40
+#define CEC_OP_REC_SEQ_ONCE_ONLY 0x00
+
+#define CEC_MSG_CLEAR_DIGITAL_TIMER 0x99
+
+#define CEC_MSG_CLEAR_EXT_TIMER 0xa1
+/* External Source Specifier Operand (ext_src_spec) */
+#define CEC_OP_EXT_SRC_PLUG 0x04
+#define CEC_OP_EXT_SRC_PHYS_ADDR 0x05
+
+#define CEC_MSG_SET_ANALOGUE_TIMER 0x34
+#define CEC_MSG_SET_DIGITAL_TIMER 0x97
+#define CEC_MSG_SET_EXT_TIMER 0xa2
+
+#define CEC_MSG_SET_TIMER_PROGRAM_TITLE 0x67
+#define CEC_MSG_TIMER_CLEARED_STATUS 0x43
+/* Timer Cleared Status Data Operand (timer_cleared_status) */
+#define CEC_OP_TIMER_CLR_STAT_RECORDING 0x00
+#define CEC_OP_TIMER_CLR_STAT_NO_MATCHING 0x01
+#define CEC_OP_TIMER_CLR_STAT_NO_INFO 0x02
+#define CEC_OP_TIMER_CLR_STAT_CLEARED 0x80
+
+#define CEC_MSG_TIMER_STATUS 0x35
+/* Timer Overlap Warning Operand (timer_overlap_warning) */
+#define CEC_OP_TIMER_OVERLAP_WARNING_NO_OVERLAP 0
+#define CEC_OP_TIMER_OVERLAP_WARNING_OVERLAP 1
+/* Media Info Operand (media_info) */
+#define CEC_OP_MEDIA_INFO_UNPROT_MEDIA 0
+#define CEC_OP_MEDIA_INFO_PROT_MEDIA 1
+#define CEC_OP_MEDIA_INFO_NO_MEDIA 2
+/* Programmed Indicator Operand (prog_indicator) */
+#define CEC_OP_PROG_IND_NOT_PROGRAMMED 0
+#define CEC_OP_PROG_IND_PROGRAMMED 1
+/* Programmed Info Operand (prog_info) */
+#define CEC_OP_PROG_INFO_ENOUGH_SPACE 0x08
+#define CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE 0x09
+#define CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE 0x0b
+#define CEC_OP_PROG_INFO_NONE_AVAILABLE 0x0a
+/* Not Programmed Error Info Operand (prog_error) */
+#define CEC_OP_PROG_ERROR_NO_FREE_TIMER 0x01
+#define CEC_OP_PROG_ERROR_DATE_OUT_OF_RANGE 0x02
+#define CEC_OP_PROG_ERROR_REC_SEQ_ERROR 0x03
+#define CEC_OP_PROG_ERROR_INV_EXT_PLUG 0x04
+#define CEC_OP_PROG_ERROR_INV_EXT_PHYS_ADDR 0x05
+#define CEC_OP_PROG_ERROR_CA_UNSUPP 0x06
+#define CEC_OP_PROG_ERROR_INSUF_CA_ENTITLEMENTS 0x07
+#define CEC_OP_PROG_ERROR_RESOLUTION_UNSUPP 0x08
+#define CEC_OP_PROG_ERROR_PARENTAL_LOCK 0x09
+#define CEC_OP_PROG_ERROR_CLOCK_FAILURE 0x0a
+#define CEC_OP_PROG_ERROR_DUPLICATE 0x0e
+
+
+/* System Information Feature */
+#define CEC_MSG_CEC_VERSION 0x9e
+/* CEC Version Operand (cec_version) */
+#define CEC_OP_CEC_VERSION_1_3A 4
+#define CEC_OP_CEC_VERSION_1_4 5
+#define CEC_OP_CEC_VERSION_2_0 6
+
+#define CEC_MSG_GET_CEC_VERSION 0x9f
+#define CEC_MSG_GIVE_PHYSICAL_ADDR 0x83
+#define CEC_MSG_GET_MENU_LANGUAGE 0x91
+#define CEC_MSG_REPORT_PHYSICAL_ADDR 0x84
+/* Primary Device Type Operand (prim_devtype) */
+#define CEC_OP_PRIM_DEVTYPE_TV 0
+#define CEC_OP_PRIM_DEVTYPE_RECORD 1
+#define CEC_OP_PRIM_DEVTYPE_TUNER 3
+#define CEC_OP_PRIM_DEVTYPE_PLAYBACK 4
+#define CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM 5
+#define CEC_OP_PRIM_DEVTYPE_SWITCH 6
+#define CEC_OP_PRIM_DEVTYPE_PROCESSOR 7
+
+#define CEC_MSG_SET_MENU_LANGUAGE 0x32
+#define CEC_MSG_REPORT_FEATURES 0xa6 /* HDMI 2.0 */
+/* All Device Types Operand (all_device_types) */
+#define CEC_OP_ALL_DEVTYPE_TV 0x80
+#define CEC_OP_ALL_DEVTYPE_RECORD 0x40
+#define CEC_OP_ALL_DEVTYPE_TUNER 0x20
+#define CEC_OP_ALL_DEVTYPE_PLAYBACK 0x10
+#define CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM 0x08
+#define CEC_OP_ALL_DEVTYPE_SWITCH 0x04
+/*
+ * And if you were wondering what happened to PROCESSOR devices: those should
+ * be mapped to a SWITCH.
+ */
+
+/* Valid for RC Profile and Device Feature operands */
+#define CEC_OP_FEAT_EXT 0x80 /* Extension bit */
+/* RC Profile Operand (rc_profile) */
+#define CEC_OP_FEAT_RC_TV_PROFILE_NONE 0x00
+#define CEC_OP_FEAT_RC_TV_PROFILE_1 0x02
+#define CEC_OP_FEAT_RC_TV_PROFILE_2 0x06
+#define CEC_OP_FEAT_RC_TV_PROFILE_3 0x0a
+#define CEC_OP_FEAT_RC_TV_PROFILE_4 0x0e
+#define CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU 0x50
+#define CEC_OP_FEAT_RC_SRC_HAS_DEV_SETUP_MENU 0x48
+#define CEC_OP_FEAT_RC_SRC_HAS_CONTENTS_MENU 0x44
+#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_TOP_MENU 0x42
+#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_CONTEXT_MENU 0x41
+/* Device Feature Operand (dev_features) */
+#define CEC_OP_FEAT_DEV_HAS_RECORD_TV_SCREEN 0x40
+#define CEC_OP_FEAT_DEV_HAS_SET_OSD_STRING 0x20
+#define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL 0x10
+#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08
+#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04
+#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02
+
+#define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */
+
+
+/* Deck Control Feature */
+#define CEC_MSG_DECK_CONTROL 0x42
+/* Deck Control Mode Operand (deck_control_mode) */
+#define CEC_OP_DECK_CTL_MODE_SKIP_FWD 1
+#define CEC_OP_DECK_CTL_MODE_SKIP_REV 2
+#define CEC_OP_DECK_CTL_MODE_STOP 3
+#define CEC_OP_DECK_CTL_MODE_EJECT 4
+
+#define CEC_MSG_DECK_STATUS 0x1b
+/* Deck Info Operand (deck_info) */
+#define CEC_OP_DECK_INFO_PLAY 0x11
+#define CEC_OP_DECK_INFO_RECORD 0x12
+#define CEC_OP_DECK_INFO_PLAY_REV 0x13
+#define CEC_OP_DECK_INFO_STILL 0x14
+#define CEC_OP_DECK_INFO_SLOW 0x15
+#define CEC_OP_DECK_INFO_SLOW_REV 0x16
+#define CEC_OP_DECK_INFO_FAST_FWD 0x17
+#define CEC_OP_DECK_INFO_FAST_REV 0x18
+#define CEC_OP_DECK_INFO_NO_MEDIA 0x19
+#define CEC_OP_DECK_INFO_STOP 0x1a
+#define CEC_OP_DECK_INFO_SKIP_FWD 0x1b
+#define CEC_OP_DECK_INFO_SKIP_REV 0x1c
+#define CEC_OP_DECK_INFO_INDEX_SEARCH_FWD 0x1d
+#define CEC_OP_DECK_INFO_INDEX_SEARCH_REV 0x1e
+#define CEC_OP_DECK_INFO_OTHER 0x1f
+
+#define CEC_MSG_GIVE_DECK_STATUS 0x1a
+/* Status Request Operand (status_req) */
+#define CEC_OP_STATUS_REQ_ON 1
+#define CEC_OP_STATUS_REQ_OFF 2
+#define CEC_OP_STATUS_REQ_ONCE 3
+
+#define CEC_MSG_PLAY 0x41
+/* Play Mode Operand (play_mode) */
+#define CEC_OP_PLAY_MODE_PLAY_FWD 0x24
+#define CEC_OP_PLAY_MODE_PLAY_REV 0x20
+#define CEC_OP_PLAY_MODE_PLAY_STILL 0x25
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MIN 0x05
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MED 0x06
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MAX 0x07
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MIN 0x09
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MED 0x0a
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MAX 0x0b
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MIN 0x15
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MED 0x16
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MAX 0x17
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MIN 0x19
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MED 0x1a
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MAX 0x1b
+
+
+/* Tuner Control Feature */
+#define CEC_MSG_GIVE_TUNER_DEVICE_STATUS 0x08
+#define CEC_MSG_SELECT_ANALOGUE_SERVICE 0x92
+#define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93
+#define CEC_MSG_TUNER_DEVICE_STATUS 0x07
+/* Recording Flag Operand (rec_flag) */
+#define CEC_OP_REC_FLAG_USED 0
+#define CEC_OP_REC_FLAG_NOT_USED 1
+/* Tuner Display Info Operand (tuner_display_info) */
+#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0
+#define CEC_OP_TUNER_DISPLAY_INFO_NONE 1
+#define CEC_OP_TUNER_DISPLAY_INFO_ANALOGUE 2
+
+#define CEC_MSG_TUNER_STEP_DECREMENT 0x06
+#define CEC_MSG_TUNER_STEP_INCREMENT 0x05
+
+
+/* Vendor Specific Commands Feature */
+
+/*
+ * Has also:
+ * CEC_MSG_CEC_VERSION
+ * CEC_MSG_GET_CEC_VERSION
+ */
+#define CEC_MSG_DEVICE_VENDOR_ID 0x87
+#define CEC_MSG_GIVE_DEVICE_VENDOR_ID 0x8c
+#define CEC_MSG_VENDOR_COMMAND 0x89
+#define CEC_MSG_VENDOR_COMMAND_WITH_ID 0xa0
+#define CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN 0x8a
+#define CEC_MSG_VENDOR_REMOTE_BUTTON_UP 0x8b
+
+
+/* OSD Display Feature */
+#define CEC_MSG_SET_OSD_STRING 0x64
+/* Display Control Operand (disp_ctl) */
+#define CEC_OP_DISP_CTL_DEFAULT 0x00
+#define CEC_OP_DISP_CTL_UNTIL_CLEARED 0x40
+#define CEC_OP_DISP_CTL_CLEAR 0x80
+
+
+/* Device OSD Transfer Feature */
+#define CEC_MSG_GIVE_OSD_NAME 0x46
+#define CEC_MSG_SET_OSD_NAME 0x47
+
+
+/* Device Menu Control Feature */
+#define CEC_MSG_MENU_REQUEST 0x8d
+/* Menu Request Type Operand (menu_req) */
+#define CEC_OP_MENU_REQUEST_ACTIVATE 0x00
+#define CEC_OP_MENU_REQUEST_DEACTIVATE 0x01
+#define CEC_OP_MENU_REQUEST_QUERY 0x02
+
+#define CEC_MSG_MENU_STATUS 0x8e
+/* Menu State Operand (menu_state) */
+#define CEC_OP_MENU_STATE_ACTIVATED 0x00
+#define CEC_OP_MENU_STATE_DEACTIVATED 0x01
+
+#define CEC_MSG_USER_CONTROL_PRESSED 0x44
+/* UI Broadcast Type Operand (ui_bcast_type) */
+#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL 0x00
+#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA 0x01
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE 0x10
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_T 0x20
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_CABLE 0x30
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_SAT 0x40
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL 0x50
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_T 0x60
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_CABLE 0x70
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_SAT 0x80
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT 0x90
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT2 0x91
+#define CEC_OP_UI_BCAST_TYPE_IP 0xa0
+/* UI Sound Presentation Control Operand (ui_snd_pres_ctl) */
+#define CEC_OP_UI_SND_PRES_CTL_DUAL_MONO 0x10
+#define CEC_OP_UI_SND_PRES_CTL_KARAOKE 0x20
+#define CEC_OP_UI_SND_PRES_CTL_DOWNMIX 0x80
+#define CEC_OP_UI_SND_PRES_CTL_REVERB 0x90
+#define CEC_OP_UI_SND_PRES_CTL_EQUALIZER 0xa0
+#define CEC_OP_UI_SND_PRES_CTL_BASS_UP 0xb1
+#define CEC_OP_UI_SND_PRES_CTL_BASS_NEUTRAL 0xb2
+#define CEC_OP_UI_SND_PRES_CTL_BASS_DOWN 0xb3
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_UP 0xc1
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_NEUTRAL 0xc2
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_DOWN 0xc3
+
+#define CEC_MSG_USER_CONTROL_RELEASED 0x45
+
+
+/* Remote Control Passthrough Feature */
+
+/*
+ * Has also:
+ * CEC_MSG_USER_CONTROL_PRESSED
+ * CEC_MSG_USER_CONTROL_RELEASED
+ */
+
+
+/* Power Status Feature */
+#define CEC_MSG_GIVE_DEVICE_POWER_STATUS 0x8f
+#define CEC_MSG_REPORT_POWER_STATUS 0x90
+/* Power Status Operand (pwr_state) */
+#define CEC_OP_POWER_STATUS_ON 0
+#define CEC_OP_POWER_STATUS_STANDBY 1
+#define CEC_OP_POWER_STATUS_TO_ON 2
+#define CEC_OP_POWER_STATUS_TO_STANDBY 3
+
+
+/* General Protocol Messages */
+#define CEC_MSG_FEATURE_ABORT 0x00
+/* Abort Reason Operand (reason) */
+#define CEC_OP_ABORT_UNRECOGNIZED_OP 0
+#define CEC_OP_ABORT_INCORRECT_MODE 1
+#define CEC_OP_ABORT_NO_SOURCE 2
+#define CEC_OP_ABORT_INVALID_OP 3
+#define CEC_OP_ABORT_REFUSED 4
+#define CEC_OP_ABORT_UNDETERMINED 5
+
+#define CEC_MSG_ABORT 0xff
+
+
+/* System Audio Control Feature */
+
+/*
+ * Has also:
+ * CEC_MSG_USER_CONTROL_PRESSED
+ * CEC_MSG_USER_CONTROL_RELEASED
+ */
+#define CEC_MSG_GIVE_AUDIO_STATUS 0x71
+#define CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS 0x7d
+#define CEC_MSG_REPORT_AUDIO_STATUS 0x7a
+/* Audio Mute Status Operand (aud_mute_status) */
+#define CEC_OP_AUD_MUTE_STATUS_OFF 0
+#define CEC_OP_AUD_MUTE_STATUS_ON 1
+
+#define CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR 0xa3
+#define CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR 0xa4
+#define CEC_MSG_SET_SYSTEM_AUDIO_MODE 0x72
+/* System Audio Status Operand (sys_aud_status) */
+#define CEC_OP_SYS_AUD_STATUS_OFF 0
+#define CEC_OP_SYS_AUD_STATUS_ON 1
+
+#define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST 0x70
+#define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS 0x7e
+/* Audio Format ID Operand (audio_format_id) */
+#define CEC_OP_AUD_FMT_ID_CEA861 0
+#define CEC_OP_AUD_FMT_ID_CEA861_CXT 1
+
+
+/* Audio Rate Control Feature */
+#define CEC_MSG_SET_AUDIO_RATE 0x9a
+/* Audio Rate Operand (audio_rate) */
+#define CEC_OP_AUD_RATE_OFF 0
+#define CEC_OP_AUD_RATE_WIDE_STD 1
+#define CEC_OP_AUD_RATE_WIDE_FAST 2
+#define CEC_OP_AUD_RATE_WIDE_SLOW 3
+#define CEC_OP_AUD_RATE_NARROW_STD 4
+#define CEC_OP_AUD_RATE_NARROW_FAST 5
+#define CEC_OP_AUD_RATE_NARROW_SLOW 6
+
+
+/* Audio Return Channel Control Feature */
+#define CEC_MSG_INITIATE_ARC 0xc0
+#define CEC_MSG_REPORT_ARC_INITIATED 0xc1
+#define CEC_MSG_REPORT_ARC_TERMINATED 0xc2
+#define CEC_MSG_REQUEST_ARC_INITIATION 0xc3
+#define CEC_MSG_REQUEST_ARC_TERMINATION 0xc4
+#define CEC_MSG_TERMINATE_ARC 0xc5
+
+
+/* Dynamic Audio Lipsync Feature */
+/* Only for CEC 2.0 and up */
+#define CEC_MSG_REQUEST_CURRENT_LATENCY 0xa7
+#define CEC_MSG_REPORT_CURRENT_LATENCY 0xa8
+/* Low Latency Mode Operand (low_latency_mode) */
+#define CEC_OP_LOW_LATENCY_MODE_OFF 0
+#define CEC_OP_LOW_LATENCY_MODE_ON 1
+/* Audio Output Compensated Operand (audio_out_compensated) */
+#define CEC_OP_AUD_OUT_COMPENSATED_NA 0
+#define CEC_OP_AUD_OUT_COMPENSATED_DELAY 1
+#define CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY 2
+#define CEC_OP_AUD_OUT_COMPENSATED_PARTIAL_DELAY 3
+
+
+/* Capability Discovery and Control Feature */
+#define CEC_MSG_CDC_MESSAGE 0xf8
+/* Ethernet-over-HDMI: nobody ever does this... */
+#define CEC_MSG_CDC_HEC_INQUIRE_STATE 0x00
+#define CEC_MSG_CDC_HEC_REPORT_STATE 0x01
+/* HEC Functionality State Operand (hec_func_state) */
+#define CEC_OP_HEC_FUNC_STATE_NOT_SUPPORTED 0
+#define CEC_OP_HEC_FUNC_STATE_INACTIVE 1
+#define CEC_OP_HEC_FUNC_STATE_ACTIVE 2
+#define CEC_OP_HEC_FUNC_STATE_ACTIVATION_FIELD 3
+/* Host Functionality State Operand (host_func_state) */
+#define CEC_OP_HOST_FUNC_STATE_NOT_SUPPORTED 0
+#define CEC_OP_HOST_FUNC_STATE_INACTIVE 1
+#define CEC_OP_HOST_FUNC_STATE_ACTIVE 2
+/* ENC Functionality State Operand (enc_func_state) */
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_NOT_SUPPORTED 0
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_INACTIVE 1
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_ACTIVE 2
+/* CDC Error Code Operand (cdc_errcode) */
+#define CEC_OP_CDC_ERROR_CODE_NONE 0
+#define CEC_OP_CDC_ERROR_CODE_CAP_UNSUPPORTED 1
+#define CEC_OP_CDC_ERROR_CODE_WRONG_STATE 2
+#define CEC_OP_CDC_ERROR_CODE_OTHER 3
+/* HEC Support Operand (hec_support) */
+#define CEC_OP_HEC_SUPPORT_NO 0
+#define CEC_OP_HEC_SUPPORT_YES 1
+/* HEC Activation Operand (hec_activation) */
+#define CEC_OP_HEC_ACTIVATION_ON 0
+#define CEC_OP_HEC_ACTIVATION_OFF 1
+
+#define CEC_MSG_CDC_HEC_SET_STATE_ADJACENT 0x02
+#define CEC_MSG_CDC_HEC_SET_STATE 0x03
+/* HEC Set State Operand (hec_set_state) */
+#define CEC_OP_HEC_SET_STATE_DEACTIVATE 0
+#define CEC_OP_HEC_SET_STATE_ACTIVATE 1
+
+#define CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION 0x04
+#define CEC_MSG_CDC_HEC_NOTIFY_ALIVE 0x05
+#define CEC_MSG_CDC_HEC_DISCOVER 0x06
+/* Hotplug Detect messages */
+#define CEC_MSG_CDC_HPD_SET_STATE 0x10
+/* HPD State Operand (hpd_state) */
+#define CEC_OP_HPD_STATE_CP_EDID_DISABLE 0
+#define CEC_OP_HPD_STATE_CP_EDID_ENABLE 1
+#define CEC_OP_HPD_STATE_CP_EDID_DISABLE_ENABLE 2
+#define CEC_OP_HPD_STATE_EDID_DISABLE 3
+#define CEC_OP_HPD_STATE_EDID_ENABLE 4
+#define CEC_OP_HPD_STATE_EDID_DISABLE_ENABLE 5
+#define CEC_MSG_CDC_HPD_REPORT_STATE 0x11
+/* HPD Error Code Operand (hpd_error) */
+#define CEC_OP_HPD_ERROR_NONE 0
+#define CEC_OP_HPD_ERROR_INITIATOR_NOT_CAPABLE 1
+#define CEC_OP_HPD_ERROR_INITIATOR_WRONG_STATE 2
+#define CEC_OP_HPD_ERROR_OTHER 3
+#define CEC_OP_HPD_ERROR_NONE_NO_VIDEO 4
+
+/* End of Messages */
+
+/* Helper functions to identify the 'special' CEC devices */
+
+static inline bool cec_is_2nd_tv(const struct cec_log_addrs *las)
+{
+ /*
+ * It is a second TV if the logical address is 14 or 15 and the
+ * primary device type is a TV.
+ */
+ return las->num_log_addrs &&
+ las->log_addr[0] >= CEC_LOG_ADDR_SPECIFIC &&
+ las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_TV;
+}
+
+static inline bool cec_is_processor(const struct cec_log_addrs *las)
+{
+ /*
+ * It is a processor if the logical address is 12-15 and the
+ * primary device type is a Processor.
+ */
+ return las->num_log_addrs &&
+ las->log_addr[0] >= CEC_LOG_ADDR_BACKUP_1 &&
+ las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_PROCESSOR;
+}
+
+static inline bool cec_is_switch(const struct cec_log_addrs *las)
+{
+ /*
+ * It is a switch if the logical address is 15 and the
+ * primary device type is a Switch and the CDC-Only flag is not set.
+ */
+ return las->num_log_addrs == 1 &&
+ las->log_addr[0] == CEC_LOG_ADDR_UNREGISTERED &&
+ las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_SWITCH &&
+ !(las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY);
+}
+
+static inline bool cec_is_cdc_only(const struct cec_log_addrs *las)
+{
+ /*
+ * It is a CDC-only device if the logical address is 15 and the
+ * primary device type is a Switch and the CDC-Only flag is set.
+ */
+ return las->num_log_addrs == 1 &&
+ las->log_addr[0] == CEC_LOG_ADDR_UNREGISTERED &&
+ las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_SWITCH &&
+ (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY);
+}
+
+#endif
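/*
 * Illustrative sketch (not part of this patch): the helpers above can be
 * combined to tell the special device classes apart once the logical
 * addresses have been claimed. "report_role" is a hypothetical caller.
 */
static inline const char *report_role(const struct cec_log_addrs *las)
{
	if (cec_is_cdc_only(las))
		return "CDC-only device";
	if (cec_is_switch(las))
		return "pure switch";
	if (cec_is_processor(las))
		return "processor";
	if (cec_is_2nd_tv(las))
		return "second TV";
	return "regular device";
}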
diff --git a/include/uapi/linux/msm_audio.h b/include/uapi/linux/msm_audio.h
index f306949eb5e6..5e03e63cb7b6 100644
--- a/include/uapi/linux/msm_audio.h
+++ b/include/uapi/linux/msm_audio.h
@@ -460,9 +460,10 @@ struct msm_hwacc_effects_config {
__s32 topology;
};
-#define ADSP_STREAM_PP_EVENT 0
-#define ADSP_STREAM_ENCDEC_EVENT 1
-#define ADSP_STREAM_EVENT_MAX 2
+#define ADSP_STREAM_PP_EVENT 0
+#define ADSP_STREAM_ENCDEC_EVENT 1
+#define ADSP_STREAM_IEC_61937_FMT_UPDATE_EVENT 2
+#define ADSP_STREAM_EVENT_MAX 3
struct msm_adsp_event_data {
__u32 event_type;
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index 8405472d8674..421c65d8a901 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -1,3 +1,4 @@
+header-y += ais/
header-y += msm_cam_sensor.h
header-y += msm_camera.h
header-y += msm_camsensor_sdk.h
diff --git a/include/uapi/media/ais/Kbuild b/include/uapi/media/ais/Kbuild
new file mode 100644
index 000000000000..121e3a61560f
--- /dev/null
+++ b/include/uapi/media/ais/Kbuild
@@ -0,0 +1,6 @@
+header-y += msm_ais.h
+header-y += msm_ais_buf_mgr.h
+header-y += msm_ais_isp.h
+header-y += msm_ais_ispif.h
+header-y += msm_ais_sensor.h
+header-y += msm_ais_sensor_sdk.h
diff --git a/include/uapi/media/ais/msm_ais.h b/include/uapi/media/ais/msm_ais.h
new file mode 100644
index 000000000000..e3592b672035
--- /dev/null
+++ b/include/uapi/media/ais/msm_ais.h
@@ -0,0 +1,230 @@
+#ifndef __UAPI_MSM_AIS__
+#define __UAPI_MSM_AIS__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MSM_CAM_LOGSYNC_FILE_NAME "logsync"
+#define MSM_CAM_LOGSYNC_FILE_BASEDIR "camera"
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 30, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_META \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 31, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_CMD_ACK \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 32, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 33, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 34, struct msm_v4l2_event_data)
+
+#define MSM_CAM_V4L2_IOCTL_DAEMON_DISABLED \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 35, struct msm_v4l2_event_data)
+
+#define QCAMERA_DEVICE_GROUP_ID 1
+#define QCAMERA_VNODE_GROUP_ID 2
+#define MSM_CAMERA_NAME "msm_camera"
+#define MSM_CONFIGURATION_NAME "msm_config"
+
+#define MSM_CAMERA_SUBDEV_UNKNOWN 0
+#define MSM_CAMERA_SUBDEV_CSIPHY 1
+#define MSM_CAMERA_SUBDEV_CSID 2
+#define MSM_CAMERA_SUBDEV_ISPIF 3
+#define MSM_CAMERA_SUBDEV_VFE 4
+#define MSM_CAMERA_SUBDEV_AXI 5
+#define MSM_CAMERA_SUBDEV_VPE 6
+#define MSM_CAMERA_SUBDEV_SENSOR 7
+#define MSM_CAMERA_SUBDEV_ACTUATOR 8
+#define MSM_CAMERA_SUBDEV_EEPROM 9
+#define MSM_CAMERA_SUBDEV_CPP 10
+#define MSM_CAMERA_SUBDEV_CCI 11
+#define MSM_CAMERA_SUBDEV_LED_FLASH 12
+#define MSM_CAMERA_SUBDEV_STROBE_FLASH 13
+#define MSM_CAMERA_SUBDEV_BUF_MNGR 14
+#define MSM_CAMERA_SUBDEV_SENSOR_INIT 15
+#define MSM_CAMERA_SUBDEV_OIS 16
+#define MSM_CAMERA_SUBDEV_FLASH 17
+#define MSM_CAMERA_SUBDEV_IR_LED 18
+#define MSM_CAMERA_SUBDEV_IR_CUT 19
+#define MSM_CAMERA_SUBDEV_EXT 20
+
+#define MSM_MAX_CAMERA_SENSORS 5
+
+/* The macro below puts an upper limit on the maximum number of
+ * buffers requested per stream. If an extremely large buffer count
+ * is seen (for example due to data structure corruption), an error
+ * is returned to avoid integer overflow. Group processing can have
+ * at most 9 groups of 8 buffers each. This value may be made
+ * configurable in the future.
+ */
+#define MSM_CAMERA_MAX_STREAM_BUF 72
+
+/* Max batch size of processing */
+#define MSM_CAMERA_MAX_USER_BUFF_CNT 16
+
+/* feature base */
+#define MSM_CAMERA_FEATURE_BASE 0x00010000
+#define MSM_CAMERA_FEATURE_SHUTDOWN (MSM_CAMERA_FEATURE_BASE + 1)
+
+#define MSM_CAMERA_STATUS_BASE 0x00020000
+#define MSM_CAMERA_STATUS_FAIL (MSM_CAMERA_STATUS_BASE + 1)
+#define MSM_CAMERA_STATUS_SUCCESS (MSM_CAMERA_STATUS_BASE + 2)
+
+/* event type */
+#define MSM_CAMERA_V4L2_EVENT_TYPE (V4L2_EVENT_PRIVATE_START + 0x00002000)
+
+/* event id */
+#define MSM_CAMERA_EVENT_MIN 0
+#define MSM_CAMERA_NEW_SESSION (MSM_CAMERA_EVENT_MIN + 1)
+#define MSM_CAMERA_DEL_SESSION (MSM_CAMERA_EVENT_MIN + 2)
+#define MSM_CAMERA_SET_PARM (MSM_CAMERA_EVENT_MIN + 3)
+#define MSM_CAMERA_GET_PARM (MSM_CAMERA_EVENT_MIN + 4)
+#define MSM_CAMERA_MAPPING_CFG (MSM_CAMERA_EVENT_MIN + 5)
+#define MSM_CAMERA_MAPPING_SES (MSM_CAMERA_EVENT_MIN + 6)
+#define MSM_CAMERA_MSM_NOTIFY (MSM_CAMERA_EVENT_MIN + 7)
+#define MSM_CAMERA_EVENT_MAX (MSM_CAMERA_EVENT_MIN + 8)
+
+/* data.command */
+#define MSM_CAMERA_PRIV_S_CROP (V4L2_CID_PRIVATE_BASE + 1)
+#define MSM_CAMERA_PRIV_G_CROP (V4L2_CID_PRIVATE_BASE + 2)
+#define MSM_CAMERA_PRIV_G_FMT (V4L2_CID_PRIVATE_BASE + 3)
+#define MSM_CAMERA_PRIV_S_FMT (V4L2_CID_PRIVATE_BASE + 4)
+#define MSM_CAMERA_PRIV_TRY_FMT (V4L2_CID_PRIVATE_BASE + 5)
+#define MSM_CAMERA_PRIV_METADATA (V4L2_CID_PRIVATE_BASE + 6)
+#define MSM_CAMERA_PRIV_QUERY_CAP (V4L2_CID_PRIVATE_BASE + 7)
+#define MSM_CAMERA_PRIV_STREAM_ON (V4L2_CID_PRIVATE_BASE + 8)
+#define MSM_CAMERA_PRIV_STREAM_OFF (V4L2_CID_PRIVATE_BASE + 9)
+#define MSM_CAMERA_PRIV_NEW_STREAM (V4L2_CID_PRIVATE_BASE + 10)
+#define MSM_CAMERA_PRIV_DEL_STREAM (V4L2_CID_PRIVATE_BASE + 11)
+#define MSM_CAMERA_PRIV_SHUTDOWN (V4L2_CID_PRIVATE_BASE + 12)
+#define MSM_CAMERA_PRIV_STREAM_INFO_SYNC \
+ (V4L2_CID_PRIVATE_BASE + 13)
+#define MSM_CAMERA_PRIV_G_SESSION_ID (V4L2_CID_PRIVATE_BASE + 14)
+#define MSM_CAMERA_PRIV_CMD_MAX 20
+
+/* data.status - success */
+#define MSM_CAMERA_CMD_SUCCESS 0x00000001
+#define MSM_CAMERA_BUF_MAP_SUCCESS 0x00000002
+
+/* data.status - error */
+#define MSM_CAMERA_ERR_EVT_BASE 0x00010000
+#define MSM_CAMERA_ERR_CMD_FAIL (MSM_CAMERA_ERR_EVT_BASE + 1)
+#define MSM_CAMERA_ERR_MAPPING (MSM_CAMERA_ERR_EVT_BASE + 2)
+#define MSM_CAMERA_ERR_DEVICE_BUSY (MSM_CAMERA_ERR_EVT_BASE + 3)
+
+/* The msm_v4l2_event_data structure should match the
+ * v4l2_event.u.data field and must not exceed 16 elements.
+ */
+struct msm_v4l2_event_data {
+ /*word 0*/
+ unsigned int command;
+ /*word 1*/
+ unsigned int status;
+ /*word 2*/
+ unsigned int session_id;
+ /*word 3*/
+ unsigned int stream_id;
+ /*word 4*/
+ unsigned int map_op;
+ /*word 5*/
+ unsigned int map_buf_idx;
+ /*word 6*/
+ unsigned int notify;
+ /*word 7*/
+ unsigned int arg_value;
+ /*word 8*/
+ unsigned int ret_value;
+ /*word 9*/
+ unsigned int v4l2_event_type;
+ /*word 10*/
+ unsigned int v4l2_event_id;
+ /*word 11*/
+ unsigned int handle;
+ /*word 12*/
+ unsigned int nop6;
+ /*word 13*/
+ unsigned int nop7;
+ /*word 14*/
+ unsigned int nop8;
+ /*word 15*/
+ unsigned int nop9;
+};
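/*
 * Illustrative compile-time check (not part of this patch, C11 assumed):
 * the structure above is 16 unsigned ints, i.e. 64 bytes, which matches
 * the size of the v4l2_event.u.data payload it is carried in.
 */
_Static_assert(sizeof(struct msm_v4l2_event_data) == 16 * sizeof(unsigned int),
	       "msm_v4l2_event_data must fit v4l2_event.u.data (64 bytes)");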
+
+/* map to v4l2_format.fmt.raw_data */
+struct msm_v4l2_format_data {
+ enum v4l2_buf_type type;
+ unsigned int width;
+ unsigned int height;
+ unsigned int pixelformat; /* FOURCC */
+ unsigned char num_planes;
+ unsigned int plane_sizes[VIDEO_MAX_PLANES];
+};
+
+/* MSM Four-character-code (FOURCC) */
+#define msm_v4l2_fourcc(a, b, c, d)\
+ ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) |\
+ ((__u32)(d) << 24))
+
+/* Composite stats */
+#define MSM_V4L2_PIX_FMT_STATS_COMB v4l2_fourcc('S', 'T', 'C', 'M')
+/* AEC stats */
+#define MSM_V4L2_PIX_FMT_STATS_AE v4l2_fourcc('S', 'T', 'A', 'E')
+/* AF stats */
+#define MSM_V4L2_PIX_FMT_STATS_AF v4l2_fourcc('S', 'T', 'A', 'F')
+/* AWB stats */
+#define MSM_V4L2_PIX_FMT_STATS_AWB v4l2_fourcc('S', 'T', 'W', 'B')
+/* IHIST stats */
+#define MSM_V4L2_PIX_FMT_STATS_IHST v4l2_fourcc('I', 'H', 'S', 'T')
+/* Column count stats */
+#define MSM_V4L2_PIX_FMT_STATS_CS v4l2_fourcc('S', 'T', 'C', 'S')
+/* Row count stats */
+#define MSM_V4L2_PIX_FMT_STATS_RS v4l2_fourcc('S', 'T', 'R', 'S')
+/* Bayer Grid stats */
+#define MSM_V4L2_PIX_FMT_STATS_BG v4l2_fourcc('S', 'T', 'B', 'G')
+/* Bayer focus stats */
+#define MSM_V4L2_PIX_FMT_STATS_BF v4l2_fourcc('S', 'T', 'B', 'F')
+/* Bayer hist stats */
+#define MSM_V4L2_PIX_FMT_STATS_BHST v4l2_fourcc('B', 'H', 'S', 'T')
+
+enum smmu_attach_mode {
+ NON_SECURE_MODE = 0x01,
+ SECURE_MODE = 0x02,
+ MAX_PROTECTION_MODE = 0x03,
+};
+
+struct msm_camera_smmu_attach_type {
+ enum smmu_attach_mode attach;
+};
+
+struct msm_camera_user_buf_cont_t {
+ unsigned int buf_cnt;
+ unsigned int buf_idx[MSM_CAMERA_MAX_USER_BUFF_CNT];
+};
+
+struct msm_camera_return_buf {
+ __u32 index;
+ __u32 reserved;
+};
+
+#define MSM_CAMERA_PRIV_IOCTL_ID_BASE 0
+#define MSM_CAMERA_PRIV_IOCTL_ID_RETURN_BUF 1
+
+struct msm_camera_private_ioctl_arg {
+ __u32 id;
+ __u32 size;
+ __u32 result;
+ __u32 reserved;
+ __user __u64 ioctl_ptr;
+};
+
+#define VIDIOC_MSM_CAMERA_PRIVATE_IOCTL_CMD \
+ _IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_private_ioctl_arg)
+
+#endif /* __UAPI_MSM_AIS__ */
+
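/*
 * Illustrative user-space sketch (not part of this patch): returning a buffer
 * through the generic private ioctl defined above. Assumes <sys/ioctl.h> and
 * <stdint.h> are included; "fd" is a hypothetical open video node descriptor.
 */
static int return_buf_example(int fd, __u32 index)
{
	struct msm_camera_return_buf rb = { .index = index };
	struct msm_camera_private_ioctl_arg arg = {
		.id = MSM_CAMERA_PRIV_IOCTL_ID_RETURN_BUF,
		.size = sizeof(rb),
		.ioctl_ptr = (__u64)(uintptr_t)&rb,
	};

	return ioctl(fd, VIDIOC_MSM_CAMERA_PRIVATE_IOCTL_CMD, &arg);
}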
diff --git a/include/uapi/media/ais/msm_ais_buf_mgr.h b/include/uapi/media/ais/msm_ais_buf_mgr.h
new file mode 100644
index 000000000000..08383b63897c
--- /dev/null
+++ b/include/uapi/media/ais/msm_ais_buf_mgr.h
@@ -0,0 +1,66 @@
+#ifndef __UAPI_MEDIA_MSM_AIS_BUF_MGR_H__
+#define __UAPI_MEDIA_MSM_AIS_BUF_MGR_H__
+
+#include <media/ais/msm_ais.h>
+
+enum msm_camera_buf_mngr_cmd {
+ MSM_CAMERA_BUF_MNGR_CONT_MAP,
+ MSM_CAMERA_BUF_MNGR_CONT_UNMAP,
+ MSM_CAMERA_BUF_MNGR_CONT_MAX,
+};
+
+enum msm_camera_buf_mngr_buf_type {
+ MSM_CAMERA_BUF_MNGR_BUF_PLANAR,
+ MSM_CAMERA_BUF_MNGR_BUF_USER,
+ MSM_CAMERA_BUF_MNGR_BUF_INVALID,
+};
+
+struct msm_buf_mngr_info {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t frame_id;
+ struct timeval timestamp;
+ uint32_t index;
+ uint32_t reserved;
+ enum msm_camera_buf_mngr_buf_type type;
+ struct msm_camera_user_buf_cont_t user_buf;
+};
+
+struct msm_buf_mngr_main_cont_info {
+ uint32_t session_id;
+ uint32_t stream_id;
+ enum msm_camera_buf_mngr_cmd cmd;
+ uint32_t cnt;
+ int32_t cont_fd;
+};
+
+#define MSM_CAMERA_BUF_MNGR_IOCTL_ID_BASE 0
+#define MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX 1
+
+#define VIDIOC_MSM_BUF_MNGR_GET_BUF \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 33, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_PUT_BUF \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 34, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_BUF_DONE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 35, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_CONT_CMD \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 36, struct msm_buf_mngr_main_cont_info)
+
+#define VIDIOC_MSM_BUF_MNGR_INIT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 37, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_DEINIT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 38, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_FLUSH \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 39, struct msm_buf_mngr_info)
+
+#define VIDIOC_MSM_BUF_MNGR_IOCTL_CMD \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 40, \
+ struct msm_camera_private_ioctl_arg)
+
+#endif
+
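/*
 * Illustrative user-space call (not part of this patch): fetching a free
 * buffer for a session/stream pair via the buffer manager ioctls above.
 * Assumes <sys/ioctl.h> is included; "fd" is a hypothetical open
 * buffer-manager device descriptor.
 */
static int get_buf_example(int fd, uint32_t session, uint32_t stream)
{
	struct msm_buf_mngr_info info = {
		.session_id = session,
		.stream_id = stream,
	};

	return ioctl(fd, VIDIOC_MSM_BUF_MNGR_GET_BUF, &info);
}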
diff --git a/include/uapi/media/ais/msm_ais_isp.h b/include/uapi/media/ais/msm_ais_isp.h
new file mode 100644
index 000000000000..7c9daafc404d
--- /dev/null
+++ b/include/uapi/media/ais/msm_ais_isp.h
@@ -0,0 +1,1105 @@
+#ifndef __UAPI_MSM_AIS_ISP__
+#define __UAPI_MSM_AIS_ISP__
+
+#include <linux/videodev2.h>
+
+#define MAX_PLANES_PER_STREAM 3
+#define MAX_NUM_STREAM 7
+
+#define ISP_VERSION_48 48
+#define ISP_VERSION_47 47
+#define ISP_VERSION_46 46
+#define ISP_VERSION_44 44
+#define ISP_VERSION_40 40
+#define ISP_VERSION_32 32
+#define ISP_NATIVE_BUF_BIT (0x10000 << 0)
+#define ISP0_BIT (0x10000 << 1)
+#define ISP1_BIT (0x10000 << 2)
+#define ISP_META_CHANNEL_BIT (0x10000 << 3)
+#define ISP_SCRATCH_BUF_BIT (0x10000 << 4)
+#define ISP_OFFLINE_STATS_BIT (0x10000 << 5)
+#define ISP_SVHDR_IN_BIT (0x10000 << 6) /* RDI hw stream for SVHDR */
+#define ISP_SVHDR_OUT_BIT (0x10000 << 7) /* SVHDR output bufq stream */
+
+#define ISP_STATS_STREAM_BIT 0x80000000
+
+struct msm_vfe_cfg_cmd_list;
+
+enum ISP_START_PIXEL_PATTERN {
+ ISP_BAYER_RGRGRG,
+ ISP_BAYER_GRGRGR,
+ ISP_BAYER_BGBGBG,
+ ISP_BAYER_GBGBGB,
+ ISP_YUV_YCbYCr,
+ ISP_YUV_YCrYCb,
+ ISP_YUV_CbYCrY,
+ ISP_YUV_CrYCbY,
+ ISP_PIX_PATTERN_MAX
+};
+
+enum msm_vfe_plane_fmt {
+ Y_PLANE,
+ CB_PLANE,
+ CR_PLANE,
+ CRCB_PLANE,
+ CBCR_PLANE,
+ RAW_PLANE,
+ RDI_PLANE,
+ VFE_PLANE_FMT_MAX
+};
+
+enum msm_vfe_input_src {
+ VFE_PIX_0,
+ VFE_RAW_0,
+ VFE_RAW_1,
+ VFE_RAW_2,
+ VFE_SRC_MAX,
+};
+
+enum msm_vfe_axi_stream_src {
+ PIX_ENCODER,
+ PIX_VIEWFINDER,
+ PIX_VIDEO,
+ ARGB_RAW,
+ CAMIF_RAW,
+ IDEAL_RAW,
+ RDI_INTF_0,
+ RDI_INTF_1,
+ RDI_INTF_2,
+ VFE_AXI_SRC_MAX
+};
+
+enum msm_vfe_frame_skip_pattern {
+ NO_SKIP,
+ EVERY_2FRAME,
+ EVERY_3FRAME,
+ EVERY_4FRAME,
+ EVERY_5FRAME,
+ EVERY_6FRAME,
+ EVERY_7FRAME,
+ EVERY_8FRAME,
+ EVERY_16FRAME,
+ EVERY_32FRAME,
+ SKIP_ALL,
+ SKIP_RANGE,
+ MAX_SKIP,
+};
+
+/*
+ * Define an otherwise unused period value. When this period is set, the
+ * stream is stopped (i.e. the frame-drop pattern is 0). The current
+ * pattern is not tracked separately: the period alone determines it.
+ * If the period equals this value the pattern is 0, otherwise it is 1.
+ */
+#define MSM_VFE_STREAM_STOP_PERIOD 15
+
+enum msm_isp_stats_type {
+ MSM_ISP_STATS_AEC, /* legacy based AEC */
+ MSM_ISP_STATS_AF, /* legacy based AF */
+ MSM_ISP_STATS_AWB, /* legacy based AWB */
+ MSM_ISP_STATS_RS, /* legacy based RS */
+ MSM_ISP_STATS_CS, /* legacy based CS */
+ MSM_ISP_STATS_IHIST, /* legacy based HIST */
+ MSM_ISP_STATS_SKIN, /* legacy based SKIN */
+ MSM_ISP_STATS_BG, /* Bayer Grids */
+ MSM_ISP_STATS_BF, /* Bayer Focus */
+ MSM_ISP_STATS_BE, /* Bayer Exposure*/
+ MSM_ISP_STATS_BHIST, /* Bayer Hist */
+ MSM_ISP_STATS_BF_SCALE, /* Bayer Focus scale */
+ MSM_ISP_STATS_HDR_BE, /* HDR Bayer Exposure */
+ MSM_ISP_STATS_HDR_BHIST, /* HDR Bayer Hist */
+ MSM_ISP_STATS_AEC_BG, /* AEC BG */
+ MSM_ISP_STATS_MAX /* MAX */
+};
+
+/*
+ * @stats_type_mask: Stats type mask (enum msm_isp_stats_type).
+ * @stream_src_mask: Stream src mask (enum msm_vfe_axi_stream_src).
+ * @skip_mode: Skip pattern; min/max are used only when skip_mode is SKIP_RANGE.
+ * @min_frame_id: Minimum frame id (valid only if skip_mode = SKIP_RANGE).
+ * @max_frame_id: Maximum frame id (valid only if skip_mode = SKIP_RANGE).
+ */
+struct msm_isp_sw_framskip {
+ uint32_t stats_type_mask;
+ uint32_t stream_src_mask;
+ enum msm_vfe_frame_skip_pattern skip_mode;
+ uint32_t min_frame_id;
+ uint32_t max_frame_id;
+};
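/*
 * Illustrative fill-in (not part of this patch): a software skip of the
 * Bayer-Grid stats stream for frames 100-110 using the SKIP_RANGE mode
 * documented above.
 */
static const struct msm_isp_sw_framskip example_sw_skip = {
	.stats_type_mask = 1 << MSM_ISP_STATS_BG,
	.stream_src_mask = 0,
	.skip_mode = SKIP_RANGE,
	.min_frame_id = 100,
	.max_frame_id = 110,
};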
+
+enum msm_vfe_testgen_color_pattern {
+ COLOR_BAR_8_COLOR,
+ UNICOLOR_WHITE,
+ UNICOLOR_YELLOW,
+ UNICOLOR_CYAN,
+ UNICOLOR_GREEN,
+ UNICOLOR_MAGENTA,
+ UNICOLOR_RED,
+ UNICOLOR_BLUE,
+ UNICOLOR_BLACK,
+ MAX_COLOR,
+};
+
+enum msm_vfe_camif_input {
+ CAMIF_DISABLED,
+ CAMIF_PAD_REG_INPUT,
+ CAMIF_MIDDI_INPUT,
+ CAMIF_MIPI_INPUT,
+};
+
+struct msm_vfe_fetch_engine_cfg {
+ uint32_t input_format;
+ uint32_t buf_width;
+ uint32_t buf_height;
+ uint32_t fetch_width;
+ uint32_t fetch_height;
+ uint32_t x_offset;
+ uint32_t y_offset;
+ uint32_t buf_stride;
+};
+
+enum msm_vfe_camif_output_format {
+ CAMIF_QCOM_RAW,
+ CAMIF_MIPI_RAW,
+ CAMIF_PLAIN_8,
+ CAMIF_PLAIN_16,
+ CAMIF_MAX_FORMAT,
+};
+
+/*
+ * Camif output general configuration
+ */
+struct msm_vfe_camif_subsample_cfg {
+ uint32_t irq_subsample_period;
+ uint32_t irq_subsample_pattern;
+ uint32_t sof_counter_step;
+ uint32_t pixel_skip;
+ uint32_t line_skip;
+ uint32_t first_line;
+ uint32_t last_line;
+ uint32_t first_pixel;
+ uint32_t last_pixel;
+ enum msm_vfe_camif_output_format output_format;
+};
+
+/*
+ * Camif frame and window configuration
+ */
+struct msm_vfe_camif_cfg {
+ uint32_t lines_per_frame;
+ uint32_t pixels_per_line;
+ uint32_t first_pixel;
+ uint32_t last_pixel;
+ uint32_t first_line;
+ uint32_t last_line;
+ uint32_t epoch_line0;
+ uint32_t epoch_line1;
+ uint32_t is_split;
+ uint32_t vsync_edge;
+ uint32_t hsync_edge;
+ uint32_t sync_mode;
+ uint32_t vfe_subsample_en;
+ uint32_t bus_subsample_en;
+ uint32_t vfe_output_en;
+ uint32_t bus_output_en;
+ uint32_t binning_enable;
+ uint32_t irq_subsample_period;
+ uint32_t misr_en;
+ uint32_t irq_subsample_pattern;
+ uint32_t frame_based_en;
+ uint32_t frame_drop_Period;
+ uint32_t frame_drop_pattern;
+ uint32_t frame_drop_irq_en;
+ enum msm_vfe_camif_input camif_input;
+ struct msm_vfe_camif_subsample_cfg subsample_cfg;
+};
+
+struct msm_vfe_testgen_cfg {
+ uint32_t lines_per_frame;
+ uint32_t pixels_per_line;
+ uint32_t v_blank;
+ uint32_t h_blank;
+ enum ISP_START_PIXEL_PATTERN pixel_bayer_pattern;
+ uint32_t rotate_period;
+ enum msm_vfe_testgen_color_pattern color_bar_pattern;
+ uint32_t burst_num_frame;
+};
+
+enum msm_vfe_inputmux {
+ CAMIF,
+ TESTGEN,
+ EXTERNAL_READ,
+};
+
+enum msm_vfe_stats_composite_group {
+ STATS_COMPOSITE_GRP_NONE,
+ STATS_COMPOSITE_GRP_1,
+ STATS_COMPOSITE_GRP_2,
+ STATS_COMPOSITE_GRP_MAX,
+};
+
+enum msm_vfe_hvx_streaming_cmd {
+ HVX_DISABLE,
+ HVX_ONE_WAY,
+ HVX_ROUND_TRIP
+};
+
+struct msm_vfe_pix_cfg {
+ struct msm_vfe_camif_cfg camif_cfg;
+ struct msm_vfe_testgen_cfg testgen_cfg;
+ struct msm_vfe_fetch_engine_cfg fetch_engine_cfg;
+ enum msm_vfe_inputmux input_mux;
+ enum ISP_START_PIXEL_PATTERN pixel_pattern;
+ uint32_t input_format;
+ enum msm_vfe_hvx_streaming_cmd hvx_cmd;
+ uint32_t is_split;
+};
+
+struct msm_vfe_rdi_cfg {
+ uint8_t cid;
+ uint8_t frame_based;
+};
+
+struct msm_vfe_input_cfg {
+ union {
+ struct msm_vfe_pix_cfg pix_cfg;
+ struct msm_vfe_rdi_cfg rdi_cfg;
+ } d;
+ enum msm_vfe_input_src input_src;
+ uint32_t input_pix_clk;
+};
+
+struct msm_vfe_fetch_eng_start {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t buf_idx;
+ uint8_t offline_mode;
+ uint32_t fd;
+ uint32_t buf_addr;
+ uint32_t frame_id;
+};
+
+enum msm_vfe_fetch_eng_pass {
+ OFFLINE_FIRST_PASS,
+ OFFLINE_SECOND_PASS,
+ OFFLINE_MAX_PASS,
+};
+
+struct msm_vfe_fetch_eng_multi_pass_start {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t buf_idx;
+ uint8_t offline_mode;
+ uint32_t fd;
+ uint32_t buf_addr;
+ uint32_t frame_id;
+ uint32_t output_buf_idx;
+ uint32_t input_buf_offset;
+ enum msm_vfe_fetch_eng_pass offline_pass;
+ uint32_t output_stream_id;
+};
+
+struct msm_vfe_axi_plane_cfg {
+ uint32_t output_width; /*Include padding*/
+ uint32_t output_height;
+ uint32_t output_stride;
+ uint32_t output_scan_lines;
+ uint32_t output_plane_format; /*Y/Cb/Cr/CbCr*/
+ uint32_t plane_addr_offset;
+ uint8_t csid_src; /*RDI 0-2*/
+ uint8_t rdi_cid;/*CID 1-16*/
+};
+
+enum msm_stream_memory_input_t {
+ MEMORY_INPUT_DISABLED,
+ MEMORY_INPUT_ENABLED
+};
+
+struct msm_vfe_axi_stream_request_cmd {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t vt_enable;
+ uint32_t output_format;/*Planar/RAW/Misc*/
+ enum msm_vfe_axi_stream_src stream_src; /*CAMIF/IDEAL/RDIs*/
+ struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+
+ uint32_t burst_count;
+ uint32_t hfr_mode;
+ uint8_t frame_base;
+
+ uint32_t init_frame_drop; /*MAX 31 Frames*/
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+ uint8_t buf_divert; /* if TRUE no vb2 buf done. */
+ /*Return values*/
+ uint32_t axi_stream_handle;
+ uint32_t controllable_output;
+ uint32_t burst_len;
+ /* Flag indicating memory input stream */
+ enum msm_stream_memory_input_t memory_input;
+};
+
+struct msm_vfe_axi_stream_release_cmd {
+ uint32_t stream_handle;
+};
+
+enum msm_vfe_axi_stream_cmd {
+ STOP_STREAM,
+ START_STREAM,
+ STOP_IMMEDIATELY,
+};
+
+enum msm_vfe_hw_state {
+ HW_STATE_NONE,
+ HW_STATE_SLEEP,
+ HW_STATE_AWAKE,
+};
+
+struct msm_vfe_axi_stream_cfg_cmd {
+ uint8_t num_streams;
+ uint32_t stream_handle[VFE_AXI_SRC_MAX];
+ enum msm_vfe_axi_stream_cmd cmd;
+ uint8_t sync_frame_id_src;
+ enum msm_vfe_hw_state hw_state;
+};
+
+enum msm_vfe_axi_stream_update_type {
+ ENABLE_STREAM_BUF_DIVERT,
+ DISABLE_STREAM_BUF_DIVERT,
+ UPDATE_STREAM_FRAMEDROP_PATTERN,
+ UPDATE_STREAM_STATS_FRAMEDROP_PATTERN,
+ UPDATE_STREAM_AXI_CONFIG,
+ UPDATE_STREAM_REQUEST_FRAMES,
+ UPDATE_STREAM_ADD_BUFQ,
+ UPDATE_STREAM_REMOVE_BUFQ,
+ UPDATE_STREAM_SW_FRAME_DROP,
+ UPDATE_STREAM_REQUEST_FRAMES_VER2,
+ UPDATE_STREAM_OFFLINE_AXI_CONFIG,
+};
+#define UPDATE_STREAM_REQUEST_FRAMES_VER2 UPDATE_STREAM_REQUEST_FRAMES_VER2
+
+enum msm_vfe_iommu_type {
+ IOMMU_ATTACH,
+ IOMMU_DETACH,
+};
+
+enum msm_vfe_buff_queue_id {
+ VFE_BUF_QUEUE_DEFAULT,
+ VFE_BUF_QUEUE_SHARED,
+ VFE_BUF_QUEUE_MAX,
+};
+
+struct msm_vfe_axi_stream_cfg_update_info {
+ uint32_t stream_handle;
+ uint32_t output_format;
+ uint32_t user_stream_id;
+ uint32_t frame_id;
+ enum msm_vfe_frame_skip_pattern skip_pattern;
+ struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+ struct msm_isp_sw_framskip sw_skip_info;
+};
+
+struct msm_vfe_axi_stream_cfg_update_info_req_frm {
+ uint32_t stream_handle;
+ uint32_t user_stream_id;
+ uint32_t frame_id;
+ uint32_t buf_index;
+};
+
+struct msm_vfe_axi_halt_cmd {
+ uint32_t stop_camif;
+ uint32_t overflow_detected;
+ uint32_t blocking_halt;
+};
+
+struct msm_vfe_axi_reset_cmd {
+ uint32_t blocking;
+ uint32_t frame_id;
+};
+
+struct msm_vfe_axi_restart_cmd {
+ uint32_t enable_camif;
+};
+
+struct msm_vfe_axi_stream_update_cmd {
+ uint32_t num_streams;
+ enum msm_vfe_axi_stream_update_type update_type;
+ /*
+ * For backward compatibility, ensure 1st member of any struct
+ * in union below is uint32_t stream_handle.
+ */
+ union {
+ struct msm_vfe_axi_stream_cfg_update_info
+ update_info[MSM_ISP_STATS_MAX];
+ struct msm_vfe_axi_stream_cfg_update_info_req_frm req_frm_ver2;
+ };
+};
+
+struct msm_vfe_smmu_attach_cmd {
+ uint32_t security_mode;
+ uint32_t iommu_attach_mode;
+};
+
+struct msm_vfe_stats_stream_request_cmd {
+ uint32_t session_id;
+ uint32_t stream_id;
+ enum msm_isp_stats_type stats_type;
+ uint32_t composite_flag;
+ uint32_t framedrop_pattern;
+ uint32_t init_frame_drop; /*MAX 31 Frames*/
+ uint32_t irq_subsample_pattern;
+ uint32_t buffer_offset;
+ uint32_t stream_handle;
+};
+
+struct msm_vfe_stats_stream_release_cmd {
+ uint32_t stream_handle;
+};
+struct msm_vfe_stats_stream_cfg_cmd {
+ uint8_t num_streams;
+ uint32_t stream_handle[MSM_ISP_STATS_MAX];
+ uint8_t enable;
+ uint32_t stats_burst_len;
+};
+
+enum msm_vfe_reg_cfg_type {
+ VFE_WRITE,
+ VFE_WRITE_MB,
+ VFE_READ,
+ VFE_CFG_MASK,
+ VFE_WRITE_DMI_16BIT,
+ VFE_WRITE_DMI_32BIT,
+ VFE_WRITE_DMI_64BIT,
+ VFE_READ_DMI_16BIT,
+ VFE_READ_DMI_32BIT,
+ VFE_READ_DMI_64BIT,
+ GET_MAX_CLK_RATE,
+ GET_CLK_RATES,
+ GET_ISP_ID,
+ VFE_HW_UPDATE_LOCK,
+ VFE_HW_UPDATE_UNLOCK,
+ SET_WM_UB_SIZE,
+ SET_UB_POLICY,
+};
+
+struct msm_vfe_cfg_cmd2 {
+ uint16_t num_cfg;
+ uint16_t cmd_len;
+ void __user *cfg_data;
+ void __user *cfg_cmd;
+};
+
+struct msm_vfe_cfg_cmd_list {
+ struct msm_vfe_cfg_cmd2 cfg_cmd;
+ struct msm_vfe_cfg_cmd_list *next;
+ uint32_t next_size;
+};
+
+struct msm_vfe_reg_rw_info {
+ uint32_t reg_offset;
+ uint32_t cmd_data_offset;
+ uint32_t len;
+};
+
+struct msm_vfe_reg_mask_info {
+ uint32_t reg_offset;
+ uint32_t mask;
+ uint32_t val;
+};
+
+struct msm_vfe_reg_dmi_info {
+ uint32_t hi_tbl_offset; /*Optional*/
+ uint32_t lo_tbl_offset; /*Required*/
+ uint32_t len;
+};
+
+struct msm_vfe_reg_cfg_cmd {
+ union {
+ struct msm_vfe_reg_rw_info rw_info;
+ struct msm_vfe_reg_mask_info mask_info;
+ struct msm_vfe_reg_dmi_info dmi_info;
+ } u;
+
+ enum msm_vfe_reg_cfg_type cmd_type;
+};
+
+enum vfe_sd_type {
+ VFE_SD_0 = 0,
+ VFE_SD_1,
+ VFE_SD_COMMON,
+ VFE_SD_MAX,
+};
+
+/* When changing the value below, re-check the SOF event_data size:
+ * V4L2 limits the event payload to 64 bytes.
+ */
+#define MS_NUM_SLAVE_MAX 1
+
+/* Usecases when 2 HW need to be related or synced */
+enum msm_vfe_dual_hw_type {
+ DUAL_NONE = 0,
+ DUAL_HW_VFE_SPLIT = 1,
+ DUAL_HW_MASTER_SLAVE = 2,
+};
+
+/* Type for 2 INTF when used in Master-Slave mode */
+enum msm_vfe_dual_hw_ms_type {
+ MS_TYPE_NONE,
+ MS_TYPE_MASTER,
+ MS_TYPE_SLAVE,
+};
+
+struct msm_isp_set_dual_hw_ms_cmd {
+ uint8_t num_src;
+ /* Each session can be only one type, but may use multiple intfs (e.g. a YUV camera) */
+ enum msm_vfe_dual_hw_ms_type dual_hw_ms_type;
+ /* The primary intf is usually associated with preview.
+ * The SOF frame_id and timestamp of this primary intf are tracked
+ * and used to calculate the delta.
+ */
+ enum msm_vfe_input_src primary_intf;
+ /* The input_src array lists the other input INTFs that may act as
+ * Master/Slave. For these additional intfs, frame_id and timestamp
+ * are not saved; if they are slaves, they still obtain their
+ * frame_id from the Master.
+ */
+ enum msm_vfe_input_src input_src[VFE_SRC_MAX];
+ uint32_t sof_delta_threshold; /* In milliseconds. Sent for Master */
+};
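/*
 * Illustrative fill-in (not part of this patch): configure one VFE as the
 * Master with PIX as the primary interface and a 5 ms SOF delta threshold.
 */
static const struct msm_isp_set_dual_hw_ms_cmd example_ms_cmd = {
	.num_src = 1,
	.dual_hw_ms_type = MS_TYPE_MASTER,
	.primary_intf = VFE_PIX_0,
	.input_src = { VFE_PIX_0 },
	.sof_delta_threshold = 5,
};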
+
+enum msm_isp_buf_type {
+ ISP_PRIVATE_BUF,
+ ISP_SHARE_BUF,
+ MAX_ISP_BUF_TYPE,
+};
+
+struct msm_isp_unmap_buf_req {
+ uint32_t fd;
+};
+
+struct msm_isp_buf_request {
+ uint32_t vfe_id;
+ enum msm_vfe_axi_stream_src output_id;
+ uint32_t flags;
+ uint8_t num_buf;
+ uint32_t handle;
+ enum msm_isp_buf_type buf_type;
+};
+
+struct msm_isp_qbuf_plane {
+ uint32_t addr;
+ uint32_t offset;
+ uint32_t length;
+};
+
+struct msm_isp_qbuf_buffer {
+ struct msm_isp_qbuf_plane planes[MAX_PLANES_PER_STREAM];
+ uint32_t num_planes;
+};
+
+struct msm_isp_qbuf_info {
+ uint32_t handle;
+ int32_t buf_idx;
+ /*Only used for prepare buffer*/
+ struct msm_isp_qbuf_buffer buffer;
+ /*Only used for diverted buffer*/
+ uint32_t dirty_buf;
+};
+
+struct msm_isp_clk_rates {
+ uint32_t svs_rate;
+ uint32_t nominal_rate;
+ uint32_t high_rate;
+};
+
+struct msm_vfe_axi_src_state {
+ enum msm_vfe_input_src input_src;
+ uint32_t src_active;
+ uint32_t src_frame_id;
+};
+
+enum msm_isp_event_mask_index {
+ ISP_EVENT_MASK_INDEX_STATS_NOTIFY = 0,
+ ISP_EVENT_MASK_INDEX_ERROR = 1,
+ ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT = 2,
+ ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE = 3,
+ ISP_EVENT_MASK_INDEX_REG_UPDATE = 4,
+ ISP_EVENT_MASK_INDEX_SOF = 5,
+ ISP_EVENT_MASK_INDEX_BUF_DIVERT = 6,
+ ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY = 7,
+ ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE = 8,
+ ISP_EVENT_MASK_INDEX_BUF_DONE = 9,
+ ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING = 10,
+ ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH = 11,
+ ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR = 12,
+};
+
+
+#define ISP_EVENT_SUBS_MASK_NONE 0
+
+#define ISP_EVENT_SUBS_MASK_STATS_NOTIFY \
+ (1 << ISP_EVENT_MASK_INDEX_STATS_NOTIFY)
+
+#define ISP_EVENT_SUBS_MASK_ERROR \
+ (1 << ISP_EVENT_MASK_INDEX_ERROR)
+
+#define ISP_EVENT_SUBS_MASK_IOMMU_P_FAULT \
+ (1 << ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT)
+
+#define ISP_EVENT_SUBS_MASK_STREAM_UPDATE_DONE \
+ (1 << ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE)
+
+#define ISP_EVENT_SUBS_MASK_REG_UPDATE \
+ (1 << ISP_EVENT_MASK_INDEX_REG_UPDATE)
+
+#define ISP_EVENT_SUBS_MASK_SOF \
+ (1 << ISP_EVENT_MASK_INDEX_SOF)
+
+#define ISP_EVENT_SUBS_MASK_BUF_DIVERT \
+ (1 << ISP_EVENT_MASK_INDEX_BUF_DIVERT)
+
+#define ISP_EVENT_SUBS_MASK_COMP_STATS_NOTIFY \
+ (1 << ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY)
+
+#define ISP_EVENT_SUBS_MASK_FE_READ_DONE \
+ (1 << ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE)
+
+#define ISP_EVENT_SUBS_MASK_BUF_DONE \
+ (1 << ISP_EVENT_MASK_INDEX_BUF_DONE)
+
+#define ISP_EVENT_SUBS_MASK_REG_UPDATE_MISSING \
+ (1 << ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING)
+
+#define ISP_EVENT_SUBS_MASK_PING_PONG_MISMATCH \
+ (1 << ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH)
+
+#define ISP_EVENT_SUBS_MASK_BUF_FATAL_ERROR \
+ (1 << ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR)
+
+enum msm_isp_event_idx {
+ ISP_REG_UPDATE = 0,
+ ISP_EPOCH_0 = 1,
+ ISP_EPOCH_1 = 2,
+ ISP_START_ACK = 3,
+ ISP_STOP_ACK = 4,
+ ISP_IRQ_VIOLATION = 5,
+ ISP_STATS_OVERFLOW = 6,
+ ISP_BUF_DONE = 7,
+ ISP_FE_RD_DONE = 8,
+ ISP_IOMMU_P_FAULT = 9,
+ ISP_ERROR = 10,
+ ISP_HW_FATAL_ERROR = 11,
+ ISP_PING_PONG_MISMATCH = 12,
+ ISP_REG_UPDATE_MISSING = 13,
+ ISP_BUF_FATAL_ERROR = 14,
+ ISP_EVENT_MAX = 15
+};
+
+#define ISP_EVENT_OFFSET 8
+#define ISP_EVENT_BASE (V4L2_EVENT_PRIVATE_START)
+#define ISP_BUF_EVENT_BASE (ISP_EVENT_BASE + (1 << ISP_EVENT_OFFSET))
+#define ISP_STATS_EVENT_BASE (ISP_EVENT_BASE + (2 << ISP_EVENT_OFFSET))
+#define ISP_CAMIF_EVENT_BASE (ISP_EVENT_BASE + (3 << ISP_EVENT_OFFSET))
+#define ISP_STREAM_EVENT_BASE (ISP_EVENT_BASE + (4 << ISP_EVENT_OFFSET))
+#define ISP_EVENT_REG_UPDATE (ISP_EVENT_BASE + ISP_REG_UPDATE)
+#define ISP_EVENT_EPOCH_0 (ISP_EVENT_BASE + ISP_EPOCH_0)
+#define ISP_EVENT_EPOCH_1 (ISP_EVENT_BASE + ISP_EPOCH_1)
+#define ISP_EVENT_START_ACK (ISP_EVENT_BASE + ISP_START_ACK)
+#define ISP_EVENT_STOP_ACK (ISP_EVENT_BASE + ISP_STOP_ACK)
+#define ISP_EVENT_IRQ_VIOLATION (ISP_EVENT_BASE + ISP_IRQ_VIOLATION)
+#define ISP_EVENT_STATS_OVERFLOW (ISP_EVENT_BASE + ISP_STATS_OVERFLOW)
+#define ISP_EVENT_ERROR (ISP_EVENT_BASE + ISP_ERROR)
+#define ISP_EVENT_SOF (ISP_CAMIF_EVENT_BASE)
+#define ISP_EVENT_EOF (ISP_CAMIF_EVENT_BASE + 1)
+#define ISP_EVENT_BUF_DONE (ISP_EVENT_BASE + ISP_BUF_DONE)
+#define ISP_EVENT_BUF_DIVERT (ISP_BUF_EVENT_BASE)
+#define ISP_EVENT_STATS_NOTIFY (ISP_STATS_EVENT_BASE)
+#define ISP_EVENT_COMP_STATS_NOTIFY (ISP_EVENT_STATS_NOTIFY + MSM_ISP_STATS_MAX)
+#define ISP_EVENT_FE_READ_DONE (ISP_EVENT_BASE + ISP_FE_RD_DONE)
+#define ISP_EVENT_IOMMU_P_FAULT (ISP_EVENT_BASE + ISP_IOMMU_P_FAULT)
+#define ISP_EVENT_HW_FATAL_ERROR (ISP_EVENT_BASE + ISP_HW_FATAL_ERROR)
+#define ISP_EVENT_PING_PONG_MISMATCH (ISP_EVENT_BASE + ISP_PING_PONG_MISMATCH)
+#define ISP_EVENT_REG_UPDATE_MISSING (ISP_EVENT_BASE + ISP_REG_UPDATE_MISSING)
+#define ISP_EVENT_BUF_FATAL_ERROR (ISP_EVENT_BASE + ISP_BUF_FATAL_ERROR)
+#define ISP_EVENT_STREAM_UPDATE_DONE (ISP_STREAM_EVENT_BASE)
+
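/*
 * Illustrative user-space sketch (not part of this patch): per-stats-type
 * notify events are numbered ISP_EVENT_STATS_NOTIFY + stats type, so a
 * listener would presumably subscribe to the Bayer-Grid notify like this.
 * Assumes <sys/ioctl.h> is included; "fd" is a hypothetical open VFE node.
 */
static int subscribe_bg_stats_example(int fd)
{
	struct v4l2_event_subscription sub = {
		.type = ISP_EVENT_STATS_NOTIFY + MSM_ISP_STATS_BG,
	};

	return ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
}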
+/* The msm_v4l2_event_data structure should match the
+ * v4l2_event.u.data field and should not exceed 64 bytes.
+ */
+
+struct msm_isp_buf_event {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t handle;
+ uint32_t output_format;
+ int8_t buf_idx;
+};
+struct msm_isp_fetch_eng_event {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t handle;
+ uint32_t fd;
+ int8_t buf_idx;
+ int8_t offline_mode;
+};
+struct msm_isp_stats_event {
+ uint32_t stats_mask; /* 4 bytes */
+ uint8_t stats_buf_idxs[MSM_ISP_STATS_MAX]; /* 15 bytes */
+};
+
+struct msm_isp_stream_ack {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t handle;
+};
+
+enum msm_vfe_error_type {
+ ISP_ERROR_NONE,
+ ISP_ERROR_CAMIF,
+ ISP_ERROR_BUS_OVERFLOW,
+ ISP_ERROR_RETURN_EMPTY_BUFFER,
+ ISP_ERROR_FRAME_ID_MISMATCH,
+ ISP_ERROR_MAX,
+};
+
+struct msm_isp_error_info {
+ enum msm_vfe_error_type err_type;
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t stream_id_mask;
+};
+
+/* This structure reports delta between master and slave */
+struct msm_isp_ms_delta_info {
+ uint8_t num_delta_info;
+ uint32_t delta[MS_NUM_SLAVE_MAX];
+};
+
+/* This is sent in EPOCH irq */
+struct msm_isp_output_info {
+ uint8_t regs_not_updated;
+ /* mask with bufq_handle for regs not updated or return empty */
+ uint16_t output_err_mask;
+ /* mask with stream_idx for get_buf failed */
+ uint8_t stream_framedrop_mask;
+ /* mask with stats stream_idx for get_buf failed */
+ uint16_t stats_framedrop_mask;
+ /* delta between master and slave */
+};
+
+/* This structure is piggybacked with SOF event */
+struct msm_isp_sof_info {
+ uint8_t regs_not_updated;
+ /* mask with bufq_handle for regs not updated */
+ uint16_t reg_update_fail_mask;
+ /* mask with bufq_handle for get_buf failed */
+ uint32_t stream_get_buf_fail_mask;
+ /* mask with stats stream_idx for get_buf failed */
+ uint16_t stats_get_buf_fail_mask;
+ /* delta between master and slave */
+ struct msm_isp_ms_delta_info ms_delta_info;
+ /*
+ * Mask of AXI_SRCs in the paused state. In the PAUSED state there
+ * is no buffer output, so this mask is used to report the drop.
+ */
+ uint16_t axi_updating_mask;
+ /* extended mask with bufq_handle for regs not updated */
+ uint32_t reg_update_fail_mask_ext;
+};
+#define AXI_UPDATING_MASK 1
+#define REG_UPDATE_FAIL_MASK_EXT 1
+
+struct msm_isp_event_data {
+ /* Wall clock, except for buffer divert events
+ * which use the monotonic clock.
+ */
+ struct timeval timestamp;
+ /* Monotonic timestamp since bootup */
+ struct timeval mono_timestamp;
+ uint32_t frame_id;
+ union {
+ /* Sent for Stats_Done event */
+ struct msm_isp_stats_event stats;
+ /* Sent for Buf_Divert event */
+ struct msm_isp_buf_event buf_done;
+ /* Sent for offline fetch done event */
+ struct msm_isp_fetch_eng_event fetch_done;
+ /* Sent for Error_Event */
+ struct msm_isp_error_info error_info;
+ /*
+ * This struct needs to be removed once
+ * userspace switches to sof_info
+ */
+ struct msm_isp_output_info output_info;
+ /* Sent for SOF event */
+ struct msm_isp_sof_info sof_info;
+ } u; /* union can have max 52 bytes */
+};
+
+enum msm_vfe_ahb_clk_vote {
+ MSM_ISP_CAMERA_AHB_SVS_VOTE = 1,
+ MSM_ISP_CAMERA_AHB_TURBO_VOTE = 2,
+ MSM_ISP_CAMERA_AHB_NOMINAL_VOTE = 3,
+ MSM_ISP_CAMERA_AHB_SUSPEND_VOTE = 4,
+};
+
+struct msm_isp_ahb_clk_cfg {
+ uint32_t vote;
+ uint32_t reserved[2];
+};
+
+#define V4L2_PIX_FMT_QBGGR8 v4l2_fourcc('Q', 'B', 'G', '8')
+#define V4L2_PIX_FMT_QGBRG8 v4l2_fourcc('Q', 'G', 'B', '8')
+#define V4L2_PIX_FMT_QGRBG8 v4l2_fourcc('Q', 'G', 'R', '8')
+#define V4L2_PIX_FMT_QRGGB8 v4l2_fourcc('Q', 'R', 'G', '8')
+#define V4L2_PIX_FMT_QBGGR10 v4l2_fourcc('Q', 'B', 'G', '0')
+#define V4L2_PIX_FMT_QGBRG10 v4l2_fourcc('Q', 'G', 'B', '0')
+#define V4L2_PIX_FMT_QGRBG10 v4l2_fourcc('Q', 'G', 'R', '0')
+#define V4L2_PIX_FMT_QRGGB10 v4l2_fourcc('Q', 'R', 'G', '0')
+#define V4L2_PIX_FMT_QBGGR12 v4l2_fourcc('Q', 'B', 'G', '2')
+#define V4L2_PIX_FMT_QGBRG12 v4l2_fourcc('Q', 'G', 'B', '2')
+#define V4L2_PIX_FMT_QGRBG12 v4l2_fourcc('Q', 'G', 'R', '2')
+#define V4L2_PIX_FMT_QRGGB12 v4l2_fourcc('Q', 'R', 'G', '2')
+#define V4L2_PIX_FMT_QBGGR14 v4l2_fourcc('Q', 'B', 'G', '4')
+#define V4L2_PIX_FMT_QGBRG14 v4l2_fourcc('Q', 'G', 'B', '4')
+#define V4L2_PIX_FMT_QGRBG14 v4l2_fourcc('Q', 'G', 'R', '4')
+#define V4L2_PIX_FMT_QRGGB14 v4l2_fourcc('Q', 'R', 'G', '4')
+#define V4L2_PIX_FMT_P16BGGR10 v4l2_fourcc('P', 'B', 'G', '0')
+#define V4L2_PIX_FMT_P16GBRG10 v4l2_fourcc('P', 'G', 'B', '0')
+#define V4L2_PIX_FMT_P16GRBG10 v4l2_fourcc('P', 'G', 'R', '0')
+#define V4L2_PIX_FMT_P16RGGB10 v4l2_fourcc('P', 'R', 'G', '0')
+#define V4L2_PIX_FMT_NV14 v4l2_fourcc('N', 'V', '1', '4')
+#define V4L2_PIX_FMT_NV41 v4l2_fourcc('N', 'V', '4', '1')
+#define V4L2_PIX_FMT_META v4l2_fourcc('Q', 'M', 'E', 'T')
+#define V4L2_PIX_FMT_META10 v4l2_fourcc('Q', 'M', '1', '0')
+#define V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4') /* 14 BGBG.GRGR.*/
+#define V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4') /* 14 GBGB.RGRG.*/
+#define V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('B', 'A', '1', '4') /* 14 GRGR.BGBG.*/
+#define V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4') /* 14 RGRG.GBGB.*/
+
+
+enum msm_vfe_pixel_data_size {
+ VFE_PIXEL_DATA_SIZE_8BIT,
+ VFE_PIXEL_DATA_SIZE_10BIT,
+ VFE_PIXEL_DATA_SIZE_12BIT,
+ VFE_PIXEL_DATA_SIZE_14BIT,
+};
+
+struct msm_vfe_operation_cfg {
+ enum msm_vfe_camif_input camif_input;
+ enum msm_vfe_pixel_data_size dataSize;
+ enum msm_vfe_inputmux input_mux;
+ enum ISP_START_PIXEL_PATTERN pixel_pattern;
+ enum msm_vfe_hvx_streaming_cmd hvx_cmd;
+ uint8_t yuv_cosited;
+};
+
+struct msm_vfe_axi_output_plane_cfg {
+ uint8_t wmIndex;
+ enum msm_vfe_plane_fmt plane_fmt;
+ uint32_t image_qwords_per_line;
+ uint32_t image_height;
+ uint32_t output_stride;
+ uint32_t output_scan_lines;
+ uint32_t output_plane_format;
+ uint32_t frame_increment;
+};
+
+struct msm_vfe_axi_output_path_cfg {
+ uint8_t enable;
+
+ uint32_t format;
+ uint8_t raw_data_size;
+ uint32_t burst_count;
+ struct msm_vfe_axi_output_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+
+ uint8_t frame_based;
+ uint32_t frame_group;
+ uint32_t frame_interval;
+
+ uint8_t framedrop_period;
+ uint32_t framedrop_pattern;
+
+ uint8_t rdi_cid;
+ uint8_t rdi_frameskip_en;
+ uint32_t rdi_frameskip_pattern;
+};
+
+struct msm_vfe_axi_output_cfg {
+ struct msm_vfe_axi_output_path_cfg output_path_cfg[VFE_AXI_SRC_MAX];
+};
+
+
+enum msm_isp_ioctl_cmd_code {
+ MSM_VFE_REG_CFG = BASE_VIDIOC_PRIVATE,
+
+ MSM_ISP_REQUEST_BUFQ,
+ MSM_ISP_RELEASE_BUFQ,
+ MSM_ISP_ENQUEUE_BUF,
+ MSM_ISP_DEQUEUE_BUF,
+
+ MSM_ISP_REQUEST_STREAM,
+ MSM_ISP_CFG_STREAM,
+ MSM_ISP_RELEASE_STREAM,
+ MSM_ISP_INPUT_CFG,
+ MSM_ISP_SET_SRC_STATE,
+ MSM_ISP_REQUEST_STATS_STREAM,
+ MSM_ISP_CFG_STATS_STREAM,
+ MSM_ISP_RELEASE_STATS_STREAM,
+ MSM_ISP_REG_UPDATE_CMD,
+ MSM_ISP_UPDATE_STREAM,
+ MSM_VFE_REG_LIST_CFG,
+
+ MSM_ISP_UPDATE_STATS_STREAM,
+ MSM_ISP_AXI_HALT,
+ MSM_ISP_AXI_RESET,
+ MSM_ISP_AXI_RESTART,
+ MSM_ISP_FETCH_ENG_START,
+ MSM_ISP_SET_DUAL_HW_MASTER_SLAVE,
+ MSM_ISP_MAP_BUF_START_FE,
+ MSM_ISP_FETCH_ENG_MULTI_PASS_START,
+ MSM_ISP_MAP_BUF_START_MULTI_PASS_FE,
+ MSM_ISP_CFG_HW_STATE,
+
+ MSM_ISP_SMMU_ATTACH,
+ MSM_ISP_UNMAP_BUF,
+
+ MSM_ISP_OPERATION_CFG,
+ MSM_ISP_CAMIF_CFG,
+ MSM_ISP_AXI_OUTPUT_CFG,
+ MSM_ISP_START,
+ MSM_ISP_STOP,
+};
+
+
+#define VIDIOC_MSM_VFE_REG_CFG \
+ _IOWR('V', MSM_VFE_REG_CFG, \
+ struct msm_vfe_cfg_cmd2)
+
+#define VIDIOC_MSM_ISP_REQUEST_BUFQ \
+ _IOWR('V', MSM_ISP_REQUEST_BUFQ, \
+ struct msm_isp_buf_request)
+
+#define VIDIOC_MSM_ISP_RELEASE_BUFQ \
+ _IOWR('V', MSM_ISP_RELEASE_BUFQ, \
+ struct msm_isp_buf_request)
+
+#define VIDIOC_MSM_ISP_ENQUEUE_BUF \
+ _IOWR('V', MSM_ISP_ENQUEUE_BUF, \
+ struct msm_isp_qbuf_info)
+
+#define VIDIOC_MSM_ISP_DEQUEUE_BUF \
+ _IOWR('V', MSM_ISP_DEQUEUE_BUF, \
+ struct msm_isp_qbuf_info)
+
+#define VIDIOC_MSM_ISP_REQUEST_STREAM \
+ _IOWR('V', MSM_ISP_REQUEST_STREAM, \
+ struct msm_vfe_axi_stream_request_cmd)
+
+#define VIDIOC_MSM_ISP_CFG_STREAM \
+ _IOWR('V', MSM_ISP_CFG_STREAM, \
+ struct msm_vfe_axi_stream_cfg_cmd)
+
+#define VIDIOC_MSM_ISP_RELEASE_STREAM \
+ _IOWR('V', MSM_ISP_RELEASE_STREAM, \
+ struct msm_vfe_axi_stream_release_cmd)
+
+#define VIDIOC_MSM_ISP_INPUT_CFG \
+ _IOWR('V', MSM_ISP_INPUT_CFG, \
+ struct msm_vfe_input_cfg)
+
+#define VIDIOC_MSM_ISP_SET_SRC_STATE \
+ _IOWR('V', MSM_ISP_SET_SRC_STATE, \
+ struct msm_vfe_axi_src_state)
+
+#define VIDIOC_MSM_ISP_REQUEST_STATS_STREAM \
+ _IOWR('V', MSM_ISP_REQUEST_STATS_STREAM, \
+ struct msm_vfe_stats_stream_request_cmd)
+
+#define VIDIOC_MSM_ISP_CFG_STATS_STREAM \
+ _IOWR('V', MSM_ISP_CFG_STATS_STREAM, \
+ struct msm_vfe_stats_stream_cfg_cmd)
+
+#define VIDIOC_MSM_ISP_RELEASE_STATS_STREAM \
+ _IOWR('V', MSM_ISP_RELEASE_STATS_STREAM, \
+ struct msm_vfe_stats_stream_release_cmd)
+
+#define VIDIOC_MSM_ISP_REG_UPDATE_CMD \
+ _IOWR('V', MSM_ISP_REG_UPDATE_CMD, \
+ enum msm_vfe_input_src)
+
+#define VIDIOC_MSM_ISP_UPDATE_STREAM \
+ _IOWR('V', MSM_ISP_UPDATE_STREAM, \
+ struct msm_vfe_axi_stream_update_cmd)
+
+#define VIDIOC_MSM_VFE_REG_LIST_CFG \
+ _IOWR('V', MSM_VFE_REG_LIST_CFG, \
+ struct msm_vfe_cfg_cmd_list)
+
+#define VIDIOC_MSM_ISP_SMMU_ATTACH \
+ _IOWR('V', MSM_ISP_SMMU_ATTACH, \
+ struct msm_vfe_smmu_attach_cmd)
+
+#define VIDIOC_MSM_ISP_UPDATE_STATS_STREAM \
+ _IOWR('V', MSM_ISP_UPDATE_STATS_STREAM, \
+ struct msm_vfe_axi_stream_update_cmd)
+
+#define VIDIOC_MSM_ISP_AXI_HALT \
+ _IOWR('V', MSM_ISP_AXI_HALT, \
+ struct msm_vfe_axi_halt_cmd)
+
+#define VIDIOC_MSM_ISP_AXI_RESET \
+ _IOWR('V', MSM_ISP_AXI_RESET, \
+ struct msm_vfe_axi_reset_cmd)
+
+#define VIDIOC_MSM_ISP_AXI_RESTART \
+ _IOWR('V', MSM_ISP_AXI_RESTART, \
+ struct msm_vfe_axi_restart_cmd)
+
+#define VIDIOC_MSM_ISP_FETCH_ENG_START \
+ _IOWR('V', MSM_ISP_FETCH_ENG_START, \
+ struct msm_vfe_fetch_eng_start)
+
+#define VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE \
+ _IOWR('V', MSM_ISP_SET_DUAL_HW_MASTER_SLAVE, \
+ struct msm_isp_set_dual_hw_ms_cmd)
+
+#define VIDIOC_MSM_ISP_MAP_BUF_START_FE \
+ _IOWR('V', MSM_ISP_MAP_BUF_START_FE, \
+ struct msm_vfe_fetch_eng_start)
+
+#define VIDIOC_MSM_ISP_UNMAP_BUF \
+ _IOWR('V', MSM_ISP_UNMAP_BUF, \
+ struct msm_isp_unmap_buf_req)
+
+#define VIDIOC_MSM_ISP_AHB_CLK_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE+25, struct msm_isp_ahb_clk_cfg)
+
+#define VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START \
+ _IOWR('V', MSM_ISP_FETCH_ENG_MULTI_PASS_START, \
+ struct msm_vfe_fetch_eng_multi_pass_start)
+
+#define VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE \
+ _IOWR('V', MSM_ISP_MAP_BUF_START_MULTI_PASS_FE, \
+ struct msm_vfe_fetch_eng_multi_pass_start)
+
+#define VIDIOC_MSM_ISP_CFG_HW_STATE \
+ _IOWR('V', MSM_ISP_CFG_HW_STATE, \
+ struct msm_vfe_axi_stream_cfg_cmd)
+
+
+#define VIDIOC_MSM_ISP_OPERATION_CFG \
+ _IOWR('V', MSM_ISP_OPERATION_CFG, \
+ struct msm_vfe_operation_cfg)
+
+#define VIDIOC_MSM_ISP_AXI_OUTPUT_CFG \
+ _IOWR('V', MSM_ISP_AXI_OUTPUT_CFG, \
+ struct msm_vfe_axi_output_cfg)
+
+#define VIDIOC_MSM_ISP_CAMIF_CFG \
+ _IOWR('V', MSM_ISP_CAMIF_CFG, \
+ struct msm_vfe_camif_cfg)
+
+
+#endif /* __UAPI_MSM_AIS_ISP__ */
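/*
 * Illustrative user-space call (not part of this patch): requesting a buffer
 * queue with the ioctls defined above. Assumes <sys/ioctl.h> is included;
 * "fd" is a hypothetical open VFE node and error handling is omitted.
 */
static int request_bufq_example(int fd)
{
	struct msm_isp_buf_request req = {
		.vfe_id = 0,
		.output_id = RDI_INTF_0,
		.num_buf = 4,
		.buf_type = ISP_PRIVATE_BUF,
	};

	/* The driver presumably fills req.handle for later enqueue/dequeue. */
	return ioctl(fd, VIDIOC_MSM_ISP_REQUEST_BUFQ, &req);
}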
diff --git a/include/uapi/media/ais/msm_ais_ispif.h b/include/uapi/media/ais/msm_ais_ispif.h
new file mode 100644
index 000000000000..b12175d787c2
--- /dev/null
+++ b/include/uapi/media/ais/msm_ais_ispif.h
@@ -0,0 +1,173 @@
+#ifndef UAPI_MSM_AIS_ISPIF_H
+#define UAPI_MSM_AIS_ISPIF_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/videodev2.h>
+
+#define CSID_VERSION_V20 0x02000011
+#define CSID_VERSION_V22 0x02001000
+#define CSID_VERSION_V30 0x30000000
+#define CSID_VERSION_V3 0x30000000
+#define CSID_VERSION_V35 0x30050000
+
+enum msm_ispif_vfe_intf {
+ VFE0,
+ VFE1,
+ VFE_MAX
+};
+#define VFE0_MASK (1 << VFE0)
+#define VFE1_MASK (1 << VFE1)
+
+enum msm_ispif_intftype {
+ PIX0,
+ RDI0,
+ PIX1,
+ RDI1,
+ RDI2,
+ INTF_MAX
+};
+#define MAX_PARAM_ENTRIES (INTF_MAX * 2)
+#define MAX_CID_CH 8
+#define MAX_CID_CH_v2 4
+
+#define PIX0_MASK (1 << PIX0)
+#define PIX1_MASK (1 << PIX1)
+#define RDI0_MASK (1 << RDI0)
+#define RDI1_MASK (1 << RDI1)
+#define RDI2_MASK (1 << RDI2)
+
+
+enum msm_ispif_vc {
+ VC0,
+ VC1,
+ VC2,
+ VC3,
+ VC_MAX
+};
+
+enum msm_ispif_cid {
+ CID0,
+ CID1,
+ CID2,
+ CID3,
+ CID4,
+ CID5,
+ CID6,
+ CID7,
+ CID8,
+ CID9,
+ CID10,
+ CID11,
+ CID12,
+ CID13,
+ CID14,
+ CID15,
+ CID_MAX
+};
+
+enum msm_ispif_csid {
+ CSID0,
+ CSID1,
+ CSID2,
+ CSID3,
+ CSID_MAX
+};
+
+enum msm_ispif_pixel_odd_even {
+ PIX_EVEN,
+ PIX_ODD
+};
+
+enum msm_ispif_pixel_pack_mode {
+ PACK_BYTE,
+ PACK_PLAIN_PACK,
+ PACK_NV_P8,
+ PACK_NV_P16
+};
+
+struct msm_ispif_pack_cfg {
+ int pixel_swap_en;
+ enum msm_ispif_pixel_odd_even even_odd_sel;
+ enum msm_ispif_pixel_pack_mode pack_mode;
+};
+
+struct msm_ispif_params_entry {
+ enum msm_ispif_vfe_intf vfe_intf;
+ enum msm_ispif_intftype intftype;
+ enum msm_ispif_csid csid;
+ int num_cids;
+ enum msm_ispif_cid cids[MAX_CID_CH_v2];
+ uint8_t crop_enable;
+ uint16_t crop_start_pixel;
+ uint16_t crop_end_pixel;
+ uint8_t rdi_frameskip_enable;
+ uint32_t rdi_framedrop_period;
+ uint32_t rdi_framedrop_pattern;
+};
+
+struct msm_ispif_param_data_ext {
+ uint32_t num;
+ struct msm_ispif_params_entry entries[MAX_PARAM_ENTRIES];
+ struct msm_ispif_pack_cfg pack_cfg[CID_MAX];
+};
+
+struct msm_ispif_param_data {
+ uint32_t num;
+ struct msm_ispif_params_entry entries[MAX_PARAM_ENTRIES];
+};
+
+struct msm_isp_info {
+ uint32_t max_resolution;
+ uint32_t id;
+ uint32_t ver;
+};
+
+struct msm_ispif_vfe_info {
+ int num_vfe;
+ struct msm_isp_info info[VFE_MAX];
+};
+
+enum ispif_cfg_type_t {
+ ISPIF_CLK_ENABLE,
+ ISPIF_CLK_DISABLE,
+ ISPIF_INIT,
+ ISPIF_RELEASE,
+ ISPIF_RESET,
+ ISPIF_CFG,
+ ISPIF_START_FRAME_BOUNDARY,
+ ISPIF_RESTART_FRAME_BOUNDARY,
+ ISPIF_STOP_FRAME_BOUNDARY,
+ ISPIF_STOP,
+ ISPIF_ENABLE_REG_DUMP,
+ ISPIF_SET_VFE_INFO,
+ ISPIF_CFG2
+};
+
+
+struct ispif_cfg_data_ext {
+ enum ispif_cfg_type_t cfg_type;
+ void __user *data;
+ uint32_t size;
+};
+
+struct ispif_cfg_data {
+ enum ispif_cfg_type_t cfg_type;
+ union {
+ int reg_dump; /* ISPIF_ENABLE_REG_DUMP */
+ uint32_t csid_version; /* ISPIF_INIT */
+ struct msm_ispif_vfe_info vfe_info; /* ISPIF_SET_VFE_INFO */
+ struct msm_ispif_param_data params; /* CFG, START, STOP */
+ };
+};
+
+#define ISPIF_RDI_PACK_MODE_SUPPORT 1
+
+#define VIDIOC_MSM_ISPIF_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE, struct ispif_cfg_data)
+
+#define VIDIOC_MSM_ISPIF_CFG_EXT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE+1, struct ispif_cfg_data_ext)
+
+#endif
+
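/*
 * Illustrative user-space sketch (not part of this patch): initializing the
 * ISPIF with a CSID version through VIDIOC_MSM_ISPIF_CFG. Assumes
 * <sys/ioctl.h> is included; "fd" is a hypothetical open ispif subdev node.
 */
static int ispif_init_example(int fd)
{
	struct ispif_cfg_data cfg = {
		.cfg_type = ISPIF_INIT,
		.csid_version = CSID_VERSION_V35,
	};

	return ioctl(fd, VIDIOC_MSM_ISPIF_CFG, &cfg);
}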
diff --git a/include/uapi/media/ais/msm_ais_sensor.h b/include/uapi/media/ais/msm_ais_sensor.h
new file mode 100644
index 000000000000..f8b98def850a
--- /dev/null
+++ b/include/uapi/media/ais/msm_ais_sensor.h
@@ -0,0 +1,614 @@
+#ifndef __UAPI_LINUX_MSM_AIS_SENSOR_H
+#define __UAPI_LINUX_MSM_AIS_SENSOR_H
+
+#include <linux/v4l2-mediabus.h>
+#include <media/ais/msm_ais_sensor_sdk.h>
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+#define I2C_SEQ_REG_SETTING_MAX 5
+
+#define MSM_SENSOR_MCLK_8HZ 8000000
+#define MSM_SENSOR_MCLK_16HZ 16000000
+#define MSM_SENSOR_MCLK_24HZ 24000000
+
+#define MAX_SENSOR_NAME 32
+#define MAX_ACTUATOR_AF_TOTAL_STEPS 1024
+
+#define MAX_OIS_MOD_NAME_SIZE 32
+#define MAX_OIS_NAME_SIZE 32
+#define MAX_OIS_REG_SETTINGS 800
+
+#define MOVE_NEAR 0
+#define MOVE_FAR 1
+
+#define MSM_ACTUATOR_MOVE_SIGNED_FAR -1
+#define MSM_ACTUATOR_MOVE_SIGNED_NEAR 1
+
+#define MAX_ACTUATOR_REGION 5
+
+#define MAX_EEPROM_NAME 32
+
+#define MAX_AF_ITERATIONS 3
+#define MAX_NUMBER_OF_STEPS 47
+#define MAX_REGULATOR 5
+
+#define MSM_V4L2_PIX_FMT_META v4l2_fourcc('M', 'E', 'T', 'A') /* META */
+#define MSM_V4L2_PIX_FMT_META10 v4l2_fourcc('M', 'E', '1', '0') /* META10 */
+#define MSM_V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4')
+ /* 14 BGBG.. GRGR.. */
+#define MSM_V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4')
+ /* 14 GBGB.. RGRG.. */
+#define MSM_V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('B', 'A', '1', '4')
+ /* 14 GRGR.. BGBG.. */
+#define MSM_V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4')
+ /* 14 RGRG.. GBGB.. */
+
+enum flash_type {
+ LED_FLASH = 1,
+ STROBE_FLASH,
+ GPIO_FLASH
+};
+
+enum msm_sensor_resolution_t {
+ MSM_SENSOR_RES_FULL,
+ MSM_SENSOR_RES_QTR,
+ MSM_SENSOR_RES_2,
+ MSM_SENSOR_RES_3,
+ MSM_SENSOR_RES_4,
+ MSM_SENSOR_RES_5,
+ MSM_SENSOR_RES_6,
+ MSM_SENSOR_RES_7,
+ MSM_SENSOR_INVALID_RES,
+};
+
+enum msm_camera_stream_type_t {
+ MSM_CAMERA_STREAM_PREVIEW,
+ MSM_CAMERA_STREAM_SNAPSHOT,
+ MSM_CAMERA_STREAM_VIDEO,
+ MSM_CAMERA_STREAM_INVALID,
+};
+
+enum sensor_sub_module_t {
+ SUB_MODULE_SENSOR,
+ SUB_MODULE_CHROMATIX,
+ SUB_MODULE_ACTUATOR,
+ SUB_MODULE_EEPROM,
+ SUB_MODULE_LED_FLASH,
+ SUB_MODULE_STROBE_FLASH,
+ SUB_MODULE_CSID,
+ SUB_MODULE_CSID_3D,
+ SUB_MODULE_CSIPHY,
+ SUB_MODULE_CSIPHY_3D,
+ SUB_MODULE_OIS,
+ SUB_MODULE_EXT,
+ SUB_MODULE_IR_LED,
+ SUB_MODULE_IR_CUT,
+ SUB_MODULE_MAX,
+};
+
+enum {
+ MSM_CAMERA_EFFECT_MODE_OFF,
+ MSM_CAMERA_EFFECT_MODE_MONO,
+ MSM_CAMERA_EFFECT_MODE_NEGATIVE,
+ MSM_CAMERA_EFFECT_MODE_SOLARIZE,
+ MSM_CAMERA_EFFECT_MODE_SEPIA,
+ MSM_CAMERA_EFFECT_MODE_POSTERIZE,
+ MSM_CAMERA_EFFECT_MODE_WHITEBOARD,
+ MSM_CAMERA_EFFECT_MODE_BLACKBOARD,
+ MSM_CAMERA_EFFECT_MODE_AQUA,
+ MSM_CAMERA_EFFECT_MODE_EMBOSS,
+ MSM_CAMERA_EFFECT_MODE_SKETCH,
+ MSM_CAMERA_EFFECT_MODE_NEON,
+ MSM_CAMERA_EFFECT_MODE_MAX
+};
+
+enum {
+ MSM_CAMERA_WB_MODE_AUTO,
+ MSM_CAMERA_WB_MODE_CUSTOM,
+ MSM_CAMERA_WB_MODE_INCANDESCENT,
+ MSM_CAMERA_WB_MODE_FLUORESCENT,
+ MSM_CAMERA_WB_MODE_WARM_FLUORESCENT,
+ MSM_CAMERA_WB_MODE_DAYLIGHT,
+ MSM_CAMERA_WB_MODE_CLOUDY_DAYLIGHT,
+ MSM_CAMERA_WB_MODE_TWILIGHT,
+ MSM_CAMERA_WB_MODE_SHADE,
+ MSM_CAMERA_WB_MODE_OFF,
+ MSM_CAMERA_WB_MODE_MAX
+};
+
+enum {
+ MSM_CAMERA_SCENE_MODE_OFF,
+ MSM_CAMERA_SCENE_MODE_AUTO,
+ MSM_CAMERA_SCENE_MODE_LANDSCAPE,
+ MSM_CAMERA_SCENE_MODE_SNOW,
+ MSM_CAMERA_SCENE_MODE_BEACH,
+ MSM_CAMERA_SCENE_MODE_SUNSET,
+ MSM_CAMERA_SCENE_MODE_NIGHT,
+ MSM_CAMERA_SCENE_MODE_PORTRAIT,
+ MSM_CAMERA_SCENE_MODE_BACKLIGHT,
+ MSM_CAMERA_SCENE_MODE_SPORTS,
+ MSM_CAMERA_SCENE_MODE_ANTISHAKE,
+ MSM_CAMERA_SCENE_MODE_FLOWERS,
+ MSM_CAMERA_SCENE_MODE_CANDLELIGHT,
+ MSM_CAMERA_SCENE_MODE_FIREWORKS,
+ MSM_CAMERA_SCENE_MODE_PARTY,
+ MSM_CAMERA_SCENE_MODE_NIGHT_PORTRAIT,
+ MSM_CAMERA_SCENE_MODE_THEATRE,
+ MSM_CAMERA_SCENE_MODE_ACTION,
+ MSM_CAMERA_SCENE_MODE_AR,
+ MSM_CAMERA_SCENE_MODE_FACE_PRIORITY,
+ MSM_CAMERA_SCENE_MODE_BARCODE,
+ MSM_CAMERA_SCENE_MODE_HDR,
+ MSM_CAMERA_SCENE_MODE_MAX
+};
+
+enum csid_cfg_type_t {
+ CSID_INIT,
+ CSID_CFG,
+ CSID_UPDATE_CFG,
+ CSID_TESTMODE_CFG,
+ CSID_START,
+ CSID_STOP,
+ CSID_RELEASE,
+};
+
+enum csiphy_cfg_type_t {
+ CSIPHY_INIT,
+ CSIPHY_CFG,
+ CSIPHY_START,
+ CSIPHY_STOP,
+ CSIPHY_RELEASE,
+};
+
+enum camera_vreg_type {
+ VREG_TYPE_DEFAULT,
+ VREG_TYPE_CUSTOM,
+};
+
+enum sensor_af_t {
+ SENSOR_AF_FOCUSSED,
+ SENSOR_AF_NOT_FOCUSSED,
+};
+
+enum cci_i2c_master_t {
+ MASTER_0,
+ MASTER_1,
+ MASTER_MAX,
+};
+
+struct msm_camera_i2c_array_write_config {
+ struct msm_camera_i2c_reg_setting conf_array;
+ uint16_t slave_addr;
+};
+
+struct msm_camera_i2c_read_config {
+ uint16_t slave_addr;
+ uint16_t reg_addr;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ enum msm_camera_i2c_data_type data_type;
+ uint16_t data;
+};
+
+struct msm_camera_csi2_params {
+ struct msm_camera_csid_params csid_params;
+ struct msm_camera_csiphy_params csiphy_params;
+ uint8_t csi_clk_scale_enable;
+};
+
+struct msm_camera_csi_lane_params {
+ uint16_t csi_lane_assign;
+ uint16_t csi_lane_mask;
+};
+
+struct csi_lane_params_t {
+ uint16_t csi_lane_assign;
+ uint8_t csi_lane_mask;
+ uint8_t csi_if;
+ int8_t csid_core[2];
+ uint8_t csi_phy_sel;
+};
+
+struct msm_sensor_info_t {
+ char sensor_name[MAX_SENSOR_NAME];
+ uint32_t session_id;
+ int32_t subdev_id[SUB_MODULE_MAX];
+ int32_t subdev_intf[SUB_MODULE_MAX];
+ uint8_t is_mount_angle_valid;
+ uint32_t sensor_mount_angle;
+ int modes_supported;
+ enum camb_position_t position;
+};
+
+struct camera_vreg_t {
+ const char *reg_name;
+ int min_voltage;
+ int max_voltage;
+ int op_mode;
+ uint32_t delay;
+ const char *custom_vreg_name;
+ enum camera_vreg_type type;
+};
+
+struct sensorb_cfg_data {
+ int cfgtype;
+ union {
+ struct msm_sensor_info_t sensor_info;
+ struct msm_sensor_init_params sensor_init_params;
+ void *setting;
+ struct msm_sensor_i2c_sync_params sensor_i2c_sync_params;
+ } cfg;
+};
+
+struct csid_cfg_data {
+ enum csid_cfg_type_t cfgtype;
+ union {
+ uint32_t csid_version;
+ struct msm_camera_csid_params *csid_params;
+ struct msm_camera_csid_testmode_parms *csid_testmode_params;
+ uint32_t csid_cidmask;
+ } cfg;
+};
+
+struct csiphy_cfg_data {
+ enum csiphy_cfg_type_t cfgtype;
+ union {
+ struct msm_camera_csiphy_params *csiphy_params;
+ struct msm_camera_csi_lane_params *csi_lane_params;
+ } cfg;
+};
+
+enum eeprom_cfg_type_t {
+ CFG_EEPROM_GET_INFO,
+ CFG_EEPROM_GET_CAL_DATA,
+ CFG_EEPROM_READ_CAL_DATA,
+ CFG_EEPROM_WRITE_DATA,
+ CFG_EEPROM_GET_MM_INFO,
+ CFG_EEPROM_INIT,
+};
+
+struct eeprom_get_t {
+ uint32_t num_bytes;
+};
+
+struct eeprom_read_t {
+ uint8_t *dbuffer;
+ uint32_t num_bytes;
+};
+
+struct eeprom_write_t {
+ uint8_t *dbuffer;
+ uint32_t num_bytes;
+};
+
+struct eeprom_get_cmm_t {
+ uint32_t cmm_support;
+ uint32_t cmm_compression;
+ uint32_t cmm_size;
+};
+
+struct msm_eeprom_info_t {
+ struct msm_sensor_power_setting_array *power_setting_array;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ struct msm_eeprom_memory_map_array *mem_map_array;
+};
+
+struct msm_ir_led_cfg_data_t {
+ enum msm_ir_led_cfg_type_t cfg_type;
+ int32_t pwm_duty_on_ns;
+ int32_t pwm_period_ns;
+};
+
+struct msm_ir_cut_cfg_data_t {
+ enum msm_ir_cut_cfg_type_t cfg_type;
+};
+
+struct msm_eeprom_cfg_data {
+ enum eeprom_cfg_type_t cfgtype;
+ uint8_t is_supported;
+ union {
+ char eeprom_name[MAX_SENSOR_NAME];
+ struct eeprom_get_t get_data;
+ struct eeprom_read_t read_data;
+ struct eeprom_write_t write_data;
+ struct eeprom_get_cmm_t get_cmm_data;
+ struct msm_eeprom_info_t eeprom_info;
+ } cfg;
+};
+
+enum msm_sensor_cfg_type_t {
+ CFG_SET_SLAVE_INFO,
+ CFG_SLAVE_READ_I2C,
+ CFG_WRITE_I2C_ARRAY,
+ CFG_SLAVE_WRITE_I2C_ARRAY,
+ CFG_WRITE_I2C_SEQ_ARRAY,
+ CFG_POWER_UP,
+ CFG_POWER_DOWN,
+ CFG_SET_STOP_STREAM_SETTING,
+ CFG_GET_SENSOR_INFO,
+ CFG_GET_SENSOR_INIT_PARAMS,
+ CFG_SET_INIT_SETTING,
+ CFG_SET_RESOLUTION,
+ CFG_SET_STOP_STREAM,
+ CFG_SET_START_STREAM,
+ CFG_SET_SATURATION,
+ CFG_SET_CONTRAST,
+ CFG_SET_SHARPNESS,
+ CFG_SET_ISO,
+ CFG_SET_EXPOSURE_COMPENSATION,
+ CFG_SET_ANTIBANDING,
+ CFG_SET_BESTSHOT_MODE,
+ CFG_SET_EFFECT,
+ CFG_SET_WHITE_BALANCE,
+ CFG_SET_AUTOFOCUS,
+ CFG_CANCEL_AUTOFOCUS,
+ CFG_SET_STREAM_TYPE,
+ CFG_SET_I2C_SYNC_PARAM,
+ CFG_WRITE_I2C_ARRAY_ASYNC,
+ CFG_WRITE_I2C_ARRAY_SYNC,
+ CFG_WRITE_I2C_ARRAY_SYNC_BLOCK,
+};
+
+enum msm_actuator_cfg_type_t {
+ CFG_GET_ACTUATOR_INFO,
+ CFG_SET_ACTUATOR_INFO,
+ CFG_SET_DEFAULT_FOCUS,
+ CFG_MOVE_FOCUS,
+ CFG_SET_POSITION,
+ CFG_ACTUATOR_POWERDOWN,
+ CFG_ACTUATOR_POWERUP,
+ CFG_ACTUATOR_INIT,
+};
+
+struct msm_ois_opcode {
+ uint32_t prog;
+ uint32_t coeff;
+ uint32_t pheripheral;
+ uint32_t memory;
+};
+
+enum msm_ois_cfg_type_t {
+ CFG_OIS_INIT,
+ CFG_OIS_POWERDOWN,
+ CFG_OIS_POWERUP,
+ CFG_OIS_CONTROL,
+ CFG_OIS_I2C_WRITE_SEQ_TABLE,
+};
+
+enum msm_ois_cfg_download_type_t {
+ CFG_OIS_DOWNLOAD,
+ CFG_OIS_DATA_CONFIG,
+};
+
+enum msm_ois_i2c_operation {
+ MSM_OIS_WRITE = 0,
+ MSM_OIS_POLL,
+};
+
+struct reg_settings_ois_t {
+ uint16_t reg_addr;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ uint32_t reg_data;
+ enum msm_camera_i2c_data_type data_type;
+ enum msm_ois_i2c_operation i2c_operation;
+ uint32_t delay;
+};
+
+struct msm_ois_params_t {
+ uint16_t data_size;
+ uint16_t setting_size;
+ uint32_t i2c_addr;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ enum msm_camera_i2c_reg_addr_type i2c_addr_type;
+ enum msm_camera_i2c_data_type i2c_data_type;
+ struct reg_settings_ois_t *settings;
+};
+
+struct msm_ois_set_info_t {
+ struct msm_ois_params_t ois_params;
+};
+
+struct msm_actuator_move_params_t {
+ int8_t dir;
+ int8_t sign_dir;
+ int16_t dest_step_pos;
+ int32_t num_steps;
+ uint16_t curr_lens_pos;
+ struct damping_params_t *ringing_params;
+};
+
+struct msm_actuator_tuning_params_t {
+ int16_t initial_code;
+ uint16_t pwd_step;
+ uint16_t region_size;
+ uint32_t total_steps;
+ struct region_params_t *region_params;
+};
+
+struct park_lens_data_t {
+ uint32_t damping_step;
+ uint32_t damping_delay;
+ uint32_t hw_params;
+ uint32_t max_step;
+};
+
+struct msm_actuator_params_t {
+ enum actuator_type act_type;
+ uint8_t reg_tbl_size;
+ uint16_t data_size;
+ uint16_t init_setting_size;
+ uint32_t i2c_addr;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ enum msm_camera_i2c_reg_addr_type i2c_addr_type;
+ enum msm_camera_i2c_data_type i2c_data_type;
+ struct msm_actuator_reg_params_t *reg_tbl_params;
+ struct reg_settings_t *init_settings;
+ struct park_lens_data_t park_lens;
+};
+
+struct msm_actuator_set_info_t {
+ struct msm_actuator_params_t actuator_params;
+ struct msm_actuator_tuning_params_t af_tuning_params;
+};
+
+struct msm_actuator_get_info_t {
+ uint32_t focal_length_num;
+ uint32_t focal_length_den;
+ uint32_t f_number_num;
+ uint32_t f_number_den;
+ uint32_t f_pix_num;
+ uint32_t f_pix_den;
+ uint32_t total_f_dist_num;
+ uint32_t total_f_dist_den;
+ uint32_t hor_view_angle_num;
+ uint32_t hor_view_angle_den;
+ uint32_t ver_view_angle_num;
+ uint32_t ver_view_angle_den;
+};
+
+enum af_camera_name {
+ ACTUATOR_MAIN_CAM_0,
+ ACTUATOR_MAIN_CAM_1,
+ ACTUATOR_MAIN_CAM_2,
+ ACTUATOR_MAIN_CAM_3,
+ ACTUATOR_MAIN_CAM_4,
+ ACTUATOR_MAIN_CAM_5,
+ ACTUATOR_WEB_CAM_0,
+ ACTUATOR_WEB_CAM_1,
+ ACTUATOR_WEB_CAM_2,
+};
+
+struct msm_ois_slave_info {
+ char ois_name[MAX_OIS_NAME_SIZE];
+ uint32_t i2c_addr;
+ struct msm_ois_opcode opcode;
+};
+struct msm_ois_cfg_data {
+ int cfgtype;
+ union {
+ struct msm_ois_set_info_t set_info;
+ struct msm_camera_i2c_seq_reg_setting *settings;
+ } cfg;
+};
+
+struct msm_ois_cfg_download_data {
+ int cfgtype;
+ struct msm_ois_slave_info slave_info;
+};
+
+struct msm_actuator_set_position_t {
+ uint16_t number_of_steps;
+ uint32_t hw_params;
+ uint16_t pos[MAX_NUMBER_OF_STEPS];
+ uint16_t delay[MAX_NUMBER_OF_STEPS];
+};
+
+struct msm_actuator_cfg_data {
+ int cfgtype;
+ uint8_t is_af_supported;
+ union {
+ struct msm_actuator_move_params_t move;
+ struct msm_actuator_set_info_t set_info;
+ struct msm_actuator_get_info_t get_info;
+ struct msm_actuator_set_position_t setpos;
+ enum af_camera_name cam_name;
+ } cfg;
+};
+
+enum msm_camera_led_config_t {
+ MSM_CAMERA_LED_OFF,
+ MSM_CAMERA_LED_LOW,
+ MSM_CAMERA_LED_HIGH,
+ MSM_CAMERA_LED_INIT,
+ MSM_CAMERA_LED_RELEASE,
+};
+
+struct msm_camera_led_cfg_t {
+ enum msm_camera_led_config_t cfgtype;
+ int32_t torch_current[MAX_LED_TRIGGERS];
+ int32_t flash_current[MAX_LED_TRIGGERS];
+ int32_t flash_duration[MAX_LED_TRIGGERS];
+};
+
+struct msm_flash_init_info_t {
+ enum msm_flash_driver_type flash_driver_type;
+ uint32_t slave_addr;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ struct msm_sensor_power_setting_array *power_setting_array;
+ struct msm_camera_i2c_reg_setting_array *settings;
+};
+
+struct msm_flash_cfg_data_t {
+ enum msm_flash_cfg_type_t cfg_type;
+ int32_t flash_current[MAX_LED_TRIGGERS];
+ int32_t flash_duration[MAX_LED_TRIGGERS];
+ union {
+ struct msm_flash_init_info_t *flash_init_info;
+ struct msm_camera_i2c_reg_setting_array *settings;
+ } cfg;
+};
+
+/* sensor init structures and enums */
+enum msm_sensor_init_cfg_type_t {
+ CFG_SINIT_PROBE,
+ CFG_SINIT_PROBE_DONE,
+ CFG_SINIT_PROBE_WAIT_DONE,
+};
+
+struct sensor_init_cfg_data {
+ enum msm_sensor_init_cfg_type_t cfgtype;
+ struct msm_sensor_info_t probed_info;
+ char entity_name[MAX_SENSOR_NAME];
+ union {
+ void *setting;
+ } cfg;
+};
+
+#define VIDIOC_MSM_SENSOR_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct sensorb_cfg_data)
+
+#define VIDIOC_MSM_SENSOR_RELEASE \
+ _IO('V', BASE_VIDIOC_PRIVATE + 2)
+
+#define VIDIOC_MSM_SENSOR_GET_SUBDEV_ID \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 3, uint32_t)
+
+#define VIDIOC_MSM_CSIPHY_IO_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 4, struct csiphy_cfg_data)
+
+#define VIDIOC_MSM_CSID_IO_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct csid_cfg_data)
+
+#define VIDIOC_MSM_ACTUATOR_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct msm_actuator_cfg_data)
+
+#define VIDIOC_MSM_FLASH_LED_DATA_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 7, struct msm_camera_led_cfg_t)
+
+#define VIDIOC_MSM_EEPROM_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 8, struct msm_eeprom_cfg_data)
+
+#define VIDIOC_MSM_SENSOR_GET_AF_STATUS \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 9, uint32_t)
+
+#define VIDIOC_MSM_SENSOR_INIT_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 10, struct sensor_init_cfg_data)
+
+#define VIDIOC_MSM_OIS_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 11, struct msm_ois_cfg_data)
+
+#define VIDIOC_MSM_FLASH_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct msm_flash_cfg_data_t)
+
+#define VIDIOC_MSM_OIS_CFG_DOWNLOAD \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 14, struct msm_ois_cfg_download_data)
+
+#define VIDIOC_MSM_IR_LED_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_led_cfg_data_t)
+
+#define VIDIOC_MSM_IR_CUT_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 16, struct msm_ir_cut_cfg_data_t)
+
+#endif /* __UAPI_LINUX_MSM_AIS_SENSOR_H */
+
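A minimal user-space sketch of how the ioctls above are meant to be driven (not part of the patch): it assumes the header is exported as <media/ais/msm_ais_sensor.h> and the sensor subdev node index is board-specific, so both are placeholders here.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <media/ais/msm_ais_sensor.h>	/* assumed install path */

	int main(void)
	{
		struct sensorb_cfg_data cdata;
		/* Hypothetical subdev node; the real index depends on the board. */
		int fd = open("/dev/v4l-subdev10", O_RDWR);

		if (fd < 0)
			return 1;

		memset(&cdata, 0, sizeof(cdata));
		cdata.cfgtype = CFG_GET_SENSOR_INFO;	/* from enum msm_sensor_cfg_type_t */
		if (ioctl(fd, VIDIOC_MSM_SENSOR_CFG, &cdata) == 0)
			printf("sensor: %s, session %u\n",
			       cdata.cfg.sensor_info.sensor_name,
			       cdata.cfg.sensor_info.session_id);

		close(fd);
		return 0;
	}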
diff --git a/include/uapi/media/ais/msm_ais_sensor_sdk.h b/include/uapi/media/ais/msm_ais_sensor_sdk.h
new file mode 100644
index 000000000000..c2a93a51a985
--- /dev/null
+++ b/include/uapi/media/ais/msm_ais_sensor_sdk.h
@@ -0,0 +1,417 @@
+#ifndef __UAPI_LINUX_MSM_AIS_SENSOR_SDK_H
+#define __UAPI_LINUX_MSM_AIS_SENSOR_SDK_H
+
+#include <linux/videodev2.h>
+
+#define KVERSION 0x1
+
+#define MAX_POWER_CONFIG 12
+#define GPIO_OUT_LOW (0 << 1)
+#define GPIO_OUT_HIGH (1 << 1)
+#define CSI_EMBED_DATA 0x12
+#define CSI_RESERVED_DATA_0 0x13
+#define CSI_YUV422_8 0x1E
+#define CSI_RAW8 0x2A
+#define CSI_RAW10 0x2B
+#define CSI_RAW12 0x2C
+#define CSI_DECODE_6BIT 0
+#define CSI_DECODE_8BIT 1
+#define CSI_DECODE_10BIT 2
+#define CSI_DECODE_12BIT 3
+#define CSI_DECODE_DPCM_10_6_10 4
+#define CSI_DECODE_DPCM_10_8_10 5
+#define MAX_CID 16
+#define I2C_SEQ_REG_DATA_MAX 1024
+#define I2C_REG_DATA_MAX (8*1024)
+
+#define MSM_V4L2_PIX_FMT_META v4l2_fourcc('M', 'E', 'T', 'A') /* META */
+#define MSM_V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4')
+ /* 14 BGBG.. GRGR.. */
+#define MSM_V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4')
+ /* 14 GBGB.. RGRG.. */
+#define MSM_V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('B', 'A', '1', '4')
+ /* 14 GRGR.. BGBG.. */
+#define MSM_V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4')
+ /* 14 RGRG.. GBGB.. */
+
+#define MAX_ACTUATOR_REG_TBL_SIZE 8
+#define MAX_ACTUATOR_REGION 5
+#define NUM_ACTUATOR_DIR 2
+#define MAX_ACTUATOR_SCENARIO 8
+#define MAX_ACT_MOD_NAME_SIZE 32
+#define MAX_ACT_NAME_SIZE 32
+#define MAX_ACTUATOR_INIT_SET 120
+#define MAX_I2C_REG_SET 12
+
+#define MAX_LED_TRIGGERS 3
+
+#define MSM_EEPROM_MEMORY_MAP_MAX_SIZE 80
+#define MSM_EEPROM_MAX_MEM_MAP_CNT 8
+
+enum msm_sensor_camera_id_t {
+ CAMERA_0,
+ CAMERA_1,
+ CAMERA_2,
+ CAMERA_3,
+ MAX_CAMERAS,
+};
+
+enum i2c_freq_mode_t {
+ I2C_STANDARD_MODE,
+ I2C_FAST_MODE,
+ I2C_CUSTOM_MODE,
+ I2C_FAST_PLUS_MODE,
+ I2C_MAX_MODES,
+};
+
+enum camb_position_t {
+ BACK_CAMERA_B,
+ FRONT_CAMERA_B,
+ AUX_CAMERA_B = 0x100,
+ INVALID_CAMERA_B,
+};
+
+enum msm_sensor_power_seq_type_t {
+ SENSOR_CLK,
+ SENSOR_GPIO,
+ SENSOR_VREG,
+ SENSOR_I2C_MUX,
+ SENSOR_I2C,
+};
+
+enum msm_camera_i2c_reg_addr_type {
+ MSM_CAMERA_I2C_BYTE_ADDR = 1,
+ MSM_CAMERA_I2C_WORD_ADDR,
+ MSM_CAMERA_I2C_3B_ADDR,
+ MSM_CAMERA_I2C_ADDR_TYPE_MAX,
+};
+
+enum msm_camera_i2c_data_type {
+ MSM_CAMERA_I2C_BYTE_DATA = 1,
+ MSM_CAMERA_I2C_WORD_DATA,
+ MSM_CAMERA_I2C_DWORD_DATA,
+ MSM_CAMERA_I2C_SET_BYTE_MASK,
+ MSM_CAMERA_I2C_UNSET_BYTE_MASK,
+ MSM_CAMERA_I2C_SET_WORD_MASK,
+ MSM_CAMERA_I2C_UNSET_WORD_MASK,
+ MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
+ MSM_CAMERA_I2C_DATA_TYPE_MAX,
+};
+
+enum msm_sensor_power_seq_gpio_t {
+ SENSOR_GPIO_RESET,
+ SENSOR_GPIO_STANDBY,
+ SENSOR_GPIO_AF_PWDM,
+ SENSOR_GPIO_VIO,
+ SENSOR_GPIO_VANA,
+ SENSOR_GPIO_VDIG,
+ SENSOR_GPIO_VAF,
+ SENSOR_GPIO_FL_EN,
+ SENSOR_GPIO_FL_NOW,
+ SENSOR_GPIO_FL_RESET,
+ SENSOR_GPIO_CUSTOM1,
+ SENSOR_GPIO_CUSTOM2,
+ SENSOR_GPIO_MAX,
+};
+
+enum msm_ir_cut_filter_gpio_t {
+ IR_CUT_FILTER_GPIO_P = 0,
+ IR_CUT_FILTER_GPIO_M,
+ IR_CUT_FILTER_GPIO_MAX,
+};
+#define IR_CUT_FILTER_GPIO_P IR_CUT_FILTER_GPIO_P
+#define IR_CUT_FILTER_GPIO_M IR_CUT_FILTER_GPIO_M
+#define IR_CUT_FILTER_GPIO_MAX IR_CUT_FILTER_GPIO_MAX
+
+enum msm_camera_vreg_name_t {
+ CAM_VDIG,
+ CAM_VIO,
+ CAM_VANA,
+ CAM_VAF,
+ CAM_V_CUSTOM1,
+ CAM_V_CUSTOM2,
+ CAM_VREG_MAX,
+};
+
+enum msm_sensor_clk_type_t {
+ SENSOR_CAM_MCLK,
+ SENSOR_CAM_CLK,
+ SENSOR_CAM_CLK_MAX,
+};
+
+enum camerab_mode_t {
+ CAMERA_MODE_2D_B = (1<<0),
+ CAMERA_MODE_3D_B = (1<<1),
+ CAMERA_MODE_INVALID = (1<<2),
+};
+
+enum msm_actuator_data_type {
+ MSM_ACTUATOR_BYTE_DATA = 1,
+ MSM_ACTUATOR_WORD_DATA,
+};
+
+enum msm_actuator_addr_type {
+ MSM_ACTUATOR_BYTE_ADDR = 1,
+ MSM_ACTUATOR_WORD_ADDR,
+};
+
+enum msm_actuator_write_type {
+ MSM_ACTUATOR_WRITE_HW_DAMP,
+ MSM_ACTUATOR_WRITE_DAC,
+ MSM_ACTUATOR_WRITE,
+ MSM_ACTUATOR_WRITE_DIR_REG,
+ MSM_ACTUATOR_POLL,
+ MSM_ACTUATOR_READ_WRITE,
+};
+
+enum msm_actuator_i2c_operation {
+ MSM_ACT_WRITE = 0,
+ MSM_ACT_POLL,
+};
+
+enum actuator_type {
+ ACTUATOR_VCM,
+ ACTUATOR_PIEZO,
+ ACTUATOR_HVCM,
+ ACTUATOR_BIVCM,
+};
+
+enum msm_flash_driver_type {
+ FLASH_DRIVER_PMIC,
+ FLASH_DRIVER_I2C,
+ FLASH_DRIVER_GPIO,
+ FLASH_DRIVER_DEFAULT
+};
+
+enum msm_flash_cfg_type_t {
+ CFG_FLASH_INIT,
+ CFG_FLASH_RELEASE,
+ CFG_FLASH_OFF,
+ CFG_FLASH_LOW,
+ CFG_FLASH_HIGH,
+};
+
+enum msm_ir_led_cfg_type_t {
+ CFG_IR_LED_INIT = 0,
+ CFG_IR_LED_RELEASE,
+ CFG_IR_LED_OFF,
+ CFG_IR_LED_ON,
+};
+#define CFG_IR_LED_INIT CFG_IR_LED_INIT
+#define CFG_IR_LED_RELEASE CFG_IR_LED_RELEASE
+#define CFG_IR_LED_OFF CFG_IR_LED_OFF
+#define CFG_IR_LED_ON CFG_IR_LED_ON
+
+enum msm_ir_cut_cfg_type_t {
+ CFG_IR_CUT_INIT = 0,
+ CFG_IR_CUT_RELEASE,
+ CFG_IR_CUT_OFF,
+ CFG_IR_CUT_ON,
+};
+#define CFG_IR_CUT_INIT CFG_IR_CUT_INIT
+#define CFG_IR_CUT_RELEASE CFG_IR_CUT_RELEASE
+#define CFG_IR_CUT_OFF CFG_IR_CUT_OFF
+#define CFG_IR_CUT_ON CFG_IR_CUT_ON
+
+enum msm_sensor_output_format_t {
+ MSM_SENSOR_BAYER,
+ MSM_SENSOR_YCBCR,
+ MSM_SENSOR_META,
+};
+
+struct msm_sensor_power_setting {
+ enum msm_sensor_power_seq_type_t seq_type;
+ unsigned short seq_val;
+ long config_val;
+ unsigned short delay;
+ void *data[10];
+};
+
+struct msm_sensor_power_setting_array {
+ struct msm_sensor_power_setting power_setting_a[MAX_POWER_CONFIG];
+ struct msm_sensor_power_setting *power_setting;
+ unsigned short size;
+ struct msm_sensor_power_setting power_down_setting_a[MAX_POWER_CONFIG];
+ struct msm_sensor_power_setting *power_down_setting;
+ unsigned short size_down;
+};
+
+enum msm_camera_i2c_operation {
+ MSM_CAM_WRITE = 0,
+ MSM_CAM_POLL,
+ MSM_CAM_READ,
+};
+
+struct msm_sensor_i2c_sync_params {
+ unsigned int cid;
+ int csid;
+ unsigned short line;
+ unsigned short delay;
+};
+
+struct msm_camera_reg_settings_t {
+ uint16_t reg_addr;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ uint16_t reg_data;
+ enum msm_camera_i2c_data_type data_type;
+ enum msm_camera_i2c_operation i2c_operation;
+ uint16_t delay;
+};
+
+struct msm_eeprom_mem_map_t {
+ int slave_addr;
+ struct msm_camera_reg_settings_t
+ mem_settings[MSM_EEPROM_MEMORY_MAP_MAX_SIZE];
+ int memory_map_size;
+};
+
+struct msm_eeprom_memory_map_array {
+ struct msm_eeprom_mem_map_t memory_map[MSM_EEPROM_MAX_MEM_MAP_CNT];
+ uint32_t msm_size_of_max_mappings;
+};
+
+struct msm_sensor_init_params {
+ /* mask of modes supported: 2D, 3D */
+ int modes_supported;
+ /* sensor position: front, back */
+ enum camb_position_t position;
+ /* sensor mount angle */
+ unsigned int sensor_mount_angle;
+};
+
+struct msm_sensor_id_info_t {
+ unsigned short sensor_id_reg_addr;
+ unsigned short sensor_id;
+ unsigned short sensor_id_mask;
+};
+
+struct msm_camera_sensor_slave_info {
+ char sensor_name[32];
+ char eeprom_name[32];
+ char actuator_name[32];
+ char ois_name[32];
+ char flash_name[32];
+ enum msm_sensor_camera_id_t camera_id;
+ unsigned short slave_addr;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ struct msm_sensor_id_info_t sensor_id_info;
+ struct msm_sensor_power_setting_array power_setting_array;
+ unsigned char is_init_params_valid;
+ struct msm_sensor_init_params sensor_init_params;
+ enum msm_sensor_output_format_t output_format;
+};
+
+struct msm_camera_i2c_reg_array {
+ unsigned short reg_addr;
+ unsigned short reg_data;
+ unsigned int delay;
+};
+
+struct msm_camera_i2c_reg_setting {
+ struct msm_camera_i2c_reg_array *reg_setting;
+ unsigned short size;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ enum msm_camera_i2c_data_type data_type;
+ unsigned short delay;
+};
+
+struct msm_camera_csid_vc_cfg {
+ unsigned char cid;
+ unsigned char dt;
+ unsigned char decode_format;
+};
+
+struct msm_camera_csid_lut_params {
+ unsigned char num_cid;
+ struct msm_camera_csid_vc_cfg vc_cfg_a[MAX_CID];
+ struct msm_camera_csid_vc_cfg *vc_cfg[MAX_CID];
+};
+
+struct msm_camera_csid_params {
+ unsigned char lane_cnt;
+ unsigned short lane_assign;
+ unsigned char phy_sel;
+ unsigned int csi_clk;
+ struct msm_camera_csid_lut_params lut_params;
+ unsigned char csi_3p_sel;
+};
+
+struct msm_camera_csid_testmode_parms {
+ unsigned int num_bytes_per_line;
+ unsigned int num_lines;
+ unsigned int h_blanking_count;
+ unsigned int v_blanking_count;
+ unsigned int payload_mode;
+};
+
+struct msm_camera_csiphy_params {
+ unsigned char lane_cnt;
+ unsigned char settle_cnt;
+ unsigned short lane_mask;
+ unsigned char combo_mode;
+ unsigned char csid_core;
+ unsigned int csiphy_clk;
+ unsigned char csi_3phase;
+};
+
+struct msm_camera_i2c_seq_reg_array {
+ unsigned short reg_addr;
+ unsigned char reg_data[I2C_SEQ_REG_DATA_MAX];
+ unsigned short reg_data_size;
+};
+
+struct msm_camera_i2c_seq_reg_setting {
+ struct msm_camera_i2c_seq_reg_array *reg_setting;
+ unsigned short size;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ unsigned short delay;
+};
+
+struct msm_actuator_reg_params_t {
+ enum msm_actuator_write_type reg_write_type;
+ unsigned int hw_mask;
+ unsigned short reg_addr;
+ unsigned short hw_shift;
+ unsigned short data_shift;
+ unsigned short data_type;
+ unsigned short addr_type;
+ unsigned short reg_data;
+ unsigned short delay;
+};
+
+
+struct damping_params_t {
+ unsigned int damping_step;
+ unsigned int damping_delay;
+ unsigned int hw_params;
+};
+
+struct region_params_t {
+ /* [0] = ForwardDirection Macro boundary
+ * [1] = ReverseDirection Inf boundary
+ */
+ unsigned short step_bound[2];
+ unsigned short code_per_step;
+ /* qvalue for converting float type numbers to integer format */
+ unsigned int qvalue;
+};
+
+struct reg_settings_t {
+ unsigned short reg_addr;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ unsigned short reg_data;
+ enum msm_camera_i2c_data_type data_type;
+ enum msm_actuator_i2c_operation i2c_operation;
+ unsigned int delay;
+};
+
+struct msm_camera_i2c_reg_setting_array {
+ struct msm_camera_i2c_reg_array reg_setting_a[MAX_I2C_REG_SET];
+ unsigned short size;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ enum msm_camera_i2c_data_type data_type;
+ unsigned short delay;
+};
+
+#endif /* __UAPI_LINUX_MSM_AIS_SENSOR_SDK_H */
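A minimal sketch of how the power-sequence types above fit together (not part of the patch): the regulator, GPIO and clock steps use illustrative values rather than a real sensor's table, and the include path is an assumption.

	#include <media/ais/msm_ais_sensor_sdk.h>	/* assumed install path */

	/* Three illustrative power-up steps: IO rail, reset GPIO, then MCLK. */
	static struct msm_sensor_power_setting example_power_up[] = {
		{ .seq_type = SENSOR_VREG, .seq_val = CAM_VIO,
		  .config_val = 0, .delay = 1 },
		{ .seq_type = SENSOR_GPIO, .seq_val = SENSOR_GPIO_RESET,
		  .config_val = GPIO_OUT_HIGH, .delay = 5 },
		{ .seq_type = SENSOR_CLK, .seq_val = SENSOR_CAM_MCLK,
		  .config_val = 24000000, .delay = 10 },	/* clock rate in Hz (assumed) */
	};

	static struct msm_sensor_power_setting_array example_power_array = {
		.power_setting = example_power_up,
		.size = sizeof(example_power_up) / sizeof(example_power_up[0]),
	};

A table like this would normally travel inside struct msm_camera_sensor_slave_info and be handed to the driver through CFG_SET_SLAVE_INFO from the companion header.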
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index dac3724e4c1e..6c8e154c7384 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -47,7 +47,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (!c->irq_set_affinity) {
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
} else {
- int r = irq_do_set_affinity(d, affinity, false);
+ int r = irq_set_affinity_locked(d, affinity, false);
if (r)
pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
d->irq, r);
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 5819ca07a22b..b9b881ebaff3 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -9,7 +9,6 @@ ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
endif
obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o
-obj-$(CONFIG_TIMER_STATS) += timer_stats.o
obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o
obj-$(CONFIG_TEST_UDELAY) += test_udelay.o
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index bf7fc4989e5c..01a49614e942 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -782,34 +782,6 @@ void hrtimers_resume(void)
clock_was_set_delayed();
}
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
- if (timer->start_site)
- return;
- timer->start_site = __builtin_return_address(0);
- memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
- timer->start_pid = current->pid;
-#endif
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
- timer->start_site = NULL;
-#endif
-}
-
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
- if (likely(!timer_stats_active))
- return;
- timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
- timer->function, timer->start_comm, 0);
-#endif
-}
-
/*
* Counterpart to lock_hrtimer_base above:
*/
@@ -953,7 +925,6 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
* rare case and less expensive than a smp call.
*/
debug_deactivate(timer);
- timer_stats_hrtimer_clear_start_info(timer);
reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
if (!restart)
@@ -1012,8 +983,6 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
/* Switch the timer base, if necessary: */
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
- timer_stats_hrtimer_set_start_info(timer);
-
/* Update pinned state */
timer->state &= ~HRTIMER_STATE_PINNED;
timer->state |= (!!(mode & HRTIMER_MODE_PINNED)) << HRTIMER_PINNED_SHIFT;
@@ -1154,12 +1123,6 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
timerqueue_init(&timer->node);
-
-#ifdef CONFIG_TIMER_STATS
- timer->start_site = NULL;
- timer->start_pid = -1;
- memset(timer->start_comm, 0, TASK_COMM_LEN);
-#endif
}
/**
@@ -1243,7 +1206,6 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
raw_write_seqcount_barrier(&cpu_base->seq);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
- timer_stats_account_hrtimer(timer);
fn = timer->function;
/*
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 75ce9cbc313d..90a82deece45 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -500,38 +500,6 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
}
}
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
-{
- if (timer->start_site)
- return;
-
- timer->start_site = addr;
- memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
- timer->start_pid = current->pid;
-}
-
-static void timer_stats_account_timer(struct timer_list *timer)
-{
- void *site;
-
- /*
- * start_site can be concurrently reset by
- * timer_stats_timer_clear_start_info()
- */
- site = READ_ONCE(timer->start_site);
- if (likely(!site))
- return;
-
- timer_stats_update_stats(timer, timer->start_pid, site,
- timer->function, timer->start_comm,
- timer->flags);
-}
-
-#else
-static void timer_stats_account_timer(struct timer_list *timer) {}
-#endif
-
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
static struct debug_obj_descr timer_debug_descr;
@@ -734,11 +702,6 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
timer->entry.pprev = NULL;
timer->flags = flags | raw_smp_processor_id();
timer->slack = -1;
-#ifdef CONFIG_TIMER_STATS
- timer->start_site = NULL;
- timer->start_pid = -1;
- memset(timer->start_comm, 0, TASK_COMM_LEN);
-#endif
lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
@@ -845,7 +808,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
unsigned long flags;
int ret = 0;
- timer_stats_timer_set_start_info(timer);
BUG_ON(!timer->function);
base = lock_timer_base(timer, &flags);
@@ -1044,7 +1006,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
struct tvec_base *base;
unsigned long flags;
- timer_stats_timer_set_start_info(timer);
BUG_ON(timer_pending(timer) || !timer->function);
/*
@@ -1089,7 +1050,6 @@ int del_timer(struct timer_list *timer)
debug_assert_init(timer);
- timer_stats_timer_clear_start_info(timer);
if (timer_pending(timer)) {
base = lock_timer_base(timer, &flags);
ret = detach_if_pending(timer, base, true);
@@ -1117,10 +1077,9 @@ int try_to_del_timer_sync(struct timer_list *timer)
base = lock_timer_base(timer, &flags);
- if (base->running_timer != timer) {
- timer_stats_timer_clear_start_info(timer);
+ if (base->running_timer != timer)
ret = detach_if_pending(timer, base, true);
- }
+
spin_unlock_irqrestore(&base->lock, flags);
return ret;
@@ -1304,8 +1263,6 @@ static inline void __run_timers(struct tvec_base *base)
data = timer->data;
irqsafe = timer->flags & TIMER_IRQSAFE;
- timer_stats_account_timer(timer);
-
base->running_timer = timer;
detach_expired_timer(timer, base);
@@ -1782,7 +1739,6 @@ static void __init init_timer_cpus(void)
void __init init_timers(void)
{
init_timer_cpus();
- init_timer_stats();
timer_register_cpu_notifier();
open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index ba7d8b288bb3..83aa1f867b97 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -62,21 +62,11 @@ static void
print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
int idx, u64 now)
{
-#ifdef CONFIG_TIMER_STATS
- char tmp[TASK_COMM_LEN + 1];
-#endif
SEQ_printf(m, " #%d: ", idx);
print_name_offset(m, taddr);
SEQ_printf(m, ", ");
print_name_offset(m, timer->function);
SEQ_printf(m, ", S:%02x", timer->state);
-#ifdef CONFIG_TIMER_STATS
- SEQ_printf(m, ", ");
- print_name_offset(m, timer->start_site);
- memcpy(tmp, timer->start_comm, TASK_COMM_LEN);
- tmp[TASK_COMM_LEN] = 0;
- SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
-#endif
SEQ_printf(m, "\n");
SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
(unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)),
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
deleted file mode 100644
index 1adecb4b87c8..000000000000
--- a/kernel/time/timer_stats.c
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- * kernel/time/timer_stats.c
- *
- * Collect timer usage statistics.
- *
- * Copyright(C) 2006, Red Hat, Inc., Ingo Molnar
- * Copyright(C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- *
- * timer_stats is based on timer_top, a similar functionality which was part of
- * Con Kolivas dyntick patch set. It was developed by Daniel Petrini at the
- * Instituto Nokia de Tecnologia - INdT - Manaus. timer_top's design was based
- * on dynamic allocation of the statistics entries and linear search based
- * lookup combined with a global lock, rather than the static array, hash
- * and per-CPU locking which is used by timer_stats. It was written for the
- * pre hrtimer kernel code and therefore did not take hrtimers into account.
- * Nevertheless it provided the base for the timer_stats implementation and
- * was a helpful source of inspiration. Kudos to Daniel and the Nokia folks
- * for this effort.
- *
- * timer_top.c is
- * Copyright (C) 2005 Instituto Nokia de Tecnologia - INdT - Manaus
- * Written by Daniel Petrini <d.pensator@gmail.com>
- * timer_top.c was released under the GNU General Public License version 2
- *
- * We export the addresses and counting of timer functions being called,
- * the pid and cmdline from the owner process if applicable.
- *
- * Start/stop data collection:
- * # echo [1|0] >/proc/timer_stats
- *
- * Display the information collected so far:
- * # cat /proc/timer_stats
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/kallsyms.h>
-
-#include <asm/uaccess.h>
-
-/*
- * This is our basic unit of interest: a timer expiry event identified
- * by the timer, its start/expire functions and the PID of the task that
- * started the timer. We count the number of times an event happens:
- */
-struct entry {
- /*
- * Hash list:
- */
- struct entry *next;
-
- /*
- * Hash keys:
- */
- void *timer;
- void *start_func;
- void *expire_func;
- pid_t pid;
-
- /*
- * Number of timeout events:
- */
- unsigned long count;
- u32 flags;
-
- /*
- * We save the command-line string to preserve
- * this information past task exit:
- */
- char comm[TASK_COMM_LEN + 1];
-
-} ____cacheline_aligned_in_smp;
-
-/*
- * Spinlock protecting the tables - not taken during lookup:
- */
-static DEFINE_RAW_SPINLOCK(table_lock);
-
-/*
- * Per-CPU lookup locks for fast hash lookup:
- */
-static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock);
-
-/*
- * Mutex to serialize state changes with show-stats activities:
- */
-static DEFINE_MUTEX(show_mutex);
-
-/*
- * Collection status, active/inactive:
- */
-int __read_mostly timer_stats_active;
-
-/*
- * Beginning/end timestamps of measurement:
- */
-static ktime_t time_start, time_stop;
-
-/*
- * tstat entry structs only get allocated while collection is
- * active and never freed during that time - this simplifies
- * things quite a bit.
- *
- * They get freed when a new collection period is started.
- */
-#define MAX_ENTRIES_BITS 10
-#define MAX_ENTRIES (1UL << MAX_ENTRIES_BITS)
-
-static unsigned long nr_entries;
-static struct entry entries[MAX_ENTRIES];
-
-static atomic_t overflow_count;
-
-/*
- * The entries are in a hash-table, for fast lookup:
- */
-#define TSTAT_HASH_BITS (MAX_ENTRIES_BITS - 1)
-#define TSTAT_HASH_SIZE (1UL << TSTAT_HASH_BITS)
-#define TSTAT_HASH_MASK (TSTAT_HASH_SIZE - 1)
-
-#define __tstat_hashfn(entry) \
- (((unsigned long)(entry)->timer ^ \
- (unsigned long)(entry)->start_func ^ \
- (unsigned long)(entry)->expire_func ^ \
- (unsigned long)(entry)->pid ) & TSTAT_HASH_MASK)
-
-#define tstat_hashentry(entry) (tstat_hash_table + __tstat_hashfn(entry))
-
-static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
-
-static void reset_entries(void)
-{
- nr_entries = 0;
- memset(entries, 0, sizeof(entries));
- memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
- atomic_set(&overflow_count, 0);
-}
-
-static struct entry *alloc_entry(void)
-{
- if (nr_entries >= MAX_ENTRIES)
- return NULL;
-
- return entries + nr_entries++;
-}
-
-static int match_entries(struct entry *entry1, struct entry *entry2)
-{
- return entry1->timer == entry2->timer &&
- entry1->start_func == entry2->start_func &&
- entry1->expire_func == entry2->expire_func &&
- entry1->pid == entry2->pid;
-}
-
-/*
- * Look up whether an entry matching this item is present
- * in the hash already. Must be called with irqs off and the
- * lookup lock held:
- */
-static struct entry *tstat_lookup(struct entry *entry, char *comm)
-{
- struct entry **head, *curr, *prev;
-
- head = tstat_hashentry(entry);
- curr = *head;
-
- /*
- * The fastpath is when the entry is already hashed,
- * we do this with the lookup lock held, but with the
- * table lock not held:
- */
- while (curr) {
- if (match_entries(curr, entry))
- return curr;
-
- curr = curr->next;
- }
- /*
- * Slowpath: allocate, set up and link a new hash entry:
- */
- prev = NULL;
- curr = *head;
-
- raw_spin_lock(&table_lock);
- /*
- * Make sure we have not raced with another CPU:
- */
- while (curr) {
- if (match_entries(curr, entry))
- goto out_unlock;
-
- prev = curr;
- curr = curr->next;
- }
-
- curr = alloc_entry();
- if (curr) {
- *curr = *entry;
- curr->count = 0;
- curr->next = NULL;
- memcpy(curr->comm, comm, TASK_COMM_LEN);
-
- smp_mb(); /* Ensure that curr is initialized before insert */
-
- if (prev)
- prev->next = curr;
- else
- *head = curr;
- }
- out_unlock:
- raw_spin_unlock(&table_lock);
-
- return curr;
-}
-
-/**
- * timer_stats_update_stats - Update the statistics for a timer.
- * @timer: pointer to either a timer_list or a hrtimer
- * @pid: the pid of the task which set up the timer
- * @startf: pointer to the function which did the timer setup
- * @timerf: pointer to the timer callback function of the timer
- * @comm: name of the process which set up the timer
- * @tflags: The flags field of the timer
- *
- * When the timer is already registered, then the event counter is
- * incremented. Otherwise the timer is registered in a free slot.
- */
-void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm, u32 tflags)
-{
- /*
- * It doesn't matter which lock we take:
- */
- raw_spinlock_t *lock;
- struct entry *entry, input;
- unsigned long flags;
-
- if (likely(!timer_stats_active))
- return;
-
- lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());
-
- input.timer = timer;
- input.start_func = startf;
- input.expire_func = timerf;
- input.pid = pid;
- input.flags = tflags;
-
- raw_spin_lock_irqsave(lock, flags);
- if (!timer_stats_active)
- goto out_unlock;
-
- entry = tstat_lookup(&input, comm);
- if (likely(entry))
- entry->count++;
- else
- atomic_inc(&overflow_count);
-
- out_unlock:
- raw_spin_unlock_irqrestore(lock, flags);
-}
-
-static void print_name_offset(struct seq_file *m, unsigned long addr)
-{
- char symname[KSYM_NAME_LEN];
-
- if (lookup_symbol_name(addr, symname) < 0)
- seq_printf(m, "<%p>", (void *)addr);
- else
- seq_printf(m, "%s", symname);
-}
-
-static int tstats_show(struct seq_file *m, void *v)
-{
- struct timespec period;
- struct entry *entry;
- unsigned long ms;
- long events = 0;
- ktime_t time;
- int i;
-
- mutex_lock(&show_mutex);
- /*
- * If still active then calculate up to now:
- */
- if (timer_stats_active)
- time_stop = ktime_get();
-
- time = ktime_sub(time_stop, time_start);
-
- period = ktime_to_timespec(time);
- ms = period.tv_nsec / 1000000;
-
- seq_puts(m, "Timer Stats Version: v0.3\n");
- seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
- if (atomic_read(&overflow_count))
- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
- seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
-
- for (i = 0; i < nr_entries; i++) {
- entry = entries + i;
- if (entry->flags & TIMER_DEFERRABLE) {
- seq_printf(m, "%4luD, %5d %-16s ",
- entry->count, entry->pid, entry->comm);
- } else {
- seq_printf(m, " %4lu, %5d %-16s ",
- entry->count, entry->pid, entry->comm);
- }
-
- print_name_offset(m, (unsigned long)entry->start_func);
- seq_puts(m, " (");
- print_name_offset(m, (unsigned long)entry->expire_func);
- seq_puts(m, ")\n");
-
- events += entry->count;
- }
-
- ms += period.tv_sec * 1000;
- if (!ms)
- ms = 1;
-
- if (events && period.tv_sec)
- seq_printf(m, "%ld total events, %ld.%03ld events/sec\n",
- events, events * 1000 / ms,
- (events * 1000000 / ms) % 1000);
- else
- seq_printf(m, "%ld total events\n", events);
-
- mutex_unlock(&show_mutex);
-
- return 0;
-}
-
-/*
- * After a state change, make sure all concurrent lookup/update
- * activities have stopped:
- */
-static void sync_access(void)
-{
- unsigned long flags;
- int cpu;
-
- for_each_online_cpu(cpu) {
- raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
-
- raw_spin_lock_irqsave(lock, flags);
- /* nothing */
- raw_spin_unlock_irqrestore(lock, flags);
- }
-}
-
-static ssize_t tstats_write(struct file *file, const char __user *buf,
- size_t count, loff_t *offs)
-{
- char ctl[2];
-
- if (count != 2 || *offs)
- return -EINVAL;
-
- if (copy_from_user(ctl, buf, count))
- return -EFAULT;
-
- mutex_lock(&show_mutex);
- switch (ctl[0]) {
- case '0':
- if (timer_stats_active) {
- timer_stats_active = 0;
- time_stop = ktime_get();
- sync_access();
- }
- break;
- case '1':
- if (!timer_stats_active) {
- reset_entries();
- time_start = ktime_get();
- smp_mb();
- timer_stats_active = 1;
- }
- break;
- default:
- count = -EINVAL;
- }
- mutex_unlock(&show_mutex);
-
- return count;
-}
-
-static int tstats_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, tstats_show, NULL);
-}
-
-static const struct file_operations tstats_fops = {
- .open = tstats_open,
- .read = seq_read,
- .write = tstats_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void __init init_timer_stats(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
-}
-
-static int __init init_tstats_procfs(void)
-{
- struct proc_dir_entry *pe;
-
- pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
- if (!pe)
- return -ENOMEM;
- return 0;
-}
-__initcall(init_tstats_procfs);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c0ab232e3abd..73c018d7df00 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1508,8 +1508,6 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
return;
}
- timer_stats_timer_set_start_info(&dwork->timer);
-
dwork->wq = wq;
dwork->cpu = cpu;
timer->expires = jiffies + delay;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3cd6011f209d..20b74735508d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -951,20 +951,6 @@ config DEBUG_TIMEKEEPING
If unsure, say N.
-config TIMER_STATS
- bool "Collect kernel timers statistics"
- depends on DEBUG_KERNEL && PROC_FS
- help
- If you say Y here, additional code will be inserted into the
- timer routines to collect statistics about kernel timers being
- reprogrammed. The statistics can be read from /proc/timer_stats.
- The statistics collection is started by writing 1 to /proc/timer_stats,
- writing 0 stops it. This feature is useful to collect information
- about timer usage patterns in kernel and userspace. This feature
- is lightweight if enabled in the kernel config but not activated
- (it defaults to deactivated on bootup and will only be activated
- if some application like powertop activates it explicitly).
-
config DEBUG_TASK_STACK_SCAN_OFF
bool "Disable kmemleak task stack scan by default"
depends on DEBUG_KMEMLEAK
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 10cd1860e5b0..7e26aea3e404 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4315,6 +4315,51 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ /* Mainly testing JIT + imm64 here. */
+ "JMP_JGE_X: ldimm64 test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 2),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xeeeeeeeeU } },
+ },
+ {
+ "JMP_JGE_X: ldimm64 test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 0),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffffU } },
+ },
+ {
+ "JMP_JGE_X: ldimm64 test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 4),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JNE | BPF_X */
{
"JMP_JNE_X: if (3 != 2) return 1",
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b94e165a4f79..fe38ef58997c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1018,7 +1018,7 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
return err;
}
- if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
+ if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
return -EMSGSIZE;
return 0;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index fc65a0167fbe..c9ef99fbd0dd 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -89,6 +89,7 @@
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/netfilter/xt_qtaguid.h>
#include <asm/uaccess.h>
@@ -415,6 +416,9 @@ int inet_release(struct socket *sock)
if (sk) {
long timeout;
+#ifdef CONFIG_NETFILTER_XT_MATCH_QTAGUID
+ qtaguid_untag(sock, true);
+#endif
/* Applications forget to leave groups before exiting */
ip_mc_drop_socket(sk);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6287418c1dfe..ca1031411aa7 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -354,6 +354,9 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
rt->dst.dev->mtu);
return -EMSGSIZE;
}
+ if (length < sizeof(struct iphdr))
+ return -EINVAL;
+
if (flags&MSG_PROBE)
goto out;
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 1e70fa8fa793..3861dedd5365 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -264,13 +264,15 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
+ u32 delta;
if (rtt_us > 0)
tcp_lp_rtt_sample(sk, rtt_us);
/* calc inference */
- if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
- lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr);
+ delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+ if ((s32)delta > 0)
+ lp->inference = 3 * delta;
/* test if within inference */
if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 019db68bdb9f..4c1c94fa8f08 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -547,6 +547,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
tcp_ecn_openreq_child(newtp, req);
+ newtp->fastopen_req = NULL;
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
newtp->rack.mstamp.v64 = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8a62ad0c850b..2ca323b68efd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1221,7 +1221,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
* eventually). The difference is that pulled data not copied, but
* immediately discarded.
*/
-static void __pskb_trim_head(struct sk_buff *skb, int len)
+static int __pskb_trim_head(struct sk_buff *skb, int len)
{
struct skb_shared_info *shinfo;
int i, k, eat;
@@ -1231,7 +1231,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
__skb_pull(skb, eat);
len -= eat;
if (!len)
- return;
+ return 0;
}
eat = len;
k = 0;
@@ -1257,23 +1257,28 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
skb_reset_tail_pointer(skb);
skb->data_len -= len;
skb->len = skb->data_len;
+ return len;
}
/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
+ u32 delta_truesize;
+
if (skb_unclone(skb, GFP_ATOMIC))
return -ENOMEM;
- __pskb_trim_head(skb, len);
+ delta_truesize = __pskb_trim_head(skb, len);
TCP_SKB_CB(skb)->seq += len;
skb->ip_summed = CHECKSUM_PARTIAL;
- skb->truesize -= len;
- sk->sk_wmem_queued -= len;
- sk_mem_uncharge(sk, len);
- sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+ if (delta_truesize) {
+ skb->truesize -= delta_truesize;
+ sk->sk_wmem_queued -= delta_truesize;
+ sk_mem_uncharge(sk, delta_truesize);
+ sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+ }
/* Any change of skb->len requires recalculation of tso factor. */
if (tcp_skb_pcount(skb) > 1)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3d72aeffa3f1..3d30ba111ee5 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3375,6 +3375,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
*/
static struct notifier_block ipv6_dev_notf = {
.notifier_call = addrconf_notify,
+ .priority = ADDRCONF_NOTIFY_PRIORITY,
};
static void addrconf_type_change(struct net_device *dev, unsigned long event)
@@ -6046,6 +6047,8 @@ int __init addrconf_init(void)
goto errlo;
}
+ ip6_route_init_special_entries();
+
for (i = 0; i < IN6_ADDR_HSIZE; i++)
INIT_HLIST_HEAD(&inet6_addr_lst[i]);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6896830feabb..d13ed145e93a 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -630,6 +630,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
return -EMSGSIZE;
}
+ if (length < sizeof(struct ipv6hdr))
+ return -EINVAL;
if (flags&MSG_PROBE)
goto out;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ff1499293938..e98613d2f34f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3362,7 +3362,10 @@ static int ip6_route_dev_notify(struct notifier_block *this,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
- if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
+ if (!(dev->flags & IFF_LOOPBACK))
+ return NOTIFY_OK;
+
+ if (event == NETDEV_REGISTER) {
net->ipv6.ip6_null_entry->dst.dev = dev;
net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
@@ -3371,6 +3374,12 @@ static int ip6_route_dev_notify(struct notifier_block *this,
net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
+ } else if (event == NETDEV_UNREGISTER) {
+ in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
+ in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+#endif
}
return NOTIFY_OK;
@@ -3677,9 +3686,24 @@ static struct pernet_operations ip6_route_net_late_ops = {
static struct notifier_block ip6_route_dev_notifier = {
.notifier_call = ip6_route_dev_notify,
- .priority = 0,
+ .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
+void __init ip6_route_init_special_entries(void)
+{
+ /* Registering of the loopback is done before this portion of code,
+ * the loopback reference in rt6_info will not be taken, do it
+ * manually for init_net */
+ init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ #endif
+}
+
int __init ip6_route_init(void)
{
int ret;
@@ -3706,17 +3730,6 @@ int __init ip6_route_init(void)
ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
- /* Registering of the loopback is done before this portion of code,
- * the loopback reference in rt6_info will not be taken, do it
- * manually for init_net */
- init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
- init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
- #ifdef CONFIG_IPV6_MULTIPLE_TABLES
- init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
- init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
- init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
- init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
- #endif
ret = fib6_init();
if (ret)
goto out_register_subsys;
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index e0d8e9ad315b..fef69cb21f6f 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -321,7 +321,7 @@ static void sock_tag_tree_erase(struct rb_root *st_to_free_tree)
st_entry->tag,
get_uid_from_tag(st_entry->tag));
rb_erase(&st_entry->sock_node, st_to_free_tree);
- sockfd_put(st_entry->socket);
+ sock_put(st_entry->sk);
kfree(st_entry);
}
}
@@ -1917,12 +1917,12 @@ static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v)
{
struct sock_tag *sock_tag_entry = v;
uid_t uid;
- long f_count;
CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u\n",
current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
if (sock_tag_entry != SEQ_START_TOKEN) {
+ int sk_ref_count;
uid = get_uid_from_tag(sock_tag_entry->tag);
CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
"pid=%u\n",
@@ -1931,13 +1931,13 @@ static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v)
uid,
sock_tag_entry->pid
);
- f_count = atomic_long_read(
- &sock_tag_entry->socket->file->f_count);
+ sk_ref_count = atomic_read(
+ &sock_tag_entry->sk->sk_refcnt);
seq_printf(m, "sock=%pK tag=0x%llx (uid=%u) pid=%u "
- "f_count=%lu\n",
+ "f_count=%d\n",
sock_tag_entry->sk,
sock_tag_entry->tag, uid,
- sock_tag_entry->pid, f_count);
+ sock_tag_entry->pid, sk_ref_count);
} else {
seq_printf(m, "events: sockets_tagged=%llu "
"sockets_untagged=%llu "
@@ -2233,8 +2233,8 @@ static int ctrl_cmd_tag(const char *input)
from_kuid(&init_user_ns, current_fsuid()));
goto err;
}
- CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
- input, atomic_long_read(&el_socket->file->f_count),
+ CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->sk_refcnt=%d ->sk=%p\n",
+ input, atomic_read(&el_socket->sk->sk_refcnt),
el_socket->sk);
if (argc < 3) {
acct_tag = make_atag_from_value(0);
@@ -2278,16 +2278,9 @@ static int ctrl_cmd_tag(const char *input)
struct tag_ref *prev_tag_ref_entry;
CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
- "st@%p ...->f_count=%ld\n",
+ "st@%p ...->sk_refcnt=%d\n",
input, el_socket->sk, sock_tag_entry,
- atomic_long_read(&el_socket->file->f_count));
- /*
- * This is a re-tagging, so release the sock_fd that was
- * locked at the time of the 1st tagging.
- * There is still the ref from this call's sockfd_lookup() so
- * it can be done within the spinlock.
- */
- sockfd_put(sock_tag_entry->socket);
+ atomic_read(&el_socket->sk->sk_refcnt));
prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
&uid_tag_data_entry);
BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
@@ -2307,8 +2300,12 @@ static int ctrl_cmd_tag(const char *input)
res = -ENOMEM;
goto err_tag_unref_put;
}
+ /*
+ * Hold the sk refcount here to make sure the sk pointer cannot
+ * be freed and reused
+ */
+ sock_hold(el_socket->sk);
sock_tag_entry->sk = el_socket->sk;
- sock_tag_entry->socket = el_socket;
sock_tag_entry->pid = current->tgid;
sock_tag_entry->tag = combine_atag_with_uid(acct_tag, uid_int);
spin_lock_bh(&uid_tag_data_tree_lock);
@@ -2335,10 +2332,11 @@ static int ctrl_cmd_tag(const char *input)
atomic64_inc(&qtu_events.sockets_tagged);
}
spin_unlock_bh(&sock_tag_list_lock);
- /* We keep the ref to the socket (file) until it is untagged */
- CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
+ /* We keep the ref to the sk until it is untagged */
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->sk_refcnt=%d\n",
input, sock_tag_entry,
- atomic_long_read(&el_socket->file->f_count));
+ atomic_read(&el_socket->sk->sk_refcnt));
+ sockfd_put(el_socket);
return 0;
err_tag_unref_put:
@@ -2346,8 +2344,8 @@ err_tag_unref_put:
tag_ref_entry->num_sock_tags--;
free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
err_put:
- CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
- input, atomic_long_read(&el_socket->file->f_count) - 1);
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->sk_refcnt=%d\n",
+ input, atomic_read(&el_socket->sk->sk_refcnt) - 1);
/* Release the sock_fd that was grabbed by sockfd_lookup(). */
sockfd_put(el_socket);
return res;
@@ -2363,17 +2361,13 @@ static int ctrl_cmd_untag(const char *input)
int sock_fd = 0;
struct socket *el_socket;
int res, argc;
- struct sock_tag *sock_tag_entry;
- struct tag_ref *tag_ref_entry;
- struct uid_tag_data *utd_entry;
- struct proc_qtu_data *pqd_entry;
argc = sscanf(input, "%c %d", &cmd, &sock_fd);
CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
input, argc, cmd, sock_fd);
if (argc < 2) {
res = -EINVAL;
- goto err;
+ return res;
}
el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
if (!el_socket) {
@@ -2381,17 +2375,31 @@ static int ctrl_cmd_untag(const char *input)
" sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
input, sock_fd, res, current->pid, current->tgid,
from_kuid(&init_user_ns, current_fsuid()));
- goto err;
+ return res;
}
CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
input, atomic_long_read(&el_socket->file->f_count),
el_socket->sk);
+ res = qtaguid_untag(el_socket, false);
+ sockfd_put(el_socket);
+ return res;
+}
+
+int qtaguid_untag(struct socket *el_socket, bool kernel)
+{
+ int res;
+ pid_t pid;
+ struct sock_tag *sock_tag_entry;
+ struct tag_ref *tag_ref_entry;
+ struct uid_tag_data *utd_entry;
+ struct proc_qtu_data *pqd_entry;
+
spin_lock_bh(&sock_tag_list_lock);
sock_tag_entry = get_sock_stat_nl(el_socket->sk);
if (!sock_tag_entry) {
spin_unlock_bh(&sock_tag_list_lock);
res = -EINVAL;
- goto err_put;
+ return res;
}
/*
* The socket already belongs to the current process
@@ -2403,20 +2411,26 @@ static int ctrl_cmd_untag(const char *input)
BUG_ON(!tag_ref_entry);
BUG_ON(tag_ref_entry->num_sock_tags <= 0);
spin_lock_bh(&uid_tag_data_tree_lock);
+ if (kernel)
+ pid = sock_tag_entry->pid;
+ else
+ pid = current->tgid;
pqd_entry = proc_qtu_data_tree_search(
- &proc_qtu_data_tree, current->tgid);
+ &proc_qtu_data_tree, pid);
/*
* TODO: remove if, and start failing.
* At first, we want to catch user-space code that is not
* opening the /dev/xt_qtaguid.
*/
- if (IS_ERR_OR_NULL(pqd_entry))
+ if (IS_ERR_OR_NULL(pqd_entry) || !sock_tag_entry->list.next) {
pr_warn_once("qtaguid: %s(): "
"User space forgot to open /dev/xt_qtaguid? "
- "pid=%u tgid=%u uid=%u\n", __func__,
- current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
- else
+ "pid=%u tgid=%u sk_pid=%u, uid=%u\n", __func__,
+ current->pid, current->tgid, sock_tag_entry->pid,
+ from_kuid(&init_user_ns, current_fsuid()));
+ } else {
list_del(&sock_tag_entry->list);
+ }
spin_unlock_bh(&uid_tag_data_tree_lock);
/*
* We don't free tag_ref from the utd_entry here,
@@ -2425,30 +2439,17 @@ static int ctrl_cmd_untag(const char *input)
tag_ref_entry->num_sock_tags--;
spin_unlock_bh(&sock_tag_list_lock);
/*
- * Release the sock_fd that was grabbed at tag time,
- * and once more for the sockfd_lookup() here.
+ * Release the sock_fd that was grabbed at tag time.
*/
- sockfd_put(sock_tag_entry->socket);
- CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
- input, sock_tag_entry,
- atomic_long_read(&el_socket->file->f_count) - 1);
- sockfd_put(el_socket);
+ sock_put(sock_tag_entry->sk);
+ CT_DEBUG("qtaguid: done. st@%p ...->sk_refcnt=%d\n",
+ sock_tag_entry,
+ atomic_read(&el_socket->sk->sk_refcnt));
kfree(sock_tag_entry);
atomic64_inc(&qtu_events.sockets_untagged);
return 0;
-
-err_put:
- CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
- input, atomic_long_read(&el_socket->file->f_count) - 1);
- /* Release the sock_fd that was grabbed by sockfd_lookup(). */
- sockfd_put(el_socket);
- return res;
-
-err:
- CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
- return res;
}
static ssize_t qtaguid_ctrl_parse(const char *input, size_t count)
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
index 6dc14a9c6889..8178fbdfb036 100644
--- a/net/netfilter/xt_qtaguid_internal.h
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -256,8 +256,6 @@ struct iface_stat_work {
struct sock_tag {
struct rb_node sock_node;
struct sock *sk; /* Only used as a number, never dereferenced */
- /* The socket is needed for sockfd_put() */
- struct socket *socket;
/* Used to associate with a given pid */
struct list_head list; /* in proc_qtu_data.sock_tag_list */
pid_t pid;
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
index f6a00a3520ed..2a7190d285e6 100644
--- a/net/netfilter/xt_qtaguid_print.c
+++ b/net/netfilter/xt_qtaguid_print.c
@@ -24,7 +24,7 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
-
+#include <net/sock.h>
#include "xt_qtaguid_internal.h"
#include "xt_qtaguid_print.h"
@@ -237,10 +237,10 @@ char *pp_sock_tag(struct sock_tag *st)
tag_str = pp_tag_t(&st->tag);
res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
"sock_node=rb_node{...}, "
- "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
+ "sk=%p (f_count=%d), list=list_head{...}, "
"pid=%u, tag=%s}",
- st, st->sk, st->socket, atomic_long_read(
- &st->socket->file->f_count),
+ st, st->sk, atomic_read(
+ &st->sk->sk_refcnt),
st->pid, tag_str);
_bug_on_err_or_null(res);
kfree(tag_str);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index d778d99326df..71e1f0def5a5 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -802,8 +802,7 @@ void rfkill_resume_polling(struct rfkill *rfkill)
}
EXPORT_SYMBOL(rfkill_resume_polling);
-#ifdef CONFIG_RFKILL_PM
-static int rfkill_suspend(struct device *dev)
+static __maybe_unused int rfkill_suspend(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
@@ -812,7 +811,7 @@ static int rfkill_suspend(struct device *dev)
return 0;
}
-static int rfkill_resume(struct device *dev)
+static __maybe_unused int rfkill_resume(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
bool cur;
@@ -828,19 +827,13 @@ static int rfkill_resume(struct device *dev)
}
static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
-#define RFKILL_PM_OPS (&rfkill_pm_ops)
-#else
-#define RFKILL_PM_OPS NULL
-#endif
static struct class rfkill_class = {
.name = "rfkill",
.dev_release = rfkill_release,
.dev_groups = rfkill_dev_groups,
.dev_uevent = rfkill_dev_uevent,
-#ifdef CONFIG_RFKILL_PM
- .pm = RFKILL_PM_OPS,
-#endif
+ .pm = IS_ENABLED(CONFIG_RFKILL_PM) ? &rfkill_pm_ops : NULL,
};
bool rfkill_blocked(struct rfkill *rfkill)
diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c
index fad084d03854..6fc9b86204de 100644
--- a/net/rmnet_data/rmnet_data_config.c
+++ b/net/rmnet_data/rmnet_data_config.c
@@ -1165,6 +1165,7 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
{
int i, j;
struct net_device *vndev;
+ struct rmnet_phys_ep_config *config;
struct rmnet_logical_ep_conf_s *cfg;
struct rmnet_free_vnd_work *vnd_work;
ASSERT_RTNL();
@@ -1220,6 +1221,15 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
kfree(vnd_work);
}
+ config = _rmnet_get_phys_ep_config(dev);
+
+ if (config) {
+ cfg = &config->local_ep;
+
+ if (cfg && cfg->refcount)
+ rmnet_unset_logical_endpoint_config
+ (cfg->egress_dev, RMNET_LOCAL_LOGICAL_ENDPOINT);
+ }
/* Clear the mappings on the phys ep */
trace_rmnet_unregister_cb_clear_lepcs(dev);
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index cef9369eace5..ae60f35b363d 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -569,12 +569,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
LOGD("headroom of %d bytes", required_headroom);
if (skb_headroom(skb) < required_headroom) {
- if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) {
- LOGD("Failed to add headroom of %d bytes",
- required_headroom);
- kfree_skb(skb);
- return 1;
- }
+ LOGE("Not enough headroom for %d bytes", required_headroom);
+ kfree_skb(skb);
+ return 1;
}
if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 3c431e49aef1..c8c4f547b4f1 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -765,7 +765,7 @@ country KR: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (20), AUTO-BW
(5250 - 5330 @ 80), (20), DFS, AUTO-BW
- (5490 - 5710 @ 160), (30), DFS
+ (5490 - 5730 @ 160), (30), DFS
(5735 - 5835 @ 80), (30)
# 60 GHz band channels 1-4,
# ref: http://www.law.go.kr/%ED%96%89%EC%A0%95%EA%B7%9C%EC%B9%99/%EB%AC%B4%EC%84%A0%EC%84%A4%EB%B9%84%EA%B7%9C%EC%B9%99
@@ -773,10 +773,6 @@ country KR: DFS-ETSI
country KP: DFS-ETSI
(2402 - 2482 @ 40), (20)
- (5170 - 5250 @ 80), (20)
- (5250 - 5330 @ 80), (20), DFS
- (5490 - 5630 @ 80), (30), DFS
- (5735 - 5815 @ 80), (30)
country KW: DFS-ETSI
(2402 - 2482 @ 40), (20)
@@ -1023,7 +1019,7 @@ country NA: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
(5250 - 5330 @ 80), (23), DFS, AUTO-BW
- (5490 - 5710 @ 160), (30), DFS
+ (5490 - 5730 @ 160), (30), DFS
(5735 - 5835 @ 80), (33)
country NG: DFS-ETSI
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index f07224d8b88f..9adfdd711b31 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -239,7 +239,7 @@ static struct xfrm_algo_desc aalg_list[] = {
.uinfo = {
.auth = {
- .icv_truncbits = 96,
+ .icv_truncbits = 128,
.icv_fullbits = 256,
}
},
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 8f3e5e9d8bdb..e6de496bffbe 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2166,7 +2166,20 @@ static void azx_remove(struct pci_dev *pci)
/* cancel the pending probing work */
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
+ /* FIXME: below is an ugly workaround.
+ * Both device_release_driver() and driver_probe_device()
+ * take *both* the device's and its parent's lock before
+ * calling the remove() and probe() callbacks. The codec
+ * probe takes the locks of both the codec itself and its
+ * parent, i.e. the PCI controller dev. Meanwhile, when
+ * the PCI controller is unbound, it takes its lock, too
+ * ==> ouch, a deadlock!
+ * As a workaround, we temporarily unlock the controller
+ * device here during the cancel_work_sync() call.
+ */
+ device_unlock(&pci->dev);
cancel_work_sync(&hda->probe_work);
+ device_lock(&pci->dev);
snd_card_free(card);
}
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 699e7251023f..9eaf6cc7b89b 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -3772,7 +3772,6 @@ static int msm_anlg_cdc_device_up(struct snd_soc_codec *codec)
{
struct sdm660_cdc_priv *sdm660_cdc_priv =
snd_soc_codec_get_drvdata(codec);
- int ret = 0;
dev_dbg(codec->dev, "%s: device up!\n", __func__);
@@ -3794,21 +3793,6 @@ static int msm_anlg_cdc_device_up(struct snd_soc_codec *codec)
else if (sdm660_cdc_priv->boost_option == BYPASS_ALWAYS)
msm_anlg_cdc_bypass_on(codec);
- msm_anlg_cdc_configure_cap(codec, false, false);
- wcd_mbhc_stop(&sdm660_cdc_priv->mbhc);
- wcd_mbhc_deinit(&sdm660_cdc_priv->mbhc);
- /* Disable mechanical detection and set type to insertion */
- snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1,
- 0xA0, 0x20);
- ret = wcd_mbhc_init(&sdm660_cdc_priv->mbhc, codec, &mbhc_cb,
- &intr_ids, wcd_mbhc_registers, true);
- if (ret)
- dev_err(codec->dev, "%s: mbhc initialization failed\n",
- __func__);
- else
- wcd_mbhc_start(&sdm660_cdc_priv->mbhc,
- sdm660_cdc_priv->mbhc.mbhc_cfg);
-
return 0;
}
diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c
index 3b681f53b17a..9218786f913f 100644
--- a/sound/soc/codecs/wcd_cpe_core.c
+++ b/sound/soc/codecs/wcd_cpe_core.c
@@ -1731,10 +1731,10 @@ static ssize_t fw_name_store(struct wcd_cpe_core *core,
if (pos)
copy_count = pos - buf;
- if (copy_count > WCD_CPE_IMAGE_FNAME_MAX) {
+ if (copy_count > (WCD_CPE_IMAGE_FNAME_MAX - 1)) {
dev_err(core->dev,
"%s: Invalid length %d, max allowed %d\n",
- __func__, copy_count, WCD_CPE_IMAGE_FNAME_MAX);
+ __func__, copy_count, WCD_CPE_IMAGE_FNAME_MAX - 1);
return -EINVAL;
}
diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c
index e1baca358d5f..be6a6a710dc1 100644
--- a/sound/soc/msm/apq8096-auto.c
+++ b/sound/soc/msm/apq8096-auto.c
@@ -58,6 +58,7 @@ static int msm_quat_mi2s_rx_ch = 2;
static int msm_sec_mi2s_tx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_tert_mi2s_tx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_quat_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_mi2s_rate = SAMPLING_RATE_48KHZ;
/* TDM default channels */
static int msm_sec_tdm_tx_0_ch = 2; /* STEREO MIC */
@@ -386,6 +387,8 @@ static char const *ec_ref_bit_format_text[] = {"0", "S16_LE", "S24_LE"};
static const char *const ec_ref_rate_text[] = {"0", "8000", "16000",
"32000", "44100", "48000", "96000", "192000", "384000"};
+static const char *const mi2s_rate_text[] = {"32000", "44100", "48000"};
+
static struct afe_clk_set sec_mi2s_tx_clk = {
AFE_API_VERSION_I2S_CONFIG,
Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT,
@@ -695,6 +698,37 @@ static int msm_sec_mi2s_tx_bit_format_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_sec_mi2s_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_sec_mi2s_rate;
+ pr_debug("%s: msm_sec_mi2s_rate = %d\n", __func__, msm_sec_mi2s_rate);
+ return 0;
+}
+
+static int msm_sec_mi2s_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm_sec_mi2s_rate = SAMPLING_RATE_32KHZ;
+ break;
+ case 1:
+ msm_sec_mi2s_rate = SAMPLING_RATE_44P1KHZ;
+ break;
+ case 2:
+ msm_sec_mi2s_rate = SAMPLING_RATE_48KHZ;
+ break;
+ default:
+ msm_sec_mi2s_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+ pr_debug("%s: msm_sec_mi2s_rate = %d\n",
+ __func__, msm_sec_mi2s_rate);
+ return 0;
+}
+
+
static int msm_sec_tdm_tx_0_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1983,11 +2017,14 @@ static int msm_mi2s_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
+ rate->min = rate->max = SAMPLING_RATE_48KHZ;
+
switch (cpu_dai->id) {
case 0: /*MSM_PRIM_MI2S*/
break;
case 1: /*MSM_SEC_MI2S*/
pr_debug("%s: channel:%d\n", __func__, msm_sec_mi2s_tx_ch);
+ rate->min = rate->max = msm_sec_mi2s_rate;
channels->min = channels->max = msm_sec_mi2s_tx_ch;
param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
msm_sec_mi2s_tx_bit_format);
@@ -2005,7 +2042,6 @@ static int msm_mi2s_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
__func__, cpu_dai->id);
return -EINVAL;
}
- rate->min = rate->max = SAMPLING_RATE_48KHZ;
pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
__func__, cpu_dai->id, channels->max, rate->max,
@@ -2631,6 +2667,7 @@ static const struct soc_enum msm_snd_enum[] = {
SOC_ENUM_SINGLE_EXT(9, ec_ref_ch_text),
SOC_ENUM_SINGLE_EXT(3, ec_ref_bit_format_text),
SOC_ENUM_SINGLE_EXT(9, ec_ref_rate_text),
+ SOC_ENUM_SINGLE_EXT(3, mi2s_rate_text),
};
static const struct snd_kcontrol_new msm_snd_controls[] = {
@@ -2758,6 +2795,8 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("SEC_MI2S_TX Bit Format", msm_snd_enum[7],
msm_sec_mi2s_tx_bit_format_get,
msm_sec_mi2s_tx_bit_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_TX SampleRate", msm_snd_enum[11],
+ msm_sec_mi2s_rate_get, msm_sec_mi2s_rate_put),
SOC_ENUM_EXT("EC Reference Channels", msm_snd_enum[8],
msm_ec_ref_ch_get, msm_ec_ref_ch_put),
SOC_ENUM_EXT("EC Reference Bit Format", msm_snd_enum[9],
@@ -3059,20 +3098,22 @@ static struct snd_soc_dai_link apq8096_common_dai_links[] = {
.ops = &apq8096_ll_ops,
},
{
- .name = "Listen 1 Audio Service",
- .stream_name = "Listen 1 Audio Service",
- .cpu_dai_name = "LSM1",
- .platform_name = "msm-lsm-client",
+ .name = "MSM8996 Media20",
+ .stream_name = "MultiMedia20",
+ .cpu_dai_name = "MultiMedia20",
+ .platform_name = "msm-pcm-loopback",
.dynamic = 1,
+ .dpcm_playback = 1,
.dpcm_capture = 1,
- .trigger = { SND_SOC_DPCM_TRIGGER_POST,
- SND_SOC_DPCM_TRIGGER_POST },
- .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
- .ignore_suspend = 1,
- .ignore_pmdown_time = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM1,
+ .ignore_suspend = 1,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ /* this dai link has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA20,
},
/* Multiple Tunnel instances */
{
@@ -3935,19 +3976,22 @@ static struct snd_soc_dai_link apq8096_custom_fe_dai_links[] = {
.codec_name = "snd-soc-dummy",
},
{
- .name = "Tertiary MI2S TX_Hostless",
- .stream_name = "Tertiary MI2S_TX Hostless Capture",
- .cpu_dai_name = "TERT_MI2S_TX_HOSTLESS",
- .platform_name = "msm-pcm-hostless",
+ .name = "MSM8996 Media20",
+ .stream_name = "MultiMedia20",
+ .cpu_dai_name = "MultiMedia20",
+ .platform_name = "msm-pcm-loopback",
.dynamic = 1,
+ .dpcm_playback = 1,
.dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
- SND_SOC_DPCM_TRIGGER_POST},
- .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ SND_SOC_DPCM_TRIGGER_POST},
.ignore_suspend = 1,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ /* this dai link has playback support */
.ignore_pmdown_time = 1,
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA20,
},
};
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 33443ab151e9..509574faebf8 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -2608,6 +2608,39 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.name = "MultiMedia19",
.probe = fe_dai_probe,
},
+ {
+ .playback = {
+ .stream_name = "MultiMedia20 Playback",
+ .aif_name = "MM_DL20",
+ .rates = (SNDRV_PCM_RATE_8000_384000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 384000,
+ },
+ .capture = {
+ .stream_name = "MultiMedia20 Capture",
+ .aif_name = "MM_UL20",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .name = "MultiMedia20",
+ .probe = fe_dai_probe,
+ },
};
static int msm_fe_dai_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 0a93ee593ed2..d037f8696cac 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -91,7 +91,8 @@ struct msm_compr_gapless_state {
static unsigned int supported_sample_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000,
- 88200, 96000, 128000, 176400, 192000, 352800, 384000, 2822400, 5644800
+ 88200, 96000, 128000, 144000, 176400, 192000, 352800, 384000, 2822400,
+ 5644800
};
struct msm_compr_pdata {
@@ -177,7 +178,7 @@ struct msm_compr_audio {
const u32 compr_codecs[] = {
SND_AUDIOCODEC_AC3, SND_AUDIOCODEC_EAC3, SND_AUDIOCODEC_DTS,
- SND_AUDIOCODEC_DSD, SND_AUDIOCODEC_TRUEHD};
+ SND_AUDIOCODEC_DSD, SND_AUDIOCODEC_TRUEHD, SND_AUDIOCODEC_IEC61937};
struct query_audio_effect {
uint32_t mod_id;
@@ -921,7 +922,7 @@ static void populate_codec_list(struct msm_compr_audio *prtd)
COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
prtd->compr_cap.max_fragments =
COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
- prtd->compr_cap.num_codecs = 16;
+ prtd->compr_cap.num_codecs = 17;
prtd->compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
prtd->compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
prtd->compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
@@ -938,6 +939,7 @@ static void populate_codec_list(struct msm_compr_audio *prtd)
prtd->compr_cap.codecs[13] = SND_AUDIOCODEC_DSD;
prtd->compr_cap.codecs[14] = SND_AUDIOCODEC_APTX;
prtd->compr_cap.codecs[15] = SND_AUDIOCODEC_TRUEHD;
+ prtd->compr_cap.codecs[16] = SND_AUDIOCODEC_IEC61937;
}
static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
@@ -1196,6 +1198,15 @@ static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
pr_debug("SND_AUDIOCODEC_TRUEHD\n");
/* no media format block needed */
break;
+ case FORMAT_IEC61937:
+ pr_debug("SND_AUDIOCODEC_IEC61937\n");
+ ret = q6asm_media_format_block_iec(prtd->audio_client,
+ prtd->sample_rate,
+ prtd->num_channels);
+ if (ret < 0)
+ pr_err("%s: CMD IEC61937 Format block failed ret %d\n",
+ __func__, ret);
+ break;
case FORMAT_APTX:
pr_debug("SND_AUDIOCODEC_APTX\n");
memset(&aptx_cfg, 0x0, sizeof(struct aptx_dec_bt_addr_cfg));
@@ -1857,8 +1868,11 @@ static int msm_compr_set_params(struct snd_compr_stream *cstream,
prtd->sample_rate = prtd->codec_param.codec.sample_rate;
pr_debug("%s: sample_rate %d\n", __func__, prtd->sample_rate);
- if (prtd->codec_param.codec.compr_passthr >= LEGACY_PCM &&
- prtd->codec_param.codec.compr_passthr <= COMPRESSED_PASSTHROUGH_DSD)
+ if ((prtd->codec_param.codec.compr_passthr >= LEGACY_PCM &&
+ prtd->codec_param.
+ codec.compr_passthr <= COMPRESSED_PASSTHROUGH_DSD) ||
+ (prtd->codec_param.
+ codec.compr_passthr == COMPRESSED_PASSTHROUGH_IEC61937))
prtd->compr_passthr = prtd->codec_param.codec.compr_passthr;
else
prtd->compr_passthr = LEGACY_PCM;
@@ -1981,6 +1995,12 @@ static int msm_compr_set_params(struct snd_compr_stream *cstream,
break;
}
+ case SND_AUDIOCODEC_IEC61937: {
+ pr_debug("%s: SND_AUDIOCODEC_IEC61937\n", __func__);
+ prtd->codec = FORMAT_IEC61937;
+ break;
+ }
+
case SND_AUDIOCODEC_APTX: {
pr_debug("%s: SND_AUDIOCODEC_APTX\n", __func__);
prtd->codec = FORMAT_APTX;
@@ -2853,6 +2873,7 @@ static int msm_compr_get_codec_caps(struct snd_compr_stream *cstream,
case SND_AUDIOCODEC_DTS:
case SND_AUDIOCODEC_DSD:
case SND_AUDIOCODEC_TRUEHD:
+ case SND_AUDIOCODEC_IEC61937:
case SND_AUDIOCODEC_APTX:
break;
default:
@@ -3290,6 +3311,7 @@ static int msm_compr_send_dec_params(struct snd_compr_stream *cstream,
case FORMAT_MP3:
case FORMAT_MPEG4_AAC:
case FORMAT_TRUEHD:
+ case FORMAT_IEC61937:
case FORMAT_APTX:
pr_debug("%s: no runtime parameters for codec: %d\n", __func__,
prtd->codec);
@@ -3358,6 +3380,7 @@ static int msm_compr_dec_params_put(struct snd_kcontrol *kcontrol,
case FORMAT_DTS:
case FORMAT_DSD:
case FORMAT_TRUEHD:
+ case FORMAT_IEC61937:
case FORMAT_APTX:
pr_debug("%s: no runtime parameters for codec: %d\n", __func__,
prtd->codec);
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
index 45868f508a60..a71fb74d35bc 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
@@ -174,7 +174,7 @@ static const struct snd_kcontrol_new hdmi_config_controls[] = {
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = "HDMI Drift",
+ .name = "HDMI DRIFT",
.info = msm_dai_q6_ext_disp_drift_info,
.get = msm_dai_q6_ext_disp_drift_get,
},
@@ -191,7 +191,7 @@ static const struct snd_kcontrol_new display_port_config_controls[] = {
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = "DISPLAY_PORT Drift",
+ .name = "DISPLAY_PORT DRIFT",
.info = msm_dai_q6_ext_disp_drift_info,
.get = msm_dai_q6_ext_disp_drift_get,
},
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 0f63fd6bbd00..32f68fedcd3b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -136,7 +136,8 @@ struct msm_pcm_route_bdai_name {
};
static struct msm_pcm_route_bdai_name be_dai_name_table[MSM_BACKEND_DAI_MAX];
-static int msm_routing_send_device_pp_params(int port_id, int copp_idx);
+static int msm_routing_send_device_pp_params(int port_id, int copp_idx,
+ int fe_id);
static int msm_routing_get_bit_width(unsigned int format)
{
@@ -281,253 +282,256 @@ static void msm_pcm_routng_cfg_matrix_map_pp(struct route_payload payload,
#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
- { PRIMARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
- { PRIMARY_I2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
- { SLIMBUS_0_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
- { SLIMBUS_0_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
- { HDMI_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
- { INT_BT_SCO_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
- { INT_BT_SCO_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
- { INT_FM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
- { INT_FM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
- { RT_PROXY_PORT_001_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { PRIMARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_PRI_I2S_RX},
+ { PRIMARY_I2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_PRI_I2S_TX},
+ { SLIMBUS_0_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_0_RX},
+ { SLIMBUS_0_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_0_TX},
+ { HDMI_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_HDMI},
+ { INT_BT_SCO_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_BT_SCO_RX},
+ { INT_BT_SCO_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_BT_SCO_TX},
+ { INT_FM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_FM_RX},
+ { INT_FM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_FM_TX},
+ { RT_PROXY_PORT_001_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_AFE_PCM_RX},
- { RT_PROXY_PORT_001_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { RT_PROXY_PORT_001_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_AFE_PCM_TX},
- { AFE_PORT_ID_PRIMARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_AUXPCM_RX},
- { AFE_PORT_ID_PRIMARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_AUXPCM_TX},
- { VOICE_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { VOICE_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_VOICE_PLAYBACK_TX},
- { VOICE2_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { VOICE2_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_VOICE2_PLAYBACK_TX},
- { VOICE_RECORD_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { VOICE_RECORD_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INCALL_RECORD_RX},
- { VOICE_RECORD_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { VOICE_RECORD_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INCALL_RECORD_TX},
- { MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
- { MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
- { SECONDARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
- { SLIMBUS_1_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
- { SLIMBUS_1_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
- { SLIMBUS_2_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
- { SLIMBUS_2_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_TX},
- { SLIMBUS_3_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
- { SLIMBUS_3_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
- { SLIMBUS_4_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
- { SLIMBUS_4_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
- { SLIMBUS_5_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
- { SLIMBUS_5_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
- { SLIMBUS_6_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
- { SLIMBUS_6_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
- { SLIMBUS_7_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
- { SLIMBUS_7_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
- { SLIMBUS_8_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
- { SLIMBUS_8_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
- { SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
- { SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
- { SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
- { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_MI2S_RX},
+ { MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_MI2S_TX},
+ { SECONDARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SEC_I2S_RX},
+ { SLIMBUS_1_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_1_RX},
+ { SLIMBUS_1_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_1_TX},
+ { SLIMBUS_2_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_2_RX},
+ { SLIMBUS_2_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_2_TX},
+ { SLIMBUS_3_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_3_RX},
+ { SLIMBUS_3_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_3_TX},
+ { SLIMBUS_4_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_4_RX},
+ { SLIMBUS_4_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_4_TX},
+ { SLIMBUS_5_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_5_RX},
+ { SLIMBUS_5_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_5_TX},
+ { SLIMBUS_6_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_6_RX},
+ { SLIMBUS_6_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_6_TX},
+ { SLIMBUS_7_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_7_RX},
+ { SLIMBUS_7_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_7_TX},
+ { SLIMBUS_8_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_8_RX},
+ { SLIMBUS_8_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_8_TX},
+ { SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_STUB_RX},
+ { SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_STUB_TX},
+ { SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_STUB_1_TX},
+ { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_MI2S_RX},
- { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_MI2S_TX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_MI2S_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_MI2S_RX},
- { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_MI2S_TX},
- { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_MI2S_RX},
- { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_MI2S_TX},
- { AUDIO_PORT_ID_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AUDIO_PORT_ID_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_AUDIO_I2S_RX},
- { AFE_PORT_ID_SECONDARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_AUXPCM_RX},
- { AFE_PORT_ID_SECONDARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_AUXPCM_TX},
- { AFE_PORT_ID_SPDIF_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SPDIF_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+ LPASS_BE_SPDIF_RX},
+ { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_MI2S_RX_SD1},
- { AFE_PORT_ID_QUINARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUINARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUIN_MI2S_RX},
- { AFE_PORT_ID_QUINARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUINARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUIN_MI2S_TX},
- { AFE_PORT_ID_SENARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SENARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SENARY_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_RX_0},
- { AFE_PORT_ID_PRIMARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_TX_0},
- { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_RX_1},
- { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_TX_1},
- { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_RX_2},
- { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_TX_2},
- { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_RX_3},
- { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_TX_3},
- { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_RX_4},
- { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_TX_4},
- { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_RX_5},
- { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_TX_5},
- { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_RX_6},
- { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_TX_6},
- { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_RX_7},
- { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_PRI_TDM_TX_7},
- { AFE_PORT_ID_SECONDARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_RX_0},
- { AFE_PORT_ID_SECONDARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_TX_0},
- { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_RX_1},
- { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_TX_1},
- { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_RX_2},
- { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_TX_2},
- { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_RX_3},
- { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_TX_3},
- { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_RX_4},
- { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_TX_4},
- { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_RX_5},
- { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_TX_5},
- { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_RX_6},
- { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_TX_6},
- { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_RX_7},
- { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_SEC_TDM_TX_7},
- { AFE_PORT_ID_TERTIARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_RX_0},
- { AFE_PORT_ID_TERTIARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_TX_0},
- { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_RX_1},
- { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_TX_1},
- { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_RX_2},
- { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_TX_2},
- { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_RX_3},
- { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_TX_3},
- { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_RX_4},
- { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_TX_4},
- { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_RX_5},
- { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_TX_5},
- { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_RX_6},
- { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_TX_6},
- { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_RX_7},
- { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_TDM_TX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_RX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_TX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_RX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_TX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_RX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_TX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_RX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_TX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_RX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_TX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_RX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_TX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_RX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_TX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_RX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_TDM_TX_7},
- { INT_BT_A2DP_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
- { AFE_PORT_ID_USB_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { INT_BT_A2DP_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+ LPASS_BE_INT_BT_A2DP_RX},
+ { AFE_PORT_ID_USB_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_USB_AUDIO_RX},
- { AFE_PORT_ID_USB_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_USB_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_USB_AUDIO_TX},
- { DISPLAY_PORT_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
- { AFE_PORT_ID_TERTIARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { DISPLAY_PORT_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+ LPASS_BE_DISPLAY_PORT},
+ { AFE_PORT_ID_TERTIARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_AUXPCM_RX},
- { AFE_PORT_ID_TERTIARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_TERT_AUXPCM_TX},
- { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_AUXPCM_RX},
- { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_QUAT_AUXPCM_TX},
- { AFE_PORT_ID_INT0_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT0_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT0_MI2S_RX},
- { AFE_PORT_ID_INT0_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT0_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT0_MI2S_TX},
- { AFE_PORT_ID_INT1_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT1_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT1_MI2S_RX},
- { AFE_PORT_ID_INT1_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT1_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT1_MI2S_TX},
- { AFE_PORT_ID_INT2_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT2_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT2_MI2S_RX},
- { AFE_PORT_ID_INT2_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT2_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT2_MI2S_TX},
- { AFE_PORT_ID_INT3_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT3_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT3_MI2S_RX},
- { AFE_PORT_ID_INT3_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT3_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT3_MI2S_TX},
- { AFE_PORT_ID_INT4_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT4_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT4_MI2S_RX},
- { AFE_PORT_ID_INT4_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT4_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT4_MI2S_TX},
- { AFE_PORT_ID_INT5_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT5_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT5_MI2S_RX},
- { AFE_PORT_ID_INT5_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT5_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT5_MI2S_TX},
- { AFE_PORT_ID_INT6_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT6_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT6_MI2S_RX},
- { AFE_PORT_ID_INT6_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT6_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
LPASS_BE_INT6_MI2S_TX},
};
@@ -593,6 +597,9 @@ static struct msm_pcm_routing_fdai_data
/* MULTIMEDIA19 */
{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA20 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
/* CS_VOICE */
{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
@@ -1087,7 +1094,7 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
msm_qti_pp_send_eq_values(fe_id);
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
if (test_bit(fe_id, &msm_bedais[i].fe_sessions[0]))
- msm_bedais[i].passthr_mode = passthr_mode;
+ msm_bedais[i].passthr_mode[fe_id] = passthr_mode;
if (!is_be_dai_extproc(i) &&
(afe_get_port_type(msm_bedais[i].port_id) ==
@@ -1191,7 +1198,7 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
COMPRESSED_PASSTHROUGH_GEN) {
msm_routing_send_device_pp_params(
msm_bedais[i].port_id,
- copp_idx);
+ copp_idx, fe_id);
}
}
}
@@ -1289,7 +1296,7 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
channels = msm_bedais[i].channel;
else
channels = msm_bedais[i].adm_override_ch;
- msm_bedais[i].passthr_mode =
+ msm_bedais[i].passthr_mode[fedai_id] =
LEGACY_PCM;
bits_per_sample = msm_routing_get_bit_width(
@@ -1360,7 +1367,7 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
}
}
if ((perf_mode == LEGACY_PCM_MODE) &&
- (msm_bedais[i].passthr_mode ==
+ (msm_bedais[i].passthr_mode[fedai_id] ==
LEGACY_PCM))
msm_pcm_routing_cfg_pp(msm_bedais[i].port_id,
copp_idx, topology,
@@ -1445,7 +1452,7 @@ void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type)
if ((DOLBY_ADM_COPP_TOPOLOGY_ID == topology ||
DS2_ADM_COPP_TOPOLOGY_ID == topology) &&
(fdai->perf_mode == LEGACY_PCM_MODE) &&
- (msm_bedais[i].passthr_mode ==
+ (msm_bedais[i].passthr_mode[fedai_id] ==
LEGACY_PCM))
msm_pcm_routing_deinit_pp(msm_bedais[i].port_id,
topology);
@@ -1480,7 +1487,7 @@ static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
u32 channels, sample_rate;
uint16_t bits_per_sample = 16;
struct msm_pcm_routing_fdai_data *fdai;
- uint32_t passthr_mode = msm_bedais[reg].passthr_mode;
+ uint32_t passthr_mode;
bool is_lsm;
pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
@@ -1497,6 +1504,7 @@ static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
return;
}
+ passthr_mode = msm_bedais[reg].passthr_mode[val];
if (afe_get_port_type(msm_bedais[reg].port_id) ==
MSM_AFE_PORT_TYPE_RX) {
session_type = SESSION_TYPE_RX;
@@ -5454,6 +5462,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quat_tdm_tx_0_mixer_controls[] = {
@@ -5556,6 +5567,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = {
@@ -5607,6 +5621,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = {
@@ -5658,6 +5675,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
@@ -6469,6 +6489,70 @@ static const struct snd_kcontrol_new mmul19_mixer_controls[] = {
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
};
+
+static const struct snd_kcontrol_new mmul20_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
@@ -10506,6 +10590,7 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_AIF_IN("MM_DL14", "MultiMedia14 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("MM_DL15", "MultiMedia15 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("MM_DL16", "MultiMedia16 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL20", "MultiMedia20 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
@@ -10518,6 +10603,7 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_AIF_OUT("MM_UL17", "MultiMedia17 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL18", "MultiMedia18 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL19", "MultiMedia19 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL20", "MultiMedia20 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("VOICE2_DL", "Voice2 Playback", 0, 0, 0, 0),
@@ -11256,6 +11342,8 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
mmul18_mixer_controls, ARRAY_SIZE(mmul18_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia19 Mixer", SND_SOC_NOPM, 0, 0,
mmul19_mixer_controls, ARRAY_SIZE(mmul19_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia20 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul20_mixer_controls, ARRAY_SIZE(mmul20_mixer_controls)),
SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -12277,6 +12365,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia20", "MM_DL20"},
{"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0 Audio Mixer"},
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12349,6 +12438,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia20", "MM_DL20"},
{"QUAT_TDM_RX_1", NULL, "QUAT_TDM_RX_1 Audio Mixer"},
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12367,6 +12457,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia20", "MM_DL20"},
{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2 Audio Mixer"},
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12385,6 +12476,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia20", "MM_DL20"},
{"QUAT_TDM_RX_3", NULL, "QUAT_TDM_RX_3 Audio Mixer"},
{"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"},
@@ -12436,6 +12528,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia6 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia6 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"MultiMedia6 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+ {"MultiMedia6 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
{"MultiMedia1 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
{"MultiMedia1 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
@@ -12565,6 +12658,27 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia9 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
{"MultiMedia9 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+ {"MultiMedia20 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"MultiMedia20 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+ {"MultiMedia20 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia20 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia20 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+ {"MultiMedia20 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+ {"MultiMedia20 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+ {"MultiMedia20 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+ {"MultiMedia20 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+ {"MultiMedia20 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+ {"MultiMedia20 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+ {"MultiMedia20 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+ {"MultiMedia20 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+ {"MultiMedia20 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+ {"MultiMedia20 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+ {"MultiMedia20 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+ {"MultiMedia20 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+ {"MultiMedia20 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+ {"MultiMedia20 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+ {"MultiMedia20 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
{"MultiMedia1 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
{"MultiMedia2 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
{"MultiMedia4 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
@@ -12683,6 +12797,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MM_UL17", NULL, "MultiMedia17 Mixer"},
{"MM_UL18", NULL, "MultiMedia18 Mixer"},
{"MM_UL19", NULL, "MultiMedia19 Mixer"},
+ {"MM_UL20", NULL, "MultiMedia20 Mixer"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -14114,7 +14229,7 @@ static int msm_pcm_routing_close(struct snd_pcm_substream *substream)
clear_bit(idx,
&session_copp_map[i][session_type][be_id]);
if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
- (bedai->passthr_mode == LEGACY_PCM))
+ (bedai->passthr_mode[i] == LEGACY_PCM))
msm_pcm_routing_deinit_pp(bedai->port_id,
topology);
}
@@ -14123,7 +14238,10 @@ static int msm_pcm_routing_close(struct snd_pcm_substream *substream)
bedai->active = 0;
bedai->sample_rate = 0;
bedai->channel = 0;
- bedai->passthr_mode = LEGACY_PCM;
+ for (i = 0; i < MSM_FRONTEND_DAI_MAX; i++) {
+ if (bedai->passthr_mode[i] != LISTEN)
+ bedai->passthr_mode[i] = LEGACY_PCM;
+ }
mutex_unlock(&routing_lock);
return 0;
@@ -14152,17 +14270,6 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
bedai = &msm_bedais[be_id];
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (bedai->passthr_mode != LEGACY_PCM)
- path_type = ADM_PATH_COMPRESSED_RX;
- else
- path_type = ADM_PATH_PLAYBACK;
- session_type = SESSION_TYPE_RX;
- } else {
- path_type = ADM_PATH_LIVE_REC;
- session_type = SESSION_TYPE_TX;
- }
-
mutex_lock(&routing_lock);
if (bedai->active == 1)
goto done; /* Ignore prepare if back-end already active */
@@ -14179,6 +14286,17 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
route_check_fe_id_adm_support(i)))
continue;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (bedai->passthr_mode[i] != LEGACY_PCM)
+ path_type = ADM_PATH_COMPRESSED_RX;
+ else
+ path_type = ADM_PATH_PLAYBACK;
+ session_type = SESSION_TYPE_RX;
+ } else {
+ path_type = ADM_PATH_LIVE_REC;
+ session_type = SESSION_TYPE_TX;
+ }
+
is_lsm = (i >= MSM_FRONTEND_DAI_LSM1) &&
(i <= MSM_FRONTEND_DAI_LSM8);
fdai = &fe_dai_map[i][session_type];
@@ -14256,9 +14374,9 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
msm_pcm_routing_build_matrix(i, session_type, path_type,
fdai->perf_mode,
- bedai->passthr_mode);
+ bedai->passthr_mode[i]);
if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
- (bedai->passthr_mode == LEGACY_PCM))
+ (bedai->passthr_mode[i] == LEGACY_PCM))
msm_pcm_routing_cfg_pp(bedai->port_id, copp_idx,
topology, channels);
}
@@ -14270,10 +14388,10 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
pr_debug("%s voice session_id: 0x%x\n", __func__,
session_id);
- if (session_type == SESSION_TYPE_TX)
- voc_path_type = TX_PATH;
- else
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
voc_path_type = RX_PATH;
+ else
+ voc_path_type = TX_PATH;
voc_set_route_flag(session_id, voc_path_type, 1);
@@ -14323,7 +14441,8 @@ done:
return 0;
}
-static int msm_routing_send_device_pp_params(int port_id, int copp_idx)
+static int msm_routing_send_device_pp_params(int port_id, int copp_idx,
+ int fe_id)
{
int index, topo_id, be_idx;
unsigned long pp_config = 0;
@@ -14366,8 +14485,8 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx)
return -EINVAL;
}
- if ((msm_bedais[be_idx].passthr_mode == LEGACY_PCM) ||
- (msm_bedais[be_idx].passthr_mode == LISTEN))
+ if ((msm_bedais[be_idx].passthr_mode[fe_id] == LEGACY_PCM) ||
+ (msm_bedais[be_idx].passthr_mode[fe_id] == LISTEN))
compr_passthr_mode = false;
pp_config = msm_bedais_pp_params[index].pp_params_config;
@@ -14426,12 +14545,12 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
return -EINVAL;
}
- if ((msm_bedais[be_idx].passthr_mode == LEGACY_PCM) ||
- (msm_bedais[be_idx].passthr_mode == LISTEN))
- compr_passthr_mode = false;
-
for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions[0],
MSM_FRONTEND_DAI_MM_SIZE) {
+ if ((msm_bedais[be_idx].passthr_mode[i] == LEGACY_PCM) ||
+ (msm_bedais[be_idx].passthr_mode[i] == LISTEN))
+ compr_passthr_mode = false;
+
for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
unsigned long copp =
session_copp_map[i]
@@ -14444,7 +14563,7 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
continue;
pr_debug("%s: port: 0x%x, copp %ld, be active: %d, passt: %d\n",
__func__, port_id, copp, msm_bedais[be_idx].active,
- msm_bedais[be_idx].passthr_mode);
+ msm_bedais[be_idx].passthr_mode[i]);
switch (pp_id) {
case ADM_PP_PARAM_MUTE_ID:
pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index fcd155e71317..234d57ca1c40 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -192,6 +192,7 @@ enum {
MSM_FRONTEND_DAI_MULTIMEDIA17,
MSM_FRONTEND_DAI_MULTIMEDIA18,
MSM_FRONTEND_DAI_MULTIMEDIA19,
+ MSM_FRONTEND_DAI_MULTIMEDIA20,
MSM_FRONTEND_DAI_CS_VOICE,
MSM_FRONTEND_DAI_VOIP,
MSM_FRONTEND_DAI_AFE_RX,
@@ -217,8 +218,8 @@ enum {
MSM_FRONTEND_DAI_MAX,
};
-#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA19 + 1)
-#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA19
+#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA20 + 1)
+#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA20
enum {
MSM_BACKEND_DAI_PRI_I2S_RX = 0,
@@ -423,7 +424,7 @@ struct msm_pcm_routing_bdai_data {
unsigned int channel;
unsigned int format;
unsigned int adm_override_ch;
- u32 passthr_mode;
+ u32 passthr_mode[MSM_FRONTEND_DAI_MAX];
char *name;
};
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 50bad5c2e48c..0d142f77db50 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -1038,14 +1038,12 @@ void q6asm_audio_client_free(struct audio_client *ac)
}
rtac_set_asm_handle(ac->session, NULL);
- if (!atomic_read(&ac->reset)) {
- apr_deregister(ac->apr2);
- apr_deregister(ac->apr);
- q6asm_mmap_apr_dereg();
- ac->apr2 = NULL;
- ac->apr = NULL;
- ac->mmap_apr = NULL;
- }
+ apr_deregister(ac->apr2);
+ apr_deregister(ac->apr);
+ q6asm_mmap_apr_dereg();
+ ac->apr2 = NULL;
+ ac->apr = NULL;
+ ac->mmap_apr = NULL;
q6asm_session_free(ac);
pr_debug("%s: APR De-Register\n", __func__);
@@ -1792,6 +1790,7 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
case ASM_STREAM_CMD_OPEN_LOOPBACK_V2:
case ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK:
case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
+ case ASM_DATA_CMD_IEC_60958_MEDIA_FMT:
case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
case ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2:
case ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS:
@@ -2640,6 +2639,9 @@ int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
case FORMAT_TRUEHD:
open.fmt_id = ASM_MEDIA_FMT_TRUEHD;
break;
+ case FORMAT_IEC61937:
+ open.fmt_id = ASM_MEDIA_FMT_IEC;
+ break;
default:
pr_err("%s: Invalid format[%d]\n", __func__, format);
rc = -EINVAL;
@@ -2656,6 +2658,10 @@ int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format,
open.flags = 0x8;
pr_debug("%s: Flag 8 - COMPRESSED_PASSTHROUGH_CONVERT\n",
__func__);
+ } else if (passthrough_flag == COMPRESSED_PASSTHROUGH_IEC61937) {
+ open.flags = 0x1;
+ pr_debug("%s: Flag 1 - COMPRESSED_PASSTHROUGH_IEC61937\n",
+ __func__);
} else {
pr_err("%s: Invalid passthrough type[%d]\n",
__func__, passthrough_flag);
@@ -3225,6 +3231,102 @@ fail_cmd:
return rc;
}
+
+int q6asm_open_transcode_loopback(struct audio_client *ac,
+ uint16_t bits_per_sample,
+ uint32_t source_format, uint32_t sink_format)
+{
+ int rc = 0x00;
+ struct asm_stream_cmd_open_transcode_loopback_t open;
+
+ if (ac == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (ac->apr == NULL) {
+ pr_err("%s: AC APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: session[%d]\n", __func__, ac->session);
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+ atomic_set(&ac->cmd_state, -1);
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK;
+
+ open.mode_flags = 0;
+ open.src_endpoint_type = 0;
+ open.sink_endpoint_type = 0;
+ switch (source_format) {
+ case FORMAT_LINEAR_PCM:
+ case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+ open.src_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
+ break;
+ case FORMAT_AC3:
+ open.src_format_id = ASM_MEDIA_FMT_AC3;
+ break;
+ case FORMAT_EAC3:
+ open.src_format_id = ASM_MEDIA_FMT_EAC3;
+ break;
+ default:
+ pr_err("%s: Unsupported src fmt [%d]\n",
+ __func__, source_format);
+ return -EINVAL;
+ }
+ switch (sink_format) {
+ case FORMAT_LINEAR_PCM:
+ case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+ open.sink_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
+ break;
+ default:
+ pr_err("%s: Unsupported sink fmt [%d]\n",
+ __func__, sink_format);
+ return -EINVAL;
+ }
+
+ /* source endpoint : matrix */
+ open.audproc_topo_id = q6asm_get_asm_topology_cal();
+
+ ac->app_type = q6asm_get_asm_app_type_cal();
+ if (ac->perf_mode == LOW_LATENCY_PCM_MODE)
+ open.mode_flags |= ASM_LOW_LATENCY_STREAM_SESSION;
+ else
+ open.mode_flags |= ASM_LEGACY_STREAM_SESSION;
+ ac->topology = open.audproc_topo_id;
+ open.bits_per_sample = bits_per_sample;
+ open.reserved = 0;
+ pr_debug("%s: opening a transcode_loopback with mode_flags =[%d] session[%d]\n",
+ __func__, open.mode_flags, ac->session);
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+ if (rc < 0) {
+ pr_err("%s: open failed op[0x%x]rc[%d]\n",
+ __func__, open.hdr.opcode, rc);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout. waited for open_transcode_loopback\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ goto fail_cmd;
+ }
+ if (atomic_read(&ac->cmd_state) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state)));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state));
+ goto fail_cmd;
+ }
+
+ return 0;
+fail_cmd:
+ return rc;
+}
+
static
int q6asm_set_shared_circ_buff(struct audio_client *ac,
struct asm_stream_cmd_open_shared_io *open,
@@ -5424,6 +5526,62 @@ fail_cmd:
}
EXPORT_SYMBOL(q6asm_media_format_block_gen_compr);
+
+/*
+ * q6asm_media_format_block_iec - set up IEC61937 (compressed) or IEC60958
+ *                                (PCM) format params. Both standards share
+ *                                the same media format block and are used
+ *                                for HDMI or SPDIF output.
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ */
+int q6asm_media_format_block_iec(struct audio_client *ac,
+ uint32_t rate, uint32_t channels)
+{
+ struct asm_iec_compressed_fmt_blk_t fmt;
+ int rc = 0;
+
+ pr_debug("%s: session[%d]rate[%d]ch[%d]\n",
+ __func__, ac->session, rate,
+ channels);
+
+ memset(&fmt, 0, sizeof(fmt));
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_IEC_60958_MEDIA_FMT;
+ fmt.num_channels = channels;
+ fmt.sampling_rate = rate;
+
+ atomic_set(&ac->cmd_state, -1);
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s: Comamnd open failed %d\n", __func__, rc);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout. waited for format update\n", __func__);
+ rc = -ETIMEDOUT;
+ goto fail_cmd;
+ }
+
+ if (atomic_read(&ac->cmd_state) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state)));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state));
+ }
+ return 0;
+fail_cmd:
+ return rc;
+}
+EXPORT_SYMBOL(q6asm_media_format_block_iec);
+
static int __q6asm_media_format_block_multi_aac(struct audio_client *ac,
struct asm_aac_cfg *cfg, int stream_id)
{
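The q6asm additions above form a new IEC61937/IEC60958 passthrough path: a stream is opened with the COMPRESSED_PASSTHROUGH_IEC61937 flag and its frame parameters are then programmed with q6asm_media_format_block_iec(). A minimal caller sketch follows, assuming an audio_client already allocated elsewhere in the usual way; the wrapper name open_iec61937_passthrough and the 48 kHz/2-channel values are illustrative only and not part of this patch.

static int open_iec61937_passthrough(struct audio_client *ac)
{
	int rc;

	/* Open an RX stream using the new IEC61937 passthrough flag (0x1). */
	rc = q6asm_open_write_compressed(ac, FORMAT_IEC61937,
					 COMPRESSED_PASSTHROUGH_IEC61937);
	if (rc < 0)
		return rc;

	/* Program the IEC 60958/61937 frame parameters before sending data. */
	return q6asm_media_format_block_iec(ac, 48000, 2);
}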
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index d352133b7c32..01e31578f107 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -479,8 +479,10 @@ static void voc_set_error_state(uint16_t reset_proc)
for (i = 0; i < MAX_VOC_SESSIONS; i++) {
v = &common.voice[i];
- if (v != NULL)
+ if (v != NULL) {
v->voc_state = VOC_ERROR;
+ v->rec_info.recording = 0;
+ }
}
}
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index 228e84ae8e1d..f4219148e81c 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -1301,15 +1301,6 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
msm_anlg_cdc_spk_ext_pa_cb(enable_spk_ext_pa, ana_cdc);
msm_dig_cdc_hph_comp_cb(msm_config_hph_compander_gpio, dig_cdc);
- mbhc_cfg_ptr->calibration = def_msm_int_wcd_mbhc_cal();
- if (mbhc_cfg_ptr->calibration) {
- ret = msm_anlg_cdc_hs_detect(ana_cdc, mbhc_cfg_ptr);
- if (ret) {
- pr_err("%s: msm_anlg_cdc_hs_detect failed\n", __func__);
- kfree(mbhc_cfg_ptr->calibration);
- return ret;
- }
- }
card = rtd->card->snd_card;
if (!codec_root)
codec_root = snd_register_module_info(card->module, "codecs",
@@ -1569,6 +1560,36 @@ end:
return ret;
}
+static int msm_snd_card_late_probe(struct snd_soc_card *card)
+{
+ const char *be_dl_name = LPASS_BE_INT0_MI2S_RX;
+ struct snd_soc_codec *ana_cdc;
+ struct snd_soc_pcm_runtime *rtd;
+ int ret = 0;
+
+ rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+ if (!rtd) {
+ dev_err(card->dev,
+ "%s: snd_soc_get_pcm_runtime for %s failed!\n",
+ __func__, be_dl_name);
+ return -EINVAL;
+ }
+
+ ana_cdc = rtd->codec_dais[ANA_CDC]->codec;
+ mbhc_cfg_ptr->calibration = def_msm_int_wcd_mbhc_cal();
+ if (!mbhc_cfg_ptr->calibration)
+ return -ENOMEM;
+
+ ret = msm_anlg_cdc_hs_detect(ana_cdc, mbhc_cfg_ptr);
+ if (ret) {
+ dev_err(card->dev,
+ "%s: msm_anlg_cdc_hs_detect failed\n", __func__);
+ kfree(mbhc_cfg_ptr->calibration);
+ }
+
+ return ret;
+}
+
static struct snd_soc_ops msm_tdm_be_ops = {
.hw_params = msm_tdm_snd_hw_params
};
@@ -2930,6 +2951,7 @@ static struct snd_soc_card sdm660_card = {
.name = "sdm660-snd-card",
.dai_link = msm_int_dai,
.num_links = ARRAY_SIZE(msm_int_dai),
+ .late_probe = msm_snd_card_late_probe,
};
static void msm_disable_int_mclk0(struct work_struct *work)
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 93b0aa74ca03..39c2c7d067bb 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -156,6 +156,7 @@ out:
*/
case 0x2C: /* Westmere EP - Gulftown */
cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+ break;
case 0x2A: /* SNB */
case 0x2D: /* SNB Xeon */
case 0x3A: /* IVB */