Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_irq.c | 17
-rw-r--r--  drivers/acpi/resource.c | 14
-rw-r--r--  drivers/acpi/sleep.c | 1
-rw-r--r--  drivers/android/binder.c | 170
-rw-r--r--  drivers/base/power/opp/Makefile | 1
-rw-r--r--  drivers/base/power/opp/core.c | 1239
-rw-r--r--  drivers/base/power/opp/cpu.c | 25
-rw-r--r--  drivers/base/power/opp/debugfs.c | 218
-rw-r--r--  drivers/base/power/opp/opp.h | 107
-rw-r--r--  drivers/block/brd.c | 2
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 267
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h | 11
-rw-r--r--  drivers/block/rbd.c | 6
-rw-r--r--  drivers/bluetooth/ath3k.c | 8
-rw-r--r--  drivers/bluetooth/btusb.c | 4
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 14
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 4
-rw-r--r--  drivers/char/tpm/tpm_eventlog.c | 14
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c | 12
-rw-r--r--  drivers/clk/msm/clock-gpu-cobalt.c | 12
-rw-r--r--  drivers/clk/msm/clock-mmss-cobalt.c | 4
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c | 1
-rw-r--r--  drivers/clk/rockchip/clk-rk3368.c | 48
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 309
-rw-r--r--  drivers/crypto/atmel-aes.c | 4
-rw-r--r--  drivers/crypto/atmel-sha.c | 4
-rw-r--r--  drivers/crypto/atmel-tdes.c | 4
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 36
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 40
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h | 22
-rw-r--r--  drivers/crypto/marvell/cesa.c | 2
-rw-r--r--  drivers/crypto/msm/qcedev.c | 8
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 4
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 4
-rw-r--r--  drivers/devfreq/arm-memlat-mon.c | 12
-rw-r--r--  drivers/edac/amd64_edac.c | 2
-rw-r--r--  drivers/edac/sb_edac.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 4
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 2
-rw-r--r--  drivers/gpu/msm/adreno.c | 16
-rw-r--r--  drivers/gpu/msm/adreno.h | 2
-rw-r--r--  drivers/gpu/msm/adreno_a4xx_snapshot.c | 10
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c | 31
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.h | 2
-rw-r--r--  drivers/gpu/msm/adreno_a5xx_snapshot.c | 174
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c | 41
-rw-r--r--  drivers/gpu/msm/adreno_snapshot.c | 3
-rw-r--r--  drivers/gpu/msm/kgsl.c | 28
-rw-r--r--  drivers/gpu/msm/kgsl.h | 9
-rw-r--r--  drivers/gpu/msm/kgsl_debugfs.c | 2
-rw-r--r--  drivers/gpu/msm/kgsl_device.h | 5
-rw-r--r--  drivers/gpu/msm/kgsl_events.c | 10
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c | 50
-rw-r--r--  drivers/gpu/msm/kgsl_pool.c | 25
-rw-r--r--  drivers/gpu/msm/kgsl_pool.h | 1
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c | 31
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.h | 2
-rw-r--r--  drivers/gpu/msm/kgsl_pwrscale.c | 3
-rw-r--r--  drivers/gpu/msm/kgsl_sharedmem.c | 137
-rw-r--r--  drivers/gpu/msm/kgsl_sharedmem.h | 60
-rw-r--r--  drivers/hid/hid-core.c | 8
-rw-r--r--  drivers/hid/hid-multitouch.c | 5
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 16
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 73
-rw-r--r--  drivers/hid/wacom_wac.c | 11
-rw-r--r--  drivers/hwmon/max1111.c | 6
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpdm.c | 4
-rw-r--r--  drivers/idle/intel_idle.c | 108
-rw-r--r--  drivers/iio/accel/bmc150-accel-core.c | 7
-rw-r--r--  drivers/iio/adc/qcom-rradc.c | 317
-rw-r--r--  drivers/iio/gyro/bmg160_core.c | 9
-rw-r--r--  drivers/iio/magnetometer/st_magn.h | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 24
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 122
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 2
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 59
-rw-r--r--  drivers/input/misc/ati_remote2.c | 36
-rw-r--r--  drivers/input/misc/ims-pcu.c | 4
-rw-r--r--  drivers/input/misc/powermate.c | 3
-rw-r--r--  drivers/input/mouse/synaptics.c | 5
-rw-r--r--  drivers/input/touchscreen/ft5x06_ts.c | 12
-rw-r--r--  drivers/input/touchscreen/gt9xx/goodix_tool.c | 615
-rw-r--r--  drivers/input/touchscreen/gt9xx/gt9xx.c | 1805
-rw-r--r--  drivers/input/touchscreen/gt9xx/gt9xx.h | 270
-rw-r--r--  drivers/input/touchscreen/gt9xx/gt9xx_firmware.h | 6
-rw-r--r--  drivers/input/touchscreen/gt9xx/gt9xx_update.c | 1930
-rw-r--r--  drivers/input/touchscreen/it7258_ts_i2c.c | 2017
-rw-r--r--  drivers/input/touchscreen/msg21xx_ts.c | 1757
-rw-r--r--  drivers/iommu/arm-smmu.c | 14
-rw-r--r--  drivers/iommu/iommu.c | 3
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 3
-rw-r--r--  drivers/leds/leds-qpnp-flash-v2.c | 404
-rw-r--r--  drivers/leds/leds-qpnp-flash.c | 44
-rw-r--r--  drivers/leds/leds.h | 16
-rw-r--r--  drivers/md/bcache/super.c | 46
-rw-r--r--  drivers/md/dm-cache-metadata.c | 98
-rw-r--r--  drivers/md/dm-cache-metadata.h | 4
-rw-r--r--  drivers/md/dm-cache-target.c | 12
-rw-r--r--  drivers/md/dm-snap.c | 9
-rw-r--r--  drivers/md/dm-table.c | 36
-rw-r--r--  drivers/md/dm-thin-metadata.c | 5
-rw-r--r--  drivers/md/dm.c | 15
-rw-r--r--  drivers/md/multipath.c | 4
-rw-r--r--  drivers/md/raid1.c | 7
-rw-r--r--  drivers/md/raid10.c | 7
-rw-r--r--  drivers/md/raid5.c | 51
-rw-r--r--  drivers/md/raid5.h | 4
-rw-r--r--  drivers/media/i2c/adv7511.c | 21
-rw-r--r--  drivers/media/pci/bt8xx/bttv-driver.c | 26
-rw-r--r--  drivers/media/pci/saa7134/saa7134-video.c | 18
-rw-r--r--  drivers/media/platform/coda/coda-bit.c | 2
-rw-r--r--  drivers/media/platform/coda/coda-common.c | 10
-rw-r--r--  drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c | 3
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c | 20
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c | 58
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c | 212
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h | 5
-rw-r--r--  drivers/media/platform/msm/vidc/hfi_packetization.c | 29
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vdec.c | 90
-rw-r--r--  drivers/media/platform/msm/vidc/msm_venc.c | 109
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_common.c | 68
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_debug.c | 2
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_res_parse.c | 5
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_resources.h | 1
-rw-r--r--  drivers/media/platform/msm/vidc/venus_hfi.c | 34
-rw-r--r--  drivers/media/platform/msm/vidc/vidc_hfi.h | 3
-rw-r--r--  drivers/media/platform/msm/vidc/vidc_hfi_api.h | 12
-rw-r--r--  drivers/media/platform/msm/vidc/vidc_hfi_helper.h | 12
-rw-r--r--  drivers/media/platform/vsp1/vsp1_sru.c | 1
-rw-r--r--  drivers/media/usb/au0828/au0828-core.c | 2
-rw-r--r--  drivers/media/usb/au0828/au0828-input.c | 4
-rw-r--r--  drivers/media/usb/au0828/au0828-video.c | 63
-rw-r--r--  drivers/media/usb/au0828/au0828.h | 9
-rw-r--r--  drivers/media/usb/pwc/pwc-if.c | 6
-rw-r--r--  drivers/media/usb/usbvision/usbvision-video.c | 16
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 21
-rw-r--r--  drivers/mfd/wcd934x-regmap.c | 13
-rw-r--r--  drivers/mfd/wcd934x-tables.c | 35
-rw-r--r--  drivers/mfd/wcd9xxx-core.c | 2
-rw-r--r--  drivers/mfd/wcd9xxx-utils.c | 10
-rw-r--r--  drivers/misc/Kconfig | 4
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/hdcp.c | 4
-rw-r--r--  drivers/misc/mei/bus.c | 9
-rw-r--r--  drivers/misc/qseecom.c | 104
-rw-r--r--  drivers/misc/uid_stat.c | 156
-rw-r--r--  drivers/mmc/card/block.c | 24
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 25
-rw-r--r--  drivers/mmc/host/sdhci-pci.h | 3
-rw-r--r--  drivers/mmc/host/sdhci.c | 23
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 84
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 3
-rw-r--r--  drivers/net/bonding/bond_main.c | 65
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4
-rw-r--r--  drivers/net/ethernet/jme.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 11
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 10
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 10
-rw-r--r--  drivers/net/irda/irtty-sir.c | 10
-rw-r--r--  drivers/net/macvtap.c | 9
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 36
-rw-r--r--  drivers/net/rionet.c | 4
-rw-r--r--  drivers/net/tun.c | 12
-rw-r--r--  drivers/net/usb/cdc_ether.c | 8
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 6
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 7
-rw-r--r--  drivers/net/usb/usbnet.c | 7
-rw-r--r--  drivers/net/vrf.c | 13
-rw-r--r--  drivers/net/vxlan.c | 4
-rw-r--r--  drivers/net/wan/farsync.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 2
-rw-r--r--  drivers/nfc/nq-nci.c | 48
-rw-r--r--  drivers/nvdimm/bus.c | 10
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 2
-rw-r--r--  drivers/pci/probe.c | 14
-rw-r--r--  drivers/pcmcia/db1xxx_ss.c | 11
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-bcm2835.c | 2
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx.c | 17
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-nomadik.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-pistachio.c | 24
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msmfalcon.c | 932
-rw-r--r--  drivers/pinctrl/sh-pfc/core.c | 4
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c | 1
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.c | 17
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.h | 21
-rw-r--r--  drivers/platform/msm/gsi/gsi.c | 29
-rw-r--r--  drivers/platform/msm/gsi/gsi_dbg.c | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.c | 48
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.h | 9
-rw-r--r--  drivers/platform/msm/ipa/ipa_clients/Makefile | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c | 597
-rw-r--r--  drivers/platform/msm/ipa/ipa_common_i.h | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_uc_offload_common_i.h | 24
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/Makefile | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa.c | 48
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_client.c | 114
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 240
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 32
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c | 22
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 158
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c | 10
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_rt.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c | 438
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h | 514
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 46
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 34
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 10
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/Makefile | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 93
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 116
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 22
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 207
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 5
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c | 410
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h | 580
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c | 46
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 25
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c | 5
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 10
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 14
-rw-r--r--  drivers/power/power_supply_sysfs.c | 2
-rw-r--r--  drivers/power/qcom-charger/qpnp-qnovo.c | 15
-rw-r--r--  drivers/power/qcom-charger/qpnp-smb2.c | 190
-rw-r--r--  drivers/power/qcom-charger/smb-lib.c | 162
-rw-r--r--  drivers/power/qcom-charger/smb-lib.h | 23
-rw-r--r--  drivers/power/qcom-charger/smb-reg.h | 3
-rw-r--r--  drivers/power/qcom-charger/smb138x-charger.c | 6
-rw-r--r--  drivers/regulator/core.c | 22
-rw-r--r--  drivers/regulator/cpr3-mmss-regulator.c | 64
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 37
-rw-r--r--  drivers/scsi/aacraid/linit.c | 12
-rw-r--r--  drivers/scsi/aacraid/src.c | 30
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 1
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 1
-rw-r--r--  drivers/scsi/ipr.c | 10
-rw-r--r--  drivers/scsi/scsi_common.c | 12
-rw-r--r--  drivers/scsi/sd.c | 30
-rw-r--r--  drivers/scsi/sd.h | 7
-rw-r--r--  drivers/scsi/sg.c | 3
-rw-r--r--  drivers/scsi/storvsc_drv.c | 5
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 3
-rw-r--r--  drivers/slimbus/slim-msm-ngd.c | 69
-rw-r--r--  drivers/slimbus/slim-msm.h | 2
-rw-r--r--  drivers/soc/qcom/Kconfig | 19
-rw-r--r--  drivers/soc/qcom/Makefile | 2
-rw-r--r--  drivers/soc/qcom/core_ctl_helper.c | 22
-rw-r--r--  drivers/soc/qcom/gladiator_erp_v2.c | 2
-rw-r--r--  drivers/soc/qcom/glink.c | 60
-rw-r--r--  drivers/soc/qcom/glink_core_if.h | 7
-rw-r--r--  drivers/soc/qcom/glink_spi_xprt.c | 2192
-rw-r--r--  drivers/soc/qcom/glink_xprt_if.h | 5
-rw-r--r--  drivers/soc/qcom/icnss.c | 8
-rw-r--r--  drivers/soc/qcom/irq-helper.c | 180
-rw-r--r--  drivers/soc/qcom/peripheral-loader.c | 29
-rw-r--r--  drivers/soc/qcom/pil-msa.c | 38
-rw-r--r--  drivers/soc/qcom/pil-q6v5-mss.c | 8
-rw-r--r--  drivers/soc/qcom/pil-q6v5.c | 28
-rw-r--r--  drivers/soc/qcom/socinfo.c | 12
-rw-r--r--  drivers/soc/qcom/subsys-pil-tz.c | 22
-rw-r--r--  drivers/soc/qcom/subsystem_restart.c | 3
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c | 87
-rwxr-xr-x  drivers/staging/android/ion/ion.c | 5
-rw-r--r--  drivers/staging/android/ion/ion_test.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_common.c | 12
-rw-r--r--  drivers/staging/comedi/drivers/ni_tiocmd.c | 2
-rw-r--r--  drivers/target/target_core_transport.c | 2
-rw-r--r--  drivers/thermal/msm-tsens.c | 60
-rw-r--r--  drivers/thermal/thermal_core.c | 55
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 18
-rw-r--r--  drivers/tty/serial/msm_serial_hs.c | 261
-rw-r--r--  drivers/usb/class/cdc-acm.c | 3
-rw-r--r--  drivers/usb/core/driver.c | 6
-rw-r--r--  drivers/usb/core/hub.c | 24
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c | 28
-rw-r--r--  drivers/usb/gadget/configfs.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_cdev.c | 10
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 385
-rw-r--r--  drivers/usb/gadget/function/f_gsi.c | 199
-rw-r--r--  drivers/usb/misc/iowarrior.c | 6
-rw-r--r--  drivers/usb/phy/phy-msm-qusb-v2.c | 70
-rw-r--r--  drivers/usb/phy/phy-msm-qusb.c | 68
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 4
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 6
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/cypress_m8.c | 11
-rw-r--r--  drivers/usb/serial/digi_acceleport.c | 19
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 8
-rw-r--r--  drivers/usb/serial/mct_u232.c | 9
-rw-r--r--  drivers/usb/serial/option.c | 2
-rw-r--r--  drivers/usb/storage/uas.c | 23
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/usb/storage/usb.c | 5
-rw-r--r--  drivers/video/fbdev/msm/mdss.h | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_compat_utils.c | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi.c | 19
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_panel.c | 7
-rw-r--r--  drivers/video/fbdev/msm/mdss_fb.c | 11
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_edid.c | 14
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_edid.h | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.c | 225
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.h | 26
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c | 3
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.h | 12
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c | 20
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_hwio.h | 5
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_video.c | 120
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_layer.c | 71
-rw-r--r--  drivers/video/fbdev/msm/mdss_panel.c | 1
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 11
-rw-r--r--  drivers/watchdog/rc32434_wdt.c | 2
-rw-r--r--  drivers/xen/events/events_base.c | 28
337 files changed, 21333 insertions, 4153 deletions
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index c9336751e5e3..8a10a7ae6a8a 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -409,7 +409,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
entry = acpi_pci_irq_lookup(dev, pin);
@@ -454,7 +454,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
kfree(entry);
return rc;
}
- pci_set_managed_irq(dev, rc);
+ dev->irq = rc;
+ dev->irq_managed = 1;
if (link)
snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -477,9 +478,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
u8 pin;
pin = dev->pin;
- if (!pin || !pci_has_managed_irq(dev))
+ if (!pin || !dev->irq_managed || dev->irq <= 0)
return;
+ /* Keep IOAPIC pin configuration when suspending */
+ if (dev->dev.power.is_prepared)
+ return;
+#ifdef CONFIG_PM
+ if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+ return;
+#endif
+
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry)
return;
@@ -499,6 +508,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
- pci_reset_managed_irq(dev);
+ dev->irq_managed = 0;
}
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index cdc5c2599beb..627f8fbb5e9a 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -26,8 +26,20 @@
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
+static inline bool acpi_iospace_resource_valid(struct resource *res)
+{
+ /* On X86 IO space is limited to the [0 - 64K] IO port range */
+ return res->end < 0x10003;
+}
#else
#define valid_IRQ(i) (true)
+/*
+ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
+ * addresses mapping IO space in CPU physical address space, IO space
+ * resources can be placed anywhere in the 64-bit physical address space.
+ */
+static inline bool
+acpi_iospace_resource_valid(struct resource *res) { return true; }
#endif
static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
@@ -126,7 +138,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
- if (res->end >= 0x10003)
+ if (!acpi_iospace_resource_valid(res))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
if (io_decode == ACPI_DECODE_16)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 0d94621dc856..e3322adaaae0 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -714,6 +714,7 @@ static int acpi_hibernation_enter(void)
static void acpi_hibernation_leave(void)
{
+ pm_set_resume_via_firmware();
/*
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
* enable it here.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 4c67945ef36f..20d17906fc9b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -379,6 +379,7 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
struct files_struct *files = proc->files;
unsigned long rlim_cur;
unsigned long irqs;
+ int ret;
if (files == NULL)
return -ESRCH;
@@ -389,7 +390,11 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- return __alloc_fd(files, 0, rlim_cur, flags);
+ preempt_enable_no_resched();
+ ret = __alloc_fd(files, 0, rlim_cur, flags);
+ preempt_disable();
+
+ return ret;
}
/*
@@ -398,8 +403,11 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
- if (proc->files)
+ if (proc->files) {
+ preempt_enable_no_resched();
__fd_install(proc->files, fd, file);
+ preempt_disable();
+ }
}
/*
@@ -427,6 +435,7 @@ static inline void binder_lock(const char *tag)
{
trace_binder_lock(tag);
mutex_lock(&binder_main_lock);
+ preempt_disable();
trace_binder_locked(tag);
}
@@ -434,8 +443,62 @@ static inline void binder_unlock(const char *tag)
{
trace_binder_unlock(tag);
mutex_unlock(&binder_main_lock);
+ preempt_enable();
+}
+
+static inline void *kzalloc_preempt_disabled(size_t size)
+{
+ void *ptr;
+
+ ptr = kzalloc(size, GFP_NOWAIT);
+ if (ptr)
+ return ptr;
+
+ preempt_enable_no_resched();
+ ptr = kzalloc(size, GFP_KERNEL);
+ preempt_disable();
+
+ return ptr;
+}
+
+static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
+{
+ long ret;
+
+ preempt_enable_no_resched();
+ ret = copy_to_user(to, from, n);
+ preempt_disable();
+ return ret;
+}
+
+static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
+{
+ long ret;
+
+ preempt_enable_no_resched();
+ ret = copy_from_user(to, from, n);
+ preempt_disable();
+ return ret;
}
+#define get_user_preempt_disabled(x, ptr) \
+({ \
+ int __ret; \
+ preempt_enable_no_resched(); \
+ __ret = get_user(x, ptr); \
+ preempt_disable(); \
+ __ret; \
+})
+
+#define put_user_preempt_disabled(x, ptr) \
+({ \
+ int __ret; \
+ preempt_enable_no_resched(); \
+ __ret = put_user(x, ptr); \
+ preempt_disable(); \
+ __ret; \
+})
+
static void binder_set_nice(long nice)
{
long min_nice;
@@ -568,6 +631,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
else
mm = get_task_mm(proc->tsk);
+ preempt_enable_no_resched();
+
if (mm) {
down_write(&mm->mmap_sem);
vma = proc->vma;
@@ -622,6 +687,9 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
up_write(&mm->mmap_sem);
mmput(mm);
}
+
+ preempt_disable();
+
return 0;
free_range:
@@ -644,6 +712,9 @@ err_no_vma:
up_write(&mm->mmap_sem);
mmput(mm);
}
+
+ preempt_disable();
+
return -ENOMEM;
}
@@ -903,7 +974,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
return NULL;
}
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc_preempt_disabled(sizeof(*node));
if (node == NULL)
return NULL;
binder_stats_created(BINDER_STAT_NODE);
@@ -1040,7 +1111,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
else
return ref;
}
- new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ new_ref = kzalloc_preempt_disabled(sizeof(*ref));
if (new_ref == NULL)
return NULL;
binder_stats_created(BINDER_STAT_REF);
@@ -1438,14 +1509,14 @@ static void binder_transaction(struct binder_proc *proc,
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
- t = kzalloc(sizeof(*t), GFP_KERNEL);
+ t = kzalloc_preempt_disabled(sizeof(*t));
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
- tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+ tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
@@ -1502,14 +1573,14 @@ static void binder_transaction(struct binder_proc *proc,
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
- if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+ if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
- if (copy_from_user(offp, (const void __user *)(uintptr_t)
+ if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
@@ -1778,7 +1849,7 @@ static int binder_thread_write(struct binder_proc *proc,
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
- if (get_user(cmd, (uint32_t __user *)ptr))
+ if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
@@ -1796,7 +1867,7 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_ref *ref;
const char *debug_string;
- if (get_user(target, (uint32_t __user *)ptr))
+ if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (target == 0 && binder_context_mgr_node &&
@@ -1846,10 +1917,10 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t cookie;
struct binder_node *node;
- if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
+ if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
@@ -1907,7 +1978,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
- if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
+ if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
@@ -1949,7 +2020,7 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_REPLY: {
struct binder_transaction_data tr;
- if (copy_from_user(&tr, ptr, sizeof(tr)))
+ if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
@@ -1999,10 +2070,10 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_ref *ref;
struct binder_ref_death *death;
- if (get_user(target, (uint32_t __user *)ptr))
+ if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
ref = binder_get_ref(proc, target);
@@ -2031,7 +2102,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid);
break;
}
- death = kzalloc(sizeof(*death), GFP_KERNEL);
+ death = kzalloc_preempt_disabled(sizeof(*death));
if (death == NULL) {
thread->return_error = BR_ERROR;
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2085,8 +2156,7 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_work *w;
binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
-
- if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(cookie);
@@ -2118,7 +2188,8 @@ static int binder_thread_write(struct binder_proc *proc,
wake_up_interruptible(&proc->wait);
}
}
- } break;
+ }
+ break;
default:
pr_err("%d:%d unknown command %d\n",
@@ -2167,7 +2238,7 @@ static int binder_thread_read(struct binder_proc *proc,
int wait_for_proc_work;
if (*consumed == 0) {
- if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
@@ -2178,7 +2249,7 @@ retry:
if (thread->return_error != BR_OK && ptr < end) {
if (thread->return_error2 != BR_OK) {
- if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, thread->return_error2);
@@ -2186,7 +2257,7 @@ retry:
goto done;
thread->return_error2 = BR_OK;
}
- if (put_user(thread->return_error, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, thread->return_error);
@@ -2264,7 +2335,7 @@ retry:
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
cmd = BR_TRANSACTION_COMPLETE;
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
@@ -2306,14 +2377,14 @@ retry:
node->has_weak_ref = 0;
}
if (cmd != BR_NOOP) {
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(node->ptr,
+ if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *)
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- if (put_user(node->cookie,
+ if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *)
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
@@ -2357,11 +2428,10 @@ retry:
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(death->cookie,
- (binder_uintptr_t __user *)ptr))
+ if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
@@ -2428,10 +2498,10 @@ retry:
ALIGN(t->buffer->data_size,
sizeof(void *));
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr)))
+ if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
@@ -2473,7 +2543,7 @@ done:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
- if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+ if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *) buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
}
@@ -2548,7 +2618,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
break;
}
if (*p == NULL) {
- thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ thread = kzalloc_preempt_disabled(sizeof(*thread));
if (thread == NULL)
return NULL;
binder_stats_created(BINDER_STAT_THREAD);
@@ -2652,7 +2722,7 @@ static int binder_ioctl_write_read(struct file *filp,
ret = -EINVAL;
goto out;
}
- if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+ if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -2670,7 +2740,7 @@ static int binder_ioctl_write_read(struct file *filp,
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -2684,7 +2754,7 @@ static int binder_ioctl_write_read(struct file *filp,
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -2694,7 +2764,7 @@ static int binder_ioctl_write_read(struct file *filp,
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+ if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -2772,7 +2842,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
break;
case BINDER_SET_MAX_THREADS:
- if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
@@ -2795,9 +2865,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = -EINVAL;
goto err;
}
- if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
- &ver->protocol_version)) {
- ret = -EINVAL;
+ if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
+ ret = -EINVAL;
goto err;
}
break;
@@ -2858,6 +2927,7 @@ static const struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
+
struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
@@ -2918,7 +2988,11 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+ /* binder_update_page_range assumes preemption is disabled */
+ preempt_disable();
+ ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
+ preempt_enable_no_resched();
+ if (ret) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
@@ -3188,8 +3262,12 @@ static void binder_deferred_func(struct work_struct *work)
int defer;
do {
- binder_lock(__func__);
+ trace_binder_lock(__func__);
+ mutex_lock(&binder_main_lock);
+ trace_binder_locked(__func__);
+
mutex_lock(&binder_deferred_lock);
+ preempt_disable();
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
struct binder_proc, deferred_work_node);
@@ -3215,7 +3293,9 @@ static void binder_deferred_func(struct work_struct *work)
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
- binder_unlock(__func__);
+ trace_binder_unlock(__func__);
+ mutex_unlock(&binder_main_lock);
+ preempt_enable_no_resched();
if (files)
put_files_struct(files);
} while (proc);
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
index 33c1e18c41a4..19837ef04d8e 100644
--- a/drivers/base/power/opp/Makefile
+++ b/drivers/base/power/opp/Makefile
@@ -1,2 +1,3 @@
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
+obj-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index b8e76f75073b..433b60092972 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -13,50 +13,52 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>
+#include <linux/regulator/consumer.h>
#include "opp.h"
/*
- * The root of the list of all devices. All device_opp structures branch off
- * from here, with each device_opp containing the list of opp it supports in
+ * The root of the list of all opp-tables. All opp_table structures branch off
+ * from here, with each opp_table containing the list of opps it supports in
* various states of availability.
*/
-static LIST_HEAD(dev_opp_list);
+static LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
-DEFINE_MUTEX(dev_opp_list_lock);
+DEFINE_MUTEX(opp_table_lock);
#define opp_rcu_lockdep_assert() \
do { \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&dev_opp_list_lock), \
- "Missing rcu_read_lock() or " \
- "dev_opp_list_lock protection"); \
+ !lockdep_is_held(&opp_table_lock), \
+ "Missing rcu_read_lock() or " \
+ "opp_table_lock protection"); \
} while (0)
-static struct device_list_opp *_find_list_dev(const struct device *dev,
- struct device_opp *dev_opp)
+static struct opp_device *_find_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
- list_for_each_entry(list_dev, &dev_opp->dev_list, node)
- if (list_dev->dev == dev)
- return list_dev;
+ list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+ if (opp_dev->dev == dev)
+ return opp_dev;
return NULL;
}
-static struct device_opp *_managed_opp(const struct device_node *np)
+static struct opp_table *_managed_opp(const struct device_node *np)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
- list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
- if (dev_opp->np == np) {
+ list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+ if (opp_table->np == np) {
/*
* Multiple devices can point to the same OPP table and
* so will have same node-pointer, np.
@@ -64,7 +66,7 @@ static struct device_opp *_managed_opp(const struct device_node *np)
* But the OPPs will be considered as shared only if the
* OPP table contains a "opp-shared" property.
*/
- return dev_opp->shared_opp ? dev_opp : NULL;
+ return opp_table->shared_opp ? opp_table : NULL;
}
}
@@ -72,24 +74,24 @@ static struct device_opp *_managed_opp(const struct device_node *np)
}
/**
- * _find_device_opp() - find device_opp struct using device pointer
- * @dev: device pointer used to lookup device OPPs
+ * _find_opp_table() - find opp_table struct using device pointer
+ * @dev: device pointer used to lookup OPP table
*
- * Search list of device OPPs for one containing matching device. Does a RCU
- * reader operation to grab the pointer needed.
+ * Search OPP table for one containing matching device. Does a RCU reader
+ * operation to grab the pointer needed.
*
- * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
* -EINVAL based on type of error.
*
* Locking: For readers, this function must be called under rcu_read_lock().
- * device_opp is a RCU protected pointer, which means that device_opp is valid
+ * opp_table is a RCU protected pointer, which means that opp_table is valid
* as long as we are under RCU lock.
*
- * For Writers, this function must be called with dev_opp_list_lock held.
+ * For Writers, this function must be called with opp_table_lock held.
*/
-struct device_opp *_find_device_opp(struct device *dev)
+struct opp_table *_find_opp_table(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
opp_rcu_lockdep_assert();
@@ -98,9 +100,9 @@ struct device_opp *_find_device_opp(struct device *dev)
return ERR_PTR(-EINVAL);
}
- list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
- if (_find_list_dev(dev, dev_opp))
- return dev_opp;
+ list_for_each_entry_rcu(opp_table, &opp_tables, node)
+ if (_find_opp_dev(dev, opp_table))
+ return opp_table;
return ERR_PTR(-ENODEV);
}
@@ -213,16 +215,16 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
*/
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
unsigned long clock_latency_ns;
rcu_read_lock();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
clock_latency_ns = 0;
else
- clock_latency_ns = dev_opp->clock_latency_ns_max;
+ clock_latency_ns = opp_table->clock_latency_ns_max;
rcu_read_unlock();
return clock_latency_ns;
@@ -230,6 +232,82 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
/**
+ * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max voltage latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp;
+ struct regulator *reg;
+ unsigned long latency_ns = 0;
+ unsigned long min_uV = ~0, max_uV = 0;
+ int ret;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ reg = opp_table->regulator;
+ if (IS_ERR(reg)) {
+ /* Regulator may not be required for device */
+ if (reg)
+ dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
+ PTR_ERR(reg));
+ rcu_read_unlock();
+ return 0;
+ }
+
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+ if (!opp->available)
+ continue;
+
+ if (opp->u_volt_min < min_uV)
+ min_uV = opp->u_volt_min;
+ if (opp->u_volt_max > max_uV)
+ max_uV = opp->u_volt_max;
+ }
+
+ rcu_read_unlock();
+
+ /*
+ * The caller needs to ensure that opp_table (and hence the regulator)
+ * isn't freed, while we are executing this routine.
+ */
+ ret = regulator_set_voltage_time(reg, min_uV, max_uV);
+ if (ret > 0)
+ latency_ns = ret * 1000;
+
+ return latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
+
+/**
+ * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
+ * nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max transition latency, in nanoseconds, to
+ * switch from one OPP to other.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+ return dev_pm_opp_get_max_volt_latency(dev) +
+ dev_pm_opp_get_max_clock_latency(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
+
+/**
* dev_pm_opp_get_suspend_opp() - Get suspend opp
* @dev: device for which we do this operation
*
@@ -244,21 +322,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
*/
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
opp_rcu_lockdep_assert();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
- !dev_opp->suspend_opp->available)
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
+ !opp_table->suspend_opp->available)
return NULL;
- return dev_opp->suspend_opp;
+ return opp_table->suspend_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
/**
- * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
* @dev: device for which we do this operation
*
* Return: This function returns the number of available opps if there are any,
@@ -268,21 +346,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp;
int count = 0;
rcu_read_lock();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- count = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n",
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ count = PTR_ERR(opp_table);
+ dev_err(dev, "%s: OPP table not found (%d)\n",
__func__, count);
goto out_unlock;
}
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available)
count++;
}
@@ -299,7 +377,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
* @freq: frequency to search for
* @available: true/false - match for available opp
*
- * Return: Searches for exact match in the opp list and returns pointer to the
+ * Return: Searches for exact match in the opp table and returns pointer to the
* matching opp if found, else returns ERR_PTR in case of error and should
* be handled using IS_ERR. Error return values can be:
* EINVAL: for bad pointer
@@ -323,19 +401,20 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
return ERR_PTR(r);
}
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available == available &&
temp_opp->rate == freq) {
opp = temp_opp;
@@ -371,7 +450,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
@@ -381,11 +460,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
return ERR_PTR(-EINVAL);
}
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
opp = temp_opp;
*freq = opp->rate;
@@ -421,7 +500,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
@@ -431,11 +510,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
return ERR_PTR(-EINVAL);
}
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available) {
/* go to the next node, before choosing prev */
if (temp_opp->rate > *freq)
@@ -451,116 +530,343 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
-/* List-dev Helpers */
-static void _kfree_list_dev_rcu(struct rcu_head *head)
+/*
+ * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
+ * while clk returned here is used.
+ */
+static struct clk *_get_opp_clk(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct clk *clk;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ clk = ERR_CAST(opp_table);
+ goto unlock;
+ }
+
+ clk = opp_table->clk;
+ if (IS_ERR(clk))
+ dev_err(dev, "%s: No clock available for the device\n",
+ __func__);
+
+unlock:
+ rcu_read_unlock();
+ return clk;
+}
+
+static int _set_opp_voltage(struct device *dev, struct regulator *reg,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
+{
+ int ret;
+
+ /* Regulator not available for device */
+ if (IS_ERR(reg)) {
+ dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
+ PTR_ERR(reg));
+ return 0;
+ }
+
+ dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
+ u_volt, u_volt_max);
+
+ ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
+ u_volt_max);
+ if (ret)
+ dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
+ __func__, u_volt_min, u_volt, u_volt_max, ret);
+
+ return ret;
+}
+
+/**
+ * dev_pm_opp_set_rate() - Configure new OPP based on frequency
+ * @dev: device for which we do this operation
+ * @target_freq: frequency to achieve
+ *
+ * This configures the power-supplies and clock source to the levels specified
+ * by the OPP corresponding to the target_freq.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *old_opp, *opp;
+ struct regulator *reg;
+ struct clk *clk;
+ unsigned long freq, old_freq;
+ unsigned long u_volt, u_volt_min, u_volt_max;
+ unsigned long ou_volt, ou_volt_min, ou_volt_max;
+ int ret;
+
+ if (unlikely(!target_freq)) {
+ dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
+ target_freq);
+ return -EINVAL;
+ }
+
+ clk = _get_opp_clk(dev);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ freq = clk_round_rate(clk, target_freq);
+ if ((long)freq <= 0)
+ freq = target_freq;
+
+ old_freq = clk_get_rate(clk);
+
+ /* Return early if nothing to do */
+ if (old_freq == freq) {
+ dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+ __func__, freq);
+ return 0;
+ }
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ rcu_read_unlock();
+ return PTR_ERR(opp_table);
+ }
+
+ old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+ if (!IS_ERR(old_opp)) {
+ ou_volt = old_opp->u_volt;
+ ou_volt_min = old_opp->u_volt_min;
+ ou_volt_max = old_opp->u_volt_max;
+ } else {
+ dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
+ __func__, old_freq, PTR_ERR(old_opp));
+ }
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+ __func__, freq, ret);
+ rcu_read_unlock();
+ return ret;
+ }
+
+ u_volt = opp->u_volt;
+ u_volt_min = opp->u_volt_min;
+ u_volt_max = opp->u_volt_max;
+
+ reg = opp_table->regulator;
+
+ rcu_read_unlock();
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+ u_volt_max);
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+ __func__, old_freq, freq);
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ goto restore_voltage;
+ }
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+ u_volt_max);
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ if (clk_set_rate(clk, old_freq))
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (!IS_ERR(old_opp))
+ _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
+
+/* OPP-dev Helpers */
+static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
- list_dev = container_of(head, struct device_list_opp, rcu_head);
- kfree_rcu(list_dev, rcu_head);
+ opp_dev = container_of(head, struct opp_device, rcu_head);
+ kfree_rcu(opp_dev, rcu_head);
}
-static void _remove_list_dev(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static void _remove_opp_dev(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- list_del(&list_dev->node);
- call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
- _kfree_list_dev_rcu);
+ opp_debug_unregister(opp_dev, opp_table);
+ list_del(&opp_dev->node);
+ call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
+ _kfree_opp_dev_rcu);
}
-struct device_list_opp *_add_list_dev(const struct device *dev,
- struct device_opp *dev_opp)
+struct opp_device *_add_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
+ int ret;
- list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
- if (!list_dev)
+ opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
+ if (!opp_dev)
return NULL;
- /* Initialize list-dev */
- list_dev->dev = dev;
- list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+ /* Initialize opp-dev */
+ opp_dev->dev = dev;
+ list_add_rcu(&opp_dev->node, &opp_table->dev_list);
+
+ /* Create debugfs entries for the opp_table */
+ ret = opp_debug_register(opp_dev, opp_table);
+ if (ret)
+ dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
+ __func__, ret);
- return list_dev;
+ return opp_dev;
}
/**
- * _add_device_opp() - Find device OPP table or allocate a new one
+ * _add_opp_table() - Find OPP table or allocate a new one
* @dev: device for which we do this operation
*
* It tries to find an existing table first, if it couldn't find one, it
* allocates a new OPP table and returns that.
*
- * Return: valid device_opp pointer if success, else NULL.
+ * Return: valid opp_table pointer if success, else NULL.
*/
-static struct device_opp *_add_device_opp(struct device *dev)
+static struct opp_table *_add_opp_table(struct device *dev)
{
- struct device_opp *dev_opp;
- struct device_list_opp *list_dev;
+ struct opp_table *opp_table;
+ struct opp_device *opp_dev;
+ struct device_node *np;
+ int ret;
- /* Check for existing list for 'dev' first */
- dev_opp = _find_device_opp(dev);
- if (!IS_ERR(dev_opp))
- return dev_opp;
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (!IS_ERR(opp_table))
+ return opp_table;
/*
- * Allocate a new device OPP table. In the infrequent case where a new
+ * Allocate a new OPP table. In the infrequent case where a new
* device is needed to be added, we pay this penalty.
*/
- dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
- if (!dev_opp)
+ opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
+ if (!opp_table)
return NULL;
- INIT_LIST_HEAD(&dev_opp->dev_list);
+ INIT_LIST_HEAD(&opp_table->dev_list);
- list_dev = _add_list_dev(dev, dev_opp);
- if (!list_dev) {
- kfree(dev_opp);
+ opp_dev = _add_opp_dev(dev, opp_table);
+ if (!opp_dev) {
+ kfree(opp_table);
return NULL;
}
- srcu_init_notifier_head(&dev_opp->srcu_head);
- INIT_LIST_HEAD(&dev_opp->opp_list);
+ /*
+ * Only required for backward compatibility with v1 bindings, but isn't
+ * harmful for other cases. And so we do it unconditionally.
+ */
+ np = of_node_get(dev->of_node);
+ if (np) {
+ u32 val;
+
+ if (!of_property_read_u32(np, "clock-latency", &val))
+ opp_table->clock_latency_ns_max = val;
+ of_property_read_u32(np, "voltage-tolerance",
+ &opp_table->voltage_tolerance_v1);
+ of_node_put(np);
+ }
+
+ /* Set regulator to a non-NULL error value */
+ opp_table->regulator = ERR_PTR(-ENXIO);
+
+ /* Find clk for the device */
+ opp_table->clk = clk_get(dev, NULL);
+ if (IS_ERR(opp_table->clk)) {
+ ret = PTR_ERR(opp_table->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
+ ret);
+ }
+
+ srcu_init_notifier_head(&opp_table->srcu_head);
+ INIT_LIST_HEAD(&opp_table->opp_list);
- /* Secure the device list modification */
- list_add_rcu(&dev_opp->node, &dev_opp_list);
- return dev_opp;
+ /* Secure the device table modification */
+ list_add_rcu(&opp_table->node, &opp_tables);
+ return opp_table;
}
/**
- * _kfree_device_rcu() - Free device_opp RCU handler
+ * _kfree_device_rcu() - Free opp_table RCU handler
* @head: RCU head
*/
static void _kfree_device_rcu(struct rcu_head *head)
{
- struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+ struct opp_table *opp_table = container_of(head, struct opp_table,
+ rcu_head);
- kfree_rcu(device_opp, rcu_head);
+ kfree_rcu(opp_table, rcu_head);
}
/**
- * _remove_device_opp() - Removes a device OPP table
- * @dev_opp: device OPP table to be removed.
+ * _remove_opp_table() - Removes a OPP table
+ * @opp_table: OPP table to be removed.
*
- * Removes/frees device OPP table it it doesn't contain any OPPs.
+ * Removes/frees OPP table if it doesn't contain any OPPs.
*/
-static void _remove_device_opp(struct device_opp *dev_opp)
+static void _remove_opp_table(struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
+
+ if (!list_empty(&opp_table->opp_list))
+ return;
- if (!list_empty(&dev_opp->opp_list))
+ if (opp_table->supported_hw)
return;
- list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
- node);
+ if (opp_table->prop_name)
+ return;
+
+ if (!IS_ERR(opp_table->regulator))
+ return;
- _remove_list_dev(list_dev, dev_opp);
+ /* Release clk */
+ if (!IS_ERR(opp_table->clk))
+ clk_put(opp_table->clk);
+
+ opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
+ node);
+
+ _remove_opp_dev(opp_dev, opp_table);
/* dev_list must be empty now */
- WARN_ON(!list_empty(&dev_opp->dev_list));
+ WARN_ON(!list_empty(&opp_table->dev_list));
- list_del_rcu(&dev_opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+ list_del_rcu(&opp_table->node);
+ call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
_kfree_device_rcu);
}
@@ -577,17 +883,17 @@ static void _kfree_opp_rcu(struct rcu_head *head)
/**
* _opp_remove() - Remove an OPP from a table definition
- * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp_table: points back to the opp_table struct this opp belongs to
* @opp: pointer to the OPP to remove
* @notify: OPP_EVENT_REMOVE notification should be sent or not
*
- * This function removes an opp definition from the opp list.
+ * This function removes an opp definition from the opp table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* It is assumed that the caller holds required mutex for an RCU updater
* strategy.
*/
-static void _opp_remove(struct device_opp *dev_opp,
+static void _opp_remove(struct opp_table *opp_table,
struct dev_pm_opp *opp, bool notify)
{
/*
@@ -595,21 +901,23 @@ static void _opp_remove(struct device_opp *dev_opp,
* frequency/voltage list.
*/
if (notify)
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_REMOVE, opp);
+ opp_debug_remove_one(opp);
list_del_rcu(&opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
- _remove_device_opp(dev_opp);
+ _remove_opp_table(opp_table);
}
/**
- * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * dev_pm_opp_remove() - Remove an OPP from OPP table
* @dev: device for which we do this operation
* @freq: OPP to remove with matching 'freq'
*
- * This function removes an opp from the opp list.
+ * This function removes an opp from the opp table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -618,17 +926,17 @@ static void _opp_remove(struct device_opp *dev_opp,
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
struct dev_pm_opp *opp;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
bool found = false;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
goto unlock;
- list_for_each_entry(opp, &dev_opp->opp_list, node) {
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
if (opp->rate == freq) {
found = true;
break;
@@ -641,14 +949,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
goto unlock;
}
- _opp_remove(dev_opp, opp, true);
+ _opp_remove(opp_table, opp, true);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
static struct dev_pm_opp *_allocate_opp(struct device *dev,
- struct device_opp **dev_opp)
+ struct opp_table **opp_table)
{
struct dev_pm_opp *opp;
@@ -659,8 +967,8 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
INIT_LIST_HEAD(&opp->node);
- *dev_opp = _add_device_opp(dev);
- if (!*dev_opp) {
+ *opp_table = _add_opp_table(dev);
+ if (!*opp_table) {
kfree(opp);
return NULL;
}
@@ -668,21 +976,38 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
return opp;
}
+static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
+ struct opp_table *opp_table)
+{
+ struct regulator *reg = opp_table->regulator;
+
+ if (!IS_ERR(reg) &&
+ !regulator_is_supported_voltage(reg, opp->u_volt_min,
+ opp->u_volt_max)) {
+ pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+ __func__, opp->u_volt_min, opp->u_volt_max);
+ return false;
+ }
+
+ return true;
+}
+
static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct device_opp *dev_opp)
+ struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
- struct list_head *head = &dev_opp->opp_list;
+ struct list_head *head = &opp_table->opp_list;
+ int ret;
/*
* Insert new OPP in order of increasing frequency and discard if
* already present.
*
- * Need to use &dev_opp->opp_list in the condition part of the 'for'
+ * Need to use &opp_table->opp_list in the condition part of the 'for'
* loop, don't replace it with head otherwise it will become an infinite
* loop.
*/
- list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
if (new_opp->rate > opp->rate) {
head = &opp->node;
continue;
@@ -700,9 +1025,20 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
0 : -EEXIST;
}
- new_opp->dev_opp = dev_opp;
+ new_opp->opp_table = opp_table;
list_add_rcu(&new_opp->node, head);
+ ret = opp_debug_create_one(new_opp, opp_table);
+ if (ret)
+ dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
+ __func__, ret);
+
+ if (!_opp_supported_by_regulators(new_opp, opp_table)) {
+ new_opp->available = false;
+ dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
+ __func__, new_opp->rate);
+ }
+
return 0;
}
@@ -713,14 +1049,14 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* @u_volt: Voltage in uVolts for this OPP
* @dynamic: Dynamically added OPPs.
*
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
*
* NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
* and freed by dev_pm_opp_of_remove_table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -736,14 +1072,15 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
bool dynamic)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
+ unsigned long tol;
int ret;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- new_opp = _allocate_opp(dev, &dev_opp);
+ new_opp = _allocate_opp(dev, &opp_table);
if (!new_opp) {
ret = -ENOMEM;
goto unlock;
@@ -751,83 +1088,475 @@ static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
/* populate the opp table */
new_opp->rate = freq;
+ tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
new_opp->u_volt = u_volt;
+ new_opp->u_volt_min = u_volt - tol;
+ new_opp->u_volt_max = u_volt + tol;
new_opp->available = true;
new_opp->dynamic = dynamic;
- ret = _opp_add(dev, new_opp, dev_opp);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret)
goto free_opp;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
free_opp:
- _opp_remove(dev_opp, new_opp, false);
+ _opp_remove(opp_table, new_opp, false);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
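
/*
 * Worked example of the v1 tolerance handling above (illustrative numbers):
 * with u_volt = 1000000 uV and voltage_tolerance_v1 = 5 (percent),
 * tol = 1000000 * 5 / 100 = 50000 uV, giving u_volt_min = 950000 uV and
 * u_volt_max = 1050000 uV.
 */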
/* TODO: Support multiple regulators */
-static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+ struct opp_table *opp_table)
{
u32 microvolt[3] = {0};
u32 val;
int count, ret;
+ struct property *prop = NULL;
+ char name[NAME_MAX];
+
+ /* Search for "opp-microvolt-<name>" */
+ if (opp_table->prop_name) {
+ snprintf(name, sizeof(name), "opp-microvolt-%s",
+ opp_table->prop_name);
+ prop = of_find_property(opp->np, name, NULL);
+ }
- /* Missing property isn't a problem, but an invalid entry is */
- if (!of_find_property(opp->np, "opp-microvolt", NULL))
- return 0;
+ if (!prop) {
+ /* Search for "opp-microvolt" */
+ sprintf(name, "opp-microvolt");
+ prop = of_find_property(opp->np, name, NULL);
+
+ /* Missing property isn't a problem, but an invalid entry is */
+ if (!prop)
+ return 0;
+ }
- count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+ count = of_property_count_u32_elems(opp->np, name);
if (count < 0) {
- dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
- __func__, count);
+ dev_err(dev, "%s: Invalid %s property (%d)\n",
+ __func__, name, count);
return count;
}
/* There can be one or three elements here */
if (count != 1 && count != 3) {
- dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
- __func__, count);
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
+ __func__, name, count);
return -EINVAL;
}
- ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
- count);
+ ret = of_property_read_u32_array(opp->np, name, microvolt, count);
if (ret) {
- dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
- ret);
+ dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
return -EINVAL;
}
opp->u_volt = microvolt[0];
- opp->u_volt_min = microvolt[1];
- opp->u_volt_max = microvolt[2];
- if (!of_property_read_u32(opp->np, "opp-microamp", &val))
+ if (count == 1) {
+ opp->u_volt_min = opp->u_volt;
+ opp->u_volt_max = opp->u_volt;
+ } else {
+ opp->u_volt_min = microvolt[1];
+ opp->u_volt_max = microvolt[2];
+ }
+
+ /* Search for "opp-microamp-<name>" */
+ prop = NULL;
+ if (opp_table->prop_name) {
+ snprintf(name, sizeof(name), "opp-microamp-%s",
+ opp_table->prop_name);
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (!prop) {
+ /* Search for "opp-microamp" */
+ sprintf(name, "opp-microamp");
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (prop && !of_property_read_u32(opp->np, name, &val))
opp->u_amp = val;
return 0;
}
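
/*
 * Example of the parsing above, with assumed DT values: if prop_name is set
 * to "slow", an OPP node containing
 *	opp-microvolt-slow = <970000 960000 975000>;
 * yields u_volt = 970000, u_volt_min = 960000 and u_volt_max = 975000, while
 * a one-element "opp-microvolt = <970000>" sets all three fields to 970000.
 */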
/**
+ * dev_pm_opp_set_supported_hw() - Set supported platforms
+ * @dev: Device for which supported-hw has to be set.
+ * @versions: Array of hierarchy of versions to match.
+ * @count: Number of elements in the array.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the hierarchy of versions it supports. The OPP layer will then
+ * enable only those OPPs that are available for these versions, based on
+ * each OPP's 'opp-supported-hw' property.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
+ unsigned int count)
+{
+ struct opp_table *opp_table;
+ int ret = 0;
+
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ /* Do we already have a version hierarchy associated with opp_table? */
+ if (opp_table->supported_hw) {
+ dev_err(dev, "%s: Already have supported hardware list\n",
+ __func__);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
+ GFP_KERNEL);
+ if (!opp_table->supported_hw) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ opp_table->supported_hw_count = count;
+ mutex_unlock(&opp_table_lock);
+ return 0;
+
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
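
/*
 * Minimal caller sketch for dev_pm_opp_set_supported_hw(); the device pointer
 * and version values are assumptions for illustration only.
 */
static int example_register_versions(struct device *cpu_dev)
{
	/* e.g. { speedbin mask, process-version mask } read from fuses */
	static const u32 versions[] = { 0x3, 0x1 };
	int ret;

	ret = dev_pm_opp_set_supported_hw(cpu_dev, versions,
					  ARRAY_SIZE(versions));
	if (ret)
		return ret;

	/* ... add OPPs, e.g. via dev_pm_opp_of_add_table(cpu_dev) ... */

	/* Balanced by dev_pm_opp_put_supported_hw(cpu_dev) on teardown. */
	return 0;
}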
+
+/**
+ * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
+ * @dev: Device for which supported-hw has to be put.
+ *
+ * This is required only for the V2 bindings, and is called for a matching
+ * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
+ * will not be freed.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_supported_hw(struct device *dev)
+{
+ struct opp_table *opp_table;
+
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
+
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ if (!opp_table->supported_hw) {
+ dev_err(dev, "%s: Doesn't have supported hardware list\n",
+ __func__);
+ goto unlock;
+ }
+
+ kfree(opp_table->supported_hw);
+ opp_table->supported_hw = NULL;
+ opp_table->supported_hw_count = 0;
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
+
+/**
+ * dev_pm_opp_set_prop_name() - Set prop-extn name
+ * @dev: Device for which the prop-name has to be set.
+ * @name: name to postfix to properties.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the extension to be used for certain property names. The properties
+ * to which the extension applies are opp-microvolt and opp-microamp. The OPP
+ * core will append -<name> to these property names while looking them up.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+ struct opp_table *opp_table;
+ int ret = 0;
+
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ /* Do we already have a prop-name associated with opp_table? */
+ if (opp_table->prop_name) {
+ dev_err(dev, "%s: Already have prop-name %s\n", __func__,
+ opp_table->prop_name);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ opp_table->prop_name = kstrdup(name, GFP_KERNEL);
+ if (!opp_table->prop_name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mutex_unlock(&opp_table_lock);
+ return 0;
+
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
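
/*
 * Minimal caller sketch for dev_pm_opp_set_prop_name(); the name "slow" is an
 * assumed, illustrative value. With it set, the core looks for
 * "opp-microvolt-slow" and "opp-microamp-slow" before the plain properties.
 */
static int example_set_prop_name(struct device *dev)
{
	int ret = dev_pm_opp_set_prop_name(dev, "slow");

	if (ret)
		return ret;

	/* ... parse OPPs; undo with dev_pm_opp_put_prop_name(dev) ... */
	return 0;
}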
+
+/**
+ * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
+ * @dev: Device for which the prop-name has to be put.
+ *
+ * This is required only for the V2 bindings, and is called for a matching
+ * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
+ * will not be freed.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_prop_name(struct device *dev)
+{
+ struct opp_table *opp_table;
+
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
+
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ if (!opp_table->prop_name) {
+ dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
+ goto unlock;
+ }
+
+ kfree(opp_table->prop_name);
+ opp_table->prop_name = NULL;
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
+
+/**
+ * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * @dev: Device for which regulator name is being set.
+ * @name: Name of the regulator.
+ *
+ * In order to support OPP switching, the OPP layer needs to know the name of
+ * the device's regulator, as the core will have to switch voltages as well.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+ struct opp_table *opp_table;
+ struct regulator *reg;
+ int ret;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Already have a regulator set */
+ if (WARN_ON(!IS_ERR(opp_table->regulator))) {
+ ret = -EBUSY;
+ goto err;
+ }
+ /* Allocate the regulator */
+ reg = regulator_get_optional(dev, name);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%s: no regulator (%s) found: %d\n",
+ __func__, name, ret);
+ goto err;
+ }
+
+ opp_table->regulator = reg;
+
+ mutex_unlock(&opp_table_lock);
+ return 0;
+
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
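
/*
 * Minimal caller sketch for dev_pm_opp_set_regulator(); must run before any
 * OPPs are added for the device. The supply name "cpu" is an assumption for
 * illustration.
 */
static int example_set_regulator(struct device *cpu_dev)
{
	int ret = dev_pm_opp_set_regulator(cpu_dev, "cpu");

	if (ret)
		return ret;	/* may be -EPROBE_DEFER */

	/* ... dev_pm_opp_of_add_table(cpu_dev), OPP transitions, etc ... */

	/* Balanced by dev_pm_opp_put_regulator(cpu_dev) on teardown. */
	return 0;
}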
+
+/**
+ * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
+ * @dev: Device for which regulator was set.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulator(struct device *dev)
+{
+ struct opp_table *opp_table;
+
+ mutex_lock(&opp_table_lock);
+
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
+ goto unlock;
+ }
+
+ if (IS_ERR(opp_table->regulator)) {
+ dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ regulator_put(opp_table->regulator);
+ opp_table->regulator = ERR_PTR(-ENXIO);
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
+
+static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
+ struct device_node *np)
+{
+ unsigned int count = opp_table->supported_hw_count;
+ u32 version;
+ int ret;
+
+ if (!opp_table->supported_hw)
+ return true;
+
+ while (count--) {
+ ret = of_property_read_u32_index(np, "opp-supported-hw", count,
+ &version);
+ if (ret) {
+ dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+ __func__, count, ret);
+ return false;
+ }
+
+ /* Both of these are bitwise masks of the versions */
+ if (!(version & opp_table->supported_hw[count]))
+ return false;
+ }
+
+ return true;
+}
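
/*
 * Worked example of the matching above (values are illustrative): with
 * opp_table->supported_hw = { 0x4, 0x1 } and an OPP node carrying
 * opp-supported-hw = <0x6 0x3>, both indices satisfy
 * (version & supported_hw[i]) != 0, so the OPP is treated as supported;
 * a node with opp-supported-hw = <0x1 0x3> would fail at index 0.
 */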
+
+/**
* _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
* @dev: device for which we do this operation
* @np: device node
*
- * This function adds an opp definition to the opp list and returns status. The
+ * This function adds an opp definition to the opp table and returns status. The
* opp can be controlled using dev_pm_opp_enable/disable functions and may be
* removed by dev_pm_opp_remove.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -843,16 +1572,16 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
*/
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
u64 rate;
u32 val;
int ret;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- new_opp = _allocate_opp(dev, &dev_opp);
+ new_opp = _allocate_opp(dev, &opp_table);
if (!new_opp) {
ret = -ENOMEM;
goto unlock;
@@ -864,6 +1593,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
goto free_opp;
}
+ /* Check if the OPP supports hardware's hierarchy of versions or not */
+ if (!_opp_is_supported(dev, opp_table, np)) {
+ dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+ goto free_opp;
+ }
+
/*
* Rate is defined as an unsigned long in clk API, and so casting
* explicitly to its type. Must be fixed once rate is 64 bit
@@ -879,28 +1614,30 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
- ret = opp_parse_supplies(new_opp, dev);
+ ret = opp_parse_supplies(new_opp, dev, opp_table);
if (ret)
goto free_opp;
- ret = _opp_add(dev, new_opp, dev_opp);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret)
goto free_opp;
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
- if (dev_opp->suspend_opp)
+ if (opp_table->suspend_opp) {
dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
- __func__, dev_opp->suspend_opp->rate,
+ __func__, opp_table->suspend_opp->rate,
new_opp->rate);
- else
- dev_opp->suspend_opp = new_opp;
+ } else {
+ new_opp->suspend = true;
+ opp_table->suspend_opp = new_opp;
+ }
}
- if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
- dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+ if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
+ opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
__func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
@@ -911,13 +1648,13 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
free_opp:
- _opp_remove(dev_opp, new_opp, false);
+ _opp_remove(opp_table, new_opp, false);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
@@ -927,11 +1664,11 @@ unlock:
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
*
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -963,7 +1700,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
* copy operation, returns 0 if no modification was done OR modification was
* successful.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks to
* keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -972,7 +1709,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
static int _opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
@@ -981,18 +1718,18 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
if (!new_opp)
return -ENOMEM;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- /* Find the device_opp */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- r = PTR_ERR(dev_opp);
+ /* Find the opp_table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ r = PTR_ERR(opp_table);
dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
goto unlock;
}
/* Do we have the frequency? */
- list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
if (tmp_opp->rate == freq) {
opp = tmp_opp;
break;
@@ -1013,21 +1750,21 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
new_opp->available = availability_req;
list_replace_rcu(&opp->node, &new_opp->node);
- mutex_unlock(&dev_opp_list_lock);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ mutex_unlock(&opp_table_lock);
+ call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
/* Notify the change of the OPP availability */
if (availability_req)
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
- new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_ENABLE, new_opp);
else
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
- new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_DISABLE, new_opp);
return 0;
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
kfree(new_opp);
return r;
}
@@ -1041,7 +1778,7 @@ unlock:
* corresponding error value. It is meant to let users make an OPP available
* again after it has been temporarily made unavailable with dev_pm_opp_disable.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
@@ -1067,7 +1804,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
* control by users to make this OPP not available until the circumstances are
* right to make it available again (with a call to dev_pm_opp_enable).
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
@@ -1085,26 +1822,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
/**
* dev_pm_opp_get_notifier() - find notifier_head of the device with opp
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Return: pointer to notifier head if found, otherwise -ENODEV or
* -EINVAL based on type of error cast as a pointer. The value must be checked
* with IS_ERR to determine valid pointer or error result.
*
- * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * Locking: This function must be called under rcu_read_lock(). opp_table is a
+ * RCU protected pointer. The reason for the same is that the opp pointer which
+ * is returned will remain valid for use with opp_get_{voltage, freq} only while
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
- struct device_opp *dev_opp = _find_device_opp(dev);
+ struct opp_table *opp_table = _find_opp_table(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp); /* matching type */
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table); /* matching type */
- return &dev_opp->srcu_head;
+ return &opp_table->srcu_head;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
@@ -1112,11 +1849,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
/**
* dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
* entries
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Free OPPs created using static entries present in DT.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1124,38 +1861,38 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
*/
void dev_pm_opp_of_remove_table(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *opp, *tmp;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- /* Check for existing list for 'dev' */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int error = PTR_ERR(dev_opp);
+ /* Check for existing table for 'dev' */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int error = PTR_ERR(opp_table);
if (error != -ENODEV)
- WARN(1, "%s: dev_opp: %d\n",
+ WARN(1, "%s: opp_table: %d\n",
IS_ERR_OR_NULL(dev) ?
"Invalid device" : dev_name(dev),
error);
goto unlock;
}
- /* Find if dev_opp manages a single device */
- if (list_is_singular(&dev_opp->dev_list)) {
+ /* Find if opp_table manages a single device */
+ if (list_is_singular(&opp_table->dev_list)) {
/* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
if (!opp->dynamic)
- _opp_remove(dev_opp, opp, true);
+ _opp_remove(opp_table, opp, true);
}
} else {
- _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+ _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
}
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
@@ -1176,22 +1913,22 @@ struct device_node *_of_get_opp_desc_node(struct device *dev)
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
struct device_node *np;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
int ret = 0, count = 0;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _managed_opp(opp_np);
- if (dev_opp) {
+ opp_table = _managed_opp(opp_np);
+ if (opp_table) {
/* OPPs are already managed */
- if (!_add_list_dev(dev, dev_opp))
+ if (!_add_opp_dev(dev, opp_table))
ret = -ENOMEM;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
- /* We have opp-list node now, iterate over it and add OPPs */
+ /* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_np, np) {
count++;
@@ -1207,19 +1944,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
if (WARN_ON(!count))
return -ENOENT;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(dev);
- if (WARN_ON(IS_ERR(dev_opp))) {
- ret = PTR_ERR(dev_opp);
- mutex_unlock(&dev_opp_list_lock);
+ opp_table = _find_opp_table(dev);
+ if (WARN_ON(IS_ERR(opp_table))) {
+ ret = PTR_ERR(opp_table);
+ mutex_unlock(&opp_table_lock);
goto free_table;
}
- dev_opp->np = opp_np;
- dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+ opp_table->np = opp_np;
+ opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return 0;
@@ -1248,7 +1985,7 @@ static int _of_add_opp_table_v1(struct device *dev)
*/
nr = prop->length / sizeof(u32);
if (nr % 2) {
- dev_err(dev, "%s: Invalid OPP list\n", __func__);
+ dev_err(dev, "%s: Invalid OPP table\n", __func__);
return -EINVAL;
}
@@ -1268,11 +2005,11 @@ static int _of_add_opp_table_v1(struct device *dev)
/**
* dev_pm_opp_of_add_table() - Initialize opp table from device tree
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Register the initial OPP table with the OPP library for given device.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 7b445e88a0d5..ba2bdbd932ef 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -31,7 +31,7 @@
* @table: Cpufreq table returned back to caller
*
* Generate a cpufreq table for a provided device - this assumes that the
- * opp list is already initialized and ready for usage.
+ * opp table is already initialized and ready for usage.
*
* This function allocates required memory for the cpufreq table. It is
* expected that the caller does the required maintenance such as freeing
@@ -44,7 +44,7 @@
* WARNING: It is important for the callers to ensure refreshing their copy of
* the table if any of the mentioned functions have been invoked in the interim.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Since we just use the regular accessor functions to access the internal data
* structures, we use RCU read lock inside this function. As a result, users of
* this function DO NOT need to use explicit locks when invoking it.
@@ -122,15 +122,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
/* Required only for V1 bindings, as v2 can manage it from DT itself */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
- struct device_list_opp *list_dev;
- struct device_opp *dev_opp;
+ struct opp_device *opp_dev;
+ struct opp_table *opp_table;
struct device *dev;
int cpu, ret = 0;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(cpu_dev);
- if (IS_ERR(dev_opp)) {
+ opp_table = _find_opp_table(cpu_dev);
+ if (IS_ERR(opp_table)) {
ret = -EINVAL;
goto unlock;
}
@@ -146,15 +146,15 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
continue;
}
- list_dev = _add_list_dev(dev, dev_opp);
- if (!list_dev) {
- dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+ opp_dev = _add_opp_dev(dev, opp_table);
+ if (!opp_dev) {
+ dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
__func__, cpu);
continue;
}
}
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
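
/*
 * Usage sketch for dev_pm_opp_set_sharing_cpus() with v1 bindings: mark an
 * (assumed) pair of CPUs as sharing the OPP table of cpu_dev. The cpumask
 * contents are illustrative.
 */
static int example_mark_shared_cpus(struct device *cpu_dev)
{
	cpumask_var_t shared;
	int ret;

	if (!zalloc_cpumask_var(&shared, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(0, shared);
	cpumask_set_cpu(1, shared);

	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, shared);

	free_cpumask_var(shared);
	return ret;
}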
@@ -214,7 +214,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
/*
* Works only for OPP v2 bindings.
*
- * cpumask should be already set to mask of cpu_dev->id.
* Returns -ENOENT if operating-points-v2 bindings aren't supported.
*/
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
@@ -230,6 +229,8 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask
return -ENOENT;
}
+ cpumask_set_cpu(cpu_dev->id, cpumask);
+
/* OPPs are shared ? */
if (!of_property_read_bool(np, "opp-shared"))
goto put_cpu_node;
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
new file mode 100644
index 000000000000..ef1ae6b52042
--- /dev/null
+++ b/drivers/base/power/opp/debugfs.c
@@ -0,0 +1,218 @@
+/*
+ * Generic OPP debugfs interface
+ *
+ * Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/limits.h>
+
+#include "opp.h"
+
+static struct dentry *rootdir;
+
+static void opp_set_dev_name(const struct device *dev, char *name)
+{
+ if (dev->parent)
+ snprintf(name, NAME_MAX, "%s-%s", dev_name(dev->parent),
+ dev_name(dev));
+ else
+ snprintf(name, NAME_MAX, "%s", dev_name(dev));
+}
+
+void opp_debug_remove_one(struct dev_pm_opp *opp)
+{
+ debugfs_remove_recursive(opp->dentry);
+}
+
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
+{
+ struct dentry *pdentry = opp_table->dentry;
+ struct dentry *d;
+ char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */
+
+ /* Rate is unique to each OPP, use it to give opp-name */
+ snprintf(name, sizeof(name), "opp:%lu", opp->rate);
+
+ /* Create per-opp directory */
+ d = debugfs_create_dir(name, pdentry);
+ if (!d)
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+ &opp->clock_latency_ns))
+ return -ENOMEM;
+
+ opp->dentry = d;
+ return 0;
+}
+
+static int opp_list_debug_create_dir(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{
+ const struct device *dev = opp_dev->dev;
+ struct dentry *d;
+
+ opp_set_dev_name(dev, opp_table->dentry_name);
+
+ /* Create device specific directory */
+ d = debugfs_create_dir(opp_table->dentry_name, rootdir);
+ if (!d) {
+ dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
+ return -ENOMEM;
+ }
+
+ opp_dev->dentry = d;
+ opp_table->dentry = d;
+
+ return 0;
+}
+
+static int opp_list_debug_create_link(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{
+ const struct device *dev = opp_dev->dev;
+ char name[NAME_MAX];
+ struct dentry *d;
+
+ opp_set_dev_name(opp_dev->dev, name);
+
+ /* Create device specific directory link */
+ d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
+ if (!d) {
+ dev_err(dev, "%s: Failed to create link\n", __func__);
+ return -ENOMEM;
+ }
+
+ opp_dev->dentry = d;
+
+ return 0;
+}
+
+/**
+ * opp_debug_register - add a device opp node to the debugfs 'opp' directory
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the device-opp being added
+ *
+ * Dynamically adds a device-specific directory in the debugfs 'opp' directory.
+ * If the device-opp is shared with other devices, links are created for all
+ * devices except the first.
+ *
+ * Return: 0 on success, otherwise negative error.
+ */
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
+{
+ if (!rootdir) {
+ pr_debug("%s: Uninitialized rootdir\n", __func__);
+ return -EINVAL;
+ }
+
+ if (opp_table->dentry)
+ return opp_list_debug_create_link(opp_dev, opp_table);
+
+ return opp_list_debug_create_dir(opp_dev, opp_table);
+}
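
/*
 * Illustrative resulting layout (device names assumed): for two devices
 * sharing one opp_table, the first gets the real directory and the second
 * only a symlink to it:
 *
 *	/sys/kernel/debug/opp/cpu0/opp:499200000/{rate_hz,u_volt_target,...}
 *	/sys/kernel/debug/opp/cpu1 -> cpu0
 */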
+
+static void opp_migrate_dentry(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{
+ struct opp_device *new_dev;
+ const struct device *dev;
+ struct dentry *dentry;
+
+ /* Look for next opp-dev */
+ list_for_each_entry(new_dev, &opp_table->dev_list, node)
+ if (new_dev != opp_dev)
+ break;
+
+ /* new_dev is guaranteed to be valid here */
+ dev = new_dev->dev;
+ debugfs_remove_recursive(new_dev->dentry);
+
+ opp_set_dev_name(dev, opp_table->dentry_name);
+
+ dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
+ opp_table->dentry_name);
+ if (!dentry) {
+ dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
+ __func__, dev_name(opp_dev->dev), dev_name(dev));
+ return;
+ }
+
+ new_dev->dentry = dentry;
+ opp_table->dentry = dentry;
+}
+
+/**
+ * opp_debug_unregister - remove a device opp node from debugfs opp directory
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the device-opp being removed
+ *
+ * Dynamically removes the device-specific directory from the debugfs 'opp'
+ * directory.
+ */
+void opp_debug_unregister(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{
+ if (opp_dev->dentry == opp_table->dentry) {
+ /* Move the real dentry object under another device */
+ if (!list_is_singular(&opp_table->dev_list)) {
+ opp_migrate_dentry(opp_dev, opp_table);
+ goto out;
+ }
+ opp_table->dentry = NULL;
+ }
+
+ debugfs_remove_recursive(opp_dev->dentry);
+
+out:
+ opp_dev->dentry = NULL;
+}
+
+static int __init opp_debug_init(void)
+{
+ /* Create /sys/kernel/debug/opp directory */
+ rootdir = debugfs_create_dir("opp", NULL);
+ if (!rootdir) {
+ pr_err("%s: Failed to create root directory\n", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+core_initcall(opp_debug_init);
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index 7366b2aa8997..f67f806fcf3a 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -17,17 +17,21 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/limits.h>
#include <linux/pm_opp.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+struct clk;
+struct regulator;
+
/* Lock to allow exclusive modification to the device and opp lists */
-extern struct mutex dev_opp_list_lock;
+extern struct mutex opp_table_lock;
/*
* Internal data structure organization with the OPP layer library is as
* follows:
- * dev_opp_list (root)
+ * opp_tables (root)
* |- device 1 (represents voltage domain 1)
* | |- opp 1 (availability, freq, voltage)
* | |- opp 2 ..
@@ -36,23 +40,24 @@ extern struct mutex dev_opp_list_lock;
* |- device 2 (represents the next voltage domain)
* ...
* `- device m (represents mth voltage domain)
- * device 1, 2.. are represented by dev_opp structure while each opp
+ * device 1, 2.. are represented by opp_table structure while each opp
* is represented by the opp structure.
*/
/**
* struct dev_pm_opp - Generic OPP description structure
- * @node: opp list node. The nodes are maintained throughout the lifetime
+ * @node: opp table node. The nodes are maintained throughout the lifetime
* of boot. It is expected only an optimal set of OPPs are
* added to the library by the SoC framework.
- * RCU usage: opp list is traversed with RCU locks. node
+ * RCU usage: opp table is traversed with RCU locks. node
* modification is possible realtime, hence the modifications
- * are protected by the dev_opp_list_lock for integrity.
+ * are protected by the opp_table_lock for integrity.
* IMPORTANT: the opp nodes should be maintained in increasing
* order.
- * @dynamic: not-created from static DT entries.
* @available: true/false - marks if this OPP as available or not
+ * @dynamic: not-created from static DT entries.
* @turbo: true if turbo (boost) OPP
+ * @suspend: true if suspend OPP
* @rate: Frequency in hertz
* @u_volt: Target voltage in microvolts corresponding to this OPP
* @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
@@ -60,9 +65,10 @@ extern struct mutex dev_opp_list_lock;
* @u_amp: Maximum current drawn by the device in microamperes
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
- * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp_table: points back to the opp_table struct this opp belongs to
* @rcu_head: RCU callback head used for deferred freeing
* @np: OPP's device node.
+ * @dentry: debugfs dentry pointer (per opp)
*
* This structure stores the OPP information for a given device.
*/
@@ -72,6 +78,7 @@ struct dev_pm_opp {
bool available;
bool dynamic;
bool turbo;
+ bool suspend;
unsigned long rate;
unsigned long u_volt;
@@ -80,40 +87,60 @@ struct dev_pm_opp {
unsigned long u_amp;
unsigned long clock_latency_ns;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct rcu_head rcu_head;
struct device_node *np;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+#endif
};
/**
- * struct device_list_opp - devices managed by 'struct device_opp'
+ * struct opp_device - devices managed by 'struct opp_table'
* @node: list node
* @dev: device to which the struct object belongs
* @rcu_head: RCU callback head used for deferred freeing
+ * @dentry: debugfs dentry pointer (per device)
*
- * This is an internal data structure maintaining the list of devices that are
- * managed by 'struct device_opp'.
+ * This is an internal data structure maintaining the devices that are managed
+ * by 'struct opp_table'.
*/
-struct device_list_opp {
+struct opp_device {
struct list_head node;
const struct device *dev;
struct rcu_head rcu_head;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+#endif
};
/**
- * struct device_opp - Device opp structure
- * @node: list node - contains the devices with OPPs that
+ * struct opp_table - Device opp structure
+ * @node: table node - contains the devices with OPPs that
* have been registered. Nodes once added are not modified in this
- * list.
- * RCU usage: nodes are not modified in the list of device_opp,
- * however addition is possible and is secured by dev_opp_list_lock
+ * table.
+ * RCU usage: nodes are not modified in the table of opp_table,
+ * however addition is possible and is secured by opp_table_lock
* @srcu_head: notifier head to notify the OPP availability changes.
* @rcu_head: RCU callback head used for deferred freeing
* @dev_list: list of devices that share these OPPs
- * @opp_list: list of opps
+ * @opp_list: table of opps
* @np: struct device_node pointer for opp's DT node.
+ * @clock_latency_ns_max: Max clock latency in nanoseconds.
* @shared_opp: OPP is shared between multiple devices.
+ * @suspend_opp: Pointer to OPP to be used during device suspend.
+ * @supported_hw: Array of version number to support.
+ * @supported_hw_count: Number of elements in supported_hw array.
+ * @prop_name: A name to append to certain DT properties while parsing them.
+ * @clk: Device's clock handle
+ * @regulator: Supply regulator
+ * @dentry: debugfs dentry pointer of the real device directory (not links).
+ * @dentry_name: Name of the real dentry.
+ *
+ * @voltage_tolerance_v1: In percentage, for v1 bindings only.
*
* This is an internal data structure maintaining the link to opps attached to
* a device. This structure is not meant to be shared to users as it is
@@ -123,7 +150,7 @@ struct device_list_opp {
* need to wait for the grace period of both of them before freeing any
* resources. And so we have used kfree_rcu() from within call_srcu() handlers.
*/
-struct device_opp {
+struct opp_table {
struct list_head node;
struct srcu_notifier_head srcu_head;
@@ -133,14 +160,48 @@ struct device_opp {
struct device_node *np;
unsigned long clock_latency_ns_max;
+
+ /* For backward compatibility with v1 bindings */
+ unsigned int voltage_tolerance_v1;
+
bool shared_opp;
struct dev_pm_opp *suspend_opp;
+
+ unsigned int *supported_hw;
+ unsigned int supported_hw_count;
+ const char *prop_name;
+ struct clk *clk;
+ struct regulator *regulator;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+ char dentry_name[NAME_MAX];
+#endif
};
/* Routines internal to opp core */
-struct device_opp *_find_device_opp(struct device *dev);
-struct device_list_opp *_add_list_dev(const struct device *dev,
- struct device_opp *dev_opp);
+struct opp_table *_find_opp_table(struct device *dev);
+struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
struct device_node *_of_get_opp_desc_node(struct device *dev);
+#ifdef CONFIG_DEBUG_FS
+void opp_debug_remove_one(struct dev_pm_opp *opp);
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
+#else
+static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
+
+static inline int opp_debug_create_one(struct dev_pm_opp *opp,
+ struct opp_table *opp_table)
+{ return 0; }
+static inline int opp_debug_register(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{ return 0; }
+
+static inline void opp_debug_unregister(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
+{ }
+#endif /* DEBUG_FS */
+
#endif /* __DRIVER_OPP_H__ */
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index a5880f4ab40e..1914c63ca8b1 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -338,7 +338,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
- bio->bi_iter.bi_size & PAGE_MASK)
+ bio->bi_iter.bi_size & ~PAGE_MASK)
goto io_error;
discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3457ac8c03e2..55d3d1da72de 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -173,7 +173,13 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{
struct request *rq;
+ if (mtip_check_surprise_removal(dd->pdev))
+ return NULL;
+
rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true);
+ if (IS_ERR(rq))
+ return NULL;
+
return blk_mq_rq_to_pdu(rq);
}
@@ -233,15 +239,9 @@ static void mtip_async_complete(struct mtip_port *port,
"Command tag %d failed due to TFE\n", tag);
}
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
-
rq = mtip_rq_from_tag(dd, tag);
- if (unlikely(cmd->unaligned))
- up(&port->cmd_slot_unal);
-
- blk_mq_end_request(rq, status ? -EIO : 0);
+ blk_mq_complete_request(rq, status);
}
/*
@@ -581,6 +581,8 @@ static void mtip_completion(struct mtip_port *port,
dev_warn(&port->dd->pdev->dev,
"Internal command %d completed with TFE\n", tag);
+ command->comp_func = NULL;
+ command->comp_data = NULL;
complete(waiting);
}
@@ -618,8 +620,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
port = dd->port;
- set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
@@ -628,7 +628,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
cmd->comp_func(port, MTIP_TAG_INTERNAL,
cmd, PORT_IRQ_TF_ERR);
}
- goto handle_tfe_exit;
+ return;
}
/* clear the tag accumulator */
@@ -701,7 +701,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
fail_reason = "thermal shutdown";
}
if (buf[288] == 0xBF) {
- set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed. Secure erase required.\n");
fail_all_ncq_cmds = 1;
@@ -771,11 +771,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
}
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
-
-handle_tfe_exit:
- /* clear eh_active */
- clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
}
/*
@@ -1007,6 +1002,7 @@ static bool mtip_pause_ncq(struct mtip_port *port,
(fis->features == 0x27 || fis->features == 0x72 ||
fis->features == 0x62 || fis->features == 0x26))) {
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+ clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
/* Com reset after secure erase or lowlevel format */
mtip_restart_port(port);
clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
@@ -1021,12 +1017,14 @@ static bool mtip_pause_ncq(struct mtip_port *port,
*
* @port Pointer to port data structure
* @timeout Max duration to wait (ms)
+ * @atomic gfp_t flag to indicate blockable context or not
*
* return value
* 0 Success
* -EBUSY Commands still active
*/
-static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
+ gfp_t atomic)
{
unsigned long to;
unsigned int n;
@@ -1037,16 +1035,21 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
to = jiffies + msecs_to_jiffies(timeout);
do {
if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
- test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
+ test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
+ atomic == GFP_KERNEL) {
msleep(20);
continue; /* svc thd is actively issuing commands */
}
- msleep(100);
+ if (atomic == GFP_KERNEL)
+ msleep(100);
+ else {
+ cpu_relax();
+ udelay(100);
+ }
+
if (mtip_check_surprise_removal(port->dd->pdev))
goto err_fault;
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
- goto err_fault;
/*
* Ignore s_active bit 0 of array element 0.
@@ -1099,6 +1102,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
struct mtip_cmd *int_cmd;
struct driver_data *dd = port->dd;
int rv = 0;
+ unsigned long start;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
@@ -1107,6 +1111,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
int_cmd = mtip_get_int_command(dd);
+ if (!int_cmd) {
+ dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
+ return -EFAULT;
+ }
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1119,7 +1127,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if (fis->command != ATA_CMD_STANDBYNOW1) {
/* wait for io to complete if non atomic */
if (mtip_quiesce_io(port,
- MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
+ MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
dev_warn(&dd->pdev->dev,
"Failed to quiesce IO\n");
mtip_put_int_command(dd, int_cmd);
@@ -1162,6 +1170,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Populate the command header */
int_cmd->command_header->byte_count = 0;
+ start = jiffies;
+
/* Issue the command to the hardware */
mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
@@ -1170,10 +1180,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if ((rv = wait_for_completion_interruptible_timeout(
&wait,
msecs_to_jiffies(timeout))) <= 0) {
+
if (rv == -ERESTARTSYS) { /* interrupted */
dev_err(&dd->pdev->dev,
- "Internal command [%02X] was interrupted after %lu ms\n",
- fis->command, timeout);
+ "Internal command [%02X] was interrupted after %u ms\n",
+ fis->command,
+ jiffies_to_msecs(jiffies - start));
rv = -EINTR;
goto exec_ic_exit;
} else if (rv == 0) /* timeout */
@@ -2897,6 +2909,42 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
return -EFAULT;
}
+static void mtip_softirq_done_fn(struct request *rq)
+{
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct driver_data *dd = rq->q->queuedata;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
+ cmd->direction);
+
+ if (unlikely(cmd->unaligned))
+ up(&dd->port->cmd_slot_unal);
+
+ blk_mq_end_request(rq, rq->errors);
+}
+
+static void mtip_abort_cmd(struct request *req, void *data,
+ bool reserved)
+{
+ struct driver_data *dd = data;
+
+ dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
+
+ clear_bit(req->tag, dd->port->cmds_to_issue);
+ req->errors = -EIO;
+ mtip_softirq_done_fn(req);
+}
+
+static void mtip_queue_cmd(struct request *req, void *data,
+ bool reserved)
+{
+ struct driver_data *dd = data;
+
+ set_bit(req->tag, dd->port->cmds_to_issue);
+ blk_abort_request(req);
+}
+
/*
* service thread to issue queued commands
*
@@ -2909,7 +2957,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
static int mtip_service_thread(void *data)
{
struct driver_data *dd = (struct driver_data *)data;
- unsigned long slot, slot_start, slot_wrap;
+ unsigned long slot, slot_start, slot_wrap, to;
unsigned int num_cmd_slots = dd->slot_groups * 32;
struct mtip_port *port = dd->port;
@@ -2924,9 +2972,7 @@ static int mtip_service_thread(void *data)
* is in progress nor error handling is active
*/
wait_event_interruptible(port->svc_wait, (port->flags) &&
- !(port->flags & MTIP_PF_PAUSE_IO));
-
- set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+ (port->flags & MTIP_PF_SVC_THD_WORK));
if (kthread_should_stop() ||
test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
@@ -2936,6 +2982,8 @@ static int mtip_service_thread(void *data)
&dd->dd_flag)))
goto st_out;
+ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+
restart_eh:
/* Demux bits: start with error handling */
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
@@ -2946,6 +2994,32 @@ restart_eh:
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
goto restart_eh;
+ if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
+ to = jiffies + msecs_to_jiffies(5000);
+
+ do {
+ mdelay(100);
+ } while (atomic_read(&dd->irq_workers_active) != 0 &&
+ time_before(jiffies, to));
+
+ if (atomic_read(&dd->irq_workers_active) != 0)
+ dev_warn(&dd->pdev->dev,
+ "Completion workers still active!");
+
+ spin_lock(dd->queue->queue_lock);
+ blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ mtip_queue_cmd, dd);
+ spin_unlock(dd->queue->queue_lock);
+
+ set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
+
+ if (mtip_device_reset(dd))
+ blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ mtip_abort_cmd, dd);
+
+ clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
+ }
+
if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
@@ -2978,10 +3052,8 @@ restart_eh:
}
if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
- if (mtip_ftl_rebuild_poll(dd) < 0)
- set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
- &dd->dd_flag);
- clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
+ if (mtip_ftl_rebuild_poll(dd) == 0)
+ clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
}
}
@@ -3096,7 +3168,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
if (buf[288] == 0xBF) {
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed.\n");
- /* TODO */
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
}
}
@@ -3270,20 +3342,25 @@ out1:
return rv;
}
-static void mtip_standby_drive(struct driver_data *dd)
+static int mtip_standby_drive(struct driver_data *dd)
{
- if (dd->sr)
- return;
+ int rv = 0;
+ if (dd->sr || !dd->port)
+ return -ENODEV;
/*
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
- !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
- if (mtip_standby_immediate(dd->port))
+ !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
+ !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
+ rv = mtip_standby_immediate(dd->port);
+ if (rv)
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
+ }
+ return rv;
}
/*
@@ -3296,10 +3373,6 @@ static void mtip_standby_drive(struct driver_data *dd)
*/
static int mtip_hw_exit(struct driver_data *dd)
{
- /*
- * Send standby immediate (E0h) to the drive so that it
- * saves its state.
- */
if (!dd->sr) {
/* de-initialize the port. */
mtip_deinit_port(dd->port);
@@ -3341,8 +3414,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- if (!dd->sr && dd->port)
- mtip_standby_immediate(dd->port);
+ mtip_standby_drive(dd);
return 0;
}
@@ -3365,7 +3437,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
* Send standby immediate (E0h) to the drive
* so that it saves its state.
*/
- if (mtip_standby_immediate(dd->port) != 0) {
+ if (mtip_standby_drive(dd) != 0) {
dev_err(&dd->pdev->dev,
"Failed standby-immediate command\n");
return -EFAULT;
@@ -3603,6 +3675,28 @@ static int mtip_block_getgeo(struct block_device *dev,
return 0;
}
+static int mtip_block_open(struct block_device *dev, fmode_t mode)
+{
+ struct driver_data *dd;
+
+ if (dev && dev->bd_disk) {
+ dd = (struct driver_data *) dev->bd_disk->private_data;
+
+ if (dd) {
+ if (test_bit(MTIP_DDF_REMOVAL_BIT,
+ &dd->dd_flag)) {
+ return -ENODEV;
+ }
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+void mtip_block_release(struct gendisk *disk, fmode_t mode)
+{
+}
+
/*
* Block device operation function.
*
@@ -3610,6 +3704,8 @@ static int mtip_block_getgeo(struct block_device *dev,
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
+ .open = mtip_block_open,
+ .release = mtip_block_release,
.ioctl = mtip_block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtip_block_compat_ioctl,
@@ -3671,10 +3767,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
rq_data_dir(rq))) {
return -ENODATA;
}
- if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
+ if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
+ test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
return -ENODATA;
- if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
- return -ENXIO;
}
if (rq->cmd_flags & REQ_DISCARD) {
@@ -3786,11 +3881,33 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
return 0;
}
+static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
+ bool reserved)
+{
+ struct driver_data *dd = req->q->queuedata;
+ int ret = BLK_EH_RESET_TIMER;
+
+ if (reserved)
+ goto exit_handler;
+
+ if (test_bit(req->tag, dd->port->cmds_to_issue))
+ goto exit_handler;
+
+ if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
+ goto exit_handler;
+
+ wake_up_interruptible(&dd->port->svc_wait);
+exit_handler:
+ return ret;
+}
+
static struct blk_mq_ops mtip_mq_ops = {
.queue_rq = mtip_queue_rq,
.map_queue = blk_mq_map_queue,
.init_request = mtip_init_cmd,
.exit_request = mtip_free_cmd,
+ .complete = mtip_softirq_done_fn,
+ .timeout = mtip_cmd_timeout,
};
/*
@@ -3857,7 +3974,6 @@ static int mtip_block_initialize(struct driver_data *dd)
mtip_hw_debugfs_init(dd);
-skip_create_disk:
memset(&dd->tags, 0, sizeof(dd->tags));
dd->tags.ops = &mtip_mq_ops;
dd->tags.nr_hw_queues = 1;
@@ -3867,12 +3983,13 @@ skip_create_disk:
dd->tags.numa_node = dd->numa_node;
dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
+ dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
rv = blk_mq_alloc_tag_set(&dd->tags);
if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
- goto block_queue_alloc_init_error;
+ goto block_queue_alloc_tag_error;
}
/* Allocate the request queue. */
@@ -3887,6 +4004,7 @@ skip_create_disk:
dd->disk->queue = dd->queue;
dd->queue->queuedata = dd;
+skip_create_disk:
/* Initialize the protocol layer. */
wait_for_rebuild = mtip_hw_get_identify(dd);
if (wait_for_rebuild < 0) {
@@ -3983,8 +4101,9 @@ kthread_run_error:
read_capacity_error:
init_hw_cmds_error:
blk_cleanup_queue(dd->queue);
- blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_init_error:
+ blk_mq_free_tag_set(&dd->tags);
+block_queue_alloc_tag_error:
mtip_hw_debugfs_exit(dd);
disk_index_error:
spin_lock(&rssd_index_lock);
@@ -4001,6 +4120,22 @@ protocol_init_error:
return rv;
}
+static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
+{
+ struct driver_data *dd = (struct driver_data *)data;
+ struct mtip_cmd *cmd;
+
+ if (likely(!reserv))
+ blk_mq_complete_request(rq, -ENODEV);
+ else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
+
+ cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
+ if (cmd->comp_func)
+ cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
+ cmd, -ENODEV);
+ }
+}
+
/*
* Block layer deinitialization function.
*
@@ -4032,12 +4167,23 @@ static int mtip_block_remove(struct driver_data *dd)
}
}
- if (!dd->sr)
- mtip_standby_drive(dd);
+ if (!dd->sr) {
+ /*
+ * Explicitly wait here for IOs to quiesce,
+ * as mtip_standby_drive usually won't wait for IOs.
+ */
+ if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
+ GFP_KERNEL))
+ mtip_standby_drive(dd);
+ }
else
dev_info(&dd->pdev->dev, "device %s surprise removal\n",
dd->disk->disk_name);
+ blk_mq_freeze_queue_start(dd->queue);
+ blk_mq_stop_hw_queues(dd->queue);
+ blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+
/*
* Delete our gendisk structure. This also removes the device
* from /dev
@@ -4047,7 +4193,8 @@ static int mtip_block_remove(struct driver_data *dd)
dd->bdev = NULL;
}
if (dd->disk) {
- del_gendisk(dd->disk);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
@@ -4088,7 +4235,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
dev_info(&dd->pdev->dev,
"Shutting down %s ...\n", dd->disk->disk_name);
- del_gendisk(dd->disk);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
@@ -4433,7 +4581,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
struct driver_data *dd = pci_get_drvdata(pdev);
unsigned long flags, to;
- set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+ set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
spin_lock_irqsave(&dev_lock, flags);
list_del_init(&dd->online_list);
@@ -4450,12 +4598,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
} while (atomic_read(&dd->irq_workers_active) != 0 &&
time_before(jiffies, to));
+ if (!dd->sr)
+ fsync_bdev(dd->bdev);
+
if (atomic_read(&dd->irq_workers_active) != 0) {
dev_warn(&dd->pdev->dev,
"Completion workers still active!\n");
}
- blk_mq_stop_hw_queues(dd->queue);
+ blk_set_queue_dying(dd->queue);
+ set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+
/* Clean up the block layer. */
mtip_block_remove(dd);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3274784008eb..7617888f7944 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -134,16 +134,24 @@ enum {
MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */
+ MTIP_PF_TO_ACTIVE_BIT = 9, /* timeout handling */
MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
(1 << MTIP_PF_EH_ACTIVE_BIT) |
(1 << MTIP_PF_SE_ACTIVE_BIT) |
- (1 << MTIP_PF_DM_ACTIVE_BIT)),
+ (1 << MTIP_PF_DM_ACTIVE_BIT) |
+ (1 << MTIP_PF_TO_ACTIVE_BIT)),
MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
MTIP_PF_ISSUE_CMDS_BIT = 5,
MTIP_PF_REBUILD_BIT = 6,
MTIP_PF_SVC_THD_STOP_BIT = 8,
+ MTIP_PF_SVC_THD_WORK = ((1 << MTIP_PF_EH_ACTIVE_BIT) |
+ (1 << MTIP_PF_ISSUE_CMDS_BIT) |
+ (1 << MTIP_PF_REBUILD_BIT) |
+ (1 << MTIP_PF_SVC_THD_STOP_BIT) |
+ (1 << MTIP_PF_TO_ACTIVE_BIT)),
+
/* below are bit numbers in 'dd_flag' defined in driver_data */
MTIP_DDF_SEC_LOCK_BIT = 0,
MTIP_DDF_REMOVE_PENDING_BIT = 1,
@@ -153,6 +161,7 @@ enum {
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
+ MTIP_DDF_REMOVAL_BIT = 9,
MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
(1 << MTIP_DDF_SEC_LOCK_BIT) |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 81ea69fee7ca..fbdddd6f94b8 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1955,7 +1955,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
- GFP_ATOMIC);
+ GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2004,7 +2004,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
rbd_dev = img_request->rbd_dev;
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
- false, GFP_ATOMIC);
+ false, GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2506,7 +2506,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
bio_chain_clone_range(&bio_list,
&bio_offset,
clone_size,
- GFP_ATOMIC);
+ GFP_NOIO);
if (!obj_request->bio_list)
goto out_unwind;
} else if (type == OBJ_REQUEST_PAGES) {
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index fa893c3ec408..0beaa52df66b 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe05f) },
{ USB_DEVICE(0x0489, 0xe076) },
{ USB_DEVICE(0x0489, 0xe078) },
+ { USB_DEVICE(0x0489, 0xe095) },
{ USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x300d) },
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
+ { USB_DEVICE(0x04CA, 0x3014) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x021c) },
{ USB_DEVICE(0x0930, 0x0220) },
@@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x13d3, 0x3395) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x13d3, 0x3408) },
{ USB_DEVICE(0x13d3, 0x3423) },
{ USB_DEVICE(0x13d3, 0x3432) },
+ { USB_DEVICE(0x13d3, 0x3472) },
{ USB_DEVICE(0x13d3, 0x3474) },
/* Atheros AR5BBU12 with sflash firmware */
@@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 968897108c76..79107597a594 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -206,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -227,10 +229,12 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 45cc39aabeee..252142524ff2 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -136,11 +136,13 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
chip->cdev.owner = chip->pdev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
+ devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
+
return chip;
}
EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
-static int tpm_dev_add_device(struct tpm_chip *chip)
+static int tpm_add_char_device(struct tpm_chip *chip)
{
int rc;
@@ -151,7 +153,6 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
- device_unregister(&chip->dev);
return rc;
}
@@ -162,16 +163,17 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
+ cdev_del(&chip->cdev);
return rc;
}
return rc;
}
-static void tpm_dev_del_device(struct tpm_chip *chip)
+static void tpm_del_char_device(struct tpm_chip *chip)
{
cdev_del(&chip->cdev);
- device_unregister(&chip->dev);
+ device_del(&chip->dev);
}
static int tpm1_chip_register(struct tpm_chip *chip)
@@ -222,7 +224,7 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_add_ppi(chip);
- rc = tpm_dev_add_device(chip);
+ rc = tpm_add_char_device(chip);
if (rc)
goto out_err;
@@ -274,6 +276,6 @@ void tpm_chip_unregister(struct tpm_chip *chip)
sysfs_remove_link(&chip->pdev->kobj, "ppi");
tpm1_chip_unregister(chip);
- tpm_dev_del_device(chip);
+ tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 4bb9727c1047..61e64293b765 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -310,11 +310,11 @@ static int crb_acpi_remove(struct acpi_device *device)
struct device *dev = &device->dev;
struct tpm_chip *chip = dev_get_drvdata(dev);
- tpm_chip_unregister(chip);
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_unregister(chip);
+
return 0;
}
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index bd72fb04225e..4e6940acf639 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -232,7 +232,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
{
struct tcpa_event *event = v;
struct tcpa_event temp_event;
- char *tempPtr;
+ char *temp_ptr;
int i;
memcpy(&temp_event, event, sizeof(struct tcpa_event));
@@ -242,10 +242,16 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
temp_event.event_type = do_endian_conversion(event->event_type);
temp_event.event_size = do_endian_conversion(event->event_size);
- tempPtr = (char *)&temp_event;
+ temp_ptr = (char *) &temp_event;
- for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
- seq_putc(m, tempPtr[i]);
+ for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
+ seq_putc(m, temp_ptr[i]);
+
+ temp_ptr = (char *) v;
+
+ for (i = (sizeof(struct tcpa_event) - 1);
+ i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
+ seq_putc(m, temp_ptr[i]);
return 0;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 39bf5820297e..4f9830c1b121 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1097,13 +1097,15 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
struct bcm2835_cprman *cprman = divider->cprman;
const struct bcm2835_pll_divider_data *data = divider->data;
- u32 cm;
- int ret;
+ u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
- ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
- if (ret)
- return ret;
+ div = DIV_ROUND_UP_ULL(parent_rate, rate);
+
+ div = min(div, max_div);
+ if (div == max_div)
+ div = 0;
+ cprman_write(cprman, data->a2w_reg, div);
cm = cprman_read(cprman, data->cm_reg);
cprman_write(cprman, data->cm_reg, cm | data->load_mask);
cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
diff --git a/drivers/clk/msm/clock-gpu-cobalt.c b/drivers/clk/msm/clock-gpu-cobalt.c
index ce3e7916e658..7230c7a2bc04 100644
--- a/drivers/clk/msm/clock-gpu-cobalt.c
+++ b/drivers/clk/msm/clock-gpu-cobalt.c
@@ -109,7 +109,7 @@ static struct alpha_pll_clk gpu_pll0_pll = {
.parent = &gpucc_xo.c,
.dbg_name = "gpu_pll0_pll",
.ops = &clk_ops_fabia_alpha_pll,
- VDD_GPU_PLL_FMAX_MAP1(NOMINAL, 1300000500),
+ VDD_GPU_PLL_FMAX_MAP1(MIN, 1300000500),
CLK_INIT(gpu_pll0_pll.c),
},
};
@@ -168,7 +168,7 @@ static struct alpha_pll_clk gpu_pll1_pll = {
.parent = &gpucc_xo.c,
.dbg_name = "gpu_pll1_pll",
.ops = &clk_ops_fabia_alpha_pll,
- VDD_GPU_PLL_FMAX_MAP1(NOMINAL, 1300000500),
+ VDD_GPU_PLL_FMAX_MAP1(MIN, 1300000500),
CLK_INIT(gpu_pll1_pll.c),
},
};
@@ -670,15 +670,15 @@ static struct clk_lookup msm_clocks_gfxcc_cobalt[] = {
static void msm_gfxcc_hamster_fixup(void)
{
- gpu_pll0_pll.c.fmax[VDD_DIG_NOMINAL] = 1420000500;
- gpu_pll1_pll.c.fmax[VDD_DIG_NOMINAL] = 1420000500;
+ gpu_pll0_pll.c.fmax[VDD_DIG_MIN] = 1420000500;
+ gpu_pll1_pll.c.fmax[VDD_DIG_MIN] = 1420000500;
gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_vq;
}
static void msm_gfxcc_cobalt_v2_fixup(void)
{
- gpu_pll0_pll.c.fmax[VDD_DIG_NOMINAL] = 1420000500;
- gpu_pll1_pll.c.fmax[VDD_DIG_NOMINAL] = 1420000500;
+ gpu_pll0_pll.c.fmax[VDD_DIG_MIN] = 1420000500;
+ gpu_pll1_pll.c.fmax[VDD_DIG_MIN] = 1420000500;
gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_v2;
}
diff --git a/drivers/clk/msm/clock-mmss-cobalt.c b/drivers/clk/msm/clock-mmss-cobalt.c
index 53c657ef3a25..288abb133743 100644
--- a/drivers/clk/msm/clock-mmss-cobalt.c
+++ b/drivers/clk/msm/clock-mmss-cobalt.c
@@ -1124,6 +1124,7 @@ static struct rcg_clk dp_pixel_clk_src = {
.dbg_name = "dp_pixel_clk_src",
.parent = &ext_dp_phy_pll_vco.c,
.ops = &clk_ops_rcg_dp,
+ .flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 148380000, LOW, 296740000,
NOMINAL, 593470000),
CLK_INIT(dp_pixel_clk_src.c),
@@ -2033,6 +2034,7 @@ static struct branch_clk mmss_mdss_dp_pixel_clk = {
.c = {
.dbg_name = "mmss_mdss_dp_pixel_clk",
.parent = &dp_pixel_clk_src.c,
+ .flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_dp_pixel_clk.c),
},
@@ -2801,8 +2803,10 @@ int msm_mmsscc_cobalt_probe(struct platform_device *pdev)
ext_dp_phy_pll_link.dev = &pdev->dev;
ext_dp_phy_pll_link.clk_id = "dp_link_src";
+ ext_dp_phy_pll_link.c.flags = CLKFLAG_NO_RATE_CACHE;
ext_dp_phy_pll_vco.dev = &pdev->dev;
ext_dp_phy_pll_vco.clk_id = "dp_vco_div";
+ ext_dp_phy_pll_vco.c.flags = CLKFLAG_NO_RATE_CACHE;
is_vq = of_device_is_compatible(pdev->dev.of_node,
"qcom,mmsscc-hamster");
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index abb47608713b..fe728f8dcbe4 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -718,6 +718,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_cpu",
"pclk_peri",
+ "hclk_cpubus"
};
static void __init rk3188_common_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 7e6b783e6eee..1b148694b633 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
.core_reg = RK3368_CLKSEL_CON(0),
.div_core_shift = 0,
.div_core_mask = 0x1f,
- .mux_core_shift = 15,
+ .mux_core_shift = 7,
};
static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
@@ -218,29 +218,29 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
}
static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
- RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
- RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
- RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
- RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
- RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
- RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
- RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
- RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
- RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
- RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
+ RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
+ RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
+ RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
+ RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
+ RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
+ RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
+ RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
+ RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
+ RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
+ RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
};
static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
- RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
- RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
- RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
- RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
- RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
- RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
- RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
- RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
- RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
- RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
+ RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
+ RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
+ RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
+ RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
+ RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
+ RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
+ RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
+ RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
+ RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
+ RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
};
static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
@@ -384,10 +384,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
* Clock-Architecture Diagram 3
*/
- COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
+ COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3368_CLKGATE_CON(4), 6, GFLAGS),
- COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
+ COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3368_CLKGATE_CON(4), 7, GFLAGS),
@@ -442,7 +442,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
RK3368_CLKGATE_CON(4), 13, GFLAGS),
GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
- RK3368_CLKGATE_CON(5), 12, GFLAGS),
+ RK3368_CLKGATE_CON(4), 12, GFLAGS),
COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 90d64081ddb3..f951f911786e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -31,9 +31,8 @@
struct private_data {
struct device *cpu_dev;
- struct regulator *cpu_reg;
struct thermal_cooling_device *cdev;
- unsigned int voltage_tolerance; /* in percentage */
+ const char *reg_name;
};
static struct freq_attr *cpufreq_dt_attr[] = {
@@ -44,175 +43,128 @@ static struct freq_attr *cpufreq_dt_attr[] = {
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct dev_pm_opp *opp;
- struct cpufreq_frequency_table *freq_table = policy->freq_table;
- struct clk *cpu_clk = policy->clk;
struct private_data *priv = policy->driver_data;
- struct device *cpu_dev = priv->cpu_dev;
- struct regulator *cpu_reg = priv->cpu_reg;
- unsigned long volt = 0, volt_old = 0, tol = 0;
- unsigned int old_freq, new_freq;
- long freq_Hz, freq_exact;
- int ret;
-
- freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
- if (freq_Hz <= 0)
- freq_Hz = freq_table[index].frequency * 1000;
- freq_exact = freq_Hz;
- new_freq = freq_Hz / 1000;
- old_freq = clk_get_rate(cpu_clk) / 1000;
+ return dev_pm_opp_set_rate(priv->cpu_dev,
+ policy->freq_table[index].frequency * 1000);
+}
- if (!IS_ERR(cpu_reg)) {
- unsigned long opp_freq;
+/*
+ * An earlier version of opp-v1 bindings used to name the regulator
+ * "cpu0-supply", we still need to handle that for backwards compatibility.
+ */
+static const char *find_supply_name(struct device *dev)
+{
+ struct device_node *np;
+ struct property *pp;
+ int cpu = dev->id;
+ const char *name = NULL;
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(cpu_dev, "failed to find OPP for %ld\n",
- freq_Hz);
- return PTR_ERR(opp);
- }
- volt = dev_pm_opp_get_voltage(opp);
- opp_freq = dev_pm_opp_get_freq(opp);
- rcu_read_unlock();
- tol = volt * priv->voltage_tolerance / 100;
- volt_old = regulator_get_voltage(cpu_reg);
- dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
- opp_freq / 1000, volt);
- }
+ np = of_node_get(dev->of_node);
- dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
- old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
- new_freq / 1000, volt ? volt / 1000 : -1);
+ /* This must be valid for sure */
+ if (WARN_ON(!np))
+ return NULL;
- /* scaling up? scale voltage before frequency */
- if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
- ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
- if (ret) {
- dev_err(cpu_dev, "failed to scale voltage up: %d\n",
- ret);
- return ret;
+ /* Try "cpu0" for older DTs */
+ if (!cpu) {
+ pp = of_find_property(np, "cpu0-supply", NULL);
+ if (pp) {
+ name = "cpu0";
+ goto node_put;
}
}
- ret = clk_set_rate(cpu_clk, freq_exact);
- if (ret) {
- dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
- if (!IS_ERR(cpu_reg) && volt_old > 0)
- regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- return ret;
+ pp = of_find_property(np, "cpu-supply", NULL);
+ if (pp) {
+ name = "cpu";
+ goto node_put;
}
- /* scaling down? scale voltage after frequency */
- if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
- ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
- if (ret) {
- dev_err(cpu_dev, "failed to scale voltage down: %d\n",
- ret);
- clk_set_rate(cpu_clk, old_freq * 1000);
- }
- }
-
- return ret;
+ dev_dbg(dev, "no regulator for cpu%d\n", cpu);
+node_put:
+ of_node_put(np);
+ return name;
}
-static int allocate_resources(int cpu, struct device **cdev,
- struct regulator **creg, struct clk **cclk)
+static int resources_available(void)
{
struct device *cpu_dev;
struct regulator *cpu_reg;
struct clk *cpu_clk;
int ret = 0;
- char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
+ const char *name;
- cpu_dev = get_cpu_device(cpu);
+ cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
- pr_err("failed to get cpu%d device\n", cpu);
+ pr_err("failed to get cpu0 device\n");
return -ENODEV;
}
- /* Try "cpu0" for older DTs */
- if (!cpu)
- reg = reg_cpu0;
- else
- reg = reg_cpu;
-
-try_again:
- cpu_reg = regulator_get_optional(cpu_dev, reg);
- if (IS_ERR(cpu_reg)) {
+ cpu_clk = clk_get(cpu_dev, NULL);
+ ret = PTR_ERR_OR_ZERO(cpu_clk);
+ if (ret) {
/*
- * If cpu's regulator supply node is present, but regulator is
- * not yet registered, we should try defering probe.
+ * If cpu's clk node is present, but clock is not yet
+ * registered, we should try defering probe.
*/
- if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
- dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
- cpu);
- return -EPROBE_DEFER;
- }
-
- /* Try with "cpu-supply" */
- if (reg == reg_cpu0) {
- reg = reg_cpu;
- goto try_again;
- }
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(cpu_dev, "clock not ready, retry\n");
+ else
+ dev_err(cpu_dev, "failed to get clock: %d\n", ret);
- dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
- cpu, PTR_ERR(cpu_reg));
+ return ret;
}
- cpu_clk = clk_get(cpu_dev, NULL);
- if (IS_ERR(cpu_clk)) {
- /* put regulator */
- if (!IS_ERR(cpu_reg))
- regulator_put(cpu_reg);
+ clk_put(cpu_clk);
- ret = PTR_ERR(cpu_clk);
+ name = find_supply_name(cpu_dev);
+ /* Platform doesn't require regulator */
+ if (!name)
+ return 0;
+ cpu_reg = regulator_get_optional(cpu_dev, name);
+ ret = PTR_ERR_OR_ZERO(cpu_reg);
+ if (ret) {
/*
- * If cpu's clk node is present, but clock is not yet
- * registered, we should try defering probe.
+ * If cpu's regulator supply node is present, but regulator is
+ * not yet registered, we should try defering probe.
*/
if (ret == -EPROBE_DEFER)
- dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
+ dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
else
- dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
- ret);
- } else {
- *cdev = cpu_dev;
- *creg = cpu_reg;
- *cclk = cpu_clk;
+ dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
+
+ return ret;
}
- return ret;
+ regulator_put(cpu_reg);
+ return 0;
}
static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
- struct device_node *np;
struct private_data *priv;
struct device *cpu_dev;
- struct regulator *cpu_reg;
struct clk *cpu_clk;
struct dev_pm_opp *suspend_opp;
- unsigned long min_uV = ~0, max_uV = 0;
unsigned int transition_latency;
- bool need_update = false;
+ bool opp_v1 = false;
+ const char *name;
int ret;
- ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
- if (ret) {
- pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
- return ret;
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
}
- np = of_node_get(cpu_dev->of_node);
- if (!np) {
- dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
- ret = -ENOENT;
- goto out_put_reg_clk;
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ ret = PTR_ERR(cpu_clk);
+ dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
+ return ret;
}
/* Get OPP-sharing information from "operating-points-v2" bindings */
@@ -223,9 +175,23 @@ static int cpufreq_init(struct cpufreq_policy *policy)
* finding shared-OPPs for backward compatibility.
*/
if (ret == -ENOENT)
- need_update = true;
+ opp_v1 = true;
else
- goto out_node_put;
+ goto out_put_clk;
+ }
+
+ /*
+ * OPP layer will be taking care of regulators now, but it needs to know
+ * the name of the regulator first.
+ */
+ name = find_supply_name(cpu_dev);
+ if (name) {
+ ret = dev_pm_opp_set_regulator(cpu_dev, name);
+ if (ret) {
+ dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
+ policy->cpu, ret);
+ goto out_put_clk;
+ }
}
/*
@@ -246,12 +212,12 @@ static int cpufreq_init(struct cpufreq_policy *policy)
*/
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
- pr_debug("OPP table is not ready, deferring probe\n");
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
ret = -EPROBE_DEFER;
goto out_free_opp;
}
- if (need_update) {
+ if (opp_v1) {
struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
if (!pd || !pd->independent_clocks)
@@ -265,10 +231,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
-
- of_property_read_u32(np, "clock-latency", &transition_latency);
- } else {
- transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -277,62 +239,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
-
- if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
-
- if (!IS_ERR(cpu_reg)) {
- unsigned long opp_freq = 0;
-
- /*
- * Disable any OPPs where the connected regulator isn't able to
- * provide the specified voltage and record minimum and maximum
- * voltage levels.
- */
- while (1) {
- struct dev_pm_opp *opp;
- unsigned long opp_uV, tol_uV;
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- break;
- }
- opp_uV = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- tol_uV = opp_uV * priv->voltage_tolerance / 100;
- if (regulator_is_supported_voltage(cpu_reg,
- opp_uV - tol_uV,
- opp_uV + tol_uV)) {
- if (opp_uV < min_uV)
- min_uV = opp_uV;
- if (opp_uV > max_uV)
- max_uV = opp_uV;
- } else {
- dev_pm_opp_disable(cpu_dev, opp_freq);
- }
-
- opp_freq++;
- }
-
- ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
- if (ret > 0)
- transition_latency += ret * 1000;
- }
+ priv->reg_name = name;
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
- pr_err("failed to init cpufreq table: %d\n", ret);
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out_free_priv;
}
priv->cpu_dev = cpu_dev;
- priv->cpu_reg = cpu_reg;
policy->driver_data = priv;
-
policy->clk = cpu_clk;
rcu_read_lock();
@@ -357,9 +273,11 @@ static int cpufreq_init(struct cpufreq_policy *policy)
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
}
- policy->cpuinfo.transition_latency = transition_latency;
+ transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+ if (!transition_latency)
+ transition_latency = CPUFREQ_ETERNAL;
- of_node_put(np);
+ policy->cpuinfo.transition_latency = transition_latency;
return 0;
@@ -369,12 +287,10 @@ out_free_priv:
kfree(priv);
out_free_opp:
dev_pm_opp_of_cpumask_remove_table(policy->cpus);
-out_node_put:
- of_node_put(np);
-out_put_reg_clk:
+ if (name)
+ dev_pm_opp_put_regulator(cpu_dev);
+out_put_clk:
clk_put(cpu_clk);
- if (!IS_ERR(cpu_reg))
- regulator_put(cpu_reg);
return ret;
}
@@ -386,9 +302,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ if (priv->reg_name)
+ dev_pm_opp_put_regulator(priv->cpu_dev);
+
clk_put(policy->clk);
- if (!IS_ERR(priv->cpu_reg))
- regulator_put(priv->cpu_reg);
kfree(priv);
return 0;
@@ -407,8 +324,13 @@ static void cpufreq_ready(struct cpufreq_policy *policy)
* thermal DT code takes care of matching them.
*/
if (of_find_property(np, "#cooling-cells", NULL)) {
- priv->cdev = of_cpufreq_cooling_register(np,
- policy->related_cpus);
+ u32 power_coefficient = 0;
+
+ of_property_read_u32(np, "dynamic-power-coefficient",
+ &power_coefficient);
+
+ priv->cdev = of_cpufreq_power_cooling_register(np,
+ policy->related_cpus, power_coefficient, NULL);
if (IS_ERR(priv->cdev)) {
dev_err(priv->cpu_dev,
"running cpufreq without cooling device: %ld\n",
@@ -436,9 +358,6 @@ static struct cpufreq_driver dt_cpufreq_driver = {
static int dt_cpufreq_probe(struct platform_device *pdev)
{
- struct device *cpu_dev;
- struct regulator *cpu_reg;
- struct clk *cpu_clk;
int ret;
/*
@@ -448,19 +367,15 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
*
* FIXME: Is checking this only for CPU0 sufficient ?
*/
- ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
+ ret = resources_available();
if (ret)
return ret;
- clk_put(cpu_clk);
- if (!IS_ERR(cpu_reg))
- regulator_put(cpu_reg);
-
dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret)
- dev_err(cpu_dev, "failed register driver: %d\n", ret);
+ dev_err(&pdev->dev, "failed register driver: %d\n", ret);
return ret;
}
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index fb16d812c8f5..1dffb13e5c2f 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1396,9 +1396,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
- if (!aes_dd->io_base) {
+ if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(aes_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 3178f84d2757..0dadb6332f0e 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1405,9 +1405,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
- if (!sha_dd->io_base) {
+ if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(sha_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 2c7a628d0375..bf467d7be35c 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
- if (!tdes_dd->io_base) {
+ if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(tdes_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index d89f20c04266..3d9acc53d247 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -220,6 +220,39 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
return ccp_aes_cmac_finup(req);
}
+static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ state.null_msg = rctx->null_msg;
+ memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->null_msg = state.null_msg;
+ memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -352,10 +385,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
alg->final = ccp_aes_cmac_final;
alg->finup = ccp_aes_cmac_finup;
alg->digest = ccp_aes_cmac_digest;
+ alg->export = ccp_aes_cmac_export;
+ alg->import = ccp_aes_cmac_import;
alg->setkey = ccp_aes_cmac_setkey;
halg = &alg->halg;
halg->digestsize = AES_BLOCK_SIZE;
+ halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index d14b3f28e010..8ef06fad8b14 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -207,6 +207,43 @@ static int ccp_sha_digest(struct ahash_request *req)
return ccp_sha_finup(req);
}
+static int ccp_sha_export(struct ahash_request *req, void *out)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ state.type = rctx->type;
+ state.msg_bits = rctx->msg_bits;
+ state.first = rctx->first;
+ memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_sha_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->type = state.type;
+ rctx->msg_bits = state.msg_bits;
+ rctx->first = state.first;
+ memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -403,9 +440,12 @@ static int ccp_register_sha_alg(struct list_head *head,
alg->final = ccp_sha_final;
alg->finup = ccp_sha_finup;
alg->digest = ccp_sha_digest;
+ alg->export = ccp_sha_export;
+ alg->import = ccp_sha_import;
halg = &alg->halg;
halg->digestsize = def->digest_size;
+ halg->statesize = sizeof(struct ccp_sha_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 76a96f0f44c6..a326ec20bfa8 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_aes_cmac_exp_ctx {
+ unsigned int null_msg;
+
+ u8 iv[AES_BLOCK_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[AES_BLOCK_SIZE];
+};
+
/***** SHA related defines *****/
#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_sha_exp_ctx {
+ enum ccp_sha_type type;
+
+ u64 msg_bits;
+
+ unsigned int first;
+
+ u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[MAX_SHA_BLOCK_SIZE];
+};
+
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index c0656e7f37b5..80239ae69527 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
cesa->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cesa->regs))
- return -ENOMEM;
+ return PTR_ERR(cesa->regs);
ret = mv_cesa_dev_dma_init(cesa);
if (ret)
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 51f50698e597..e63f061175ad 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1,6 +1,6 @@
/* Qualcomm CE device driver.
*
- * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1543,7 +1543,7 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all dst length is equal to data_len */
for (i = 0; i < req->entries; i++) {
- if (req->vbuf.dst[i].len >= ULONG_MAX - total) {
+ if (req->vbuf.dst[i].len >= U32_MAX - total) {
pr_err("%s: Integer overflow on total req dst vbuf length\n",
__func__);
goto error;
@@ -1557,7 +1557,7 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all src length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
- if (req->vbuf.src[i].len > ULONG_MAX - total) {
+ if (req->vbuf.src[i].len > U32_MAX - total) {
pr_err("%s: Integer overflow on total req src vbuf length\n",
__func__);
goto error;
@@ -1619,7 +1619,7 @@ static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
/* Check for sum of all src length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
- if (req->data[i].len > ULONG_MAX - total) {
+ if (req->data[i].len > U32_MAX - total) {
pr_err("%s: Integer overflow on total req buf length\n",
__func__);
goto sha_error;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 4c243c1ffc7f..790f7cadc1ed 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "[%s]: ioremap failed!", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index f47d112041b2..66b1c3313e2e 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1675,9 +1675,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "%s: ioremap() failed!\n", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
spin_lock_init(&device_data->ctx_lock);
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index 370d7d95042b..4fb0a5ffda50 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -311,19 +311,19 @@ static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
hw->of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
if (!hw->of_node) {
dev_err(dev, "Couldn't find a target device\n");
- goto err_out;
+ return -ENODEV;
}
if (get_mask_from_dev_handle(pdev, &cpu_grp->cpus)) {
dev_err(dev, "CPU list is empty\n");
- goto err_out;
+ return -ENODEV;
}
hw->num_cores = cpumask_weight(&cpu_grp->cpus);
hw->core_stats = devm_kzalloc(dev, hw->num_cores *
sizeof(*(hw->core_stats)), GFP_KERNEL);
if (!hw->core_stats)
- goto err_out;
+ return -ENOMEM;
for_each_cpu(cpu, &cpu_grp->cpus)
hw->core_stats[cpu - cpumask_first(&cpu_grp->cpus)].id = cpu;
@@ -335,14 +335,10 @@ static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
ret = register_memlat(dev, hw);
if (ret) {
pr_err("Mem Latency Gov registration failed\n");
- goto err_out;
+ return ret;
}
return 0;
-
-err_out:
- kfree(cpu_grp);
- return -EINVAL;
}
static struct of_device_id match_table[] = {
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9eee13ef83a5..d87a47547ba5 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 chan_off;
u64 dram_base = get_dram_base(pvt, range);
u64 hole_off = f10_dhar_offset(pvt);
- u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+ u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
if (hi_rng) {
/*
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 429309c62699..cbee3179ec08 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1117,8 +1117,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
n_tads, gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
- (u32)TAD_SOCK(reg),
- (u32)TAD_CH(reg),
+ (u32)(1 << TAD_SOCK(reg)),
+ (u32)TAD_CH(reg) + 1,
(u32)TAD_TGT0(reg),
(u32)TAD_TGT1(reg),
(u32)TAD_TGT2(reg),
@@ -1396,7 +1396,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
ch_way = TAD_CH(reg) + 1;
- sck_way = TAD_SOCK(reg) + 1;
+ sck_way = 1 << TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
@@ -1453,7 +1453,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
n_tads,
addr,
limit,
- (u32)TAD_SOCK(reg),
+ sck_way,
ch_way,
offset,
idx,
@@ -1468,18 +1468,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
offset, addr);
return -EINVAL;
}
- addr -= offset;
- /* Store the low bits [0:6] of the addr */
- ch_addr = addr & 0x7f;
- /* Remove socket wayness and remove 6 bits */
- addr >>= 6;
- addr = div_u64(addr, sck_xch);
-#if 0
- /* Divide by channel way */
- addr = addr / ch_way;
-#endif
- /* Recover the last 6 bits */
- ch_addr |= addr << 6;
+
+ ch_addr = addr - offset;
+ ch_addr >>= (6 + shiftup);
+ ch_addr /= ch_way * sck_way;
+ ch_addr <<= (6 + shiftup);
+ ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
/*
* Step 3) Decode rank
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 5a8fbadbd27b..8ac49812a716 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,6 +63,10 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+ return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -142,10 +146,6 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- atpx->functions.power_cntl = true;
-
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c961fe093e12..9d88023df836 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -61,6 +61,12 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -1469,7 +1475,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_runtime_pm == 1)
runtime = true;
- if (amdgpu_device_is_px(ddev))
+ if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 272110cc18c2..ea87033bfaf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -898,14 +898,6 @@ static int gmc_v7_0_early_init(void *handle)
gmc_v7_0_set_gart_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -926,6 +918,14 @@ static int gmc_v7_0_sw_init(void *handle)
if (r)
return r;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ba4ad00ba8b4..08423089fb84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -852,14 +852,6 @@ static int gmc_v8_0_early_init(void *handle)
gmc_v8_0_set_gart_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -870,6 +862,8 @@ static int gmc_v8_0_late_init(void *handle)
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
+#define mmMC_SEQ_MISC0_FIJI 0xA71
+
static int gmc_v8_0_sw_init(void *handle)
{
int r;
@@ -880,6 +874,19 @@ static int gmc_v8_0_sw_init(void *handle)
if (r)
return r;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp;
+
+ if (adev->asic_type == CHIP_FIJI)
+ tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+ else
+ tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 2cf50180cc51..b1c7a9b3631b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -32,8 +32,8 @@
#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 9535c5b60387..7e5a97204051 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -178,7 +178,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
{
struct drm_dp_aux_msg msg;
unsigned int retry;
- int err;
+ int err = 0;
memset(&msg, 0, sizeof(msg));
msg.address = offset;
@@ -186,6 +186,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
msg.buffer = buffer;
msg.size = size;
+ mutex_lock(&aux->hw_mutex);
+
/*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
@@ -194,25 +196,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
*/
for (retry = 0; retry < 32; retry++) {
- mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
- mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
- return err;
+ goto unlock;
}
switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
if (err < size)
- return -EPROTO;
- return err;
+ err = -EPROTO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_NACK:
- return -EIO;
+ err = -EIO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_DEFER:
usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -221,7 +222,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
DRM_DEBUG_KMS("too many retries, giving up\n");
- return -EIO;
+ err = -EIO;
+
+unlock:
+ mutex_unlock(&aux->hw_mutex);
+ return err;
}
/**
@@ -543,9 +548,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
- mutex_lock(&aux->hw_mutex);
ret = aux->transfer(aux, msg);
- mutex_unlock(&aux->hw_mutex);
if (ret < 0) {
if (ret == -EBUSY)
continue;
@@ -684,6 +687,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
memset(&msg, 0, sizeof(msg));
+ mutex_lock(&aux->hw_mutex);
+
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -738,6 +743,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
+ mutex_unlock(&aux->hw_mutex);
+
return err;
}
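
The drm_dp_helper.c change above widens the hw_mutex scope so one lock covers the whole retry loop, and every early return becomes a jump to a single unlock label. Below is a minimal, hypothetical sketch of that single-exit locking pattern; do_transfer() and MAX_RETRIES are stand-ins and not part of the patch:

#include <linux/errno.h>
#include <linux/mutex.h>

#define MAX_RETRIES 32			/* stand-in for the DPCD retry count */

/* Sketch only: lock-around-retry-loop with a single unlock exit. */
static int locked_retry_transfer(struct mutex *lock, void *ctx)
{
	int err = 0;
	unsigned int retry;

	mutex_lock(lock);		/* taken once for the whole transaction */

	for (retry = 0; retry < MAX_RETRIES; retry++) {
		err = do_transfer(ctx);	/* hypothetical transfer hook */
		if (err == -EBUSY)
			continue;	/* retry while still holding the lock */
		goto unlock;		/* success or hard failure: one exit */
	}

	err = -EIO;			/* retries exhausted */

unlock:
	mutex_unlock(lock);		/* the only place the lock is dropped */
	return err;
}
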
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index bb292143997e..adf74f4366bb 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -892,8 +892,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
else
args.v1.ucLaneNum = 4;
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -910,6 +908,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+
break;
case 2:
case 3:
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index c4b4f298a283..9bc408c9f9f6 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,6 +62,10 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
+bool radeon_has_atpx_dgpu_power_cntl(void) {
+ return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
@@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- atpx->functions.power_cntl = true;
-
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c566993a2ec3..f78f111e68de 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -1433,7 +1439,7 @@ int radeon_device_init(struct radeon_device *rdev,
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- if (rdev->flags & RADEON_IS_PX)
+ if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a82b891ae1fe..7285adb27099 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,9 +2926,11 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
{ 0, 0, 0, 0 },
};
@@ -3008,6 +3010,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
+ /* limit mclk on all R7 370 parts for stability */
+ if (rdev->pdev->device == 0x6811 &&
+ rdev->pdev->revision == 0x81)
+ max_mclk = 120000;
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 62c7b1dafaa4..73e41a8613da 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -539,7 +539,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_gfree:
- drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 2a0a784ab6ee..d7528e0d8442 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
return ret;
}
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
*handle_p = handle;
return 0;
}
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 3052166c7a18..b57fe05b21d5 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -579,7 +579,6 @@ void adreno_cp_callback(struct adreno_device *adreno_dev, int bit)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- kgsl_schedule_work(&device->event_work);
adreno_dispatcher_schedule(device);
}
@@ -1130,7 +1129,10 @@ static int adreno_init(struct kgsl_device *device)
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int ret;
- kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
+ ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
+ if (ret)
+ return ret;
+
/*
* initialization only needs to be done once initially until
* device is shutdown
@@ -1595,6 +1597,8 @@ static int adreno_stop(struct kgsl_device *device)
adreno_ringbuffer_stop(adreno_dev);
+ kgsl_pwrscale_update_stats(device);
+
adreno_irqctrl(adreno_dev, 0);
adreno_ocmem_free(adreno_dev);
@@ -2117,6 +2121,14 @@ bool adreno_hw_isidle(struct adreno_device *adreno_dev)
const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
unsigned int reg_rbbm_status;
+ if (adreno_is_a540(adreno_dev))
+ /*
+ * Due to CRC idle throttling the GPU idle hysteresis
+ * can take up to 3 usec to expire - account for it
+ */
+ udelay(5);
+
adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
&reg_rbbm_status);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 9f462bca26ce..f5fb4e48c3ee 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -613,11 +613,13 @@ struct adreno_vbif_platform {
* struct adreno_vbif_snapshot_registers - Holds an array of vbif registers
* listed for snapshot dump for a particular core
* @version: vbif version
+ * @mask: vbif revision mask
* @registers: vbif registers listed for snapshot dump
* @count: count of vbif registers listed for snapshot
*/
struct adreno_vbif_snapshot_registers {
const unsigned int version;
+ const unsigned int mask;
const unsigned int *registers;
const int count;
};
diff --git a/drivers/gpu/msm/adreno_a4xx_snapshot.c b/drivers/gpu/msm/adreno_a4xx_snapshot.c
index 6921af5c0ab5..540b42b984c0 100644
--- a/drivers/gpu/msm/adreno_a4xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a4xx_snapshot.c
@@ -168,15 +168,15 @@ static const unsigned int a4xx_vbif_ver_20050000_registers[] = {
static const struct adreno_vbif_snapshot_registers
a4xx_vbif_snapshot_registers[] = {
- { 0x20000000, a4xx_vbif_ver_20000000_registers,
+ { 0x20000000, 0xFFFF0000, a4xx_vbif_ver_20000000_registers,
ARRAY_SIZE(a4xx_vbif_ver_20000000_registers)/2},
- { 0x20020000, a4xx_vbif_ver_20020000_registers,
+ { 0x20020000, 0xFFFF0000, a4xx_vbif_ver_20020000_registers,
ARRAY_SIZE(a4xx_vbif_ver_20020000_registers)/2},
- { 0x20050000, a4xx_vbif_ver_20050000_registers,
+ { 0x20050000, 0xFFFF0000, a4xx_vbif_ver_20050000_registers,
ARRAY_SIZE(a4xx_vbif_ver_20050000_registers)/2},
- { 0x20070000, a4xx_vbif_ver_20020000_registers,
+ { 0x20070000, 0xFFFF0000, a4xx_vbif_ver_20020000_registers,
ARRAY_SIZE(a4xx_vbif_ver_20020000_registers)/2},
- { 0x20090000, a4xx_vbif_ver_20050000_registers,
+ { 0x20090000, 0xFFFF0000, a4xx_vbif_ver_20050000_registers,
ARRAY_SIZE(a4xx_vbif_ver_20050000_registers)/2},
};
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 96f72c59e4cd..467b385f6d56 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -2373,17 +2373,25 @@ static int a5xx_microcode_read(struct adreno_device *adreno_dev)
{
int ret;
- ret = _load_firmware(KGSL_DEVICE(adreno_dev),
- adreno_dev->gpucore->pm4fw_name, &adreno_dev->pm4,
- &adreno_dev->pm4_fw_size, &adreno_dev->pm4_fw_version);
- if (ret)
- return ret;
+ if (adreno_dev->pm4.hostptr == NULL) {
+ ret = _load_firmware(KGSL_DEVICE(adreno_dev),
+ adreno_dev->gpucore->pm4fw_name,
+ &adreno_dev->pm4,
+ &adreno_dev->pm4_fw_size,
+ &adreno_dev->pm4_fw_version);
+ if (ret)
+ return ret;
+ }
- ret = _load_firmware(KGSL_DEVICE(adreno_dev),
- adreno_dev->gpucore->pfpfw_name, &adreno_dev->pfp,
- &adreno_dev->pfp_fw_size, &adreno_dev->pfp_fw_version);
- if (ret)
- return ret;
+ if (adreno_dev->pfp.hostptr == NULL) {
+ ret = _load_firmware(KGSL_DEVICE(adreno_dev),
+ adreno_dev->gpucore->pfpfw_name,
+ &adreno_dev->pfp,
+ &adreno_dev->pfp_fw_size,
+ &adreno_dev->pfp_fw_version);
+ if (ret)
+ return ret;
+ }
ret = _load_gpmu_firmware(adreno_dev);
if (ret)
@@ -3058,7 +3066,6 @@ static void a5xx_irq_storm_worker(struct work_struct *work)
mutex_unlock(&device->mutex);
/* Reschedule just to make sure everything retires */
- kgsl_schedule_work(&device->event_work);
adreno_dispatcher_schedule(device);
}
@@ -3109,8 +3116,6 @@ static void a5xx_cp_callback(struct adreno_device *adreno_dev, int bit)
}
a5xx_preemption_trigger(adreno_dev);
-
- kgsl_schedule_work(&device->event_work);
adreno_dispatcher_schedule(device);
}
diff --git a/drivers/gpu/msm/adreno_a5xx.h b/drivers/gpu/msm/adreno_a5xx.h
index 7965bb7b5440..27d5a4b31c71 100644
--- a/drivers/gpu/msm/adreno_a5xx.h
+++ b/drivers/gpu/msm/adreno_a5xx.h
@@ -52,7 +52,7 @@
#define A5XX_CP_CTXRECORD_MAGIC_REF 0x27C4BAFCUL
/* Size of each CP preemption record */
-#define A5XX_CP_CTXRECORD_SIZE_IN_BYTES 0x100000
+#define A5XX_CP_CTXRECORD_SIZE_IN_BYTES 0x10000
/* Size of the preemption counter block (in bytes) */
#define A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE (16 * 4)
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 4f368a8f93f3..04d82844a5e9 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -128,6 +128,9 @@ static const struct adreno_debugbus_block a5xx_debugbus_blocks[] = {
#define A5XX_NUM_AXI_ARB_BLOCKS 2
#define A5XX_NUM_XIN_BLOCKS 4
+/* Width of A5XX_CP_DRAW_STATE_ADDR is 8 bits */
+#define A5XX_CP_DRAW_STATE_ADDR_WIDTH 8
+
/* a5xx_snapshot_cp_pm4() - Dump PM4 data in snapshot */
static size_t a5xx_snapshot_cp_pm4(struct kgsl_device *device, u8 *buf,
size_t remain, void *priv)
@@ -326,8 +329,7 @@ static void a5xx_snapshot_debugbus(struct kgsl_device *device,
}
}
-static const unsigned int a5xx_vbif_ver_20040000_registers[] = {
- /* VBIF version 0x20040000*/
+static const unsigned int a5xx_vbif_ver_20xxxxxx_registers[] = {
0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302C, 0x3030, 0x3030,
0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
0x3042, 0x3042, 0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061,
@@ -341,10 +343,8 @@ static const unsigned int a5xx_vbif_ver_20040000_registers[] = {
static const struct adreno_vbif_snapshot_registers
a5xx_vbif_snapshot_registers[] = {
- { 0x20040000, a5xx_vbif_ver_20040000_registers,
- ARRAY_SIZE(a5xx_vbif_ver_20040000_registers)/2},
- { 0x20040001, a5xx_vbif_ver_20040000_registers,
- ARRAY_SIZE(a5xx_vbif_ver_20040000_registers)/2},
+ { 0x20000000, 0xFF000000, a5xx_vbif_ver_20xxxxxx_registers,
+ ARRAY_SIZE(a5xx_vbif_ver_20xxxxxx_registers)/2},
};
/*
@@ -379,7 +379,7 @@ static const unsigned int a5xx_registers[] = {
/* VPC */
0x0E60, 0x0E7C,
/* UCHE */
- 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0xEA0, 0xEA8, 0xEB0, 0xEB2,
+ 0x0E80, 0x0E8F, 0x0E90, 0x0E96, 0xEA0, 0xEA8, 0xEB0, 0xEB2,
/* RB CTX 0 */
0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6,
@@ -414,49 +414,49 @@ static const unsigned int a5xx_registers[] = {
0xB000, 0xB97F, 0xB9A0, 0xB9BF,
};
-/*
- * The HLSQ registers can only be read via the crash dumper (not AHB) so they
- * need to be in their own array because the array above does double duty for
- * the fallback path too
- */
-static const unsigned int a5xx_hlsq_registers[] = {
+struct a5xx_hlsq_sp_tp_regs {
+ unsigned int statetype;
+ unsigned int ahbaddr;
+ unsigned int size;
+ uint64_t offset;
+};
+
+static struct a5xx_hlsq_sp_tp_regs a5xx_hlsq_sp_tp_registers[] = {
+ /* HLSQ non context. 0xe32 - 0xe3f are holes so don't include them */
+ { 0x35, 0xE00, 0x32 },
+ /* HLSQ CTX 0 2D */
+ { 0x31, 0x2080, 0x1 },
+ /* HLSQ CTX 1 2D */
+ { 0x33, 0x2480, 0x1 },
+ /* HLSQ CTX 0 3D. 0xe7e2 - 0xe7ff are holes so don't include them */
+ { 0x32, 0xE780, 0x62 },
+ /* HLSQ CTX 1 3D. 0xefe2 - 0xefff are holes so don't include them */
+ { 0x34, 0xEF80, 0x62 },
+
/* SP non context */
- 0x0EC0, 0xEC2, 0xED0, 0xEE0, 0xEF0, 0xEF2, 0xEFA, 0xEFF,
+ { 0x3f, 0x0EC0, 0x40 },
/* SP CTX 0 2D */
- 0x2040, 0x2040,
+ { 0x3d, 0x2040, 0x1 },
/* SP CTX 1 2D */
- 0x2440, 0x2440,
- /* SP CTXT 0 3D */
- 0xE580, 0xE580, 0xE584, 0xE58B, 0xE590, 0xE5B1, 0xE5C0, 0xE5DF,
- 0xE5F0, 0xE5F9, 0xE600, 0xE608, 0xE610, 0xE631, 0xE640, 0xE661,
- 0xE670, 0xE673, 0xE6F0, 0xE6F0,
- /* SP CTXT 1 3D */
- 0xED80, 0xED80, 0xED84, 0xED8B, 0xED90, 0xEDB1, 0xEDC0, 0xEDDF,
- 0xEDF0, 0xEDF9, 0xEE00, 0xEE08, 0xEE10, 0xEE31, 0xEE40, 0xEE61,
- 0xEE70, 0xEE73, 0xEEF0, 0xEEF0,
- /* TP non context */
- 0xF00, 0xF03, 0xF08, 0xF08, 0xF10, 0xF1B,
- /* TP CTX 0 2D */
- 0x2000, 0x2009,
- /* TP CTX 1 2D */
- 0x2400, 0x2409,
+ { 0x3b, 0x2440, 0x1 },
+ /* SP CTX 0 3D */
+ { 0x3e, 0xE580, 0x180 },
+ /* SP CTX 1 3D */
+ { 0x3c, 0xED80, 0x180 },
+
+ /* TP non context. 0x0f1c - 0x0f3f are holes so don't include them */
+ { 0x3a, 0x0F00, 0x1c },
+ /* TP CTX 0 2D. 0x200a - 0x200f are holes so don't include them */
+ { 0x38, 0x2000, 0xa },
+ /* TP CTX 1 2D. 0x240a - 0x240f are holes so don't include them */
+ { 0x36, 0x2400, 0xa },
/* TP CTX 0 3D */
- 0xE700, 0xE707, 0xE70E, 0xE731,
- 0xE750, 0xE751, 0xE75A, 0xE764, 0xE76C, 0xE77F,
+ { 0x39, 0xE700, 0x80 },
/* TP CTX 1 3D */
- 0xEF00, 0xEF07, 0xEF0E, 0xEF31,
- 0xEF50, 0xEF51, 0xEF5A, 0xEF64, 0xEF6C, 0xEF7F,
- /* HLSQ non context */
- 0xE00, 0xE01, 0xE04, 0xE06, 0xE08, 0xE09, 0xE10, 0xE17,
- 0xE20, 0xE25,
- /* HLSQ CTXT 0 3D */
- 0xE784, 0xE789, 0xE78B, 0xE796, 0xE7A0, 0xE7A2, 0xE7B0, 0xE7BB,
- 0xE7C0, 0xE7DD, 0xE7E0, 0xE7E1,
- /* HLSQ CTXT 1 3D */
- 0xEF84, 0xEF89, 0xEF8B, 0xEF96, 0xEFA0, 0xEFA2, 0xEFB0, 0xEFBB,
- 0xEFC0, 0xEFDD, 0xEFE0, 0xEFE1,
+ { 0x37, 0xEF00, 0x80 },
};
+
#define A5XX_NUM_SHADER_BANKS 4
#define A5XX_SHADER_STATETYPE_SHIFT 8
@@ -652,7 +652,6 @@ static struct cdregs {
unsigned int size;
} _a5xx_cd_registers[] = {
{ a5xx_registers, ARRAY_SIZE(a5xx_registers) },
- { a5xx_hlsq_registers, ARRAY_SIZE(a5xx_hlsq_registers) },
};
#define REG_PAIR_COUNT(_a, _i) \
@@ -776,6 +775,46 @@ static void _a5xx_do_crashdump(struct kgsl_device *device)
crash_dump_valid = true;
}
+static int get_hlsq_registers(struct kgsl_device *device,
+ const struct a5xx_hlsq_sp_tp_regs *regs, unsigned int *data)
+{
+ unsigned int i;
+ unsigned int *src = registers.hostptr + regs->offset;
+
+ for (i = 0; i < regs->size; i++) {
+ *data++ = regs->ahbaddr + i;
+ *data++ = *(src + i);
+ }
+
+ return (2 * regs->size);
+}
+
+static size_t a5xx_snapshot_dump_hlsq_sp_tp_regs(struct kgsl_device *device,
+ u8 *buf, size_t remain, void *priv)
+{
+ struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+ unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+ int count = 0, i;
+
+ /* Figure out how many registers we are going to dump */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_sp_tp_registers); i++)
+ count += a5xx_hlsq_sp_tp_registers[i].size;
+
+ if (remain < (count * 8) + sizeof(*header)) {
+ SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_sp_tp_registers); i++)
+ data += get_hlsq_registers(device,
+ &a5xx_hlsq_sp_tp_registers[i], data);
+
+ header->count = count;
+
+ /* Return the size of the section */
+ return (count * 8) + sizeof(*header);
+}
+
/*
* a5xx_snapshot() - A5XX GPU snapshot function
* @adreno_dev: Device being snapshotted
@@ -806,6 +845,10 @@ void a5xx_snapshot(struct adreno_device *adreno_dev,
a5xx_vbif_snapshot_registers,
ARRAY_SIZE(a5xx_vbif_snapshot_registers));
+ /* Dump SP TP HLSQ registers */
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
+ a5xx_snapshot_dump_hlsq_sp_tp_regs, NULL);
+
/* CP_PFP indexed registers */
kgsl_snapshot_indexed_registers(device, snapshot,
A5XX_CP_PFP_STAT_ADDR, A5XX_CP_PFP_STAT_DATA,
@@ -819,7 +862,7 @@ void a5xx_snapshot(struct adreno_device *adreno_dev,
/* CP_DRAW_STATE */
kgsl_snapshot_indexed_registers(device, snapshot,
A5XX_CP_DRAW_STATE_ADDR, A5XX_CP_DRAW_STATE_DATA,
- 0, 128);
+ 0, 1 << A5XX_CP_DRAW_STATE_ADDR_WIDTH);
/*
* CP needs to be halted on a530v1 before reading CP_PFP_UCODE_DBG_DATA
@@ -878,8 +921,8 @@ void a5xx_snapshot(struct adreno_device *adreno_dev,
}
-static int _a5xx_crashdump_init(struct a5xx_shader_block *block, uint64_t *ptr,
- uint64_t *offset)
+static int _a5xx_crashdump_init_shader(struct a5xx_shader_block *block,
+ uint64_t *ptr, uint64_t *offset)
{
int qwords = 0;
unsigned int j;
@@ -908,6 +951,31 @@ static int _a5xx_crashdump_init(struct a5xx_shader_block *block, uint64_t *ptr,
return qwords;
}
+static int _a5xx_crashdump_init_hlsq(struct a5xx_hlsq_sp_tp_regs *regs,
+ uint64_t *ptr, uint64_t *offset)
+{
+ int qwords = 0;
+
+ /* Program the aperture */
+ ptr[qwords++] =
+ (regs->statetype << A5XX_SHADER_STATETYPE_SHIFT);
+ ptr[qwords++] = (((uint64_t) A5XX_HLSQ_DBG_READ_SEL << 44)) |
+ (1 << 21) | 1;
+
+ /* Read all the data in one chunk */
+ ptr[qwords++] = registers.gpuaddr + *offset;
+ ptr[qwords++] =
+ (((uint64_t) A5XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
+ regs->size;
+
+ /* Remember the offset of the first bank for easy access */
+ regs->offset = *offset;
+
+ *offset += regs->size * sizeof(unsigned int);
+
+ return qwords;
+}
+
void a5xx_crashdump_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -954,6 +1022,11 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev)
data_size += a5xx_shader_blocks[i].sz * sizeof(unsigned int) *
A5XX_NUM_SHADER_BANKS;
}
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_sp_tp_registers); i++) {
+ script_size += 32;
+ data_size +=
+ a5xx_hlsq_sp_tp_registers[i].size * sizeof(unsigned int);
+ }
/* Now allocate the script and data buffers */
@@ -968,7 +1041,6 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev)
kgsl_free_global(KGSL_DEVICE(adreno_dev), &capturescript);
return;
}
-
/* Build the crash script */
ptr = (uint64_t *) capturescript.hostptr;
@@ -987,9 +1059,13 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev)
/* Program each shader block */
for (i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
- ptr += _a5xx_crashdump_init(&a5xx_shader_blocks[i], ptr,
+ ptr += _a5xx_crashdump_init_shader(&a5xx_shader_blocks[i], ptr,
&offset);
}
+ /* Program the hlsq sp tp register sets */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_sp_tp_registers); i++)
+ ptr += _a5xx_crashdump_init_hlsq(&a5xx_hlsq_sp_tp_registers[i],
+ ptr, &offset);
*ptr++ = 0;
*ptr++ = 0;
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index ac3805800691..5d3b2b8a7266 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -284,6 +284,7 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
struct kgsl_context *context = cmdbatch->context;
struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
struct kgsl_device *device = context->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
/*
* Write the start and end timestamp to the memstore to keep the
@@ -301,7 +302,16 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
/* Retire pending GPU events for the object */
kgsl_process_event_group(device, &context->events);
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
+ /*
+ * For A3xx we still get the rptr from CP_RB_RPTR instead of the
+ * rptr scratch-out address. At this point the GPU clocks may be
+ * turned off, so avoid reading GPU registers directly on A3xx.
+ */
+ if (adreno_is_a3xx(adreno_dev))
+ trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
+ 0);
+ else
+ trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
adreno_get_rptr(drawctxt->rb));
kgsl_cmdbatch_destroy(cmdbatch);
}
@@ -613,12 +623,13 @@ static int sendcmd(struct adreno_device *adreno_dev,
}
}
- mutex_unlock(&device->mutex);
if (ret) {
dispatcher->inflight--;
dispatch_q->inflight--;
+ mutex_unlock(&device->mutex);
+
/*
* Don't log a message in case of:
* -ENOENT means that the context was detached before the
@@ -642,6 +653,8 @@ static int sendcmd(struct adreno_device *adreno_dev,
time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
adreno_get_rptr(drawctxt->rb));
+ mutex_unlock(&device->mutex);
+
cmdbatch->submit_ticks = time.ticks;
dispatch_q->cmd_q[dispatch_q->tail] = cmdbatch;
@@ -1923,9 +1936,20 @@ static void retire_cmdbatch(struct adreno_device *adreno_dev,
if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end);
- trace_adreno_cmdbatch_retired(cmdbatch, (int) dispatcher->inflight,
- start, end, ADRENO_CMDBATCH_RB(cmdbatch),
- adreno_get_rptr(drawctxt->rb));
+ /*
+ * For A3xx we still get the rptr from CP_RB_RPTR instead of the
+ * rptr scratch-out address. At this point the GPU clocks may be
+ * turned off, so avoid reading GPU registers directly on A3xx.
+ */
+ if (adreno_is_a3xx(adreno_dev))
+ trace_adreno_cmdbatch_retired(cmdbatch,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_CMDBATCH_RB(cmdbatch), 0);
+ else
+ trace_adreno_cmdbatch_retired(cmdbatch,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_CMDBATCH_RB(cmdbatch),
+ adreno_get_rptr(drawctxt->rb));
drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
end - cmdbatch->submit_ticks;
@@ -2099,19 +2123,18 @@ static void adreno_dispatcher_work(struct work_struct *work)
break;
}
+ kgsl_process_event_groups(device);
+
/*
* dispatcher_do_fault() returns 0 if no faults occurred. If that is the
* case, then clean up preemption and try to schedule more work
*/
if (dispatcher_do_fault(adreno_dev) == 0) {
+
/* Clean up after preemption */
if (gpudev->preemption_schedule)
gpudev->preemption_schedule(adreno_dev);
- /* Re-kick the event engine to catch stragglers */
- if (dispatcher->inflight == 0 && count != 0)
- kgsl_schedule_work(&device->event_work);
-
/* Run the scheduler to dispatch new commands */
_adreno_dispatcher_issuecmds(adreno_dev);
}
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index b069b16c75ef..0eff3da0e494 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -1118,7 +1118,8 @@ static const struct adreno_vbif_snapshot_registers *vbif_registers(
adreno_readreg(adreno_dev, ADRENO_REG_VBIF_VERSION, &version);
for (i = 0; i < count; i++) {
- if (list[i].version == version)
+ if ((list[i].version & list[i].mask) ==
+ (version & list[i].mask))
return &list[i];
}
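
The snapshot lookup above now matches VBIF versions through a per-entry mask, so one register list can cover a whole family of revisions instead of a single exact value. A small standalone sketch of that masked-match idea, with made-up table contents rather than the real VBIF lists:

#include <stdio.h>

/* Illustrative only: masked version matching as in vbif_registers(). */
struct ver_entry {
	unsigned int version;	/* reference version for this entry */
	unsigned int mask;	/* bits of the version that must match */
	const char *name;
};

static const struct ver_entry table[] = {
	{ 0x20040000, 0xFFFF0000, "0x2004xxxx only" },
	{ 0x20000000, 0xFF000000, "any 0x20xxxxxx" },
};

static const struct ver_entry *lookup(unsigned int version)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if ((table[i].version & table[i].mask) ==
		    (version & table[i].mask))
			return &table[i];
	return NULL;
}

int main(void)
{
	/* 0x20050001 misses the exact entry but hits the family entry. */
	const struct ver_entry *e = lookup(0x20050001);

	printf("%s\n", e ? e->name : "no match");
	return 0;
}
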
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index f77dbb7f20af..c203ac7bfe8c 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2144,8 +2144,8 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
}
up_read(&current->mm->mmap_sem);
- if (dmabuf == NULL)
- return -ENODEV;
+ if (IS_ERR_OR_NULL(dmabuf))
+ return dmabuf ? PTR_ERR(dmabuf) : -ENODEV;
ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
if (ret) {
@@ -2249,7 +2249,7 @@ static long _gpuobj_map_dma_buf(struct kgsl_device *device,
if (ret)
return ret;
- if (buf.fd == 0)
+ if (buf.fd < 0)
return -EINVAL;
*fd = buf.fd;
@@ -3663,19 +3663,15 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
if (cache == KGSL_CACHEMODE_WRITEBACK
|| cache == KGSL_CACHEMODE_WRITETHROUGH) {
- struct scatterlist *s;
int i;
unsigned long addr = vma->vm_start;
+ struct kgsl_memdesc *m = &entry->memdesc;
+
+ for (i = 0; i < m->page_count; i++) {
+ struct page *page = m->pages[i];
- for_each_sg(entry->memdesc.sgt->sgl, s,
- entry->memdesc.sgt->nents, i) {
- int j;
- for (j = 0; j < (s->length >> PAGE_SHIFT); j++) {
- struct page *page = sg_page(s);
- page = nth_page(page, j);
- vm_insert_page(vma, addr, page);
- addr += PAGE_SIZE;
- }
+ vm_insert_page(vma, addr, page);
+ addr += PAGE_SIZE;
}
}
@@ -3913,7 +3909,7 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
goto error_close_mmu;
status = kgsl_allocate_global(device, &device->memstore,
- KGSL_MEMSTORE_SIZE, 0, 0);
+ KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG);
if (status != 0)
goto error_close_mmu;
@@ -3957,8 +3953,8 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
PM_QOS_DEFAULT_VALUE);
}
-
- device->events_wq = create_singlethread_workqueue("kgsl-events");
+ device->events_wq = alloc_workqueue("kgsl-events",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
/* Initialize the snapshot engine */
kgsl_device_snapshot_init(device);
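
kgsl_setup_dmabuf_useraddr() above now distinguishes a NULL dmabuf from an ERR_PTR-encoded failure instead of collapsing both into -ENODEV. A minimal sketch of that error-pointer idiom; handle_to_errno() is a hypothetical helper, while the real code applies the check to the dmabuf pointer:

#include <linux/err.h>
#include <linux/errno.h>

/* Sketch only: fold both failure styles of a lookup into one errno. */
static int handle_to_errno(void *handle)
{
	/* NULL means "nothing found"; an ERR_PTR carries a specific errno. */
	if (IS_ERR_OR_NULL(handle))
		return handle ? PTR_ERR(handle) : -ENODEV;

	return 0;	/* a real pointer: no error */
}
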
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index c172021c8944..ee7149e1fd41 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -163,6 +163,8 @@ struct kgsl_memdesc_ops {
#define KGSL_MEMDESC_PRIVILEGED BIT(6)
/* The memdesc is TZ locked content protection */
#define KGSL_MEMDESC_TZ_LOCKED BIT(7)
+/* The memdesc is allocated through contiguous memory */
+#define KGSL_MEMDESC_CONTIG BIT(8)
/**
* struct kgsl_memdesc - GPU memory object descriptor
@@ -179,8 +181,9 @@ struct kgsl_memdesc_ops {
* @ops: Function hooks for the memdesc memory type
* @flags: Flags set from userspace
* @dev: Pointer to the struct device that owns this memory
- * @memmap: bitmap of pages for mmapsize
- * @memmap_len: Number of bits for memmap
+ * @attrs: dma attributes for this memory
+ * @pages: An array of pointers to allocated pages
+ * @page_count: Total number of pages allocated
*/
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable;
@@ -197,6 +200,8 @@ struct kgsl_memdesc {
uint64_t flags;
struct device *dev;
struct dma_attrs attrs;
+ struct page **pages;
+ unsigned int page_count;
};
/*
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 766cd811588c..93ac790f3a55 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -150,7 +150,7 @@ static int print_mem_entry(int id, void *ptr, void *data)
(unsigned long *) m->useraddr,
m->size, entry->id, flags,
memtype_str(kgsl_memdesc_usermem_type(m)),
- usage, m->sgt->nents, m->mapsize);
+ usage, (m->sgt ? m->sgt->nents : 0), m->mapsize);
if (entry->metadata[0] != 0)
seq_printf(s, " %s", entry->metadata);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 4159a5fe375f..f55b795b1d2b 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -272,7 +272,6 @@ struct kgsl_device {
int mem_log;
int pwr_log;
struct kgsl_pwrscale pwrscale;
- struct work_struct event_work;
int reset_counter; /* Track how many GPU core resets have occured */
int cff_dump_enable;
@@ -292,8 +291,6 @@ struct kgsl_device {
.cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
- .event_work = __WORK_INITIALIZER((_dev).event_work,\
- kgsl_process_events),\
.context_idr = IDR_INIT((_dev).context_idr),\
.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
.active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
@@ -602,7 +599,7 @@ void kgsl_process_event_group(struct kgsl_device *device,
struct kgsl_event_group *group);
void kgsl_flush_event_group(struct kgsl_device *device,
struct kgsl_event_group *group);
-void kgsl_process_events(struct work_struct *work);
+void kgsl_process_event_groups(struct kgsl_device *device);
void kgsl_context_destroy(struct kref *kref);
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index 6f70b9ddd376..6e8abf36c50f 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -314,22 +314,16 @@ EXPORT_SYMBOL(kgsl_add_event);
static DEFINE_RWLOCK(group_lock);
static LIST_HEAD(group_list);
-/**
- * kgsl_process_events() - Work queue for processing new timestamp events
- * @work: Pointer to a work_struct
- */
-void kgsl_process_events(struct work_struct *work)
+void kgsl_process_event_groups(struct kgsl_device *device)
{
struct kgsl_event_group *group;
- struct kgsl_device *device = container_of(work, struct kgsl_device,
- event_work);
read_lock(&group_lock);
list_for_each_entry(group, &group_list, group)
_process_event_group(device, group, false);
read_unlock(&group_lock);
}
-EXPORT_SYMBOL(kgsl_process_events);
+EXPORT_SYMBOL(kgsl_process_event_groups);
/**
* kgsl_del_event_group() - Remove a GPU event group
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 865cd9d8f498..b467ef81d257 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1627,16 +1627,34 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
uint64_t addr = memdesc->gpuaddr;
uint64_t size = memdesc->size;
unsigned int flags = _get_protection_flags(memdesc);
+ struct sg_table *sgt = NULL;
- ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, memdesc->sgt->sgl,
- memdesc->sgt->nents, flags);
+ /*
+ * For paged memory allocated through kgsl, memdesc->pages is not NULL.
+ * Allocate an sgt here just for the map operation. Contiguous memory
+ * already has its sgt, so there is no need to allocate one here.
+ */
+ if (memdesc->pages != NULL)
+ sgt = kgsl_alloc_sgt_from_pages(memdesc);
+ else
+ sgt = memdesc->sgt;
+
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt->sgl,
+ sgt->nents, flags);
if (ret)
- return ret;
+ goto done;
ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
if (ret)
_iommu_unmap_sync_pc(pt, memdesc, addr, size);
+done:
+ if (memdesc->pages != NULL)
+ kgsl_free_sgt(sgt);
+
return ret;
}
@@ -1647,6 +1665,8 @@ static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
{
int pg_sz;
unsigned int protflags = _get_protection_flags(memdesc);
+ int ret;
+ struct sg_table *sgt = NULL;
pg_sz = (1 << kgsl_memdesc_get_align(memdesc));
if (!IS_ALIGNED(virtaddr | virtoffset | physoffset | size, pg_sz))
@@ -1655,9 +1675,27 @@ static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
if (size == 0)
return -EINVAL;
- return _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
- memdesc, memdesc->sgt->sgl, memdesc->sgt->nents,
- physoffset, size, protflags);
+ /*
+ * For paged memory allocated through kgsl, memdesc->pages is not NULL.
+ * Allocate an sgt here just for the map operation. Contiguous memory
+ * already has its sgt, so there is no need to allocate one here.
+ */
+ if (memdesc->pages != NULL)
+ sgt = kgsl_alloc_sgt_from_pages(memdesc);
+ else
+ sgt = memdesc->sgt;
+
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
+ memdesc, sgt->sgl, sgt->nents,
+ physoffset, size, protflags);
+
+ if (memdesc->pages != NULL)
+ kgsl_free_sgt(sgt);
+
+ return ret;
}
/* This function must be called with context bank attached */
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index 7fb3b37ac191..7967b19779db 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -263,6 +263,31 @@ void kgsl_pool_free_sgt(struct sg_table *sgt)
}
}
+/**
+ * kgsl_pool_free_pages() - Free pages in the pages array
+ * @pages: pointer to the pages array
+ * @pcount: number of pages in the array
+ *
+ * Free the pages by collapsing any physically adjacent pages.
+ * Pages are added back to the pool if it has sufficient space;
+ * otherwise they are returned to the system.
+ */
+void kgsl_pool_free_pages(struct page **pages, unsigned int pcount)
+{
+ int i;
+
+ if (pages == NULL || pcount == 0)
+ return;
+
+ for (i = 0; i < pcount;) {
+ /*
+ * Free each page or compound page group individually.
+ */
+ struct page *p = pages[i];
+
+ i += 1 << compound_order(p);
+ kgsl_pool_free_page(p);
+ }
+}
static int kgsl_pool_idx_lookup(unsigned int order)
{
int i;
diff --git a/drivers/gpu/msm/kgsl_pool.h b/drivers/gpu/msm/kgsl_pool.h
index f2cdda19140b..efbfa96f1498 100644
--- a/drivers/gpu/msm/kgsl_pool.h
+++ b/drivers/gpu/msm/kgsl_pool.h
@@ -34,6 +34,7 @@ kgsl_gfp_mask(unsigned int page_order)
}
void kgsl_pool_free_sgt(struct sg_table *sgt);
+void kgsl_pool_free_pages(struct page **pages, unsigned int page_count);
void kgsl_init_page_pools(void);
void kgsl_exit_page_pools(void);
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 2b9eef8b6351..11b323e9d40c 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -363,6 +363,8 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
if (new_level == old_level)
return;
+ kgsl_pwrscale_update_stats(device);
+
/*
* Set the active and previous powerlevel first in case the clocks are
* off - if we don't do this then the pwrlevel change won't take effect
@@ -934,6 +936,31 @@ static ssize_t kgsl_pwrctrl_gpu_available_frequencies_show(
return num_chars;
}
+static ssize_t kgsl_pwrctrl_gpu_clock_stats_show(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int index, num_chars = 0;
+
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ mutex_lock(&device->mutex);
+ kgsl_pwrscale_update_stats(device);
+ mutex_unlock(&device->mutex);
+ for (index = 0; index < pwr->num_pwrlevels - 1; index++)
+ num_chars += snprintf(buf + num_chars, PAGE_SIZE - num_chars,
+ "%llu ", pwr->clock_times[index]);
+
+ if (num_chars < PAGE_SIZE)
+ buf[num_chars++] = '\n';
+
+ return num_chars;
+}
+
static ssize_t kgsl_pwrctrl_reset_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1201,6 +1228,9 @@ static DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
static DEVICE_ATTR(gpu_available_frequencies, 0444,
kgsl_pwrctrl_gpu_available_frequencies_show,
NULL);
+static DEVICE_ATTR(gpu_clock_stats, 0444,
+ kgsl_pwrctrl_gpu_clock_stats_show,
+ NULL);
static DEVICE_ATTR(max_pwrlevel, 0644,
kgsl_pwrctrl_max_pwrlevel_show,
kgsl_pwrctrl_max_pwrlevel_store);
@@ -1249,6 +1279,7 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_deep_nap_timer,
&dev_attr_gpubusy,
&dev_attr_gpu_available_frequencies,
+ &dev_attr_gpu_clock_stats,
&dev_attr_max_pwrlevel,
&dev_attr_min_pwrlevel,
&dev_attr_thermal_pwrlevel,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 0029c389484f..8fd06531aa81 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -122,6 +122,7 @@ struct kgsl_regulator {
* @min_pwrlevel - minimum allowable powerlevel per the user
* @num_pwrlevels - number of available power levels
* @interval_timeout - timeout in jiffies to be idle before a power event
+ * @clock_times - Each GPU frequency's accumulated active time in us
* @strtstp_sleepwake - true if the device supports low latency GPU start/stop
* @regulators - array of pointers to kgsl_regulator structs
* @pcl - bus scale identifier
@@ -178,6 +179,7 @@ struct kgsl_pwrctrl {
unsigned int min_pwrlevel;
unsigned int num_pwrlevels;
unsigned long interval_timeout;
+ u64 clock_times[KGSL_MAX_PWRLEVELS];
bool strtstp_sleepwake;
struct kgsl_regulator regulators[KGSL_MAX_REGULATORS];
uint32_t pcl;
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 4f6677d9a1de..d90aec42f30a 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -127,6 +127,7 @@ EXPORT_SYMBOL(kgsl_pwrscale_busy);
*/
void kgsl_pwrscale_update_stats(struct kgsl_device *device)
{
+ struct kgsl_pwrctrl *pwrctrl = &device->pwrctrl;
struct kgsl_pwrscale *psc = &device->pwrscale;
BUG_ON(!mutex_is_locked(&device->mutex));
@@ -150,6 +151,8 @@ void kgsl_pwrscale_update_stats(struct kgsl_device *device)
device->pwrscale.accum_stats.busy_time += stats.busy_time;
device->pwrscale.accum_stats.ram_time += stats.ram_time;
device->pwrscale.accum_stats.ram_wait += stats.ram_wait;
+ pwrctrl->clock_times[pwrctrl->active_pwrlevel] +=
+ stats.busy_time;
}
}
EXPORT_SYMBOL(kgsl_pwrscale_update_stats);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 50dcd39fac58..73edc3f7e146 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -313,10 +313,6 @@ kgsl_sharedmem_init_sysfs(void)
drv_attr_list);
}
-static int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
- struct kgsl_pagetable *pagetable,
- uint64_t size);
-
static int kgsl_cma_alloc_secure(struct kgsl_device *device,
struct kgsl_memdesc *memdesc, uint64_t size);
@@ -358,8 +354,7 @@ static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- int i, pgoff;
- struct scatterlist *s = memdesc->sgt->sgl;
+ int pgoff;
unsigned int offset;
offset = ((unsigned long) vmf->virtual_address - vma->vm_start);
@@ -369,30 +364,15 @@ static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
pgoff = offset >> PAGE_SHIFT;
- /*
- * The sglist might be comprised of mixed blocks of memory depending
- * on how many 64K pages were allocated. This means we have to do math
- * to find the actual 4K page to map in user space
- */
-
- for (i = 0; i < memdesc->sgt->nents; i++) {
- int npages = s->length >> PAGE_SHIFT;
-
- if (pgoff < npages) {
- struct page *page = sg_page(s);
+ if (pgoff < memdesc->page_count) {
+ struct page *page = memdesc->pages[pgoff];
- page = nth_page(page, pgoff);
+ get_page(page);
+ vmf->page = page;
- get_page(page);
- vmf->page = page;
+ memdesc->mapsize += PAGE_SIZE;
- memdesc->mapsize += PAGE_SIZE;
-
- return 0;
- }
-
- pgoff -= npages;
- s = sg_next(s);
+ return 0;
}
return VM_FAULT_SIGBUS;
@@ -455,9 +435,15 @@ static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
for_each_sg_page(memdesc->sgt->sgl, &sg_iter,
memdesc->sgt->nents, 0)
ClearPagePrivate(sg_page_iter_page(&sg_iter));
+
}
- kgsl_pool_free_sgt(memdesc->sgt);
+ /* Free pages using the pages array for non secure paged memory */
+ if (memdesc->pages != NULL)
+ kgsl_pool_free_pages(memdesc->pages, memdesc->page_count);
+ else
+ kgsl_pool_free_sgt(memdesc->sgt);
+
}
/*
@@ -477,31 +463,10 @@ static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
return -ENOMEM;
mutex_lock(&kernel_map_global_lock);
- if (!memdesc->hostptr) {
+ if ((!memdesc->hostptr) && (memdesc->pages != NULL)) {
pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
- struct page **pages = NULL;
- struct scatterlist *sg;
- int npages = PAGE_ALIGN(memdesc->size) >> PAGE_SHIFT;
- int sglen = memdesc->sgt->nents;
- int i, count = 0;
-
- /* create a list of pages to call vmap */
- pages = kgsl_malloc(npages * sizeof(struct page *));
- if (pages == NULL) {
- ret = -ENOMEM;
- goto done;
- }
-
- for_each_sg(memdesc->sgt->sgl, sg, sglen, i) {
- struct page *page = sg_page(sg);
- int j;
-
- for (j = 0; j < sg->length >> PAGE_SHIFT; j++)
- pages[count++] = page++;
- }
-
- memdesc->hostptr = vmap(pages, count,
+ memdesc->hostptr = vmap(memdesc->pages, memdesc->page_count,
VM_IOREMAP, page_prot);
if (memdesc->hostptr)
KGSL_STATS_ADD(memdesc->size,
@@ -509,11 +474,10 @@ static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
&kgsl_driver.stats.vmalloc_max);
else
ret = -ENOMEM;
- kgsl_free(pages);
}
if (memdesc->hostptr)
memdesc->hostptr_count++;
-done:
+
mutex_unlock(&kernel_map_global_lock);
return ret;
@@ -672,7 +636,7 @@ static inline int get_page_size(size_t size, unsigned int align)
}
#endif
-static int
+int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
uint64_t size)
@@ -681,7 +645,6 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
unsigned int j, page_size, len_alloc;
unsigned int pcount = 0;
size_t len;
- struct page **pages = NULL;
unsigned int align;
size = PAGE_ALIGN(size);
@@ -712,18 +675,17 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
memdesc->pagetable = pagetable;
memdesc->ops = &kgsl_page_alloc_ops;
- memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (memdesc->sgt == NULL)
- return -ENOMEM;
-
/*
- * Allocate space to store the list of pages to send to vmap. This is an
- * array of pointers so we can track 1024 pages per page of allocation
+ * Allocate space to store the list of pages. This is an array of
+ * pointers so we can track 1024 pages per page of allocation.
+ * Keep this array around for non-global, non-secure buffers that
+ * are allocated by kgsl. This speeds up the vm fault routine by
+ * letting it find the faulted page in constant time.
*/
- pages = kgsl_malloc(len_alloc * sizeof(struct page *));
+ memdesc->pages = kgsl_malloc(len_alloc * sizeof(struct page *));
- if (pages == NULL) {
+ if (memdesc->pages == NULL) {
ret = -ENOMEM;
goto done;
}
@@ -734,9 +696,9 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
int page_count;
page_count = kgsl_pool_alloc_page(&page_size,
- pages + pcount, len_alloc - pcount,
+ memdesc->pages + pcount,
+ len_alloc - pcount,
&align);
-
if (page_count <= 0) {
if (page_count == -EAGAIN)
continue;
@@ -760,16 +722,12 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
pcount += page_count;
len -= page_size;
memdesc->size += page_size;
+ memdesc->page_count += page_count;
/* Get the needed page size for the next iteration */
page_size = get_page_size(len, align);
}
- ret = sg_alloc_table_from_pages(memdesc->sgt, pages, pcount, 0,
- memdesc->size, GFP_KERNEL);
- if (ret)
- goto done;
-
/* Call to the hypervisor to lock any secure buffer allocations */
if (memdesc->flags & KGSL_MEMFLAGS_SECURE) {
unsigned int i;
@@ -778,10 +736,27 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
int source_vm = VMID_HLOS;
int dest_vm = VMID_CP_PIXEL;
+ memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (memdesc->sgt == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = sg_alloc_table_from_pages(memdesc->sgt, memdesc->pages,
+ memdesc->page_count, 0, memdesc->size, GFP_KERNEL);
+ if (ret) {
+ kfree(memdesc->sgt);
+ goto done;
+ }
+
ret = hyp_assign_table(memdesc->sgt, &source_vm, 1,
&dest_vm, &dest_perms, 1);
- if (ret)
+ if (ret) {
+ sg_free_table(memdesc->sgt);
+ kfree(memdesc->sgt);
+ memdesc->sgt = NULL;
goto done;
+ }
/* Set private bit for each sg to indicate that its secured */
for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i)
@@ -793,6 +768,14 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
KGSL_STATS_ADD(memdesc->size, &kgsl_driver.stats.secure,
&kgsl_driver.stats.secure_max);
+ /*
+ * We don't need the pages array for secure buffers because they
+ * are not mapped to the CPU
+ */
+ kgsl_free(memdesc->pages);
+ memdesc->pages = NULL;
+ memdesc->page_count = 0;
+
/* Don't map and zero the locked secure buffer */
goto done;
}
@@ -802,19 +785,18 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
done:
if (ret) {
- if (pages) {
+ if (memdesc->pages) {
unsigned int count = 1;
for (j = 0; j < pcount; j += count) {
- count = 1 << compound_order(pages[j]);
- kgsl_pool_free_page(pages[j]);
+ count = 1 << compound_order(memdesc->pages[j]);
+ kgsl_pool_free_page(memdesc->pages[j]);
}
}
- kfree(memdesc->sgt);
+ kgsl_free(memdesc->pages);
memset(memdesc, 0, sizeof(*memdesc));
}
- kgsl_free(pages);
return ret;
}
@@ -837,6 +819,9 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
kfree(memdesc->sgt);
}
+ if (memdesc->pages)
+ kgsl_free(memdesc->pages);
+
memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 5093ebd6e51a..c05aaecb5284 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -71,6 +71,10 @@ int kgsl_allocate_user(struct kgsl_device *device,
void kgsl_get_memory_usage(char *str, size_t len, uint64_t memflags);
+int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ uint64_t size);
+
#define MEMFLAGS(_flags, _mask, _shift) \
((unsigned int) (((_flags) & (_mask)) >> (_shift)))
@@ -266,7 +270,16 @@ static inline int kgsl_allocate_global(struct kgsl_device *device,
memdesc->flags = flags;
memdesc->priv = priv;
- ret = kgsl_sharedmem_alloc_contig(device, memdesc, NULL, (size_t) size);
+ if ((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0)
+ ret = kgsl_sharedmem_alloc_contig(device, memdesc, NULL,
+ (size_t) size);
+ else {
+ ret = kgsl_sharedmem_page_alloc_user(memdesc, NULL,
+ (size_t) size);
+ if (ret == 0)
+ kgsl_memdesc_map(memdesc);
+ }
+
if (ret == 0)
kgsl_mmu_add_global(device, memdesc);
@@ -293,4 +306,47 @@ static inline void kgsl_free_global(struct kgsl_device *device,
void kgsl_sharedmem_set_noretry(bool val);
bool kgsl_sharedmem_get_noretry(void);
+/**
+ * kgsl_alloc_sgt_from_pages() - Allocate an sg table
+ *
+ * @m: memory descriptor of the allocation
+ *
+ * Allocate and return a pointer to an sg table built from the
+ * memdesc's pages array
+ */
+static inline struct sg_table *kgsl_alloc_sgt_from_pages(
+ struct kgsl_memdesc *m)
+{
+ int ret;
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (sgt == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table_from_pages(sgt, m->pages, m->page_count, 0,
+ m->size, GFP_KERNEL);
+ if (ret) {
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+
+ return sgt;
+}
+
+/**
+ * kgsl_free_sgt() - Free an sg table structure
+ *
+ * @sgt: sg table pointer to be freed
+ *
+ * Free the table entries referenced by sgt and then free the
+ * sgt structure itself
+ */
+static inline void kgsl_free_sgt(struct sg_table *sgt)
+{
+ if (sgt != NULL) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
+}
+
#endif /* __KGSL_SHAREDMEM_H */
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c6f7a694f67a..ec791e169f8f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1897,6 +1897,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
@@ -2615,9 +2616,10 @@ int hid_add_device(struct hid_device *hdev)
/*
* Scan generic devices for group information
*/
- if (hid_ignore_special_drivers ||
- (!hdev->group &&
- !hid_match_id(hdev, hid_have_special_driver))) {
+ if (hid_ignore_special_drivers) {
+ hdev->group = HID_GROUP_GENERIC;
+ } else if (!hdev->group &&
+ !hid_match_id(hdev, hid_have_special_driver)) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 591d4ad7708f..7ecd96bdf834 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -396,6 +396,11 @@ static void mt_feature_mapping(struct hid_device *hdev,
td->is_buttonpad = true;
break;
+ case 0xff0000c5:
+ /* Retrieve the Win8 blob once to enable some devices */
+ if (usage->usage_index == 0)
+ mt_get_feature(hdev, field->report);
+ break;
}
}
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 10bd8e6e4c9c..0b80633bae91 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -282,17 +282,21 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
+ u16 size;
+ int args_len;
+ int index = 0;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ if (data_len > ihid->bufsize)
+ return -EINVAL;
- /* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
- u16 size = 2 /* size */ +
+ size = 2 /* size */ +
(reportID ? 1 : 0) /* reportID */ +
data_len /* buf */;
- int args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+ args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
2 /* dataRegister */ +
size /* args */;
- int index = 0;
-
- i2c_hid_dbg(ihid, "%s\n", __func__);
if (!use_data && maxOutputLength == 0)
return -ENOSYS;
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 5dd426fee8cc..0df32fe0e345 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
return ret;
}
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
- if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
- usbhid_restart_out_queue(usbhid);
- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
- usbhid_restart_ctrl_queue(usbhid);
-}
-
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
usb_kill_urb(usbhid->urbout);
}
+static void hid_restart_io(struct hid_device *hid)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+ int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+ spin_lock_irq(&usbhid->lock);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ usbhid_mark_busy(usbhid);
+
+ if (clear_halt || reset_pending)
+ schedule_work(&usbhid->reset_work);
+ usbhid->retry_delay = 0;
+ spin_unlock_irq(&usbhid->lock);
+
+ if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+ return;
+
+ if (!clear_halt) {
+ if (hid_start_in(hid) < 0)
+ hid_io_error(hid);
+ }
+
+ spin_lock_irq(&usbhid->lock);
+ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+ usbhid_restart_out_queue(usbhid);
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
+ spin_unlock_irq(&usbhid->lock);
+}
+
/* Treat USB reset pretty much the same as suspend/resume */
static int hid_pre_reset(struct usb_interface *intf)
{
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
return 1;
}
+ /* No need to do another reset or clear a halted endpoint */
spin_lock_irq(&usbhid->lock);
clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+ clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_restart_queues(usbhid);
+
+ hid_restart_io(hid);
return 0;
}
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
{
- struct usbhid_device *usbhid = hid->driver_data;
- int status;
-
- spin_lock_irq(&usbhid->lock);
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
- usbhid_mark_busy(usbhid);
-
- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
- test_bit(HID_RESET_PENDING, &usbhid->iofl))
- schedule_work(&usbhid->reset_work);
- usbhid->retry_delay = 0;
-
- usbhid_restart_queues(usbhid);
- spin_unlock_irq(&usbhid->lock);
-
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
+ int status = 0;
+ hid_restart_io(hid);
if (driver_suspended && hid->driver && hid->driver->resume)
status = hid->driver->resume(hid);
return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
static int hid_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata (intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- if (!test_bit(HID_STARTED, &usbhid->iofl))
- return 0;
-
status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 01a4f05c1642..3c0f47ac8e53 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2493,6 +2493,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
}
/*
+ * Hack for the Bamboo One:
+ * the device presents a PAD/Touch interface like most Bamboos and even
+ * sends ghost PAD data on it. However, later, we must disable this
+ * ghost interface, and we cannot detect it unless we set it here
+ * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
+ */
+ if (features->type == BAMBOO_PEN &&
+ features->pktlen == WACOM_PKGLEN_BBTOUCH3)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
+ /*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
* (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 36544c4f653c..303d0c9df907 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
int max1111_read_channel(int channel)
{
+ if (!the_max1111 || !the_max1111->spi)
+ return -ENODEV;
+
return max1111_read(&the_max1111->spi->dev, channel);
}
EXPORT_SYMBOL(max1111_read_channel);
@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
{
struct max1111_data *data = spi_get_drvdata(spi);
+#ifdef CONFIG_SHARPSL_PM
+ the_max1111 = NULL;
+#endif
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index a80f07b88b42..3a11b061e5b0 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -2647,11 +2647,11 @@ static ssize_t tpdm_store_dsb_edge_ctrl_mask(struct device *dev,
size_t size)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- unsigned start, end, val;
+ unsigned long start, end, val;
uint32_t set;
int i, bit, reg;
- if (sscanf(buf, "%ui %ui %ui", &start, &end, &val) != 3)
+ if (sscanf(buf, "%lx %lx %lx", &start, &end, &val) != 3)
return -EINVAL;
if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
(start >= TPDM_DSB_MAX_LINES) || (end >= TPDM_DSB_MAX_LINES))
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cd4510a63375..146eed70bdf4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -65,7 +65,7 @@
#include <asm/mwait.h>
#include <asm/msr.h>
-#define INTEL_IDLE_VERSION "0.4"
+#define INTEL_IDLE_VERSION "0.4.1"
#define PREFIX "intel_idle: "
static struct cpuidle_driver intel_idle_driver = {
@@ -994,36 +994,92 @@ static void intel_idle_cpuidle_devices_uninit(void)
}
/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
+ * ivt_idle_state_table_update(void)
*
- * Currently used to access tuned IVT multi-socket targets
+ * Tune IVT multi-socket targets
* Assumption: num_sockets == (max_package_num + 1)
*/
-void intel_idle_state_table_update(void)
+static void ivt_idle_state_table_update(void)
{
/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
- if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
- int cpu, package_num, num_sockets = 1;
-
- for_each_online_cpu(cpu) {
- package_num = topology_physical_package_id(cpu);
- if (package_num + 1 > num_sockets) {
- num_sockets = package_num + 1;
-
- if (num_sockets > 4) {
- cpuidle_state_table = ivt_cstates_8s;
- return;
- }
+ int cpu, package_num, num_sockets = 1;
+
+ for_each_online_cpu(cpu) {
+ package_num = topology_physical_package_id(cpu);
+ if (package_num + 1 > num_sockets) {
+ num_sockets = package_num + 1;
+
+ if (num_sockets > 4) {
+ cpuidle_state_table = ivt_cstates_8s;
+ return;
}
}
+ }
+
+ if (num_sockets > 2)
+ cpuidle_state_table = ivt_cstates_4s;
+
+ /* else, 1 and 2 socket systems use default ivt_cstates */
+}
+/*
+ * sklh_idle_state_table_update(void)
+ *
+ * On SKL-H (model 0x5e) disable C8 and C9 if:
+ * C10 is enabled and SGX disabled
+ */
+static void sklh_idle_state_table_update(void)
+{
+ unsigned long long msr;
+ unsigned int eax, ebx, ecx, edx;
+
+
+ /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
+ if (max_cstate <= 7)
+ return;
+
+ /* if PC10 not present in CPUID.MWAIT.EDX */
+ if ((mwait_substates & (0xF << 28)) == 0)
+ return;
+
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
+
+ /* PC10 is not enabled in PKG C-state limit */
+ if ((msr & 0xF) != 8)
+ return;
+
+ ecx = 0;
+ cpuid(7, &eax, &ebx, &ecx, &edx);
+
+ /* if SGX is present */
+ if (ebx & (1 << 2)) {
- if (num_sockets > 2)
- cpuidle_state_table = ivt_cstates_4s;
- /* else, 1 and 2 socket systems use default ivt_cstates */
+ rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+
+ /* if SGX is enabled */
+ if (msr & (1 << 18))
+ return;
+ }
+
+ skl_cstates[5].disabled = 1; /* C8-SKL */
+ skl_cstates[6].disabled = 1; /* C9-SKL */
+}
+/*
+ * intel_idle_state_table_update()
+ *
+ * Update the default state_table for this CPU-id
+ */
+
+static void intel_idle_state_table_update(void)
+{
+ switch (boot_cpu_data.x86_model) {
+
+ case 0x3e: /* IVT */
+ ivt_idle_state_table_update();
+ break;
+ case 0x5e: /* SKL-H */
+ sklh_idle_state_table_update();
+ break;
}
- return;
}
/*
@@ -1063,6 +1119,14 @@ static int __init intel_idle_cpuidle_driver_init(void)
if (num_substates == 0)
continue;
+ /* if state marked as disabled, skip it */
+ if (cpuidle_state_table[cstate].disabled != 0) {
+ pr_debug(PREFIX "state %s is disabled",
+ cpuidle_state_table[cstate].name);
+ continue;
+ }
+
+
if (((mwait_cstate + 1) > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 2d33f1e821db..291c61a41c9a 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
{
int ret;
int axis = chan->scan_index;
- unsigned int raw_val;
+ __le16 raw_val;
mutex_lock(&data->mutex);
ret = bmc150_accel_set_power_state(data, true);
@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
}
ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
- &raw_val, 2);
+ &raw_val, sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
}
- *val = sign_extend32(raw_val >> chan->scan_type.shift,
+ *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
chan->scan_type.realbits - 1);
ret = bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
.realbits = (bits), \
.storagebits = 16, \
.shift = 16 - (bits), \
+ .endianness = IIO_LE, \
}, \
.event_spec = &bmc150_accel_event, \
.num_event_specs = 1 \
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index dea0448c365c..ae2df4f7ff0d 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -149,12 +149,12 @@
#define FG_ADC_RR_TEMP_FS_VOLTAGE_NUM 5000000
#define FG_ADC_RR_TEMP_FS_VOLTAGE_DEN 3
#define FG_ADC_RR_DIE_TEMP_OFFSET 600000
-#define FG_ADC_RR_DIE_TEMP_SLOPE 2000
-#define FG_ADC_RR_DIE_TEMP_OFFSET_DEGC 25
+#define FG_ADC_RR_DIE_TEMP_SLOPE 2
+#define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC 25000
#define FG_ADC_RR_CHG_TEMP_OFFSET 1288000
-#define FG_ADC_RR_CHG_TEMP_SLOPE 4000
-#define FG_ADC_RR_CHG_TEMP_OFFSET_DEGC 27
+#define FG_ADC_RR_CHG_TEMP_SLOPE 4
+#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 27000
#define FG_ADC_RR_VOLT_INPUT_FACTOR 8
#define FG_ADC_RR_CURR_INPUT_FACTOR 2
@@ -162,6 +162,9 @@
#define FG_ADC_KELVINMIL_CELSIUSMIL 273150
#define FG_ADC_RR_GPIO_FS_RANGE 5000
+#define FG_RR_ADC_COHERENT_CHECK_RETRY 5
+#define FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN 16
+#define FG_RR_ADC_STS_CHANNEL_READING_MASK 0x3
/*
* The channel number is not a physical index in hardware,
@@ -171,21 +174,20 @@
* the RR ADC before RR_ADC_MAX.
*/
enum rradc_channel_id {
- RR_ADC_BATT_ID_5 = 0,
- RR_ADC_BATT_ID_15,
- RR_ADC_BATT_ID_150,
- RR_ADC_BATT_ID,
+ RR_ADC_BATT_ID = 0,
RR_ADC_BATT_THERM,
RR_ADC_SKIN_TEMP,
- RR_ADC_USBIN_V,
RR_ADC_USBIN_I,
- RR_ADC_DCIN_V,
+ RR_ADC_USBIN_V,
RR_ADC_DCIN_I,
+ RR_ADC_DCIN_V,
RR_ADC_DIE_TEMP,
RR_ADC_CHG_TEMP,
RR_ADC_GPIO,
- RR_ADC_ATEST,
- RR_ADC_TM_ADC,
+ RR_ADC_CHG_HOT_TEMP,
+ RR_ADC_CHG_TOO_HOT_TEMP,
+ RR_ADC_SKIN_HOT_TEMP,
+ RR_ADC_SKIN_TOO_HOT_TEMP,
RR_ADC_MAX
};
@@ -205,51 +207,75 @@ struct rradc_channels {
long info_mask;
u8 lsb;
u8 msb;
+ u8 sts;
int (*scale)(struct rradc_chip *chip, struct rradc_chan_prop *prop,
u16 adc_code, int *result);
};
struct rradc_chan_prop {
enum rradc_channel_id channel;
+ uint32_t channel_data;
int (*scale)(struct rradc_chip *chip, struct rradc_chan_prop *prop,
u16 adc_code, int *result);
};
static int rradc_read(struct rradc_chip *rr_adc, u16 offset, u8 *data, int len)
{
- int rc = 0;
+ int rc = 0, retry_cnt = 0, i = 0;
+ u8 data_check[FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN];
+ bool coherent_err = false;
+
+ if (len > FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN) {
+ pr_err("Increase the buffer length\n");
+ return -EINVAL;
+ }
+
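+ /*
+ * Read the block twice and compare the two snapshots; if they differ,
+ * the ADC updated the registers mid-read, so retry up to
+ * FG_RR_ADC_COHERENT_CHECK_RETRY times.
+ */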
+ while (retry_cnt < FG_RR_ADC_COHERENT_CHECK_RETRY) {
+ rc = regmap_bulk_read(rr_adc->regmap, rr_adc->base + offset,
+ data, len);
+ if (rc < 0) {
+ pr_err("rr_adc reg 0x%x failed :%d\n", offset, rc);
+ return rc;
+ }
+
+ rc = regmap_bulk_read(rr_adc->regmap, rr_adc->base + offset,
+ data_check, len);
+ if (rc < 0) {
+ pr_err("rr_adc reg 0x%x failed :%d\n", offset, rc);
+ return rc;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (data[i] != data_check[i])
+ coherent_err = true;
+ }
+
+ if (coherent_err) {
+ retry_cnt++;
+ coherent_err = false;
+ pr_debug("retry_cnt:%d\n", retry_cnt);
+ } else {
+ break;
+ }
+ }
- rc = regmap_bulk_read(rr_adc->regmap, rr_adc->base + offset, data, len);
- if (rc < 0)
- pr_err("rr adc read reg %d failed with %d\n", offset, rc);
+ if (retry_cnt == FG_RR_ADC_COHERENT_CHECK_RETRY)
+ pr_err("Retry exceeded for coherency check\n");
return rc;
}
static int rradc_post_process_batt_id(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
- int *result_mohms)
+ int *result_ohms)
{
uint32_t current_value;
int64_t r_id;
- switch (prop->channel) {
- case RR_ADC_BATT_ID_5:
- current_value = FG_ADC_RR_BATT_ID_5_MA;
- break;
- case RR_ADC_BATT_ID_15:
- current_value = FG_ADC_RR_BATT_ID_15_MA;
- break;
- case RR_ADC_BATT_ID_150:
- current_value = FG_ADC_RR_BATT_ID_150_MA;
- break;
- default:
- return -EINVAL;
- }
-
+ current_value = prop->channel_data;
r_id = ((int64_t)adc_code * FG_ADC_RR_FS_VOLTAGE_MV);
r_id = div64_s64(r_id, (FG_MAX_ADC_READINGS * current_value));
- *result_mohms = (r_id * FG_ADC_SCALE_MILLI_FACTOR);
+ *result_ohms = (r_id * FG_ADC_SCALE_MILLI_FACTOR);
return 0;
}
@@ -270,30 +296,30 @@ static int rradc_post_process_therm(struct rradc_chip *chip,
static int rradc_post_process_volt(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
- int *result_mv)
+ int *result_uv)
{
- int64_t mv = 0;
+ int64_t uv = 0;
/* 8x input attenuation; 2.5V ADC full scale */
- mv = ((int64_t)adc_code * FG_ADC_RR_VOLT_INPUT_FACTOR);
- mv *= FG_ADC_RR_FS_VOLTAGE_MV;
- mv = div64_s64(mv, FG_MAX_ADC_READINGS);
- *result_mv = mv;
+ uv = ((int64_t)adc_code * FG_ADC_RR_VOLT_INPUT_FACTOR);
+ uv *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
+ uv = div64_s64(uv, FG_MAX_ADC_READINGS);
+ *result_uv = uv;
return 0;
}
static int rradc_post_process_curr(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
- int *result_ma)
+ int *result_ua)
{
- int64_t ma = 0;
+ int64_t ua = 0;
/* 0.5 V/A; 2.5V ADC full scale */
- ma = ((int64_t)adc_code * FG_ADC_RR_CURR_INPUT_FACTOR);
- ma *= FG_ADC_RR_FS_VOLTAGE_MV;
- ma = div64_s64(ma, FG_MAX_ADC_READINGS);
- *result_ma = ma;
+ ua = ((int64_t)adc_code * FG_ADC_RR_CURR_INPUT_FACTOR);
+ ua *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
+ ua = div64_s64(ua, FG_MAX_ADC_READINGS);
+ *result_ua = ua;
return 0;
}
@@ -309,8 +335,41 @@ static int rradc_post_process_die_temp(struct rradc_chip *chip,
FG_MAX_ADC_READINGS));
temp -= FG_ADC_RR_DIE_TEMP_OFFSET;
temp = div64_s64(temp, FG_ADC_RR_DIE_TEMP_SLOPE);
- temp += FG_ADC_RR_DIE_TEMP_OFFSET_DEGC;
- *result_millidegc = (temp * FG_ADC_SCALE_MILLI_FACTOR);
+ temp += FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = temp;
+
+ return 0;
+}
+
+static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
+ struct rradc_chan_prop *prop, u16 adc_code,
+ int *result_millidegc)
+{
+ int64_t temp = 0;
+
+ temp = (int64_t) adc_code * 4;
+ temp = temp * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
+ temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+ FG_MAX_ADC_READINGS));
+ temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
+ temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
+ temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = temp;
+
+ return 0;
+}
+
+static int rradc_post_process_skin_temp_hot(struct rradc_chip *chip,
+ struct rradc_chan_prop *prop, u16 adc_code,
+ int *result_millidegc)
+{
+ int64_t temp = 0;
+
+ temp = (int64_t) adc_code;
+ temp = div64_s64(temp, 2);
+ temp = temp - 30;
+ temp *= FG_ADC_SCALE_MILLI_FACTOR;
+ *result_millidegc = temp;
return 0;
}
@@ -326,8 +385,8 @@ static int rradc_post_process_chg_temp(struct rradc_chip *chip,
FG_MAX_ADC_READINGS));
temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
- temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_DEGC;
- *result_millidegc = (temp * FG_ADC_SCALE_MILLI_FACTOR);
+ temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = temp;
return 0;
}
@@ -346,63 +405,80 @@ static int rradc_post_process_gpio(struct rradc_chip *chip,
return 0;
}
-#define RR_ADC_CHAN(_dname, _type, _mask, _scale, _lsb, _msb) \
+#define RR_ADC_CHAN(_dname, _type, _mask, _scale, _lsb, _msb, _sts) \
{ \
- .datasheet_name = __stringify(_dname), \
+ .datasheet_name = (_dname), \
.type = _type, \
.info_mask = _mask, \
.scale = _scale, \
.lsb = _lsb, \
.msb = _msb, \
+ .sts = _sts, \
}, \
-#define RR_ADC_CHAN_TEMP(_dname, _scale, _lsb, _msb) \
+#define RR_ADC_CHAN_TEMP(_dname, _scale, _lsb, _msb, _sts) \
RR_ADC_CHAN(_dname, IIO_TEMP, \
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED), \
- _scale, _lsb, _msb) \
+ _scale, _lsb, _msb, _sts) \
-#define RR_ADC_CHAN_VOLT(_dname, _scale, _lsb, _msb) \
+#define RR_ADC_CHAN_VOLT(_dname, _scale, _lsb, _msb, _sts) \
RR_ADC_CHAN(_dname, IIO_VOLTAGE, \
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
- _scale, _lsb, _msb) \
+ _scale, _lsb, _msb, _sts) \
-#define RR_ADC_CHAN_CURRENT(_dname, _scale, _lsb, _msb) \
+#define RR_ADC_CHAN_CURRENT(_dname, _scale, _lsb, _msb, _sts) \
RR_ADC_CHAN(_dname, IIO_CURRENT, \
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
- _scale, _lsb, _msb) \
+ _scale, _lsb, _msb, _sts) \
-#define RR_ADC_CHAN_RESISTANCE(_dname, _scale, _lsb, _msb) \
+#define RR_ADC_CHAN_RESISTANCE(_dname, _scale, _lsb, _msb, _sts) \
RR_ADC_CHAN(_dname, IIO_RESISTANCE, \
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
- _scale, _lsb, _msb) \
+ _scale, _lsb, _msb, _sts) \
static const struct rradc_channels rradc_chans[] = {
- RR_ADC_CHAN_RESISTANCE("batt_id_5", rradc_post_process_batt_id,
- FG_ADC_RR_BATT_ID_5_LSB, FG_ADC_RR_BATT_ID_5_MSB)
- RR_ADC_CHAN_RESISTANCE("batt_id_15", rradc_post_process_batt_id,
- FG_ADC_RR_BATT_ID_15_LSB, FG_ADC_RR_BATT_ID_15_MSB)
- RR_ADC_CHAN_RESISTANCE("batt_id_150", rradc_post_process_batt_id,
- FG_ADC_RR_BATT_ID_150_LSB, FG_ADC_RR_BATT_ID_150_MSB)
RR_ADC_CHAN_RESISTANCE("batt_id", rradc_post_process_batt_id,
- FG_ADC_RR_BATT_ID_5_LSB, FG_ADC_RR_BATT_ID_5_MSB)
+ FG_ADC_RR_BATT_ID_5_LSB, FG_ADC_RR_BATT_ID_5_MSB,
+ FG_ADC_RR_BATT_ID_STS)
RR_ADC_CHAN_TEMP("batt_therm", &rradc_post_process_therm,
- FG_ADC_RR_BATT_THERM_LSB, FG_ADC_RR_BATT_THERM_MSB)
+ FG_ADC_RR_BATT_THERM_LSB, FG_ADC_RR_BATT_THERM_MSB,
+ FG_ADC_RR_BATT_THERM_STS)
RR_ADC_CHAN_TEMP("skin_temp", &rradc_post_process_therm,
- FG_ADC_RR_SKIN_TEMP_LSB, FG_ADC_RR_SKIN_TEMP_MSB)
+ FG_ADC_RR_SKIN_TEMP_LSB, FG_ADC_RR_SKIN_TEMP_MSB,
+ FG_ADC_RR_AUX_THERM_STS)
RR_ADC_CHAN_CURRENT("usbin_i", &rradc_post_process_curr,
- FG_ADC_RR_USB_IN_V_LSB, FG_ADC_RR_USB_IN_V_MSB)
+ FG_ADC_RR_USB_IN_I_LSB, FG_ADC_RR_USB_IN_I_MSB,
+ FG_ADC_RR_USB_IN_I_STS)
RR_ADC_CHAN_VOLT("usbin_v", &rradc_post_process_volt,
- FG_ADC_RR_USB_IN_I_LSB, FG_ADC_RR_USB_IN_I_MSB)
+ FG_ADC_RR_USB_IN_V_LSB, FG_ADC_RR_USB_IN_V_MSB,
+ FG_ADC_RR_USB_IN_V_STS)
RR_ADC_CHAN_CURRENT("dcin_i", &rradc_post_process_curr,
- FG_ADC_RR_DC_IN_V_LSB, FG_ADC_RR_DC_IN_V_MSB)
+ FG_ADC_RR_DC_IN_I_LSB, FG_ADC_RR_DC_IN_I_MSB,
+ FG_ADC_RR_DC_IN_I_STS)
RR_ADC_CHAN_VOLT("dcin_v", &rradc_post_process_volt,
- FG_ADC_RR_DC_IN_I_LSB, FG_ADC_RR_DC_IN_I_MSB)
+ FG_ADC_RR_DC_IN_V_LSB, FG_ADC_RR_DC_IN_V_MSB,
+ FG_ADC_RR_DC_IN_V_STS)
RR_ADC_CHAN_TEMP("die_temp", &rradc_post_process_die_temp,
- FG_ADC_RR_PMI_DIE_TEMP_LSB, FG_ADC_RR_PMI_DIE_TEMP_MSB)
+ FG_ADC_RR_PMI_DIE_TEMP_LSB, FG_ADC_RR_PMI_DIE_TEMP_MSB,
+ FG_ADC_RR_PMI_DIE_TEMP_STS)
RR_ADC_CHAN_TEMP("chg_temp", &rradc_post_process_chg_temp,
- FG_ADC_RR_CHARGER_TEMP_LSB, FG_ADC_RR_CHARGER_TEMP_MSB)
+ FG_ADC_RR_CHARGER_TEMP_LSB, FG_ADC_RR_CHARGER_TEMP_MSB,
+ FG_ADC_RR_CHARGER_TEMP_STS)
RR_ADC_CHAN_VOLT("gpio", &rradc_post_process_gpio,
- FG_ADC_RR_GPIO_LSB, FG_ADC_RR_GPIO_MSB)
+ FG_ADC_RR_GPIO_LSB, FG_ADC_RR_GPIO_MSB,
+ FG_ADC_RR_GPIO_STS)
+ RR_ADC_CHAN_TEMP("chg_temp_hot", &rradc_post_process_chg_temp_hot,
+ FG_ADC_RR_CHARGER_HOT, FG_ADC_RR_CHARGER_HOT,
+ FG_ADC_RR_CHARGER_TEMP_STS)
+ RR_ADC_CHAN_TEMP("chg_temp_too_hot", &rradc_post_process_chg_temp_hot,
+ FG_ADC_RR_CHARGER_TOO_HOT, FG_ADC_RR_CHARGER_TOO_HOT,
+ FG_ADC_RR_CHARGER_TEMP_STS)
+ RR_ADC_CHAN_TEMP("skin_temp_hot", &rradc_post_process_skin_temp_hot,
+ FG_ADC_RR_SKIN_HOT, FG_ADC_RR_SKIN_HOT,
+ FG_ADC_RR_AUX_THERM_STS)
+ RR_ADC_CHAN_TEMP("skin_temp_too_hot", &rradc_post_process_skin_temp_hot,
+ FG_ADC_RR_SKIN_TOO_HOT, FG_ADC_RR_SKIN_TOO_HOT,
+ FG_ADC_RR_AUX_THERM_STS)
};
static int rradc_do_conversion(struct rradc_chip *chip,
@@ -411,15 +487,44 @@ static int rradc_do_conversion(struct rradc_chip *chip,
int rc = 0, bytes_to_read = 0;
u8 buf[6];
u16 offset = 0, batt_id_5 = 0, batt_id_15 = 0, batt_id_150 = 0;
+ u16 status = 0;
mutex_lock(&chip->lock);
+ if ((prop->channel != RR_ADC_BATT_ID) &&
+ (prop->channel != RR_ADC_CHG_HOT_TEMP) &&
+ (prop->channel != RR_ADC_CHG_TOO_HOT_TEMP) &&
+ (prop->channel != RR_ADC_SKIN_HOT_TEMP) &&
+ (prop->channel != RR_ADC_SKIN_TOO_HOT_TEMP)) {
+ /* BATT_ID STS bit does not get set initially */
+ status = rradc_chans[prop->channel].sts;
+ rc = rradc_read(chip, status, buf, 1);
+ if (rc < 0) {
+ pr_err("status read failed:%d\n", rc);
+ goto fail;
+ }
+
+ buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
+ if (buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) {
+ pr_warn("%s is not ready; nothing to read\n",
+ rradc_chans[prop->channel].datasheet_name);
+ rc = -ENODATA;
+ goto fail;
+ }
+ }
+
offset = rradc_chans[prop->channel].lsb;
if (prop->channel == RR_ADC_BATT_ID)
bytes_to_read = 6;
+ else if ((prop->channel == RR_ADC_CHG_HOT_TEMP) ||
+ (prop->channel == RR_ADC_CHG_TOO_HOT_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_HOT_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_TOO_HOT_TEMP))
+ bytes_to_read = 1;
else
bytes_to_read = 2;
+ buf[0] = 0;
rc = rradc_read(chip, offset, buf, bytes_to_read);
if (rc) {
pr_err("read data failed\n");
@@ -427,19 +532,33 @@ static int rradc_do_conversion(struct rradc_chip *chip,
}
if (prop->channel == RR_ADC_BATT_ID) {
- batt_id_150 = (buf[4] << 8) | buf[5];
- batt_id_15 = (buf[2] << 8) | buf[3];
- batt_id_5 = (buf[0] << 8) | buf[1];
+ batt_id_150 = (buf[5] << 8) | buf[4];
+ batt_id_15 = (buf[3] << 8) | buf[2];
+ batt_id_5 = (buf[1] << 8) | buf[0];
+ if ((!batt_id_150) && (!batt_id_15) && (!batt_id_5)) {
+ pr_err("Invalid batt_id values with all zeros\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
if (batt_id_150 <= FG_ADC_RR_BATT_ID_RANGE) {
pr_debug("Batt_id_150 is chosen\n");
*data = batt_id_150;
+ prop->channel_data = FG_ADC_RR_BATT_ID_150_MA;
} else if (batt_id_15 <= FG_ADC_RR_BATT_ID_RANGE) {
pr_debug("Batt_id_15 is chosen\n");
*data = batt_id_15;
+ prop->channel_data = FG_ADC_RR_BATT_ID_15_MA;
} else {
pr_debug("Batt_id_5 is chosen\n");
*data = batt_id_5;
+ prop->channel_data = FG_ADC_RR_BATT_ID_5_MA;
}
+ } else if ((prop->channel == RR_ADC_CHG_HOT_TEMP) ||
+ (prop->channel == RR_ADC_CHG_TOO_HOT_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_HOT_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_TOO_HOT_TEMP)) {
+ *data = buf[0];
} else {
*data = (buf[1] << 8) | buf[0];
}
@@ -458,6 +577,11 @@ static int rradc_read_raw(struct iio_dev *indio_dev,
u16 adc_code;
int rc = 0;
+ if (chan->address >= RR_ADC_MAX) {
+ pr_err("Invalid channel index:%ld\n", chan->address);
+ return -EINVAL;
+ }
+
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
prop = &chip->chan_props[chan->address];
@@ -477,10 +601,6 @@ static int rradc_read_raw(struct iio_dev *indio_dev,
*val = (int) adc_code;
return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = 1000;
- return IIO_VAL_INT_PLUS_MICRO;
default:
rc = -EINVAL;
break;
@@ -498,15 +618,11 @@ static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node)
{
const struct rradc_channels *rradc_chan;
struct iio_chan_spec *iio_chan;
- struct device_node *child;
- unsigned int index = 0, chan, base;
+ unsigned int i = 0, base;
int rc = 0;
struct rradc_chan_prop prop;
- chip->nchannels = of_get_available_child_count(node);
- if (!chip->nchannels || (chip->nchannels >= RR_ADC_MAX))
- return -EINVAL;
-
+ chip->nchannels = RR_ADC_MAX;
chip->iio_chans = devm_kcalloc(chip->dev, chip->nchannels,
sizeof(*chip->iio_chans), GFP_KERNEL);
if (!chip->iio_chans)
@@ -529,30 +645,21 @@ static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node)
chip->base = base;
iio_chan = chip->iio_chans;
- for_each_available_child_of_node(node, child) {
- rc = of_property_read_u32(child, "channel", &chan);
- if (rc) {
- dev_err(chip->dev, "invalid channel number %d\n", chan);
- return rc;
- }
-
- if (chan > RR_ADC_MAX || chan < RR_ADC_BATT_ID_5) {
- dev_err(chip->dev, "invalid channel number %d\n", chan);
- return -EINVAL;
- }
-
- prop.channel = chan;
- prop.scale = rradc_chans[chan].scale;
- chip->chan_props[index] = prop;
+ for (i = 0; i < RR_ADC_MAX; i++) {
+ prop.channel = i;
+ prop.scale = rradc_chans[i].scale;
+ /* Private channel data used for selecting batt_id */
+ prop.channel_data = 0;
+ chip->chan_props[i] = prop;
- rradc_chan = &rradc_chans[chan];
+ rradc_chan = &rradc_chans[i];
iio_chan->channel = prop.channel;
iio_chan->datasheet_name = rradc_chan->datasheet_name;
+ iio_chan->extend_name = rradc_chan->datasheet_name;
iio_chan->info_mask_separate = rradc_chan->info_mask;
iio_chan->type = rradc_chan->type;
- iio_chan->indexed = 1;
- iio_chan->address = index++;
+ iio_chan->address = i;
iio_chan++;
}
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 02ff789852a0..acb3b303d800 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
{
int ret;
- unsigned int raw_val;
+ __le16 raw_val;
mutex_lock(&data->mutex);
ret = bmg160_set_power_state(data, true);
@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
}
ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
- 2);
+ sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmg160_set_power_state(data, false);
@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
return ret;
}
- *val = sign_extend32(raw_val, 15);
+ *val = sign_extend32(le16_to_cpu(raw_val), 15);
ret = bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
if (ret < 0)
@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
.sign = 's', \
.realbits = 16, \
.storagebits = 16, \
+ .endianness = IIO_LE, \
}, \
.event_spec = &bmg160_event, \
.num_event_specs = 1 \
@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
mutex_unlock(&data->mutex);
goto err;
}
- data->buffer[i++] = ret;
+ data->buffer[i++] = val;
}
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 06a4d9c35581..9daca4681922 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
{
}
+#define ST_MAGN_TRIGGER_SET_STATE NULL
#endif /* CONFIG_IIO_BUFFER */
#endif /* ST_MAGN_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index f357ca67a41c..87799de90a1d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -456,7 +456,10 @@ out_locked:
return status;
}
-static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+/*
+ * Caller must hold 'priv->lock'
+ */
+static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_sa_multicast *multicast;
@@ -466,6 +469,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
ib_sa_comp_mask comp_mask;
int ret = 0;
+ if (!priv->broadcast ||
+ !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return -EINVAL;
+
ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
rec.mgid = mcast->mcmember.mgid;
@@ -525,20 +532,23 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
rec.join_state = 4;
#endif
}
+ spin_unlock_irq(&priv->lock);
multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
+ spin_lock_irq(&priv->lock);
if (IS_ERR(multicast)) {
ret = PTR_ERR(multicast);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
- spin_lock_irq(&priv->lock);
/* Requeue this join task with a backoff delay */
__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
spin_unlock_irq(&priv->lock);
complete(&mcast->done);
+ spin_lock_irq(&priv->lock);
}
+ return 0;
}
void ipoib_mcast_join_task(struct work_struct *work)
@@ -620,9 +630,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
/* Found the next unjoined group */
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- spin_unlock_irq(&priv->lock);
- ipoib_mcast_join(dev, mcast);
- spin_lock_irq(&priv->lock);
+ if (ipoib_mcast_join(dev, mcast)) {
+ spin_unlock_irq(&priv->lock);
+ return;
+ }
} else if (!delay_until ||
time_before(mcast->delay_until, delay_until))
delay_until = mcast->delay_until;
@@ -641,10 +652,9 @@ out:
if (mcast) {
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ ipoib_mcast_join(dev, mcast);
}
spin_unlock_irq(&priv->lock);
- if (mcast)
- ipoib_mcast_join(dev, mcast);
}
int ipoib_mcast_start_thread(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 8a51c3b5d657..b0edb66a291b 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -66,6 +66,7 @@ isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static void isert_release_work(struct work_struct *work);
+static void isert_wait4flush(struct isert_conn *isert_conn);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
@@ -815,12 +816,31 @@ isert_put_conn(struct isert_conn *isert_conn)
kref_put(&isert_conn->kref, isert_release_kref);
}
+static void
+isert_handle_unbound_conn(struct isert_conn *isert_conn)
+{
+ struct isert_np *isert_np = isert_conn->cm_id->context;
+
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_conn->node)) {
+ /*
+ * This means iSCSI doesn't know about this connection,
+ * so schedule the cleanup ourselves
+ */
+ list_del_init(&isert_conn->node);
+ isert_put_conn(isert_conn);
+ complete(&isert_conn->wait);
+ queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ mutex_unlock(&isert_np->mutex);
+}
+
/**
* isert_conn_terminate() - Initiate connection termination
* @isert_conn: isert connection struct
*
* Notes:
- * In case the connection state is FULL_FEATURE, move state
+ * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
@@ -832,23 +852,19 @@ isert_conn_terminate(struct isert_conn *isert_conn)
{
int err;
- switch (isert_conn->state) {
- case ISER_CONN_TERMINATING:
- break;
- case ISER_CONN_UP:
- case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
- isert_info("Terminating conn %p state %d\n",
- isert_conn, isert_conn->state);
- isert_conn->state = ISER_CONN_TERMINATING;
- err = rdma_disconnect(isert_conn->cm_id);
- if (err)
- isert_warn("Failed rdma_disconnect isert_conn %p\n",
- isert_conn);
- break;
- default:
- isert_warn("conn %p teminating in state %d\n",
- isert_conn, isert_conn->state);
- }
+ if (isert_conn->state >= ISER_CONN_TERMINATING)
+ return;
+
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
+ isert_conn->state = ISER_CONN_TERMINATING;
+ err = rdma_disconnect(isert_conn->cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
+
+ isert_info("conn %p completing wait\n", isert_conn);
+ complete(&isert_conn->wait);
}
static int
@@ -882,35 +898,27 @@ static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
enum rdma_cm_event_type event)
{
- struct isert_np *isert_np = cma_id->context;
- struct isert_conn *isert_conn;
- bool terminating = false;
-
- if (isert_np->cm_id == cma_id)
- return isert_np_cma_handler(cma_id->context, event);
-
- isert_conn = cma_id->qp->qp_context;
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
mutex_lock(&isert_conn->mutex);
- terminating = (isert_conn->state == ISER_CONN_TERMINATING);
- isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->mutex);
-
- isert_info("conn %p completing wait\n", isert_conn);
- complete(&isert_conn->wait);
-
- if (terminating)
- goto out;
-
- mutex_lock(&isert_np->mutex);
- if (!list_empty(&isert_conn->node)) {
- list_del_init(&isert_conn->node);
- isert_put_conn(isert_conn);
- queue_work(isert_release_wq, &isert_conn->release_work);
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ isert_conn_terminate(isert_conn);
+ isert_wait4flush(isert_conn);
+ isert_handle_unbound_conn(isert_conn);
+ break;
+ case ISER_CONN_BOUND:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ break;
+ default:
+ isert_warn("conn %p terminating in state %d\n",
+ isert_conn, isert_conn->state);
}
- mutex_unlock(&isert_np->mutex);
+ mutex_unlock(&isert_conn->mutex);
-out:
return 0;
}
@@ -929,12 +937,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
+ struct isert_np *isert_np = cma_id->context;
int ret = 0;
isert_info("%s (%d): status %d id %p np %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id, cma_id->context);
+ if (isert_np->cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event->event);
+
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
@@ -980,13 +992,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
- isert_conn->post_recv_buf_count += count;
ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
&rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count -= count;
- }
return ret;
}
@@ -1002,12 +1011,9 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
rx_wr.num_sge = 1;
rx_wr.next = NULL;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
@@ -1120,12 +1126,9 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
@@ -1620,7 +1623,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
DMA_FROM_DEVICE);
- isert_conn->post_recv_buf_count--;
}
static int
@@ -2035,7 +2037,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
void *start = isert_conn->rx_descs;
int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
- if (wr_id >= start && wr_id < start + len)
+ if ((wr_id >= start && wr_id < start + len) ||
+ (wr_id == isert_conn->login_req_buf))
return false;
return true;
@@ -2059,10 +2062,6 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
isert_unmap_tx_desc(desc, ib_dev);
else
isert_completion_put(desc, isert_cmd, ib_dev, true);
- } else {
- isert_conn->post_recv_buf_count--;
- if (!isert_conn->post_recv_buf_count)
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
}
@@ -3193,6 +3192,7 @@ accept_wait:
conn->context = isert_conn;
isert_conn->conn = conn;
+ isert_conn->state = ISER_CONN_BOUND;
isert_set_conn_info(np, conn, isert_conn);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 3d7fbc47c343..1874d21daee0 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -50,6 +50,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
+ ISER_CONN_BOUND,
ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
@@ -144,7 +145,6 @@ struct isert_device;
struct isert_conn {
enum iser_conn_state state;
- int post_recv_buf_count;
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2e2fe818ca9f..eaabf3125846 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1737,47 +1737,6 @@ send_sense:
return -1;
}
-/**
- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
- * @ch: RDMA channel of the task management request.
- * @fn: Task management function to perform.
- * @req_tag: Tag of the SRP task management request.
- * @mgmt_ioctx: I/O context of the task management request.
- *
- * Returns zero if the target core will process the task management
- * request asynchronously.
- *
- * Note: It is assumed that the initiator serializes tag-based task management
- * requests.
- */
-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
-{
- struct srpt_device *sdev;
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *target;
- int ret, i;
-
- ret = -EINVAL;
- ch = ioctx->ch;
- BUG_ON(!ch);
- BUG_ON(!ch->sport);
- sdev = ch->sport->sdev;
- BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- for (i = 0; i < ch->rq_size; ++i) {
- target = ch->ioctx_ring[i];
- if (target->cmd.se_lun == ioctx->cmd.se_lun &&
- target->cmd.tag == tag &&
- srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
- ret = 0;
- /* now let the target core abort &target->cmd; */
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
- return ret;
-}
-
static int srp_tmr_to_tcm(int fn)
{
switch (fn) {
@@ -1812,7 +1771,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
struct se_cmd *cmd;
struct se_session *sess = ch->sess;
uint64_t unpacked_lun;
- uint32_t tag = 0;
int tcm_tmr;
int rc;
@@ -1828,25 +1786,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
- if (tcm_tmr < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
- goto fail;
- }
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
sizeof(srp_tsk->lun));
-
- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
- rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
- if (rc < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_DOES_NOT_EXIST;
- goto fail;
- }
- tag = srp_tsk->task_tag;
- }
rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
- srp_tsk, tcm_tmr, GFP_KERNEL, tag,
+ srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
TARGET_SCF_ACK_KREF);
if (rc != 0) {
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index cfd58e87da26..1c5914cae853 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
ar2->udev = udev;
+ /* Sanity check, first interface must have an endpoint */
+ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+ dev_err(&interface->dev,
+ "%s(): interface 0 must have an endpoint\n", __func__);
+ r = -ENODEV;
+ goto fail1;
+ }
ar2->intf[0] = interface;
ar2->ep[0] = &alt->endpoint[0].desc;
+ /* Sanity check, the device must have two interfaces */
ar2->intf[1] = usb_ifnum_to_if(udev, 1);
+ if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
+ dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
+ __func__, udev->actconfig->desc.bNumInterfaces);
+ r = -ENODEV;
+ goto fail1;
+ }
+
r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
if (r)
goto fail1;
+
+ /* Sanity check, second interface must have an endpoint */
alt = ar2->intf[1]->cur_altsetting;
+ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+ dev_err(&interface->dev,
+ "%s(): interface 1 must have an endpoint\n", __func__);
+ r = -ENODEV;
+ goto fail2;
+ }
ar2->ep[1] = &alt->endpoint[0].desc;
r = ati_remote2_urb_init(ar2);
if (r)
- goto fail2;
+ goto fail3;
ar2->channel_mask = channel_mask;
ar2->mode_mask = mode_mask;
r = ati_remote2_setup(ar2, ar2->channel_mask);
if (r)
- goto fail2;
+ goto fail3;
usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
if (r)
- goto fail2;
+ goto fail3;
r = ati_remote2_input_init(ar2);
if (r)
- goto fail3;
+ goto fail4;
usb_set_intfdata(interface, ar2);
@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
return 0;
- fail3:
+ fail4:
sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
- fail2:
+ fail3:
ati_remote2_urb_cleanup(ar2);
+ fail2:
usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
fail1:
kfree(ar2);
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ac1fa5f44580..9c0ea36913b4 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bMasterInterface0);
+ if (!pcu->ctrl_intf)
+ return -EINVAL;
alt = pcu->ctrl_intf->cur_altsetting;
pcu->ep_ctrl = &alt->endpoint[0].desc;
@@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->data_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bSlaveInterface0);
+ if (!pcu->data_intf)
+ return -EINVAL;
alt = pcu->data_intf->cur_altsetting;
if (alt->desc.bNumEndpoints != 2) {
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 63b539d3daba..84909a12ff36 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
int error = -ENOMEM;
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 1)
+ return -EINVAL;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -EIO;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 6025eb430c0a..a41d8328c064 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
return;
- /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
- if (SYN_ID_FULL(priv->identity) == 0x801 &&
+ /* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
+ if ((SYN_ID_FULL(priv->identity) == 0x801 ||
+ SYN_ID_FULL(priv->identity) == 0x802) &&
!((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
return;
diff --git a/drivers/input/touchscreen/ft5x06_ts.c b/drivers/input/touchscreen/ft5x06_ts.c
index c9905a4a87df..d619c1d06e9e 100644
--- a/drivers/input/touchscreen/ft5x06_ts.c
+++ b/drivers/input/touchscreen/ft5x06_ts.c
@@ -200,8 +200,9 @@ static int ft5x06_handle_touchdata(struct ft5x06_ts_data *data)
pointid = (buf[FT_TOUCH_ID_POS + FT_TOUCH_STEP * i]) >> 4;
if (pointid >= FT_MAX_ID)
break;
- else
- event->touch_point++;
+
+ event->touch_point++;
+
event->x[i] =
(s16) (buf[FT_TOUCH_X_H_POS + FT_TOUCH_STEP * i] & 0x0F) <<
8 | (s16) buf[FT_TOUCH_X_L_POS + FT_TOUCH_STEP * i];
@@ -419,10 +420,8 @@ static int ft5x06_ts_probe(struct i2c_client *client,
}
data = kzalloc(sizeof(struct ft5x06_ts_data), GFP_KERNEL);
- if (!data) {
- dev_err(&client->dev, "Not enough memory\n");
+ if (!data)
return -ENOMEM;
- }
input_dev = input_allocate_device();
if (!input_dev) {
@@ -445,6 +444,7 @@ static int ft5x06_ts_probe(struct i2c_client *client,
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(EV_ABS, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
pdata->x_max, 0, 0);
@@ -587,7 +587,7 @@ free_mem:
return err;
}
-static int __devexit ft5x06_ts_remove(struct i2c_client *client)
+static int ft5x06_ts_remove(struct i2c_client *client)
{
struct ft5x06_ts_data *data = i2c_get_clientdata(client);
diff --git a/drivers/input/touchscreen/gt9xx/goodix_tool.c b/drivers/input/touchscreen/gt9xx/goodix_tool.c
new file mode 100644
index 000000000000..3dfe4e1d334e
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/goodix_tool.c
@@ -0,0 +1,615 @@
+/* drivers/input/touchscreen/goodix_tool.c
+ *
+ * 2010 - 2012 Goodix Technology.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be a reference
+ * to you, when you are integrating the GOODiX's CTP IC into your system,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Version:1.6
+ * V1.0:2012/05/01,create file.
+ * V1.2:2012/06/08,modify some warning.
+ * V1.4:2012/08/28,modified to support GT9XX
+ * V1.6:new proc name
+ */
+
+#include "gt9xx.h"
+
+#define DATA_LENGTH_UINT 512
+#define CMD_HEAD_LENGTH (sizeof(st_cmd_head) - sizeof(u8*))
+static char procname[20] = {0};
+
+#define UPDATE_FUNCTIONS
+
+#ifdef UPDATE_FUNCTIONS
+extern s32 gup_enter_update_mode(struct i2c_client *client);
+extern void gup_leave_update_mode(void);
+extern s32 gup_update_proc(void *dir);
+#endif
+
+extern void gtp_irq_disable(struct goodix_ts_data *);
+extern void gtp_irq_enable(struct goodix_ts_data *);
+
+#pragma pack(1)
+typedef struct{
+ u8 wr; //write/read flag, 0:R 1:W 2:PID 3:
+ u8 flag; //0:no need flag/int 1: need flag 2:need int
+ u8 flag_addr[2]; //flag address
+ u8 flag_val; //flag val
+ u8 flag_relation; //flag_val:flag 0:not equal 1:equal 2:> 3:<
+ u16 circle; //polling cycle
+ u8 times; //polling times
+ u8 retry; //I2C retry times
+ u16 delay; //delay before read or after write
+ u16 data_len; //data length
+ u8 addr_len; //address length
+ u8 addr[2]; //address
+ u8 res[3]; //reserved
+ u8* data; //data pointer
+}st_cmd_head;
+#pragma pack()
+st_cmd_head cmd_head;
+
+static struct i2c_client *gt_client = NULL;
+
+static struct proc_dir_entry *goodix_proc_entry;
+
+static s32 goodix_tool_write(struct file *filp, const char __user *buff, unsigned long len, void *data);
+static s32 goodix_tool_read( char *page, char **start, off_t off, int count, int *eof, void *data );
+static s32 (*tool_i2c_read)(u8 *, u16);
+static s32 (*tool_i2c_write)(u8 *, u16);
+
+#if GTP_ESD_PROTECT
+extern void gtp_esd_switch(struct i2c_client *, s32);
+#endif
+s32 DATA_LENGTH = 0;
+s8 IC_TYPE[16] = {0};
+
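+/* Build the proc entry name "gmnodeYYYYMMDD" from the driver's compile date. */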
+static void tool_set_proc_name(char * procname)
+{
+ char *months[12] = {"Jan", "Feb", "Mar", "Apr", "May",
+ "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
+ char date[20] = {0};
+ char month[4] = {0};
+ int i = 0, n_month = 1, n_day = 0, n_year = 0;
+
+ sprintf(date, "%s", __DATE__);
+
+ //GTP_DEBUG("compile date: %s", date);
+
+ sscanf(date, "%s %d %d", month, &n_day, &n_year);
+
+ for (i = 0; i < 12; ++i)
+ {
+ if (!memcmp(months[i], month, 3))
+ {
+ n_month = i+1;
+ break;
+ }
+ }
+
+ sprintf(procname, "gmnode%04d%02d%02d", n_year, n_month, n_day);
+
+ //GTP_DEBUG("procname = %s", procname);
+}
+
+
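+/*
+ * Plain register read: send the register address held in buf[0..addr_len-1],
+ * then read len bytes back into buf starting at GTP_ADDR_LENGTH. The transfer
+ * is retried up to cmd_head.retry times.
+ */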
+static s32 tool_i2c_read_no_extra(u8* buf, u16 len)
+{
+ s32 ret = -1;
+ s32 i = 0;
+ struct i2c_msg msgs[2];
+
+ msgs[0].flags = !I2C_M_RD;
+ msgs[0].addr = gt_client->addr;
+ msgs[0].len = cmd_head.addr_len;
+ msgs[0].buf = &buf[0];
+
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].addr = gt_client->addr;
+ msgs[1].len = len;
+ msgs[1].buf = &buf[GTP_ADDR_LENGTH];
+
+ for (i = 0; i < cmd_head.retry; i++)
+ {
+ ret=i2c_transfer(gt_client->adapter, msgs, 2);
+ if (ret > 0)
+ {
+ break;
+ }
+ }
+ return ret;
+}
+
+static s32 tool_i2c_write_no_extra(u8* buf, u16 len)
+{
+ s32 ret = -1;
+ s32 i = 0;
+ struct i2c_msg msg;
+
+ msg.flags = !I2C_M_RD;
+ msg.addr = gt_client->addr;
+ msg.len = len;
+ msg.buf = buf;
+
+ for (i = 0; i < cmd_head.retry; i++)
+ {
+ ret=i2c_transfer(gt_client->adapter, &msg, 1);
+ if (ret > 0)
+ {
+ break;
+ }
+ }
+ return ret;
+}
+
+static s32 tool_i2c_read_with_extra(u8* buf, u16 len)
+{
+ s32 ret = -1;
+ u8 pre[2] = {0x0f, 0xff};
+ u8 end[2] = {0x80, 0x00};
+
+ tool_i2c_write_no_extra(pre, 2);
+ ret = tool_i2c_read_no_extra(buf, len);
+ tool_i2c_write_no_extra(end, 2);
+
+ return ret;
+}
+
+static s32 tool_i2c_write_with_extra(u8* buf, u16 len)
+{
+ s32 ret = -1;
+ u8 pre[2] = {0x0f, 0xff};
+ u8 end[2] = {0x80, 0x00};
+
+ tool_i2c_write_no_extra(pre, 2);
+ ret = tool_i2c_write_no_extra(buf, len);
+ tool_i2c_write_no_extra(end, 2);
+
+ return ret;
+}
+
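+/*
+ * Pick the I2C helpers for the reported IC type: the older GT80x/GT81xx
+ * parts listed below use plain transfers, everything else needs the
+ * 0x0fff/0x8000 pre/end commands wrapped around each access.
+ */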
+static void register_i2c_func(void)
+{
+// if (!strncmp(IC_TYPE, "GT818", 5) || !strncmp(IC_TYPE, "GT816", 5)
+// || !strncmp(IC_TYPE, "GT811", 5) || !strncmp(IC_TYPE, "GT818F", 6)
+// || !strncmp(IC_TYPE, "GT827", 5) || !strncmp(IC_TYPE,"GT828", 5)
+// || !strncmp(IC_TYPE, "GT813", 5))
+ if (strncmp(IC_TYPE, "GT8110", 6) && strncmp(IC_TYPE, "GT8105", 6)
+ && strncmp(IC_TYPE, "GT801", 5) && strncmp(IC_TYPE, "GT800", 5)
+ && strncmp(IC_TYPE, "GT801PLUS", 9) && strncmp(IC_TYPE, "GT811", 5)
+ && strncmp(IC_TYPE, "GTxxx", 5))
+ {
+ tool_i2c_read = tool_i2c_read_with_extra;
+ tool_i2c_write = tool_i2c_write_with_extra;
+ GTP_DEBUG("I2C function: with pre and end cmd!");
+ }
+ else
+ {
+ tool_i2c_read = tool_i2c_read_no_extra;
+ tool_i2c_write = tool_i2c_write_no_extra;
+ GTP_INFO("I2C function: without pre and end cmd!");
+ }
+}
+
+static void unregister_i2c_func(void)
+{
+ tool_i2c_read = NULL;
+ tool_i2c_write = NULL;
+ GTP_INFO("I2C function: unregister i2c transfer function!");
+}
+
+
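+/*
+ * Set up the tool node: allocate the command data buffer (falling back to
+ * smaller sizes on failure), register the I2C helpers and create the proc
+ * entry that userspace tools read and write.
+ */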
+s32 init_wr_node(struct i2c_client *client)
+{
+ s32 i;
+
+ gt_client = client;
+ memset(&cmd_head, 0, sizeof(cmd_head));
+ cmd_head.data = NULL;
+
+ i = 5;
+ while ((!cmd_head.data) && i)
+ {
+ cmd_head.data = kzalloc(i * DATA_LENGTH_UINT, GFP_KERNEL);
+ if (NULL != cmd_head.data)
+ {
+ break;
+ }
+ i--;
+ }
+ if (i)
+ {
+ DATA_LENGTH = i * DATA_LENGTH_UINT + GTP_ADDR_LENGTH;
+ GTP_INFO("Applied memory size:%d.", DATA_LENGTH);
+ }
+ else
+ {
+ GTP_ERROR("Apply for memory failed.");
+ return FAIL;
+ }
+
+ cmd_head.addr_len = 2;
+ cmd_head.retry = 5;
+
+ register_i2c_func();
+
+ tool_set_proc_name(procname);
+ goodix_proc_entry = create_proc_entry(procname, 0666, NULL);
+ if (goodix_proc_entry == NULL)
+ {
+ GTP_ERROR("Couldn't create proc entry!");
+ return FAIL;
+ }
+ else
+ {
+ GTP_INFO("Create proc entry success!");
+ goodix_proc_entry->write_proc = goodix_tool_write;
+ goodix_proc_entry->read_proc = goodix_tool_read;
+ }
+
+ return SUCCESS;
+}
+
+void uninit_wr_node(void)
+{
+ kfree(cmd_head.data);
+ cmd_head.data = NULL;
+ unregister_i2c_func();
+ remove_proc_entry(procname, NULL);
+}
+
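+/*
+ * Evaluate the relation code rlt between src and dst:
+ * 0: !=  1: ==  2: >  3: <  4: bitwise AND  5: NOR. Returns true/false.
+ */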
+static u8 relation(u8 src, u8 dst, u8 rlt)
+{
+ u8 ret = 0;
+
+ switch (rlt)
+ {
+ case 0:
+ ret = (src != dst) ? true : false;
+ break;
+
+ case 1:
+ ret = (src == dst) ? true : false;
+ GTP_DEBUG("equal:src:0x%02x dst:0x%02x ret:%d.", src, dst, (s32)ret);
+ break;
+
+ case 2:
+ ret = (src > dst) ? true : false;
+ break;
+
+ case 3:
+ ret = (src < dst) ? true : false;
+ break;
+
+ case 4:
+ ret = (src & dst) ? true : false;
+ break;
+
+ case 5:
+ ret = (!(src | dst)) ? true : false;
+ break;
+
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
+
+/*******************************************************
+Function:
+ Poll the flag address until it satisfies the expected relation with flag_val.
+Input:
+ None.
+Output:
+ SUCCESS if the flag matched within cmd_head.times attempts, FAIL otherwise.
+********************************************************/
+static u8 comfirm(void)
+{
+ s32 i = 0;
+ u8 buf[32];
+
+// memcpy(&buf[GTP_ADDR_LENGTH - cmd_head.addr_len], &cmd_head.flag_addr, cmd_head.addr_len);
+// memcpy(buf, &cmd_head.flag_addr, cmd_head.addr_len);//Modified by Scott, 2012-02-17
+ memcpy(buf, cmd_head.flag_addr, cmd_head.addr_len);
+
+ for (i = 0; i < cmd_head.times; i++)
+ {
+ if (tool_i2c_read(buf, 1) <= 0)
+ {
+ GTP_ERROR("Read flag data failed!");
+ return FAIL;
+ }
+ if (true == relation(buf[GTP_ADDR_LENGTH], cmd_head.flag_val, cmd_head.flag_relation))
+ {
+ GTP_DEBUG("value at flag addr:0x%02x.", buf[GTP_ADDR_LENGTH]);
+ GTP_DEBUG("flag value:0x%02x.", cmd_head.flag_val);
+ break;
+ }
+
+ msleep(cmd_head.circle);
+ }
+
+ if (i >= cmd_head.times)
+ {
+ GTP_ERROR("Didn't get the flag to continue!");
+ return FAIL;
+ }
+
+ return SUCCESS;
+}
+
+/*******************************************************
+Function:
+ Goodix tool write function.
+Input:
+ standard proc write function param.
+Output:
+ Return write length.
+********************************************************/
+static s32 goodix_tool_write(struct file *filp, const char __user *buff, unsigned long len, void *data)
+{
+ s32 ret = 0;
+ GTP_DEBUG_FUNC();
+ GTP_DEBUG_ARRAY((u8*)buff, len);
+
+ ret = copy_from_user(&cmd_head, buff, CMD_HEAD_LENGTH);
+ if(ret)
+ {
+ GTP_ERROR("copy_from_user failed.");
+ }
+
+ GTP_DEBUG("wr :0x%02x.", cmd_head.wr);
+ GTP_DEBUG("flag:0x%02x.", cmd_head.flag);
+ GTP_DEBUG("flag addr:0x%02x%02x.", cmd_head.flag_addr[0], cmd_head.flag_addr[1]);
+ GTP_DEBUG("flag val:0x%02x.", cmd_head.flag_val);
+ GTP_DEBUG("flag rel:0x%02x.", cmd_head.flag_relation);
+ GTP_DEBUG("circle :%d.", (s32)cmd_head.circle);
+ GTP_DEBUG("times :%d.", (s32)cmd_head.times);
+ GTP_DEBUG("retry :%d.", (s32)cmd_head.retry);
+ GTP_DEBUG("delay :%d.", (s32)cmd_head.delay);
+ GTP_DEBUG("data len:%d.", (s32)cmd_head.data_len);
+ GTP_DEBUG("addr len:%d.", (s32)cmd_head.addr_len);
+ GTP_DEBUG("addr:0x%02x%02x.", cmd_head.addr[0], cmd_head.addr[1]);
+ GTP_DEBUG("len:%d.", (s32)len);
+ GTP_DEBUG("buf[20]:0x%02x.", buff[CMD_HEAD_LENGTH]);
+
+ if (1 == cmd_head.wr)
+ {
+ // copy_from_user(&cmd_head.data[cmd_head.addr_len], &buff[CMD_HEAD_LENGTH], cmd_head.data_len);
+ ret = copy_from_user(&cmd_head.data[GTP_ADDR_LENGTH], &buff[CMD_HEAD_LENGTH], cmd_head.data_len);
+ if(ret)
+ {
+ GTP_ERROR("copy_from_user failed.");
+ }
+ memcpy(&cmd_head.data[GTP_ADDR_LENGTH - cmd_head.addr_len], cmd_head.addr, cmd_head.addr_len);
+
+ GTP_DEBUG_ARRAY(cmd_head.data, cmd_head.data_len + cmd_head.addr_len);
+ GTP_DEBUG_ARRAY((u8*)&buff[CMD_HEAD_LENGTH], cmd_head.data_len);
+
+ if (1 == cmd_head.flag)
+ {
+ if (FAIL == comfirm())
+ {
+ GTP_ERROR("[WRITE]Comfirm fail!");
+ return FAIL;
+ }
+ }
+ else if (2 == cmd_head.flag)
+ {
+ //Need interrupt!
+ }
+ if (tool_i2c_write(&cmd_head.data[GTP_ADDR_LENGTH - cmd_head.addr_len],
+ cmd_head.data_len + cmd_head.addr_len) <= 0)
+ {
+ GTP_ERROR("[WRITE]Write data failed!");
+ return FAIL;
+ }
+
+ GTP_DEBUG_ARRAY(&cmd_head.data[GTP_ADDR_LENGTH - cmd_head.addr_len],cmd_head.data_len + cmd_head.addr_len);
+ if (cmd_head.delay)
+ {
+ msleep(cmd_head.delay);
+ }
+
+ return cmd_head.data_len + CMD_HEAD_LENGTH;
+ }
+ else if (3 == cmd_head.wr) //Write ic type
+ {
+ ret = copy_from_user(&cmd_head.data[0], &buff[CMD_HEAD_LENGTH], cmd_head.data_len);
+ if(ret)
+ {
+ GTP_ERROR("copy_from_user failed.");
+ }
+ memcpy(IC_TYPE, cmd_head.data, cmd_head.data_len);
+
+ register_i2c_func();
+
+ return cmd_head.data_len + CMD_HEAD_LENGTH;
+ }
+ else if (5 == cmd_head.wr)
+ {
+ //memcpy(IC_TYPE, cmd_head.data, cmd_head.data_len);
+
+ return cmd_head.data_len + CMD_HEAD_LENGTH;
+ }
+ else if (7 == cmd_head.wr)//disable irq!
+ {
+ gtp_irq_disable(i2c_get_clientdata(gt_client));
+
+ #if GTP_ESD_PROTECT
+ gtp_esd_switch(gt_client, SWITCH_OFF);
+ #endif
+ return CMD_HEAD_LENGTH;
+ }
+ else if (9 == cmd_head.wr) //enable irq!
+ {
+ gtp_irq_enable(i2c_get_clientdata(gt_client));
+
+ #if GTP_ESD_PROTECT
+ gtp_esd_switch(gt_client, SWITCH_ON);
+ #endif
+ return CMD_HEAD_LENGTH;
+ }
+ else if(17 == cmd_head.wr)
+ {
+ struct goodix_ts_data *ts = i2c_get_clientdata(gt_client);
+ ret = copy_from_user(&cmd_head.data[GTP_ADDR_LENGTH], &buff[CMD_HEAD_LENGTH], cmd_head.data_len);
+ if(ret)
+ {
+ GTP_DEBUG("copy_from_user failed.");
+ }
+ if(cmd_head.data[GTP_ADDR_LENGTH])
+ {
+ GTP_DEBUG("gtp enter rawdiff.");
+ ts->gtp_rawdiff_mode = true;
+ }
+ else
+ {
+ ts->gtp_rawdiff_mode = false;
+ GTP_DEBUG("gtp leave rawdiff.");
+ }
+ return CMD_HEAD_LENGTH;
+ }
+#ifdef UPDATE_FUNCTIONS
+ else if (11 == cmd_head.wr)//Enter update mode!
+ {
+ if (FAIL == gup_enter_update_mode(gt_client))
+ {
+ return FAIL;
+ }
+ }
+ else if (13 == cmd_head.wr)//Leave update mode!
+ {
+ gup_leave_update_mode();
+ }
+ else if (15 == cmd_head.wr) //Update firmware!
+ {
+ show_len = 0;
+ total_len = 0;
+ memset(cmd_head.data, 0, cmd_head.data_len + 1);
+ memcpy(cmd_head.data, &buff[CMD_HEAD_LENGTH], cmd_head.data_len);
+
+ if (FAIL == gup_update_proc((void*)cmd_head.data))
+ {
+ return FAIL;
+ }
+ }
+#endif
+
+ return CMD_HEAD_LENGTH;
+}
+
+/*******************************************************
+Function:
+ Goodix tool read function.
+Input:
+ standard proc read function param.
+Output:
+ Return read length.
+********************************************************/
+static s32 goodix_tool_read( char *page, char **start, off_t off, int count, int *eof, void *data )
+{
+ GTP_DEBUG_FUNC();
+
+ if (cmd_head.wr % 2)
+ {
+ return FAIL;
+ }
+ else if (!cmd_head.wr)
+ {
+ u16 len = 0;
+ s16 data_len = 0;
+ u16 loc = 0;
+
+ if (1 == cmd_head.flag)
+ {
+ if (FAIL == comfirm())
+ {
+ GTP_ERROR("[READ]Comfirm fail!");
+ return FAIL;
+ }
+ }
+ else if (2 == cmd_head.flag)
+ {
+ //Need interrupt!
+ }
+
+ memcpy(cmd_head.data, cmd_head.addr, cmd_head.addr_len);
+
+ GTP_DEBUG("[CMD HEAD DATA] ADDR:0x%02x%02x.", cmd_head.data[0], cmd_head.data[1]);
+ GTP_DEBUG("[CMD HEAD ADDR] ADDR:0x%02x%02x.", cmd_head.addr[0], cmd_head.addr[1]);
+
+ if (cmd_head.delay)
+ {
+ msleep(cmd_head.delay);
+ }
+
+ data_len = cmd_head.data_len;
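+ /* Read the requested range in DATA_LENGTH-sized chunks and copy each
+ * chunk into the proc page buffer. */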
+ while(data_len > 0)
+ {
+ if (data_len > DATA_LENGTH)
+ {
+ len = DATA_LENGTH;
+ }
+ else
+ {
+ len = data_len;
+ }
+ data_len -= DATA_LENGTH;
+
+ if (tool_i2c_read(cmd_head.data, len) <= 0)
+ {
+ GTP_ERROR("[READ]Read data failed!");
+ return FAIL;
+ }
+ memcpy(&page[loc], &cmd_head.data[GTP_ADDR_LENGTH], len);
+ loc += len;
+
+ GTP_DEBUG_ARRAY(&cmd_head.data[GTP_ADDR_LENGTH], len);
+ GTP_DEBUG_ARRAY(page, len);
+ }
+ }
+ else if (2 == cmd_head.wr)
+ {
+ // memcpy(page, "gt8", cmd_head.data_len);
+ // memcpy(page, "GT818", 5);
+ // page[5] = 0;
+
+ GTP_DEBUG("Return ic type:%s len:%d.", page, (s32)cmd_head.data_len);
+ return cmd_head.data_len;
+ //return sizeof(IC_TYPE_NAME);
+ }
+ else if (4 == cmd_head.wr)
+ {
+ page[0] = show_len >> 8;
+ page[1] = show_len & 0xff;
+ page[2] = total_len >> 8;
+ page[3] = total_len & 0xff;
+
+ return cmd_head.data_len;
+ }
+ else if (6 == cmd_head.wr)
+ {
+ //Read error code!
+ }
+ else if (8 == cmd_head.wr) //Read driver version
+ {
+ // memcpy(page, GTP_DRIVER_VERSION, strlen(GTP_DRIVER_VERSION));
+ s32 tmp_len;
+ tmp_len = strlen(GTP_DRIVER_VERSION);
+ memcpy(page, GTP_DRIVER_VERSION, tmp_len);
+ page[tmp_len] = 0;
+ }
+
+ return cmd_head.data_len;
+}
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.c b/drivers/input/touchscreen/gt9xx/gt9xx.c
new file mode 100644
index 000000000000..6615c3a039a0
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.c
@@ -0,0 +1,1805 @@
+/* drivers/input/touchscreen/gt9xx.c
+ *
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * 2010 - 2013 Goodix Technology.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be a reference
+ * to you, when you are integrating the GOODiX's CTP IC into your system,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Version: 1.8
+ * Authors: andrew@goodix.com, meta@goodix.com
+ * Release Date: 2013/04/25
+ * Revision record:
+ * V1.0:
+ * first Release. By Andrew, 2012/08/31
+ * V1.2:
+ * modify gtp_reset_guitar,slot report,tracking_id & 0x0F.
+ * By Andrew, 2012/10/15
+ * V1.4:
+ * modify gt9xx_update.c. By Andrew, 2012/12/12
+ * V1.6:
+ * 1. new heartbeat/esd_protect mechanism(add external watchdog)
+ * 2. doze mode, sliding wakeup
+ * 3. 3 more cfg_group(GT9 Sensor_ID: 0~5)
+ * 4. config length verification
+ * 5. names & comments
+ * By Meta, 2013/03/11
+ * V1.8:
+ * 1. pen/stylus identification
+ * 2. read double check & fixed config support
+ * 3. new esd & slide wakeup optimization
+ * By Meta, 2013/06/08
+ */
+
+#include "gt9xx.h"
+
+#if GTP_ICS_SLOT_REPORT
+#include <linux/input/mt.h>
+#endif
+
+#define GOODIX_DEV_NAME "Goodix Capacitive TouchScreen"
+#define CFG_MAX_TOUCH_POINTS 5
+#define GOODIX_COORDS_ARR_SIZE 4
+#define MAX_BUTTONS 4
+
+/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
+#define GTP_I2C_ADDRESS_HIGH 0x14
+#define GTP_I2C_ADDRESS_LOW 0x5D
+
+#define RESET_DELAY_T3_US 200 /* T3: > 100us */
+#define RESET_DELAY_T4 20 /* T4: > 5ms */
+
+#define PHY_BUF_SIZE 32
+
+#define GTP_MAX_TOUCH 5
+#define GTP_ESD_CHECK_CIRCLE_MS 2000
+
+#if GTP_HAVE_TOUCH_KEY
+static const u16 touch_key_array[] = {KEY_MENU, KEY_HOMEPAGE, KEY_BACK};
+
+#if GTP_DEBUG_ON
+static const int key_codes[] = {
+ KEY_HOME, KEY_BACK, KEY_MENU, KEY_SEARCH
+};
+static const char *const key_names[] = {
+ "Key_Home", "Key_Back", "Key_Menu", "Key_Search"
+};
+#endif
+#endif
+
+static void gtp_reset_guitar(struct goodix_ts_data *ts, int ms);
+static void gtp_int_sync(struct goodix_ts_data *ts, int ms);
+static int gtp_i2c_test(struct i2c_client *client);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void goodix_ts_early_suspend(struct early_suspend *h);
+static void goodix_ts_late_resume(struct early_suspend *h);
+#endif
+
+#if GTP_ESD_PROTECT
+static struct delayed_work gtp_esd_check_work;
+static struct workqueue_struct *gtp_esd_check_workqueue;
+static void gtp_esd_check_func(struct work_struct *work);
+static int gtp_init_ext_watchdog(struct i2c_client *client);
+struct i2c_client *i2c_connect_client;
+#endif
+
+#if GTP_SLIDE_WAKEUP
+enum doze_status {
+ DOZE_DISABLED = 0,
+ DOZE_ENABLED = 1,
+ DOZE_WAKEUP = 2,
+};
+static enum doze_status doze_status = DOZE_DISABLED;
+static s8 gtp_enter_doze(struct goodix_ts_data *ts);
+#endif
+bool init_done;
+static u8 chip_gt9xxs; /* true if ic is gt9xxs, like gt915s */
+u8 grp_cfg_version;
+
+/*******************************************************
+Function:
+ Read data from the i2c slave device.
+Input:
+ client: i2c device.
+ buf[0~1]: read start address.
+ buf[2~len-1]: read data buffer.
+ len: GTP_ADDR_LENGTH + read bytes count
+Output:
+ numbers of i2c_msgs to transfer:
+ 2: succeed, otherwise: failed
+*********************************************************/
+int gtp_i2c_read(struct i2c_client *client, u8 *buf, int len)
+{
+ struct goodix_ts_data *ts = i2c_get_clientdata(client);
+ struct i2c_msg msgs[2];
+ int ret = -EIO;
+ int retries = 0;
+
+ GTP_DEBUG_FUNC();
+
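+ /* msgs[0] writes the 16-bit register address, msgs[1] reads len - GTP_ADDR_LENGTH data bytes back */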
+ msgs[0].flags = !I2C_M_RD;
+ msgs[0].addr = client->addr;
+ msgs[0].len = GTP_ADDR_LENGTH;
+ msgs[0].buf = &buf[0];
+
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].addr = client->addr;
+ msgs[1].len = len - GTP_ADDR_LENGTH;
+ msgs[1].buf = &buf[GTP_ADDR_LENGTH];
+
+ while (retries < 5) {
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret == 2)
+ break;
+ retries++;
+ }
+ if (retries >= 5) {
+#if GTP_SLIDE_WAKEUP
+ /* skip the reset below: resetting the chip would quit doze mode */
+ if (doze_status == DOZE_ENABLED)
+ return ret;
+#endif
+ GTP_DEBUG("I2C communication timeout, resetting chip...");
+ if (init_done)
+ gtp_reset_guitar(ts, 10);
+ else
+ dev_warn(&client->dev,
+ "<GTP> gtp_reset_guitar exit init_done=%d:\n",
+ init_done);
+ }
+ return ret;
+}
+
+/*******************************************************
+Function:
+ Write data to the i2c slave device.
+Input:
+ client: i2c device.
+ buf[0~1]: write start address.
+ buf[2~len-1]: data buffer
+ len: GTP_ADDR_LENGTH + write bytes count
+Output:
+ numbers of i2c_msgs to transfer:
+ 1: succeed, otherwise: failed
+*********************************************************/
+int gtp_i2c_write(struct i2c_client *client, u8 *buf, int len)
+{
+ struct goodix_ts_data *ts = i2c_get_clientdata(client);
+ struct i2c_msg msg;
+ int ret = -EIO;
+ int retries = 0;
+
+ GTP_DEBUG_FUNC();
+
+ msg.flags = !I2C_M_RD;
+ msg.addr = client->addr;
+ msg.len = len;
+ msg.buf = buf;
+
+ while (retries < 5) {
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret == 1)
+ break;
+ retries++;
+ }
+ if (retries >= 5) {
+#if GTP_SLIDE_WAKEUP
+ if (doze_status == DOZE_ENABLED)
+ return ret;
+#endif
+ GTP_DEBUG("I2C communication timeout, resetting chip...");
+ if (init_done)
+ gtp_reset_guitar(ts, 10);
+ else
+ dev_warn(&client->dev,
+ "<GTP> gtp_reset_guitar exit init_done=%d:\n",
+ init_done);
+ }
+ return ret;
+}
+/*******************************************************
+Function:
+ i2c read twice, compare the results
+Input:
+ client: i2c device
+ addr: operate address
+ rxbuf: read data to store, if compare successful
+ len: bytes to read
+Output:
+ FAIL: read failed
+ SUCCESS: read successful
+*********************************************************/
+int gtp_i2c_read_dbl_check(struct i2c_client *client,
+ u16 addr, u8 *rxbuf, int len)
+{
+ u8 buf[16] = {0};
+ u8 confirm_buf[16] = {0};
+ u8 retry = 0;
+
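+ /* read the same range twice and accept the data only when both reads match */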
+ while (retry++ < 3) {
+ memset(buf, 0xAA, 16);
+ buf[0] = (u8)(addr >> 8);
+ buf[1] = (u8)(addr & 0xFF);
+ gtp_i2c_read(client, buf, len + 2);
+
+ memset(confirm_buf, 0xAB, 16);
+ confirm_buf[0] = (u8)(addr >> 8);
+ confirm_buf[1] = (u8)(addr & 0xFF);
+ gtp_i2c_read(client, confirm_buf, len + 2);
+
+ if (!memcmp(buf, confirm_buf, len + 2))
+ break;
+ }
+ if (retry < 3) {
+ memcpy(rxbuf, confirm_buf + 2, len);
+ return SUCCESS;
+ }
+ dev_err(&client->dev,
+ "i2c read 0x%04X, %d bytes, double check failed!", addr, len);
+ return FAIL;
+}
+
+/*******************************************************
+Function:
+ Send config data.
+Input:
+ client: i2c device.
+Output:
+ result of i2c write operation.
+ > 0: succeed, otherwise: failed
+*********************************************************/
+static int gtp_send_cfg(struct goodix_ts_data *ts)
+{
+ int ret = 0;
+#if GTP_DRIVER_SEND_CFG
+ int retry = 0;
+
+ if (ts->fixed_cfg) {
+ dev_dbg(&ts->client->dev,
+ "Ic fixed config, no config sent!");
+ ret = 2;
+ } else {
+ for (retry = 0; retry < 5; retry++) {
+ ret = gtp_i2c_write(ts->client,
+ ts->config_data,
+ GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH);
+ if (ret > 0)
+ break;
+ }
+ }
+#endif
+
+ return ret;
+}
+
+/*******************************************************
+Function:
+ Disable irq function
+Input:
+ ts: goodix i2c_client private data
+Output:
+ None.
+*********************************************************/
+void gtp_irq_disable(struct goodix_ts_data *ts)
+{
+ unsigned long irqflags;
+
+ GTP_DEBUG_FUNC();
+
+ spin_lock_irqsave(&ts->irq_lock, irqflags);
+ if (!ts->irq_is_disabled) {
+ ts->irq_is_disabled = true;
+ disable_irq_nosync(ts->client->irq);
+ }
+ spin_unlock_irqrestore(&ts->irq_lock, irqflags);
+}
+
+/*******************************************************
+Function:
+ Enable irq function
+Input:
+ ts: goodix i2c_client private data
+Output:
+ None.
+*********************************************************/
+void gtp_irq_enable(struct goodix_ts_data *ts)
+{
+ unsigned long irqflags = 0;
+
+ GTP_DEBUG_FUNC();
+
+ spin_lock_irqsave(&ts->irq_lock, irqflags);
+ if (ts->irq_is_disabled) {
+ enable_irq(ts->client->irq);
+ ts->irq_is_disabled = false;
+ }
+ spin_unlock_irqrestore(&ts->irq_lock, irqflags);
+}
+
+/*******************************************************
+Function:
+ Report touch point event
+Input:
+ ts: goodix i2c_client private data
+ id: trackId
+ x: input x coordinate
+ y: input y coordinate
+ w: input pressure
+Output:
+ None.
+*********************************************************/
+static void gtp_touch_down(struct goodix_ts_data *ts, int id, int x, int y,
+ int w)
+{
+#if GTP_CHANGE_X2Y
+ GTP_SWAP(x, y);
+#endif
+
+#if GTP_ICS_SLOT_REPORT
+ input_mt_slot(ts->input_dev, id);
+ input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, id);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+#else
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, id);
+ input_mt_sync(ts->input_dev);
+#endif
+
+ GTP_DEBUG("ID:%d, X:%d, Y:%d, W:%d", id, x, y, w);
+}
+
+/*******************************************************
+Function:
+ Report touch release event
+Input:
+ ts: goodix i2c_client private data
+Output:
+ None.
+*********************************************************/
+static void gtp_touch_up(struct goodix_ts_data *ts, int id)
+{
+#if GTP_ICS_SLOT_REPORT
+ input_mt_slot(ts->input_dev, id);
+ input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
+ GTP_DEBUG("Touch id[%2d] release!", id);
+#else
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
+ input_mt_sync(ts->input_dev);
+#endif
+}
+
+
+
+/*******************************************************
+Function:
+ Goodix touchscreen work function
+Input:
+ work: work struct of goodix_workqueue
+Output:
+ None.
+*********************************************************/
+static void goodix_ts_work_func(struct work_struct *work)
+{
+ u8 end_cmd[3] = { GTP_READ_COOR_ADDR >> 8,
+ GTP_READ_COOR_ADDR & 0xFF, 0};
+ u8 point_data[2 + 1 + 8 * GTP_MAX_TOUCH + 1] = {
+ GTP_READ_COOR_ADDR >> 8,
+ GTP_READ_COOR_ADDR & 0xFF};
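+ /* point_data: 2 address bytes + 1 status byte + 8 bytes per touch + 1 extra byte */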
+ u8 touch_num = 0;
+ u8 finger = 0;
+ static u16 pre_touch;
+ static u8 pre_key;
+#if GTP_WITH_PEN
+ static u8 pre_pen;
+#endif
+ u8 key_value = 0;
+ u8 *coor_data = NULL;
+ s32 input_x = 0;
+ s32 input_y = 0;
+ s32 input_w = 0;
+ s32 id = 0;
+ s32 i = 0;
+ int ret = -1;
+ struct goodix_ts_data *ts = NULL;
+
+#if GTP_SLIDE_WAKEUP
+ u8 doze_buf[3] = {0x81, 0x4B};
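+ /* 0x814B latches the wakeup gesture: 0xAA/0xBB for slide, 0xC0 bits for double-click */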
+#endif
+
+ GTP_DEBUG_FUNC();
+
+ ts = container_of(work, struct goodix_ts_data, work);
+#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
+ if (ts->enter_update)
+ return;
+#endif
+
+#if GTP_SLIDE_WAKEUP
+ if (doze_status == DOZE_ENABLED) {
+ ret = gtp_i2c_read(ts->client, doze_buf, 3);
+ GTP_DEBUG("0x814B = 0x%02X", doze_buf[2]);
+ if (ret > 0) {
+ if (doze_buf[2] == 0xAA) {
+ dev_dbg(&ts->client->dev,
+ "Slide(0xAA) To Light up the screen!");
+ doze_status = DOZE_WAKEUP;
+ input_report_key(
+ ts->input_dev, KEY_POWER, 1);
+ input_sync(ts->input_dev);
+ input_report_key(
+ ts->input_dev, KEY_POWER, 0);
+ input_sync(ts->input_dev);
+ /* clear 0x814B */
+ doze_buf[2] = 0x00;
+ gtp_i2c_write(ts->client, doze_buf, 3);
+ } else if (doze_buf[2] == 0xBB) {
+ dev_dbg(&ts->client->dev,
+ "Slide(0xBB) To Light up the screen!");
+ doze_status = DOZE_WAKEUP;
+ input_report_key(ts->input_dev, KEY_POWER, 1);
+ input_sync(ts->input_dev);
+ input_report_key(ts->input_dev, KEY_POWER, 0);
+ input_sync(ts->input_dev);
+ /* clear 0x814B*/
+ doze_buf[2] = 0x00;
+ gtp_i2c_write(ts->client, doze_buf, 3);
+ } else if (0xC0 == (doze_buf[2] & 0xC0)) {
+ dev_dbg(&ts->client->dev,
+ "double click to light up the screen!");
+ doze_status = DOZE_WAKEUP;
+ input_report_key(ts->input_dev, KEY_POWER, 1);
+ input_sync(ts->input_dev);
+ input_report_key(ts->input_dev, KEY_POWER, 0);
+ input_sync(ts->input_dev);
+ /* clear 0x814B */
+ doze_buf[2] = 0x00;
+ gtp_i2c_write(ts->client, doze_buf, 3);
+ } else {
+ gtp_enter_doze(ts);
+ }
+ }
+ if (ts->use_irq)
+ gtp_irq_enable(ts);
+
+ return;
+ }
+#endif
+
+ ret = gtp_i2c_read(ts->client, point_data, 12);
+ if (ret < 0) {
+ dev_err(&ts->client->dev,
+ "I2C transfer error. errno:%d\n ", ret);
+ goto exit_work_func;
+ }
+
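+ /* status byte: bit7 = coordinate-ready flag, low nibble = number of touch points */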
+ finger = point_data[GTP_ADDR_LENGTH];
+ if ((finger & 0x80) == 0)
+ goto exit_work_func;
+
+ touch_num = finger & 0x0f;
+ if (touch_num > GTP_MAX_TOUCH)
+ goto exit_work_func;
+
+ if (touch_num > 1) {
+ u8 buf[8 * GTP_MAX_TOUCH] = { (GTP_READ_COOR_ADDR + 10) >> 8,
+ (GTP_READ_COOR_ADDR + 10) & 0xff };
+
+ ret = gtp_i2c_read(ts->client, buf,
+ 2 + 8 * (touch_num - 1));
+ memcpy(&point_data[12], &buf[2], 8 * (touch_num - 1));
+ }
+
+#if GTP_HAVE_TOUCH_KEY
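+ /* the key byte follows the last 8-byte touch record; each bit maps to one touch_key_array entry */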
+ key_value = point_data[3 + 8 * touch_num];
+
+ if (key_value || pre_key) {
+ for (i = 0; i < ARRAY_SIZE(touch_key_array); i++) {
+#if GTP_DEBUG_ON
+ for (ret = 0; ret < 4; ++ret) {
+ if (key_codes[ret] == touch_key_array[i]) {
+ GTP_DEBUG("Key: %s %s",
+ key_names[ret],
+ (key_value & (0x01 << i))
+ ? "Down" : "Up");
+ break;
+ }
+ }
+#endif
+
+ input_report_key(ts->input_dev,
+ touch_key_array[i], key_value & (0x01<<i));
+ }
+ touch_num = 0;
+ pre_touch = 0;
+ }
+#endif
+ pre_key = key_value;
+
+ GTP_DEBUG("pre_touch:%02x, finger:%02x.", pre_touch, finger);
+
+#if GTP_ICS_SLOT_REPORT
+#if GTP_WITH_PEN
+ if (pre_pen && (touch_num == 0)) {
+ GTP_DEBUG("Pen touch UP(Slot)!");
+ input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
+ input_mt_slot(ts->input_dev, 5);
+ input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
+ pre_pen = 0;
+ }
+#endif
+ if (pre_touch || touch_num) {
+ s32 pos = 0;
+ u16 touch_index = 0;
+
+ coor_data = &point_data[3];
+ if (touch_num) {
+ id = coor_data[pos] & 0x0F;
+#if GTP_WITH_PEN
+ id = coor_data[pos];
+ if (id == 128) {
+ GTP_DEBUG("Pen touch DOWN(Slot)!");
+ input_x = coor_data[pos + 1]
+ | (coor_data[pos + 2] << 8);
+ input_y = coor_data[pos + 3]
+ | (coor_data[pos + 4] << 8);
+ input_w = coor_data[pos + 5]
+ | (coor_data[pos + 6] << 8);
+
+ input_report_key(ts->input_dev,
+ BTN_TOOL_PEN, 1);
+ input_mt_slot(ts->input_dev, 5);
+ input_report_abs(ts->input_dev,
+ ABS_MT_TRACKING_ID, 5);
+ input_report_abs(ts->input_dev,
+ ABS_MT_POSITION_X, input_x);
+ input_report_abs(ts->input_dev,
+ ABS_MT_POSITION_Y, input_y);
+ input_report_abs(ts->input_dev,
+ ABS_MT_TOUCH_MAJOR, input_w);
+ GTP_DEBUG("Pen/Stylus: (%d, %d)[%d]",
+ input_x, input_y, input_w);
+ pre_pen = 1;
+ pre_touch = 0;
+ }
+#endif
+
+ touch_index |= (0x01<<id);
+ }
+
+ GTP_DEBUG("id = %d,touch_index = 0x%x, pre_touch = 0x%x\n",
+ id, touch_index, pre_touch);
+ for (i = 0; i < GTP_MAX_TOUCH; i++) {
+#if GTP_WITH_PEN
+ if (pre_pen == 1)
+ break;
+#endif
+ if (touch_index & (0x01<<i)) {
+ input_x = coor_data[pos + 1] |
+ coor_data[pos + 2] << 8;
+ input_y = coor_data[pos + 3] |
+ coor_data[pos + 4] << 8;
+ input_w = coor_data[pos + 5] |
+ coor_data[pos + 6] << 8;
+
+ gtp_touch_down(ts, id,
+ input_x, input_y, input_w);
+ pre_touch |= 0x01 << i;
+
+ pos += 8;
+ id = coor_data[pos] & 0x0F;
+ touch_index |= (0x01<<id);
+ } else {
+ gtp_touch_up(ts, i);
+ pre_touch &= ~(0x01 << i);
+ }
+ }
+ }
+#else
+ input_report_key(ts->input_dev, BTN_TOUCH, (touch_num || key_value));
+ if (touch_num) {
+ for (i = 0; i < touch_num; i++) {
+ coor_data = &point_data[i * 8 + 3];
+
+ id = coor_data[0];
+ input_x = coor_data[1] | coor_data[2] << 8;
+ input_y = coor_data[3] | coor_data[4] << 8;
+ input_w = coor_data[5] | coor_data[6] << 8;
+#if GTP_WITH_PEN
+ if (id == 128) {
+ GTP_DEBUG("Pen touch DOWN!");
+ input_report_key(ts->input_dev,
+ BTN_TOOL_PEN, 1);
+ pre_pen = 1;
+ id = 0;
+ }
+#endif
+ gtp_touch_down(ts, id, input_x, input_y, input_w);
+ }
+ } else if (pre_touch) {
+#if GTP_WITH_PEN
+ if (pre_pen == 1) {
+ GTP_DEBUG("Pen touch UP!");
+ input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
+ pre_pen = 0;
+ }
+#endif
+ GTP_DEBUG("Touch Released!");
+ gtp_touch_up(ts, 0);
+ }
+
+ pre_touch = touch_num;
+#endif
+
+ input_sync(ts->input_dev);
+
+exit_work_func:
+ if (!ts->gtp_rawdiff_mode) {
+ ret = gtp_i2c_write(ts->client, end_cmd, 3);
+ if (ret < 0)
+ dev_warn(&ts->client->dev, "I2C write end_cmd error!\n");
+
+ }
+ if (ts->use_irq)
+ gtp_irq_enable(ts);
+
+ return;
+}
+
+/*******************************************************
+Function:
+ Timer interrupt service routine for polling mode.
+Input:
+ timer: timer struct pointer
+Output:
+ Timer work mode.
+ HRTIMER_NORESTART: no restart mode
+*********************************************************/
+static enum hrtimer_restart goodix_ts_timer_handler(struct hrtimer *timer)
+{
+ struct goodix_ts_data
+ *ts = container_of(timer, struct goodix_ts_data, timer);
+
+ GTP_DEBUG_FUNC();
+
+ queue_work(ts->goodix_wq, &ts->work);
+ hrtimer_start(&ts->timer, ktime_set(0, (GTP_POLL_TIME + 6) * 1000000),
+ HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+}
+
+/*******************************************************
+Function:
+ External interrupt service routine for interrupt mode.
+Input:
+ irq: interrupt number.
+ dev_id: private data pointer
+Output:
+ Handle Result.
+ IRQ_HANDLED: interrupt handled successfully
+*********************************************************/
+static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id)
+{
+ struct goodix_ts_data *ts = dev_id;
+
+ GTP_DEBUG_FUNC();
+
+ gtp_irq_disable(ts);
+
+ queue_work(ts->goodix_wq, &ts->work);
+
+ return IRQ_HANDLED;
+}
+/*******************************************************
+Function:
+ Synchronization.
+Input:
+ ms: synchronization time in millisecond.
+Output:
+ None.
+*******************************************************/
+void gtp_int_sync(struct goodix_ts_data *ts, int ms)
+{
+ gpio_direction_output(ts->pdata->irq_gpio, 0);
+ msleep(ms);
+ gpio_direction_input(ts->pdata->irq_gpio);
+}
+
+/*******************************************************
+Function:
+ Reset chip.
+Input:
+ ms: reset time in millisecond, must >10ms
+Output:
+ None.
+*******************************************************/
+static void gtp_reset_guitar(struct goodix_ts_data *ts, int ms)
+{
+ GTP_DEBUG_FUNC();
+
+ /* This reset sequence selects the I2C slave address: INT high -> 0x14, INT low -> 0x5D */
+ gpio_direction_output(ts->pdata->reset_gpio, 0);
+ msleep(ms);
+
+ if (ts->client->addr == GTP_I2C_ADDRESS_HIGH)
+ gpio_direction_output(ts->pdata->irq_gpio, 1);
+ else
+ gpio_direction_output(ts->pdata->irq_gpio, 0);
+
+ usleep(RESET_DELAY_T3_US);
+ gpio_direction_output(ts->pdata->reset_gpio, 1);
+ msleep(RESET_DELAY_T4);
+
+ gpio_direction_input(ts->pdata->reset_gpio);
+
+ gtp_int_sync(ts, 50);
+
+#if GTP_ESD_PROTECT
+ gtp_init_ext_watchdog(ts->client);
+#endif
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#if GTP_SLIDE_WAKEUP
+/*******************************************************
+Function:
+ Enter doze mode for sliding wakeup.
+Input:
+ ts: goodix tp private data
+Output:
+ 1: succeed, otherwise failed
+*******************************************************/
+static s8 gtp_enter_doze(struct goodix_ts_data *ts)
+{
+ int ret = -1;
+ s8 retry = 0;
+ u8 i2c_control_buf[3] = {
+ (u8)(GTP_REG_SLEEP >> 8),
+ (u8)GTP_REG_SLEEP, 8};
+
+ GTP_DEBUG_FUNC();
+
+#if GTP_DBL_CLK_WAKEUP
+ i2c_control_buf[2] = 0x09;
+#endif
+ gtp_irq_disable(ts);
+
+ GTP_DEBUG("entering doze mode...");
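+ /* write the doze command to 0x8046, then commit it at 0x8040 */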
+ while (retry++ < 5) {
+ i2c_control_buf[0] = 0x80;
+ i2c_control_buf[1] = 0x46;
+ ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
+ if (ret < 0) {
+ GTP_DEBUG(
+ "failed to set doze flag into 0x8046, %d",
+ retry);
+ continue;
+ }
+ i2c_control_buf[0] = 0x80;
+ i2c_control_buf[1] = 0x40;
+ ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
+ if (ret > 0) {
+ doze_status = DOZE_ENABLED;
+ dev_dbg(&ts->client->dev,
+ "GTP has been working in doze mode!");
+ gtp_irq_enable(ts);
+ return ret;
+ }
+ msleep(20);
+ }
+ dev_err(&ts->client->dev, "GTP send doze cmd failed.\n");
+ gtp_irq_enable(ts);
+ return ret;
+}
+#else
+/*******************************************************
+Function:
+ Enter sleep mode.
+Input:
+ ts: private data.
+Output:
+ Executive outcomes.
+ 1: succeed, otherwise failed.
+*******************************************************/
+static s8 gtp_enter_sleep(struct goodix_ts_data *ts)
+{
+ int ret = -1;
+ s8 retry = 0;
+ u8 i2c_control_buf[3] = {
+ (u8)(GTP_REG_SLEEP >> 8),
+ (u8)GTP_REG_SLEEP, 5};
+
+ GTP_DEBUG_FUNC();
+
+ ret = gpio_direction_output(ts->pdata->irq_gpio, 0);
+ usleep(5000);
+ while (retry++ < 5) {
+ ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
+ if (ret > 0) {
+ dev_dbg(&ts->client->dev,
+ "GTP enter sleep!");
+ return ret;
+ }
+ msleep(20);
+ }
+ dev_err(&ts->client->dev, "GTP send sleep cmd failed.\n");
+ return ret;
+}
+#endif
+
+/*******************************************************
+Function:
+ Wakeup from sleep.
+Input:
+ ts: private data.
+Output:
+ Executive outcomes.
+ >0: succeed, otherwise: failed.
+*******************************************************/
+static s8 gtp_wakeup_sleep(struct goodix_ts_data *ts)
+{
+ u8 retry = 0;
+ s8 ret = -1;
+
+ GTP_DEBUG_FUNC();
+
+#if GTP_POWER_CTRL_SLEEP
+ while (retry++ < 5) {
+ gtp_reset_guitar(ts, 20);
+
+ ret = gtp_send_cfg(ts);
+ if (ret > 0) {
+ dev_dbg(&ts->client->dev,
+ "Wakeup sleep send config success.");
+ continue;
+ }
+ dev_dbg(&ts->client->dev, "GTP Wakeup!");
+ return 1;
+ }
+#else
+ while (retry++ < 10) {
+#if GTP_SLIDE_WAKEUP
+ /* wakeup not by slide */
+ if (doze_status != DOZE_WAKEUP)
+ gtp_reset_guitar(ts, 10);
+ else
+ /* wakeup by slide */
+ doze_status = DOZE_DISABLED;
+#else
+ if (chip_gt9xxs == 1) {
+ gtp_reset_guitar(ts, 10);
+ } else {
+ ret = gpio_direction_output(ts->pdata->irq_gpio, 1);
+ usleep(5000);
+ }
+#endif
+ ret = gtp_i2c_test(ts->client);
+ if (ret > 0) {
+ dev_dbg(&ts->client->dev, "GTP wakeup sleep.");
+#if (!GTP_SLIDE_WAKEUP)
+ if (chip_gt9xxs == 0) {
+ gtp_int_sync(ts, 25);
+ msleep(20);
+#if GTP_ESD_PROTECT
+ gtp_init_ext_watchdog(ts->client);
+#endif
+ }
+#endif
+ return ret;
+ }
+ gtp_reset_guitar(ts, 20);
+ }
+#endif
+
+ dev_err(&ts->client->dev, "GTP wakeup sleep failed.\n");
+ return ret;
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+/*******************************************************
+Function:
+ Initialize gtp.
+Input:
+ ts: goodix private data
+Output:
+ Executive outcomes.
+ > =0: succeed, otherwise: failed
+*******************************************************/
+static int gtp_init_panel(struct goodix_ts_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ unsigned char *config_data;
+ int ret = -EIO;
+
+#if GTP_DRIVER_SEND_CFG
+ int i;
+ u8 check_sum = 0;
+ u8 opr_buf[16];
+ u8 sensor_id = 0;
+
+ u8 cfg_info_group1[] = CTP_CFG_GROUP1;
+ u8 cfg_info_group2[] = CTP_CFG_GROUP2;
+ u8 cfg_info_group3[] = CTP_CFG_GROUP3;
+ u8 cfg_info_group4[] = CTP_CFG_GROUP4;
+ u8 cfg_info_group5[] = CTP_CFG_GROUP5;
+ u8 cfg_info_group6[] = CTP_CFG_GROUP6;
+ u8 *send_cfg_buf[] = {cfg_info_group1, cfg_info_group2,
+ cfg_info_group3, cfg_info_group4,
+ cfg_info_group5, cfg_info_group6};
+
+ u8 cfg_info_len[] = {ARRAY_SIZE(cfg_info_group1),
+ ARRAY_SIZE(cfg_info_group2),
+ ARRAY_SIZE(cfg_info_group3),
+ ARRAY_SIZE(cfg_info_group4),
+ ARRAY_SIZE(cfg_info_group5),
+ ARRAY_SIZE(cfg_info_group6)};
+
+ GTP_DEBUG("Config Groups\' Lengths: %d, %d, %d, %d, %d, %d",
+ cfg_info_len[0], cfg_info_len[1], cfg_info_len[2],
+ cfg_info_len[3], cfg_info_len[4], cfg_info_len[5]);
+
+ ret = gtp_i2c_read_dbl_check(ts->client, 0x41E4, opr_buf, 1);
+ if (ret == SUCCESS) {
+ if (opr_buf[0] != 0xBE) {
+ ts->fw_error = 1;
+ dev_err(&client->dev,
+ "Firmware error, no config sent!");
+ return -EINVAL;
+ }
+ }
+ if ((!cfg_info_len[1]) && (!cfg_info_len[2]) && (!cfg_info_len[3])
+ && (!cfg_info_len[4]) && (!cfg_info_len[5])) {
+ sensor_id = 0;
+ } else {
+ ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_SENSOR_ID,
+ &sensor_id, 1);
+ if (ret == SUCCESS) {
+ if (sensor_id >= 0x06) {
+ dev_err(&client->dev,
+ "Invalid sensor_id(0x%02X), No Config Sent!",
+ sensor_id);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&client->dev,
+ "Failed to get sensor_id, No config sent!");
+ return -EINVAL;
+ }
+ }
+ GTP_DEBUG("Sensor_ID: %d", sensor_id);
+
+ ts->gtp_cfg_len = cfg_info_len[sensor_id];
+
+ if (ts->gtp_cfg_len < GTP_CONFIG_MIN_LENGTH) {
+ dev_err(&client->dev,
+ "Sensor_ID(%d) matches with NULL or INVALID CONFIG GROUP! NO Config Sent! You need to check your header file CFG_GROUP section!\n",
+ sensor_id);
+ return -EINVAL;
+ }
+ ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_CONFIG_DATA,
+ &opr_buf[0], 1);
+
+ if (ret == SUCCESS) {
+ if (opr_buf[0] < 90) {
+ /* backup group config version */
+ grp_cfg_version = send_cfg_buf[sensor_id][0];
+ send_cfg_buf[sensor_id][0] = 0x00;
+ ts->fixed_cfg = 0;
+ } else {
+ /* treated as fixed config, not send config */
+ dev_warn(&client->dev,
+ "Ic fixed config with config version(%d, 0x%02X)",
+ opr_buf[0], opr_buf[0]);
+ ts->fixed_cfg = 1;
+ }
+ } else {
+ dev_err(&client->dev,
+ "Failed to get ic config version! No config sent!");
+ return -EINVAL;
+ }
+
+ config_data = devm_kzalloc(&client->dev,
+ GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH,
+ GFP_KERNEL);
+ if (!config_data) {
+ dev_err(&client->dev,
+ "Not enough memory for panel config data\n");
+ return -ENOMEM;
+ }
+
+ ts->config_data = config_data;
+ config_data[0] = GTP_REG_CONFIG_DATA >> 8;
+ config_data[1] = GTP_REG_CONFIG_DATA & 0xff;
+ memset(&config_data[GTP_ADDR_LENGTH], 0, GTP_CONFIG_MAX_LENGTH);
+ memcpy(&config_data[GTP_ADDR_LENGTH], send_cfg_buf[sensor_id],
+ ts->gtp_cfg_len);
+
+#if GTP_CUSTOM_CFG
+ config_data[RESOLUTION_LOC] =
+ (unsigned char)(GTP_MAX_WIDTH & 0xFF);
+ config_data[RESOLUTION_LOC + 1] =
+ (unsigned char)(GTP_MAX_WIDTH >> 8);
+ config_data[RESOLUTION_LOC + 2] =
+ (unsigned char)(GTP_MAX_HEIGHT & 0xFF);
+ config_data[RESOLUTION_LOC + 3] =
+ (unsigned char)(GTP_MAX_HEIGHT >> 8);
+
+ if (GTP_INT_TRIGGER == 0)
+ config_data[TRIGGER_LOC] &= 0xfe;
+ else if (GTP_INT_TRIGGER == 1)
+ config_data[TRIGGER_LOC] |= 0x01;
+#endif /* GTP_CUSTOM_CFG */
+
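+ /* append the two's-complement checksum of the config payload as the final byte */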
+ check_sum = 0;
+ for (i = GTP_ADDR_LENGTH; i < ts->gtp_cfg_len; i++)
+ check_sum += config_data[i];
+
+ config_data[ts->gtp_cfg_len] = (~check_sum) + 1;
+
+#else /* DRIVER NOT SEND CONFIG */
+ ts->gtp_cfg_len = GTP_CONFIG_MAX_LENGTH;
+ ret = gtp_i2c_read(ts->client, config_data,
+ ts->gtp_cfg_len + GTP_ADDR_LENGTH);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Read Config Failed, Using DEFAULT Resolution & INT Trigger!\n");
+ ts->abs_x_max = GTP_MAX_WIDTH;
+ ts->abs_y_max = GTP_MAX_HEIGHT;
+ ts->int_trigger_type = GTP_INT_TRIGGER;
+ }
+#endif /* GTP_DRIVER_SEND_CFG */
+
+ GTP_DEBUG_FUNC();
+ if ((ts->abs_x_max == 0) && (ts->abs_y_max == 0)) {
+ ts->abs_x_max = (config_data[RESOLUTION_LOC + 1] << 8)
+ + config_data[RESOLUTION_LOC];
+ ts->abs_y_max = (config_data[RESOLUTION_LOC + 3] << 8)
+ + config_data[RESOLUTION_LOC + 2];
+ ts->int_trigger_type = (config_data[TRIGGER_LOC]) & 0x03;
+ }
+ ret = gtp_send_cfg(ts);
+ if (ret < 0)
+ dev_err(&client->dev, "%s: Send config error.\n", __func__);
+
+ GTP_DEBUG("X_MAX = %d, Y_MAX = %d, TRIGGER = 0x%02x",
+ ts->abs_x_max, ts->abs_y_max,
+ ts->int_trigger_type);
+
+ msleep(20);
+ return ret;
+}
+
+/*******************************************************
+Function:
+ Read chip version.
+Input:
+ client: i2c device
+ version: buffer to keep ic firmware version
+Output:
+ read operation return.
+ 2: succeed, otherwise: failed
+*******************************************************/
+int gtp_read_version(struct i2c_client *client, u16 *version)
+{
+ int ret = -EIO;
+ u8 buf[8] = { GTP_REG_VERSION >> 8, GTP_REG_VERSION & 0xff };
+
+ GTP_DEBUG_FUNC();
+
+ ret = gtp_i2c_read(client, buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(&client->dev, "GTP read version failed.\n");
+ return ret;
+ }
+
+ if (version)
+ *version = (buf[7] << 8) | buf[6];
+
+ if (buf[5] == 0x00) {
+ dev_dbg(&client->dev, "IC Version: %c%c%c_%02x%02x\n", buf[2],
+ buf[3], buf[4], buf[7], buf[6]);
+ } else {
+ if (buf[5] == 'S' || buf[5] == 's')
+ chip_gt9xxs = 1;
+ dev_dbg(&client->dev, "IC Version: %c%c%c%c_%02x%02x\n", buf[2],
+ buf[3], buf[4], buf[5], buf[7], buf[6]);
+ }
+ return ret;
+}
+
+/*******************************************************
+Function:
+ I2c test Function.
+Input:
+ client:i2c client.
+Output:
+ Executive outcomes.
+ 2: succeed, otherwise failed.
+*******************************************************/
+static int gtp_i2c_test(struct i2c_client *client)
+{
+ u8 buf[3] = { GTP_REG_CONFIG_DATA >> 8, GTP_REG_CONFIG_DATA & 0xff };
+ int retry = 5;
+ int ret = -EIO;
+
+ GTP_DEBUG_FUNC();
+
+ while (retry--) {
+ ret = gtp_i2c_read(client, buf, 3);
+ if (ret > 0)
+ return ret;
+ dev_err(&client->dev, "GTP i2c test failed time %d.\n", retry);
+ msleep(20);
+ }
+ return ret;
+}
+
+/*******************************************************
+Function:
+ Request gpio(INT & RST) ports.
+Input:
+ ts: private data.
+Output:
+ Executive outcomes.
+ = 0: succeed, != 0: failed
+*******************************************************/
+static int gtp_request_io_port(struct goodix_ts_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ struct goodix_ts_platform_data *pdata = ts->pdata;
+ int ret;
+
+ if (gpio_is_valid(pdata->irq_gpio)) {
+ ret = gpio_request(pdata->irq_gpio, "goodix_ts_irq_gpio");
+ if (ret) {
+ dev_err(&client->dev, "irq gpio request failed\n");
+ goto pwr_off;
+ }
+ ret = gpio_direction_input(pdata->irq_gpio);
+ if (ret) {
+ dev_err(&client->dev,
+ "set_direction for irq gpio failed\n");
+ goto free_irq_gpio;
+ }
+ } else {
+ dev_err(&client->dev, "irq gpio is invalid!\n");
+ ret = -EINVAL;
+ goto free_irq_gpio;
+ }
+
+ if (gpio_is_valid(pdata->reset_gpio)) {
+ ret = gpio_request(pdata->reset_gpio, "goodix_ts_reset_gpio");
+ if (ret) {
+ dev_err(&client->dev, "reset gpio request failed\n");
+ goto free_irq_gpio;
+ }
+
+ ret = gpio_direction_output(pdata->reset_gpio, 0);
+ if (ret) {
+ dev_err(&client->dev,
+ "set_direction for reset gpio failed\n");
+ goto free_reset_gpio;
+ }
+ } else {
+ dev_err(&client->dev, "reset gpio is invalid!\n");
+ ret = -EINVAL;
+ goto free_reset_gpio;
+ }
+ gpio_direction_input(pdata->reset_gpio);
+
+ return ret;
+
+free_reset_gpio:
+ if (gpio_is_valid(pdata->reset_gpio))
+ gpio_free(pdata->reset_gpio);
+free_irq_gpio:
+ if (gpio_is_valid(pdata->irq_gpio))
+ gpio_free(pdata->irq_gpio);
+pwr_off:
+ return ret;
+}
+
+/*******************************************************
+Function:
+ Request interrupt.
+Input:
+ ts: private data.
+Output:
+ Executive outcomes.
+ 0: succeed, -1: failed.
+*******************************************************/
+static int gtp_request_irq(struct goodix_ts_data *ts)
+{
+ int ret;
+ const u8 irq_table[] = GTP_IRQ_TAB;
+
+ GTP_DEBUG("INT trigger type:%x, irq=%d", ts->int_trigger_type,
+ ts->client->irq);
+
+ ret = request_irq(ts->client->irq, goodix_ts_irq_handler,
+ irq_table[ts->int_trigger_type],
+ ts->client->name, ts);
+ if (ret) {
+ dev_err(&ts->client->dev, "Request IRQ failed! ERRNO: %d.\n",
+ ret);
+ gpio_direction_input(ts->pdata->irq_gpio);
+
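+ /* fall back to hrtimer-driven polling when the IRQ cannot be requested */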
+ hrtimer_init(&ts->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ts->timer.function = goodix_ts_timer_handler;
+ hrtimer_start(&ts->timer, ktime_set(1, 0),
+ HRTIMER_MODE_REL);
+ ts->use_irq = false;
+ return ret;
+ }
+ gtp_irq_disable(ts);
+ ts->use_irq = true;
+ return 0;
+}
+
+/*******************************************************
+Function:
+ Request input device Function.
+Input:
+ ts:private data.
+Output:
+ Executive outcomes.
+ 0: succeed, otherwise: failed.
+*******************************************************/
+static int gtp_request_input_dev(struct goodix_ts_data *ts)
+{
+ int ret;
+ char phys[PHY_BUF_SIZE];
+#if GTP_HAVE_TOUCH_KEY
+ int index = 0;
+#endif
+
+ GTP_DEBUG_FUNC();
+
+ ts->input_dev = input_allocate_device();
+ if (ts->input_dev == NULL) {
+ dev_err(&ts->client->dev,
+ "Failed to allocate input device.\n");
+ return -ENOMEM;
+ }
+
+ ts->input_dev->evbit[0] =
+ BIT_MASK(EV_SYN) | BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+#if GTP_ICS_SLOT_REPORT
+ __set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
+ input_mt_init_slots(ts->input_dev, 10);/* in case of "out of memory" */
+#else
+ ts->input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+#endif
+
+#if GTP_HAVE_TOUCH_KEY
+ for (index = 0; index < ARRAY_SIZE(touch_key_array); index++) {
+ input_set_capability(ts->input_dev,
+ EV_KEY, touch_key_array[index]);
+ }
+#endif
+
+#if GTP_SLIDE_WAKEUP
+ input_set_capability(ts->input_dev, EV_KEY, KEY_POWER);
+#endif
+
+#if GTP_WITH_PEN
+ /* pen support */
+ __set_bit(BTN_TOOL_PEN, ts->input_dev->keybit);
+ __set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
+ __set_bit(INPUT_PROP_POINTER, ts->input_dev->propbit);
+#endif
+
+#if GTP_CHANGE_X2Y
+ GTP_SWAP(ts->abs_x_max, ts->abs_y_max);
+#endif
+
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X,
+ 0, ts->abs_x_max, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y,
+ 0, ts->abs_y_max, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR,
+ 0, 255, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, 255, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TRACKING_ID,
+ 0, 255, 0, 0);
+
+ snprintf(phys, PHY_BUF_SIZE, "input/ts");
+ ts->input_dev->name = GOODIX_DEV_NAME;
+ ts->input_dev->phys = phys;
+ ts->input_dev->id.bustype = BUS_I2C;
+ ts->input_dev->id.vendor = 0xDEAD;
+ ts->input_dev->id.product = 0xBEEF;
+ ts->input_dev->id.version = 10427;
+
+ ret = input_register_device(ts->input_dev);
+ if (ret) {
+ dev_err(&ts->client->dev,
+ "Register %s input device failed.\n",
+ ts->input_dev->name);
+ goto exit_free_inputdev;
+ }
+
+ return 0;
+
+exit_free_inputdev:
+ input_free_device(ts->input_dev);
+ ts->input_dev = NULL;
+ return ret;
+}
+
+/*******************************************************
+Function:
+ I2c probe.
+Input:
+ client: i2c device struct.
+ id: device id.
+Output:
+ Executive outcomes.
+ 0: succeed.
+*******************************************************/
+
+static int goodix_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct goodix_ts_data *ts;
+ u16 version_info;
+ int ret;
+
+ dev_dbg(&client->dev, "GTP I2C Address: 0x%02x\n", client->addr);
+
+#if GTP_ESD_PROTECT
+ i2c_connect_client = client;
+#endif
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "GTP I2C not supported\n");
+ return -ENODEV;
+ }
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->client = client;
+ /* For kernel 2.6.39 and later, use spin_lock_init(&ts->irq_lock);
+ * for 2.6.39 and before, use ts->irq_lock = SPIN_LOCK_UNLOCKED
+ */
+ spin_lock_init(&ts->irq_lock);
+ i2c_set_clientdata(client, ts);
+
+ ts->gtp_rawdiff_mode = 0;
+
+ ret = gtp_request_io_port(ts);
+ if (ret) {
+ dev_err(&client->dev, "GTP request IO port failed.\n");
+ goto exit_power_off;
+ }
+
+ gtp_reset_guitar(ts, 20);
+
+ ret = gtp_i2c_test(client);
+ if (ret != 2) {
+ dev_err(&client->dev, "I2C communication ERROR!\n");
+ goto exit_free_io_port;
+ }
+
+#if GTP_AUTO_UPDATE
+ ret = gup_init_update_proc(ts);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "GTP Create firmware update thread error.\n");
+ goto exit_free_io_port;
+ }
+#endif
+
+ ret = gtp_init_panel(ts);
+ if (ret < 0) {
+ dev_err(&client->dev, "GTP init panel failed.\n");
+ ts->abs_x_max = GTP_MAX_WIDTH;
+ ts->abs_y_max = GTP_MAX_HEIGHT;
+ ts->int_trigger_type = GTP_INT_TRIGGER;
+ }
+
+ ret = gtp_request_input_dev(ts);
+ if (ret) {
+ dev_err(&client->dev, "GTP request input dev failed.\n");
+ goto exit_free_inputdev;
+ }
+
+ ts->goodix_wq = create_singlethread_workqueue("goodix_wq");
+ INIT_WORK(&ts->work, goodix_ts_work_func);
+
+ ret = gtp_request_irq(ts);
+ if (ret < 0)
+ dev_info(&client->dev, "GTP works in polling mode.\n");
+ else
+ dev_info(&client->dev, "GTP works in interrupt mode.\n");
+
+ ret = gtp_read_version(client, &version_info);
+ if (ret != 2) {
+ dev_err(&client->dev, "Read version failed.\n");
+ goto exit_free_irq;
+ }
+ if (ts->use_irq)
+ gtp_irq_enable(ts);
+
+#if GTP_CREATE_WR_NODE
+ init_wr_node(client);
+#endif
+
+#if GTP_ESD_PROTECT
+ gtp_esd_switch(client, SWITCH_ON);
+#endif
+ init_done = true;
+ return 0;
+exit_free_irq:
+ if (ts->use_irq)
+ free_irq(client->irq, ts);
+ else
+ hrtimer_cancel(&ts->timer);
+ cancel_work_sync(&ts->work);
+ flush_workqueue(ts->goodix_wq);
+ destroy_workqueue(ts->goodix_wq);
+
+ input_unregister_device(ts->input_dev);
+ if (ts->input_dev) {
+ input_free_device(ts->input_dev);
+ ts->input_dev = NULL;
+ }
+exit_free_inputdev:
+ kfree(ts->config_data);
+exit_free_io_port:
+exit_power_off:
+ i2c_set_clientdata(client, NULL);
+ kfree(ts);
+ return ret;
+}
+
+/*******************************************************
+Function:
+ Goodix touchscreen driver release function.
+Input:
+ client: i2c device struct.
+Output:
+ Executive outcomes. 0---succeed.
+*******************************************************/
+static int goodix_ts_remove(struct i2c_client *client)
+{
+ struct goodix_ts_data *ts = i2c_get_clientdata(client);
+
+ GTP_DEBUG_FUNC();
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&ts->early_suspend);
+#endif
+
+#if GTP_CREATE_WR_NODE
+ uninit_wr_node();
+#endif
+
+#if GTP_ESD_PROTECT
+ cancel_delayed_work_sync(&gtp_esd_check_work);
+ flush_workqueue(gtp_esd_check_workqueue);
+ destroy_workqueue(gtp_esd_check_workqueue);
+#endif
+
+ if (ts) {
+ if (ts->use_irq)
+ free_irq(client->irq, ts);
+ else
+ hrtimer_cancel(&ts->timer);
+
+ cancel_work_sync(&ts->work);
+ flush_workqueue(ts->goodix_wq);
+ destroy_workqueue(ts->goodix_wq);
+
+ input_unregister_device(ts->input_dev);
+ if (ts->input_dev) {
+ input_free_device(ts->input_dev);
+ ts->input_dev = NULL;
+ }
+ kfree(ts->config_data);
+
+ if (gpio_is_valid(ts->pdata->reset_gpio))
+ gpio_free(ts->pdata->reset_gpio);
+ if (gpio_is_valid(ts->pdata->irq_gpio))
+ gpio_free(ts->pdata->irq_gpio);
+
+ i2c_set_clientdata(client, NULL);
+ kfree(ts);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*******************************************************
+Function:
+ Early suspend function.
+Input:
+ h: early_suspend struct.
+Output:
+ None.
+*******************************************************/
+static void goodix_ts_early_suspend(struct early_suspend *h)
+{
+ struct goodix_ts_data *ts;
+ s8 ret = -1;
+
+ ts = container_of(h, struct goodix_ts_data, early_suspend);
+
+ GTP_DEBUG_FUNC();
+
+#if GTP_ESD_PROTECT
+ ts->gtp_is_suspend = 1;
+ gtp_esd_switch(ts->client, SWITCH_OFF);
+#endif
+
+#if GTP_SLIDE_WAKEUP
+ ret = gtp_enter_doze(ts);
+#else
+ if (ts->use_irq)
+ gtp_irq_disable(ts);
+ else
+ hrtimer_cancel(&ts->timer);
+ ret = gtp_enter_sleep(ts);
+#endif
+ if (ret < 0)
+ dev_err(&ts->client->dev, "GTP early suspend failed.\n");
+ /* to avoid waking up while not sleeping,
+ * delay 48 + 10ms to ensure reliability
+ */
+ msleep(58);
+}
+
+/*******************************************************
+Function:
+ Late resume function.
+Input:
+ h: early_suspend struct.
+Output:
+ None.
+*******************************************************/
+static void goodix_ts_late_resume(struct early_suspend *h)
+{
+ struct goodix_ts_data *ts;
+ s8 ret = -1;
+
+ ts = container_of(h, struct goodix_ts_data, early_suspend);
+
+ GTP_DEBUG_FUNC();
+
+ ret = gtp_wakeup_sleep(ts);
+
+#if GTP_SLIDE_WAKEUP
+ doze_status = DOZE_DISABLED;
+#endif
+
+ if (ret < 0)
+ dev_err(&ts->client->dev, "GTP later resume failed.\n");
+
+ if (ts->use_irq)
+ gtp_irq_enable(ts);
+ else
+ hrtimer_start(&ts->timer,
+ ktime_set(1, 0), HRTIMER_MODE_REL);
+
+#if GTP_ESD_PROTECT
+ ts->gtp_is_suspend = 0;
+ gtp_esd_switch(ts->client, SWITCH_ON);
+#endif
+}
+#endif
+
+#if GTP_ESD_PROTECT
+/*******************************************************
+Function:
+ switch on & off esd delayed work
+Input:
+ client: i2c device
+ on: SWITCH_ON / SWITCH_OFF
+Output:
+ void
+*********************************************************/
+void gtp_esd_switch(struct i2c_client *client, int on)
+{
+ struct goodix_ts_data *ts;
+
+ ts = i2c_get_clientdata(client);
+ if (on == SWITCH_ON) {
+ /* switch on esd */
+ if (!ts->esd_running) {
+ ts->esd_running = 1;
+ dev_dbg(&client->dev, "Esd started\n");
+ queue_delayed_work(gtp_esd_check_workqueue,
+ &gtp_esd_check_work, GTP_ESD_CHECK_CIRCLE);
+ }
+ } else {
+ /* switch off esd */
+ if (ts->esd_running) {
+ ts->esd_running = 0;
+ dev_dbg(&client->dev, "Esd cancelled\n");
+ cancel_delayed_work_sync(&gtp_esd_check_work);
+ }
+ }
+}
+
+/*******************************************************
+Function:
+ Initialize external watchdog for esd protect
+Input:
+ client: i2c device.
+Output:
+ result of i2c write operation.
+ 1: succeed, otherwise: failed
+*********************************************************/
+static int gtp_init_ext_watchdog(struct i2c_client *client)
+{
+ /* in case of recursively reset by calling gtp_i2c_write*/
+ struct i2c_msg msg;
+ u8 opr_buffer[4] = {0x80, 0x40, 0xAA, 0xAA};
+ int ret;
+ int retries = 0;
+
+ GTP_DEBUG("Init external watchdog...");
+ GTP_DEBUG_FUNC();
+
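+ /* arm the external watchdog by writing 0xAA to 0x8040/0x8041 */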
+ msg.flags = !I2C_M_RD;
+ msg.addr = client->addr;
+ msg.len = 4;
+ msg.buf = opr_buffer;
+
+ while (retries < 5) {
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret == 1)
+ return 1;
+ retries++;
+ }
+ if (retries >= 5)
+ dev_err(&client->dev, "init external watchdog failed!");
+ return 0;
+}
+
+/*******************************************************
+Function:
+ Esd protect function.
+ Added external watchdog by meta, 2013/03/07
+Input:
+ work: delayed work
+Output:
+ None.
+*******************************************************/
+static void gtp_esd_check_func(struct work_struct *work)
+{
+ s32 i;
+ s32 ret = -1;
+ struct goodix_ts_data *ts = NULL;
+ u8 test[4] = {0x80, 0x40};
+
+ GTP_DEBUG_FUNC();
+
+ ts = i2c_get_clientdata(i2c_connect_client);
+
+ if (ts->gtp_is_suspend) {
+ dev_dbg(&ts->client->dev, "Esd terminated!\n");
+ ts->esd_running = 0;
+ return;
+ }
+#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
+ if (ts->enter_update)
+ return;
+#endif
+
+ for (i = 0; i < 3; i++) {
+ ret = gtp_i2c_read(ts->client, test, 4);
+
+ GTP_DEBUG("0x8040 = 0x%02X, 0x8041 = 0x%02X", test[2], test[3]);
+ if (ret < 0) {
+ /* IC works abnormally..*/
+ continue;
+ } else {
+ if ((test[2] == 0xAA) || (test[3] != 0xAA)) {
+ /* IC works abnormally..*/
+ i = 3;
+ break;
+ }
+ /* IC works normally, Write 0x8040 0xAA*/
+ test[2] = 0xAA;
+ gtp_i2c_write(ts->client, test, 3);
+ break;
+ }
+ }
+ if (i >= 3) {
+ dev_err(&ts->client->dev,
+ "IC Working ABNORMALLY, Resetting Guitar...\n");
+ gtp_reset_guitar(ts, 50);
+ }
+
+ if (!ts->gtp_is_suspend)
+ queue_delayed_work(gtp_esd_check_workqueue,
+ &gtp_esd_check_work, GTP_ESD_CHECK_CIRCLE);
+ else {
+ dev_dbg(&ts->client->dev, "Esd terminated!\n");
+ ts->esd_running = 0;
+ }
+
+ return;
+}
+#endif
+
+static const struct i2c_device_id goodix_ts_id[] = {
+ { GTP_I2C_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver goodix_ts_driver = {
+ .probe = goodix_ts_probe,
+ .remove = goodix_ts_remove,
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ .suspend = goodix_ts_early_suspend,
+ .resume = goodix_ts_late_resume,
+#endif
+ .id_table = goodix_ts_id,
+ .driver = {
+ .name = GTP_I2C_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*******************************************************
+Function:
+ Driver Install function.
+Input:
+ None.
+Output:
+ Executive Outcomes. 0---succeed.
+********************************************************/
+static int __init goodix_ts_init(void)
+{
+ int ret;
+
+ GTP_DEBUG_FUNC();
+#if GTP_ESD_PROTECT
+ INIT_DELAYED_WORK(&gtp_esd_check_work, gtp_esd_check_func);
+ gtp_esd_check_workqueue = create_workqueue("gtp_esd_check");
+#endif
+ ret = i2c_add_driver(&goodix_ts_driver);
+ return ret;
+}
+
+/*******************************************************
+Function:
+ Driver uninstall function.
+Input:
+ None.
+Output:
+ Executive Outcomes. 0---succeed.
+********************************************************/
+static void __exit goodix_ts_exit(void)
+{
+ GTP_DEBUG_FUNC();
+ i2c_del_driver(&goodix_ts_driver);
+}
+
+late_initcall(goodix_ts_init);
+module_exit(goodix_ts_exit);
+
+MODULE_DESCRIPTION("GTP Series Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.h b/drivers/input/touchscreen/gt9xx/gt9xx.h
new file mode 100644
index 000000000000..48fa2ad2faca
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.h
@@ -0,0 +1,270 @@
+/* drivers/input/touchscreen/gt9xx.h
+ *
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * 2010 - 2013 Goodix Technology.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be a reference
+ * to you, when you are integrating the GOODiX's CTP IC into your system,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef _GOODIX_GT9XX_H_
+#define _GOODIX_GT9XX_H_
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/firmware.h>
+#include <linux/debugfs.h>
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#define GOODIX_SUSPEND_LEVEL 1
+#endif
+
+struct goodix_ts_platform_data {
+ int irq_gpio;
+ u32 irq_gpio_flags;
+ int reset_gpio;
+ u32 reset_gpio_flags;
+ int ldo_en_gpio;
+ u32 ldo_en_gpio_flags;
+ u32 family_id;
+ u32 x_max;
+ u32 y_max;
+ u32 x_min;
+ u32 y_min;
+ u32 panel_minx;
+ u32 panel_miny;
+ u32 panel_maxx;
+ u32 panel_maxy;
+ bool no_force_update;
+ bool i2c_pull_up;
+};
+struct goodix_ts_data {
+ spinlock_t irq_lock;
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ struct goodix_ts_platform_data *pdata;
+ struct hrtimer timer;
+ struct workqueue_struct *goodix_wq;
+ struct work_struct work;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ struct early_suspend early_suspend;
+#endif
+ s32 irq_is_disabled;
+ s32 use_irq;
+ u16 abs_x_max;
+ u16 abs_y_max;
+ u8 max_touch_num;
+ u8 int_trigger_type;
+ u8 green_wake_mode;
+ u8 chip_type;
+ u8 *config_data;
+ u8 enter_update;
+ u8 gtp_is_suspend;
+ u8 gtp_rawdiff_mode;
+ u8 gtp_cfg_len;
+ u8 fixed_cfg;
+ u8 esd_running;
+ u8 fw_error;
+};
+
+extern u16 show_len;
+extern u16 total_len;
+
+/***************************PART1:ON/OFF define*******************************/
+#define GTP_CUSTOM_CFG 0
+#define GTP_CHANGE_X2Y 0
+#define GTP_DRIVER_SEND_CFG 1
+#define GTP_HAVE_TOUCH_KEY 1
+#define GTP_POWER_CTRL_SLEEP 1
+#define GTP_ICS_SLOT_REPORT 0
+
+/* auto updated by .bin file as default */
+#define GTP_AUTO_UPDATE 0
+/* auto updated by head_fw_array in gt9xx_firmware.h,
+ * function together with GTP_AUTO_UPDATE
+ */
+#define GTP_HEADER_FW_UPDATE 0
+
+#define GTP_CREATE_WR_NODE 0
+#define GTP_ESD_PROTECT 0
+#define GTP_WITH_PEN 0
+
+#define GTP_SLIDE_WAKEUP 0
+/* double-click wakeup, function together with GTP_SLIDE_WAKEUP */
+#define GTP_DBL_CLK_WAKEUP 0
+
+#define GTP_DEBUG_ON 1
+#define GTP_DEBUG_ARRAY_ON 0
+#define GTP_DEBUG_FUNC_ON 0
+
+/*************************** PART2:TODO define *******************************/
+/* STEP_1(REQUIRED): Define Configuration Information Group(s) */
+/* Sensor_ID Map: */
+/* sensor_opt1 sensor_opt2 Sensor_ID
+ * GND GND 0
+ * VDDIO GND 1
+ * NC GND 2
+ * GND NC/300K 3
+ * VDDIO NC/300K 4
+ * NC NC/300K 5
+*/
+/* Define your own default or for Sensor_ID == 0 config here */
+/* The predefined one is just a sample config,
+ * which is not suitable for your tp in most cases.
+ */
+#define CTP_CFG_GROUP1 {\
+ 0x41, 0x1C, 0x02, 0xC0, 0x03, 0x0A, 0x05, 0x01, 0x01, 0x0F,\
+ 0x23, 0x0F, 0x5F, 0x41, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x00, 0x0A,\
+ 0x28, 0x00, 0xB8, 0x0B, 0x00, 0x00, 0x00, 0x9A, 0x03, 0x25,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x64, 0x32, 0x00, 0x00,\
+ 0x00, 0x32, 0x8C, 0x94, 0x05, 0x01, 0x05, 0x00, 0x00, 0x96,\
+ 0x0C, 0x22, 0xD8, 0x0E, 0x23, 0x56, 0x11, 0x25, 0xFF, 0x13,\
+ 0x28, 0xA7, 0x15, 0x2E, 0x00, 0x00, 0x10, 0x30, 0x48, 0x00,\
+ 0x56, 0x4A, 0x3A, 0xFF, 0xFF, 0x16, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x01, 0x1B, 0x14, 0x0D, 0x19, 0x00, 0x00, 0x01, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x1A, 0x18, 0x16, 0x14, 0x12, 0x10, 0x0E, 0x0C,\
+ 0x0A, 0x08, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
+ 0xFF, 0xFF, 0x1D, 0x1E, 0x1F, 0x20, 0x22, 0x24, 0x28, 0x29,\
+ 0x0C, 0x0A, 0x08, 0x00, 0x02, 0x04, 0x05, 0x06, 0x0E, 0xFF,\
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x01\
+ }
+
+/* Define your config for Sensor_ID == 1 here, if needed */
+#define CTP_CFG_GROUP2 {\
+ }
+
+/* Define your config for Sensor_ID == 2 here, if needed */
+#define CTP_CFG_GROUP3 {\
+ }
+
+/* Define your config for Sensor_ID == 3 here, if needed */
+#define CTP_CFG_GROUP4 {\
+ }
+
+/* Define your config for Sensor_ID == 4 here, if needed */
+#define CTP_CFG_GROUP5 {\
+ }
+
+/* Define your config for Sensor_ID == 5 here, if needed */
+#define CTP_CFG_GROUP6 {\
+ }
+
+#define GTP_IRQ_TAB {\
+ IRQ_TYPE_EDGE_RISING,\
+ IRQ_TYPE_EDGE_FALLING,\
+ IRQ_TYPE_LEVEL_LOW,\
+ IRQ_TYPE_LEVEL_HIGH\
+ }
+
+/* STEP_3(optional): Specify your special config info if needed */
+#define GTP_IRQ_TAB_RISING 0
+#define GTP_IRQ_TAB_FALLING 1
+#if GTP_CUSTOM_CFG
+#define GTP_MAX_HEIGHT 800
+#define GTP_MAX_WIDTH 480
+#define GTP_INT_TRIGGER GTP_IRQ_TAB_RISING
+#else
+#define GTP_MAX_HEIGHT 4096
+#define GTP_MAX_WIDTH 4096
+#define GTP_INT_TRIGGER GTP_IRQ_TAB_FALLING
+#endif
+
+#define GTP_MAX_TOUCH 5
+#define GTP_ESD_CHECK_CIRCLE 2000 /* ms, passed directly as the jiffies delay to queue_delayed_work() */
+
+/***************************PART3:OTHER define*********************************/
+#define GTP_DRIVER_VERSION "V1.8<2013/06/08>"
+#define GTP_I2C_NAME "Goodix-TS"
+#define GTP_POLL_TIME 10 /* jiffy: ms*/
+#define GTP_ADDR_LENGTH 2
+#define GTP_CONFIG_MIN_LENGTH 186
+#define GTP_CONFIG_MAX_LENGTH 240
+#define FAIL 0
+#define SUCCESS 1
+#define SWITCH_OFF 0
+#define SWITCH_ON 1
+
+/* Registers define */
+#define GTP_READ_COOR_ADDR 0x814E
+#define GTP_REG_SLEEP 0x8040
+#define GTP_REG_SENSOR_ID 0x814A
+#define GTP_REG_CONFIG_DATA 0x8047
+#define GTP_REG_VERSION 0x8140
+
+#define RESOLUTION_LOC 3
+#define TRIGGER_LOC 8
+
+/* Log define */
+#define GTP_DEBUG(fmt, arg...) do {\
+ if (GTP_DEBUG_ON) {\
+ pr_debug("<<-GTP-DEBUG->> [%d]"fmt"\n",\
+ __LINE__, ##arg); } \
+ } while (0)
+
+#define GTP_DEBUG_ARRAY(array, num) do {\
+ s32 i; \
+ u8 *a = array; \
+ if (GTP_DEBUG_ARRAY_ON) {\
+ pr_debug("<<-GTP-DEBUG-ARRAY->>\n");\
+ for (i = 0; i < (num); i++) { \
+ pr_debug("%02x ", (a)[i]);\
+ if ((i + 1) % 10 == 0) { \
+ pr_debug("\n");\
+ } \
+ } \
+ pr_debug("\n");\
+ } \
+ } while (0)
+
+#define GTP_DEBUG_FUNC() do {\
+ if (GTP_DEBUG_FUNC_ON)\
+ pr_debug("<<-GTP-FUNC->> Func:%s@Line:%d\n",\
+ __func__, __LINE__);\
+ } while (0)
+
+#define GTP_SWAP(x, y) do {\
+ typeof(x) z = x;\
+ x = y;\
+ y = z;\
+ } while (0)
+/*****************************End of Part III********************************/
+
+void gtp_esd_switch(struct i2c_client *client, int on);
+
+#if GTP_CREATE_WR_NODE
+extern s32 init_wr_node(struct i2c_client *client);
+extern void uninit_wr_node(void);
+#endif
+
+#if GTP_AUTO_UPDATE
+extern u8 gup_init_update_proc(struct goodix_ts_data *ts);
+#endif
+#endif /* _GOODIX_GT9XX_H_ */
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx_firmware.h b/drivers/input/touchscreen/gt9xx/gt9xx_firmware.h
new file mode 100644
index 000000000000..3998bf0023f8
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/gt9xx_firmware.h
@@ -0,0 +1,6 @@
+/* Meaningful only when GTP_HEADER_FW_UPDATE & GTP_AUTO_UPDATE are enabled;
+ * define your own firmware array here.
+ */
+const unsigned char header_fw_array[] =
+{
+
+}; \ No newline at end of file
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx_update.c b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
new file mode 100644
index 000000000000..f564a6b3aaed
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
@@ -0,0 +1,1930 @@
+/* drivers/input/touchscreen/gt9xx_update.c
+ *
+ * 2010 - 2012 Goodix Technology.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be a reference
+ * to you, when you are integrating the GOODiX's CTP IC into your system,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Latest Version:1.6
+ * Author: andrew@goodix.com
+ * Revision Record:
+ * V1.0:
+ * first release. By Andrew, 2012/08/31
+ * V1.2:
+ * add force update,GT9110P pid map. By Andrew, 2012/10/15
+ * V1.4:
+ * 1. add config auto update function;
+ * 2. modify enter_update_mode;
+ * 3. add update file cal checksum.
+ * By Andrew, 2012/12/12
+ * V1.6:
+ * 1. replace guitar_client with i2c_connect_client;
+ * 2. support firmware header array update.
+ * By Meta, 2013/03/11
+ */
+#include <linux/kthread.h>
+#include "gt9xx.h"
+
+#if GTP_HEADER_FW_UPDATE
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include "gt9xx_firmware.h"
+#endif
+
+#define GUP_REG_HW_INFO 0x4220
+#define GUP_REG_FW_MSG 0x41E4
+#define GUP_REG_PID_VID 0x8140
+
+#define GUP_SEARCH_FILE_TIMES 50
+#define UPDATE_FILE_PATH_2 "/data/_goodix_update_.bin"
+#define UPDATE_FILE_PATH_1 "/sdcard/_goodix_update_.bin"
+
+#define CONFIG_FILE_PATH_1 "/data/_goodix_config_.cfg"
+#define CONFIG_FILE_PATH_2 "/sdcard/_goodix_config_.cfg"
+
+#define FW_HEAD_LENGTH 14
+#define FW_SECTION_LENGTH 0x2000
+#define FW_DSP_ISP_LENGTH 0x1000
+#define FW_DSP_LENGTH 0x1000
+#define FW_BOOT_LENGTH 0x800
+
+#define PACK_SIZE 256
+#define MAX_FRAME_CHECK_TIME 5
+
+#define _bRW_MISCTL__SRAM_BANK 0x4048
+#define _bRW_MISCTL__MEM_CD_EN 0x4049
+#define _bRW_MISCTL__CACHE_EN 0x404B
+#define _bRW_MISCTL__TMR0_EN 0x40B0
+#define _rRW_MISCTL__SWRST_B0_ 0x4180
+#define _bWO_MISCTL__CPU_SWRST_PULSE 0x4184
+#define _rRW_MISCTL__BOOTCTL_B0_ 0x4190
+#define _rRW_MISCTL__BOOT_OPT_B0_ 0x4218
+#define _rRW_MISCTL__BOOT_CTL_ 0x5094
+
+#define FAIL 0
+#define SUCCESS 1
+
+#pragma pack(1)
+typedef struct
+{
+ u8 hw_info[4]; //hardware info//
+ u8 pid[8]; //product id //
+ u16 vid; //version id //
+}st_fw_head;
+#pragma pack()
+
+typedef struct
+{
+ u8 force_update;
+ u8 fw_flag;
+ struct file *file;
+ struct file *cfg_file;
+ st_fw_head ic_fw_msg;
+ mm_segment_t old_fs;
+}st_update_msg;
+
+st_update_msg update_msg;
+u16 show_len;
+u16 total_len;
+u8 got_file_flag = 0;
+u8 searching_file = 0;
+extern u8 config[GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH];
+extern void gtp_reset_guitar(struct i2c_client *client, s32 ms);
+extern s32 gtp_send_cfg(struct i2c_client *client);
+extern struct i2c_client * i2c_connect_client;
+extern void gtp_irq_enable(struct goodix_ts_data *ts);
+extern void gtp_irq_disable(struct goodix_ts_data *ts);
+extern s32 gtp_i2c_read_dbl_check(struct i2c_client *, u16, u8 *, int);
+#if GTP_ESD_PROTECT
+extern void gtp_esd_switch(struct i2c_client *, s32);
+#endif
+/*******************************************************
+Function:
+ Read data from the i2c slave device.
+Input:
+ client: i2c device.
+ buf[0~1]: read start address.
+ buf[2~len-1]: read data buffer.
+ len: GTP_ADDR_LENGTH + read bytes count
+Output:
+ numbers of i2c_msgs to transfer:
+ 2: succeed, otherwise: failed
+*********************************************************/
+s32 gup_i2c_read(struct i2c_client *client, u8 *buf, s32 len)
+{
+ struct i2c_msg msgs[2];
+ s32 ret=-1;
+ s32 retries = 0;
+
+ GTP_DEBUG_FUNC();
+
+ msgs[0].flags = !I2C_M_RD;
+ msgs[0].addr = client->addr;
+ msgs[0].len = GTP_ADDR_LENGTH;
+ msgs[0].buf = &buf[0];
+ //msgs[0].scl_rate = 300 * 1000; // for Rockchip
+
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].addr = client->addr;
+ msgs[1].len = len - GTP_ADDR_LENGTH;
+ msgs[1].buf = &buf[GTP_ADDR_LENGTH];
+ //msgs[1].scl_rate = 300 * 1000;
+
+ while(retries < 5)
+ {
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if(ret == 2)break;
+ retries++;
+ }
+
+ return ret;
+}
+
+/*******************************************************
+Function:
+ Write data to the i2c slave device.
+Input:
+ client: i2c device.
+ buf[0~1]: write start address.
+ buf[2~len-1]: data buffer
+ len: GTP_ADDR_LENGTH + write bytes count
+Output:
+ numbers of i2c_msgs to transfer:
+ 1: succeed, otherwise: failed
+*********************************************************/
+s32 gup_i2c_write(struct i2c_client *client,u8 *buf,s32 len)
+{
+ struct i2c_msg msg;
+ s32 ret=-1;
+ s32 retries = 0;
+
+ GTP_DEBUG_FUNC();
+
+ msg.flags = !I2C_M_RD;
+ msg.addr = client->addr;
+ msg.len = len;
+ msg.buf = buf;
+ //msg.scl_rate = 300 * 1000; // for Rockchip
+
+ while(retries < 5)
+ {
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret == 1)break;
+ retries++;
+ }
+
+ return ret;
+}
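
A minimal sketch (not in the patch) of the buffer convention both helpers expect: the 16-bit register address sits in buf[0..1], data starts at buf[GTP_ADDR_LENGTH], and the return value is the number of i2c_msgs transferred. It assumes GTP_ADDR_LENGTH == 2, as gt9xx.h conventionally defines it; gup_get_ic_msg()/gup_set_ic_msg() further down are the real in-tree users.

static int gup_example_read_reg(struct i2c_client *client, u16 reg,
				u8 *out, int len)
{
	u8 buf[GTP_ADDR_LENGTH + 16];		/* sketch: assumes len <= 16 */

	buf[0] = (reg >> 8) & 0xff;		/* register address, big endian */
	buf[1] = reg & 0xff;
	if (gup_i2c_read(client, buf, GTP_ADDR_LENGTH + len) != 2)
		return -EIO;			/* 2 msgs transferred == success */
	memcpy(out, &buf[GTP_ADDR_LENGTH], len);
	return 0;
}
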
+
+static s32 gup_init_panel(struct goodix_ts_data *ts)
+{
+ s32 ret = 0;
+ s32 i = 0;
+ u8 check_sum = 0;
+ u8 opr_buf[16];
+ u8 sensor_id = 0;
+
+ u8 cfg_info_group1[] = CTP_CFG_GROUP1;
+ u8 cfg_info_group2[] = CTP_CFG_GROUP2;
+ u8 cfg_info_group3[] = CTP_CFG_GROUP3;
+ u8 cfg_info_group4[] = CTP_CFG_GROUP4;
+ u8 cfg_info_group5[] = CTP_CFG_GROUP5;
+ u8 cfg_info_group6[] = CTP_CFG_GROUP6;
+ u8 *send_cfg_buf[] = {cfg_info_group1, cfg_info_group2, cfg_info_group3,
+ cfg_info_group4, cfg_info_group5, cfg_info_group6};
+ u8 cfg_info_len[] = { CFG_GROUP_LEN(cfg_info_group1),
+ CFG_GROUP_LEN(cfg_info_group2),
+ CFG_GROUP_LEN(cfg_info_group3),
+ CFG_GROUP_LEN(cfg_info_group4),
+ CFG_GROUP_LEN(cfg_info_group5),
+ CFG_GROUP_LEN(cfg_info_group6)};
+
+ if ((!cfg_info_len[1]) && (!cfg_info_len[2]) &&
+ (!cfg_info_len[3]) && (!cfg_info_len[4]) &&
+ (!cfg_info_len[5]))
+ {
+ sensor_id = 0;
+ }
+ else
+ {
+ ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_SENSOR_ID, &sensor_id, 1);
+ if (SUCCESS == ret)
+ {
+ if (sensor_id >= 0x06)
+ {
+ GTP_ERROR("Invalid sensor_id(0x%02X), No Config Sent!", sensor_id);
+ return -1;
+ }
+ }
+ else
+ {
+ GTP_ERROR("Failed to get sensor_id, No config sent!");
+ return -1;
+ }
+ }
+
+ GTP_DEBUG("Sensor_ID: %d", sensor_id);
+
+ ts->gtp_cfg_len = cfg_info_len[sensor_id];
+
+ if (ts->gtp_cfg_len < GTP_CONFIG_MIN_LENGTH)
+ {
+        GTP_ERROR("Sensor_ID(%d) matches with NULL or INVALID CONFIG GROUP! NO Config Sent! You need to check your header file CFG_GROUP section!", sensor_id);
+ return -1;
+ }
+
+ ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_CONFIG_DATA, &opr_buf[0], 1);
+
+ if (ret == SUCCESS)
+ {
+ GTP_DEBUG("CFG_GROUP%d Config Version: %d, IC Config Version: %d", sensor_id+1,
+ send_cfg_buf[sensor_id][0], opr_buf[0]);
+
+ send_cfg_buf[sensor_id][0] = opr_buf[0];
+ ts->fixed_cfg = 0;
+ /*
+ if (opr_buf[0] < 90)
+ {
+ grp_cfg_version = send_cfg_buf[sensor_id][0]; // backup group config version
+ send_cfg_buf[sensor_id][0] = 0x00;
+ ts->fixed_cfg = 0;
+ }
+ else // treated as fixed config, not send config
+ {
+ GTP_INFO("Ic fixed config with config version(%d)", opr_buf[0]);
+ ts->fixed_cfg = 1;
+ }*/
+ }
+ else
+ {
+        GTP_ERROR("Failed to get IC config version! No config sent!");
+ return -1;
+ }
+
+ memset(&config[GTP_ADDR_LENGTH], 0, GTP_CONFIG_MAX_LENGTH);
+ memcpy(&config[GTP_ADDR_LENGTH], send_cfg_buf[sensor_id], ts->gtp_cfg_len);
+
+ GTP_DEBUG("X_MAX = %d, Y_MAX = %d, TRIGGER = 0x%02x",
+ ts->abs_x_max, ts->abs_y_max, ts->int_trigger_type);
+
+ config[RESOLUTION_LOC] = (u8)GTP_MAX_WIDTH;
+ config[RESOLUTION_LOC + 1] = (u8)(GTP_MAX_WIDTH>>8);
+ config[RESOLUTION_LOC + 2] = (u8)GTP_MAX_HEIGHT;
+ config[RESOLUTION_LOC + 3] = (u8)(GTP_MAX_HEIGHT>>8);
+
+ if (GTP_INT_TRIGGER == 0) //RISING
+ {
+ config[TRIGGER_LOC] &= 0xfe;
+ }
+ else if (GTP_INT_TRIGGER == 1) //FALLING
+ {
+ config[TRIGGER_LOC] |= 0x01;
+ }
+
+ check_sum = 0;
+ for (i = GTP_ADDR_LENGTH; i < ts->gtp_cfg_len; i++)
+ {
+ check_sum += config[i];
+ }
+ config[ts->gtp_cfg_len] = (~check_sum) + 1;
+
+ GTP_DEBUG_FUNC();
+ ret = gtp_send_cfg(ts->client);
+ if (ret < 0)
+ {
+ GTP_ERROR("Send config error.");
+ }
+
+ msleep(10);
+ return 0;
+}
+
+
+static u8 gup_get_ic_msg(struct i2c_client *client, u16 addr, u8* msg, s32 len)
+{
+ s32 i = 0;
+
+ msg[0] = (addr >> 8) & 0xff;
+ msg[1] = addr & 0xff;
+
+ for (i = 0; i < 5; i++)
+ {
+ if (gup_i2c_read(client, msg, GTP_ADDR_LENGTH + len) > 0)
+ {
+ break;
+ }
+ }
+
+ if (i >= 5)
+ {
+ GTP_ERROR("Read data from 0x%02x%02x failed!", msg[0], msg[1]);
+ return FAIL;
+ }
+
+ return SUCCESS;
+}
+
+static u8 gup_set_ic_msg(struct i2c_client *client, u16 addr, u8 val)
+{
+ s32 i = 0;
+ u8 msg[3];
+
+ msg[0] = (addr >> 8) & 0xff;
+ msg[1] = addr & 0xff;
+ msg[2] = val;
+
+ for (i = 0; i < 5; i++)
+ {
+ if (gup_i2c_write(client, msg, GTP_ADDR_LENGTH + 1) > 0)
+ {
+ break;
+ }
+ }
+
+ if (i >= 5)
+ {
+ GTP_ERROR("Set data to 0x%02x%02x failed!", msg[0], msg[1]);
+ return FAIL;
+ }
+
+ return SUCCESS;
+}
+
+static u8 gup_get_ic_fw_msg(struct i2c_client *client)
+{
+ s32 ret = -1;
+ u8 retry = 0;
+ u8 buf[16];
+ u8 i;
+
+ // step1:get hardware info
+ ret = gtp_i2c_read_dbl_check(client, GUP_REG_HW_INFO, &buf[GTP_ADDR_LENGTH], 4);
+ if (FAIL == ret)
+ {
+ GTP_ERROR("[get_ic_fw_msg]get hw_info failed,exit");
+ return FAIL;
+ }
+
+ // buf[2~5]: 00 06 90 00
+ // hw_info: 00 90 06 00
+ for(i=0; i<4; i++)
+ {
+ update_msg.ic_fw_msg.hw_info[i] = buf[GTP_ADDR_LENGTH + 3 - i];
+ }
+ GTP_DEBUG("IC Hardware info:%02x%02x%02x%02x", update_msg.ic_fw_msg.hw_info[0], update_msg.ic_fw_msg.hw_info[1],
+ update_msg.ic_fw_msg.hw_info[2], update_msg.ic_fw_msg.hw_info[3]);
+ // step2:get firmware message
+ for(retry=0; retry<2; retry++)
+ {
+ ret = gup_get_ic_msg(client, GUP_REG_FW_MSG, buf, 1);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("Read firmware message fail.");
+ return ret;
+ }
+
+ update_msg.force_update = buf[GTP_ADDR_LENGTH];
+ if((0xBE != update_msg.force_update)&&(!retry))
+ {
+            GTP_INFO("The checksum in the IC is incorrect.");
+ GTP_INFO("The IC will be updated by force.");
+ continue;
+ }
+ break;
+ }
+ GTP_DEBUG("IC force update flag:0x%x", update_msg.force_update);
+
+ // step3:get pid & vid
+ ret = gtp_i2c_read_dbl_check(client, GUP_REG_PID_VID, &buf[GTP_ADDR_LENGTH], 6);
+ if (FAIL == ret)
+ {
+ GTP_ERROR("[get_ic_fw_msg]get pid & vid failed,exit");
+ return FAIL;
+ }
+
+ memset(update_msg.ic_fw_msg.pid, 0, sizeof(update_msg.ic_fw_msg.pid));
+ memcpy(update_msg.ic_fw_msg.pid, &buf[GTP_ADDR_LENGTH], 4);
+ GTP_DEBUG("IC Product id:%s", update_msg.ic_fw_msg.pid);
+
+ //GT9XX PID MAPPING
+ /*|-----FLASH-----RAM-----|
+ |------918------918-----|
+ |------968------968-----|
+ |------913------913-----|
+ |------913P-----913P----|
+ |------927------927-----|
+ |------927P-----927P----|
+ |------9110-----9110----|
+ |------9110P----9111----|*/
+ if(update_msg.ic_fw_msg.pid[0] != 0)
+ {
+ if(!memcmp(update_msg.ic_fw_msg.pid, "9111", 4))
+ {
+ GTP_DEBUG("IC Mapping Product id:%s", update_msg.ic_fw_msg.pid);
+ memcpy(update_msg.ic_fw_msg.pid, "9110P", 5);
+ }
+ }
+
+ update_msg.ic_fw_msg.vid = buf[GTP_ADDR_LENGTH+4] + (buf[GTP_ADDR_LENGTH+5]<<8);
+ GTP_DEBUG("IC version id:%04x", update_msg.ic_fw_msg.vid);
+
+ return SUCCESS;
+}
+
+s32 gup_enter_update_mode(struct i2c_client *client)
+{
+ s32 ret = -1;
+ s32 retry = 0;
+ u8 rd_buf[3];
+
+    //step1:drive RST low for at least 2ms
+ GTP_GPIO_OUTPUT(GTP_RST_PORT, 0);
+ msleep(2);
+
+ //step2:select I2C slave addr,INT:0--0xBA;1--0x28.
+ GTP_GPIO_OUTPUT(GTP_INT_PORT, (client->addr == 0x14));
+ msleep(2);
+
+ //step3:RST output high reset guitar
+ GTP_GPIO_OUTPUT(GTP_RST_PORT, 1);
+
+ //20121211 modify start
+ msleep(5);
+ while(retry++ < 200)
+ {
+ //step4:Hold ss51 & dsp
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
+ if(ret <= 0)
+ {
+ GTP_DEBUG("Hold ss51 & dsp I2C error,retry:%d", retry);
+ continue;
+ }
+
+ //step5:Confirm hold
+ ret = gup_get_ic_msg(client, _rRW_MISCTL__SWRST_B0_, rd_buf, 1);
+ if(ret <= 0)
+ {
+ GTP_DEBUG("Hold ss51 & dsp I2C error,retry:%d", retry);
+ continue;
+ }
+ if(0x0C == rd_buf[GTP_ADDR_LENGTH])
+ {
+ GTP_DEBUG("Hold ss51 & dsp confirm SUCCESS");
+ break;
+ }
+ GTP_DEBUG("Hold ss51 & dsp confirm 0x4180 failed,value:%d", rd_buf[GTP_ADDR_LENGTH]);
+ }
+ if(retry >= 200)
+ {
+        GTP_ERROR("Failed to hold ss51 while entering update mode.");
+ return FAIL;
+ }
+
+ //step6:DSP_CK and DSP_ALU_CK PowerOn
+ ret = gup_set_ic_msg(client, 0x4010, 0x00);
+
+ //20121211 modify end
+ return ret;
+}
+
+void gup_leave_update_mode(void)
+{
+ GTP_GPIO_AS_INT(GTP_INT_PORT);
+
+ GTP_DEBUG("[leave_update_mode]reset chip.");
+ gtp_reset_guitar(i2c_connect_client, 20);
+}
+
+// Get the correct nvram data
+// The correct conditions:
+// 1. the hardware info is the same
+// 2. the product id is the same
+// 3. the firmware version in update file is greater than the firmware version in ic
+// or the check sum in ic is wrong
+/* Update Conditions:
+ 1. Same hardware info
+ 2. Same PID
+ 3. File PID > IC PID
+ Force Update Conditions:
+ 1. Wrong ic firmware checksum
+ 2. INVALID IC PID or VID
+ 3. IC PID == 91XX || File PID == 91XX
+*/
+
+static u8 gup_enter_update_judge(st_fw_head *fw_head)
+{
+ u16 u16_tmp;
+ s32 i = 0;
+
+ u16_tmp = fw_head->vid;
+ fw_head->vid = (u16)(u16_tmp>>8) + (u16)(u16_tmp<<8);
+
+ GTP_DEBUG("FILE HARDWARE INFO:%02x%02x%02x%02x", fw_head->hw_info[0], fw_head->hw_info[1], fw_head->hw_info[2], fw_head->hw_info[3]);
+ GTP_DEBUG("FILE PID:%s", fw_head->pid);
+ GTP_DEBUG("FILE VID:%04x", fw_head->vid);
+
+ GTP_DEBUG("IC HARDWARE INFO:%02x%02x%02x%02x", update_msg.ic_fw_msg.hw_info[0], update_msg.ic_fw_msg.hw_info[1],
+ update_msg.ic_fw_msg.hw_info[2], update_msg.ic_fw_msg.hw_info[3]);
+ GTP_DEBUG("IC PID:%s", update_msg.ic_fw_msg.pid);
+ GTP_DEBUG("IC VID:%04x", update_msg.ic_fw_msg.vid);
+
+ //First two conditions
+ if ( !memcmp(fw_head->hw_info, update_msg.ic_fw_msg.hw_info, sizeof(update_msg.ic_fw_msg.hw_info)))
+ {
+ GTP_DEBUG("Get the same hardware info.");
+ if( update_msg.force_update != 0xBE )
+ {
+            GTP_INFO("FW checksum error, update required.");
+ return SUCCESS;
+ }
+
+ // 20130523 start
+ if (strlen(update_msg.ic_fw_msg.pid) < 3)
+ {
+            GTP_INFO("Illegal IC PID, update required");
+ return SUCCESS;
+ }
+ else
+ {
+ for (i = 0; i < 3; i++)
+ {
+ if ((update_msg.ic_fw_msg.pid[i] < 0x30) || (update_msg.ic_fw_msg.pid[i] > 0x39))
+ {
+                    GTP_INFO("Illegal IC PID, out of range, update required");
+ return SUCCESS;
+ }
+ }
+ }
+ // 20130523 end
+
+
+ if (( !memcmp(fw_head->pid, update_msg.ic_fw_msg.pid, (strlen(fw_head->pid)<3?3:strlen(fw_head->pid))))||
+ (!memcmp(update_msg.ic_fw_msg.pid, "91XX", 4))||
+ (!memcmp(fw_head->pid, "91XX", 4)))
+ {
+ if(!memcmp(fw_head->pid, "91XX", 4))
+ {
+                GTP_DEBUG("Forced update mode: file PID is the 91XX wildcard.");
+ }
+ else
+ {
+ GTP_DEBUG("Get the same pid.");
+ }
+ //The third condition
+ if (fw_head->vid > update_msg.ic_fw_msg.vid)
+ {
+
+ GTP_INFO("Need enter update.");
+ return SUCCESS;
+ }
+            GTP_ERROR("Third condition not met.");
+ GTP_ERROR("File VID <= Ic VID, update aborted!");
+ }
+ else
+ {
+ GTP_ERROR("File PID != Ic PID, update aborted!");
+ }
+ }
+ else
+ {
+ GTP_ERROR("Different Hardware, update aborted!");
+ }
+ return FAIL;
+}
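
The open-coded shift pair near the top of gup_enter_update_judge() is simply a 16-bit byte swap of the VID read from the file header. A hedged equivalent using the kernel helper, assuming (as the swap implies) that the .bin stores the VID in the opposite byte order from the host:

#include <linux/swab.h>	/* part of the sketch, not of the patch */

/* Sketch only: same effect as (u16)(v >> 8) + (u16)(v << 8) above. */
static inline u16 gup_example_fix_vid(u16 raw_vid)
{
	return swab16(raw_vid);
}
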
+
+static u8 ascii2hex(u8 a)
+{
+ s8 value = 0;
+
+ if(a >= '0' && a <= '9')
+ {
+ value = a - '0';
+ }
+ else if(a >= 'A' && a <= 'F')
+ {
+ value = a - 'A' + 0x0A;
+ }
+ else if(a >= 'a' && a <= 'f')
+ {
+ value = a - 'a' + 0x0A;
+ }
+ else
+ {
+ value = 0xff;
+ }
+
+ return value;
+}
+
+static s8 gup_update_config(struct i2c_client *client)
+{
+ s32 file_len = 0;
+ s32 ret = 0;
+ s32 i = 0;
+ s32 file_cfg_len = 0;
+ s32 chip_cfg_len = 0;
+ s32 count = 0;
+ u8 *buf;
+ u8 *pre_buf;
+ u8 *file_config;
+ //u8 checksum = 0;
+ u8 pid[8];
+
+ if(NULL == update_msg.cfg_file)
+ {
+ GTP_ERROR("[update_cfg]No need to upgrade config!");
+ return FAIL;
+ }
+ file_len = update_msg.cfg_file->f_op->llseek(update_msg.cfg_file, 0, SEEK_END);
+
+ ret = gup_get_ic_msg(client, GUP_REG_PID_VID, pid, 6);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[update_cfg]Read product id & version id fail.");
+ return FAIL;
+ }
+ pid[5] = '\0';
+ GTP_DEBUG("update cfg get pid:%s", &pid[GTP_ADDR_LENGTH]);
+
+ chip_cfg_len = 186;
+ if(!memcmp(&pid[GTP_ADDR_LENGTH], "968", 3) ||
+ !memcmp(&pid[GTP_ADDR_LENGTH], "910", 3) ||
+ !memcmp(&pid[GTP_ADDR_LENGTH], "960", 3))
+ {
+ chip_cfg_len = 228;
+ }
+ GTP_DEBUG("[update_cfg]config file len:%d", file_len);
+ GTP_DEBUG("[update_cfg]need config len:%d",chip_cfg_len);
+ if((file_len+5) < chip_cfg_len*5)
+ {
+ GTP_ERROR("Config length error");
+ return -1;
+ }
+
+ buf = (u8*)kzalloc(file_len, GFP_KERNEL);
+ pre_buf = (u8*)kzalloc(file_len, GFP_KERNEL);
+ file_config = (u8*)kzalloc(chip_cfg_len + GTP_ADDR_LENGTH, GFP_KERNEL);
+ update_msg.cfg_file->f_op->llseek(update_msg.cfg_file, 0, SEEK_SET);
+
+ GTP_DEBUG("[update_cfg]Read config from file.");
+ ret = update_msg.cfg_file->f_op->read(update_msg.cfg_file, (char*)pre_buf, file_len, &update_msg.cfg_file->f_pos);
+ if(ret<0)
+ {
+ GTP_ERROR("[update_cfg]Read config file failed.");
+ goto update_cfg_file_failed;
+ }
+
+    GTP_DEBUG("[update_cfg]Strip whitespace characters.");
+ for(i=0,count=0; i<file_len; i++)
+ {
+ if (pre_buf[i] == ' ' || pre_buf[i] == '\r' || pre_buf[i] == '\n')
+ {
+ continue;
+ }
+ buf[count++] = pre_buf[i];
+ }
+
+ GTP_DEBUG("[update_cfg]Ascii to hex.");
+ file_config[0] = GTP_REG_CONFIG_DATA >> 8;
+ file_config[1] = GTP_REG_CONFIG_DATA & 0xff;
+ for(i=0,file_cfg_len=GTP_ADDR_LENGTH; i<count; i+=5)
+ {
+ if((buf[i]=='0') && ((buf[i+1]=='x') || (buf[i+1]=='X')))
+ {
+ u8 high,low;
+ high = ascii2hex(buf[i+2]);
+ low = ascii2hex(buf[i+3]);
+
+ if((high == 0xFF) || (low == 0xFF))
+ {
+ ret = 0;
+ GTP_ERROR("[update_cfg]Illegal config file.");
+ goto update_cfg_file_failed;
+ }
+ file_config[file_cfg_len++] = (high<<4) + low;
+ }
+ else
+ {
+ ret = 0;
+ GTP_ERROR("[update_cfg]Illegal config file.");
+ goto update_cfg_file_failed;
+ }
+ }
+
+// //cal checksum
+// for(i=GTP_ADDR_LENGTH; i<chip_cfg_len; i++)
+// {
+// checksum += file_config[i];
+// }
+// file_config[chip_cfg_len] = (~checksum) + 1;
+// file_config[chip_cfg_len+1] = 0x01;
+
+ GTP_DEBUG("config:");
+ GTP_DEBUG_ARRAY(file_config+2, file_cfg_len);
+
+ i = 0;
+ while(i++ < 5)
+ {
+ ret = gup_i2c_write(client, file_config, file_cfg_len);
+ if(ret > 0)
+ {
+ GTP_INFO("[update_cfg]Send config SUCCESS.");
+ break;
+ }
+ GTP_ERROR("[update_cfg]Send config i2c error.");
+ }
+
+update_cfg_file_failed:
+ kfree(pre_buf);
+ kfree(buf);
+ kfree(file_config);
+ return ret;
+}
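
A note on the .cfg text format accepted by gup_update_config() (hypothetical sample, not shipped with the patch): after whitespace is stripped, the file must be a run of "0xNN," tokens, five characters per configuration byte, which is why the length check compares file_len against chip_cfg_len * 5. For example, a file beginning

0x41,0x1C,0x02,0xC0,0x03,

would be reduced by ascii2hex() to the bytes 41 1C 02 C0 03 and written to GTP_REG_CONFIG_DATA; the specific byte values here are purely illustrative.
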
+
+#if GTP_HEADER_FW_UPDATE
+static u8 gup_check_fs_mounted(char *path_name)
+{
+ struct path root_path;
+ struct path path;
+ int err;
+ err = kern_path("/", LOOKUP_FOLLOW, &root_path);
+
+ if (err)
+ {
+ GTP_DEBUG("\"/\" NOT Mounted: %d", err);
+ return FAIL;
+ }
+ err = kern_path(path_name, LOOKUP_FOLLOW, &path);
+
+ if (err)
+ {
+        GTP_DEBUG("%s NOT Mounted: %d", path_name, err);
+ return FAIL;
+ }
+
+ return SUCCESS;
+
+ /*
+ if (path.mnt->mnt_sb == root_path.mnt->mnt_sb)
+ {
+ //-- not mounted
+ return FAIL;
+ }
+ else
+ {
+ return SUCCESS;
+ }*/
+
+}
+#endif
+static u8 gup_check_update_file(struct i2c_client *client, st_fw_head* fw_head, u8* path)
+{
+ s32 ret = 0;
+ s32 i = 0;
+ s32 fw_checksum = 0;
+ u8 buf[FW_HEAD_LENGTH];
+
+ if (path)
+ {
+ GTP_DEBUG("Update File path:%s, %d", path, strlen(path));
+ update_msg.file = filp_open(path, O_RDONLY, 0);
+
+ if (IS_ERR(update_msg.file))
+ {
+ GTP_ERROR("Open update file(%s) error!", path);
+ return FAIL;
+ }
+ }
+ else
+ {
+#if GTP_HEADER_FW_UPDATE
+ for (i = 0; i < (GUP_SEARCH_FILE_TIMES); i++)
+ {
+ GTP_DEBUG("Waiting for /data mounted [%d]", i);
+
+ if (gup_check_fs_mounted("/data") == SUCCESS)
+ {
+ GTP_DEBUG("/data Mounted!");
+ break;
+ }
+ msleep(3000);
+ }
+ if (i >= (GUP_SEARCH_FILE_TIMES))
+ {
+ GTP_ERROR("Wait for /data mounted timeout!");
+ return FAIL;
+ }
+
+ // update config
+ update_msg.cfg_file = filp_open(CONFIG_FILE_PATH_1, O_RDONLY, 0);
+ if (IS_ERR(update_msg.cfg_file))
+ {
+ GTP_DEBUG("%s is unavailable", CONFIG_FILE_PATH_1);
+ }
+ else
+ {
+ GTP_INFO("Update Config File: %s", CONFIG_FILE_PATH_1);
+ ret = gup_update_config(client);
+ if(ret <= 0)
+ {
+ GTP_ERROR("Update config failed.");
+ }
+ filp_close(update_msg.cfg_file, NULL);
+ }
+
+ if (sizeof(header_fw_array) < (FW_HEAD_LENGTH+FW_SECTION_LENGTH*4+FW_DSP_ISP_LENGTH+FW_DSP_LENGTH+FW_BOOT_LENGTH))
+ {
+ GTP_ERROR("INVALID header_fw_array, check your gt9xx_firmware.h file!");
+ return FAIL;
+ }
+ update_msg.file = filp_open(UPDATE_FILE_PATH_2, O_CREAT | O_RDWR, 0666);
+ if ((IS_ERR(update_msg.file)))
+ {
+ GTP_ERROR("Failed to Create file: %s for fw_header!", UPDATE_FILE_PATH_2);
+ return FAIL;
+ }
+ update_msg.file->f_op->llseek(update_msg.file, 0, SEEK_SET);
+ update_msg.file->f_op->write(update_msg.file, (char *)header_fw_array, sizeof(header_fw_array), &update_msg.file->f_pos);
+ filp_close(update_msg.file, NULL);
+ update_msg.file = filp_open(UPDATE_FILE_PATH_2, O_RDONLY, 0);
+#else
+ u8 fp_len = max(sizeof(UPDATE_FILE_PATH_1), sizeof(UPDATE_FILE_PATH_2));
+ u8 cfp_len = max(sizeof(CONFIG_FILE_PATH_1), sizeof(CONFIG_FILE_PATH_2));
+ u8 *search_update_path = (u8*)kzalloc(fp_len, GFP_KERNEL);
+ u8 *search_cfg_path = (u8*)kzalloc(cfp_len, GFP_KERNEL);
+        //Search for the update files; the .bin and .cfg must live in the same path (either path 1 or path 2).
+ searching_file = 1;
+ for (i = 0; i < GUP_SEARCH_FILE_TIMES; i++)
+ {
+ if (searching_file == 0)
+ {
+ kfree(search_update_path);
+ kfree(search_cfg_path);
+                GTP_INFO(".bin/.cfg update file search forcibly terminated!");
+ return FAIL;
+ }
+ if(i%2)
+ {
+ memcpy(search_update_path, UPDATE_FILE_PATH_1, sizeof(UPDATE_FILE_PATH_1));
+ memcpy(search_cfg_path, CONFIG_FILE_PATH_1, sizeof(CONFIG_FILE_PATH_1));
+ }
+ else
+ {
+ memcpy(search_update_path, UPDATE_FILE_PATH_2, sizeof(UPDATE_FILE_PATH_2));
+ memcpy(search_cfg_path, CONFIG_FILE_PATH_2, sizeof(CONFIG_FILE_PATH_2));
+ }
+
+ if(!(got_file_flag&0x0F))
+ {
+ update_msg.file = filp_open(search_update_path, O_RDONLY, 0);
+ if(!IS_ERR(update_msg.file))
+ {
+ GTP_DEBUG("Find the bin file");
+ got_file_flag |= 0x0F;
+ }
+ }
+ if(!(got_file_flag&0xF0))
+ {
+ update_msg.cfg_file = filp_open(search_cfg_path, O_RDONLY, 0);
+ if(!IS_ERR(update_msg.cfg_file))
+ {
+ GTP_DEBUG("Find the cfg file");
+ got_file_flag |= 0xF0;
+ }
+ }
+
+ if(got_file_flag)
+ {
+ if(got_file_flag == 0xFF)
+ {
+ break;
+ }
+ else
+ {
+ i += 4;
+ }
+ }
+ GTP_DEBUG("%3d:Searching %s %s file...", i, (got_file_flag&0x0F)?"":"bin", (got_file_flag&0xF0)?"":"cfg");
+ msleep(3000);
+ }
+ searching_file = 0;
+ kfree(search_update_path);
+ kfree(search_cfg_path);
+
+ if(!got_file_flag)
+ {
+ GTP_ERROR("Can't find update file.");
+            return FAIL;    /* set_fs() has not been called yet, so skip load_failed */
+ }
+
+ if(got_file_flag&0xF0)
+ {
+ GTP_DEBUG("Got the update config file.");
+ ret = gup_update_config(client);
+ if(ret <= 0)
+ {
+ GTP_ERROR("Update config failed.");
+ }
+ filp_close(update_msg.cfg_file, NULL);
+            msleep(500);    //wait for the config to be stored in flash
+ }
+ if(got_file_flag&0x0F)
+ {
+ GTP_DEBUG("Got the update firmware file.");
+ }
+ else
+ {
+            GTP_ERROR("No firmware file found, nothing to upgrade.");
+            return FAIL;    /* set_fs() has not been called yet, so skip load_failed */
+ }
+#endif
+ }
+
+ update_msg.old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ update_msg.file->f_op->llseek(update_msg.file, 0, SEEK_SET);
+ //update_msg.file->f_pos = 0;
+
+ ret = update_msg.file->f_op->read(update_msg.file, (char*)buf, FW_HEAD_LENGTH, &update_msg.file->f_pos);
+ if (ret < 0)
+ {
+ GTP_ERROR("Read firmware head in update file error.");
+ goto load_failed;
+ }
+ memcpy(fw_head, buf, FW_HEAD_LENGTH);
+
+ //check firmware legality
+ fw_checksum = 0;
+ for(i=0; i<FW_SECTION_LENGTH*4+FW_DSP_ISP_LENGTH+FW_DSP_LENGTH+FW_BOOT_LENGTH; i+=2)
+ {
+ u16 temp;
+ ret = update_msg.file->f_op->read(update_msg.file, (char*)buf, 2, &update_msg.file->f_pos);
+ if (ret < 0)
+ {
+ GTP_ERROR("Read firmware file error.");
+ goto load_failed;
+ }
+ //GTP_DEBUG("BUF[0]:%x", buf[0]);
+ temp = (buf[0]<<8) + buf[1];
+ fw_checksum += temp;
+ }
+
+ GTP_DEBUG("firmware checksum:%x", fw_checksum&0xFFFF);
+ if(fw_checksum&0xFFFF)
+ {
+ GTP_ERROR("Illegal firmware file.");
+ goto load_failed;
+ }
+
+ return SUCCESS;
+
+load_failed:
+ set_fs(update_msg.old_fs);
+ return FAIL;
+}
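
The legality check above sums the payload as big-endian 16-bit words and requires the result to be 0 modulo 0x10000. A host-side sketch (illustrative only; real images come from Goodix tooling) of how the final word of such an image could be chosen so it passes that check:

/* Sketch: 16-bit word that makes the big-endian word sum of the payload zero. */
static u16 gup_example_checksum_fixup(const u8 *payload, size_t len_without_fixup)
{
	u32 sum = 0;
	size_t i;

	for (i = 0; i + 1 < len_without_fixup; i += 2)
		sum += (payload[i] << 8) | payload[i + 1];

	return (u16)(0x10000 - (sum & 0xffff));	/* appended big endian */
}
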
+
+#if 0
+static u8 gup_check_update_header(struct i2c_client *client, st_fw_head* fw_head)
+{
+ const u8* pos;
+ int i = 0;
+ u8 mask_num = 0;
+ s32 ret = 0;
+
+ pos = HEADER_UPDATE_DATA;
+
+ memcpy(fw_head, pos, FW_HEAD_LENGTH);
+ pos += FW_HEAD_LENGTH;
+
+ ret = gup_enter_update_judge(fw_head);
+ if(SUCCESS == ret)
+ {
+ return SUCCESS;
+ }
+ return FAIL;
+}
+#endif
+
+static u8 gup_burn_proc(struct i2c_client *client, u8 *burn_buf, u16 start_addr, u16 total_length)
+{
+ s32 ret = 0;
+ u16 burn_addr = start_addr;
+ u16 frame_length = 0;
+ u16 burn_length = 0;
+ u8 wr_buf[PACK_SIZE + GTP_ADDR_LENGTH];
+ u8 rd_buf[PACK_SIZE + GTP_ADDR_LENGTH];
+ u8 retry = 0;
+
+ GTP_DEBUG("Begin burn %dk data to addr 0x%x", (total_length/1024), start_addr);
+ while(burn_length < total_length)
+ {
+ GTP_DEBUG("B/T:%04d/%04d", burn_length, total_length);
+ frame_length = ((total_length - burn_length) > PACK_SIZE) ? PACK_SIZE : (total_length - burn_length);
+ wr_buf[0] = (u8)(burn_addr>>8);
+ rd_buf[0] = wr_buf[0];
+ wr_buf[1] = (u8)burn_addr;
+ rd_buf[1] = wr_buf[1];
+ memcpy(&wr_buf[GTP_ADDR_LENGTH], &burn_buf[burn_length], frame_length);
+
+ for(retry = 0; retry < MAX_FRAME_CHECK_TIME; retry++)
+ {
+ ret = gup_i2c_write(client, wr_buf, GTP_ADDR_LENGTH + frame_length);
+ if(ret <= 0)
+ {
+ GTP_ERROR("Write frame data i2c error.");
+ continue;
+ }
+ ret = gup_i2c_read(client, rd_buf, GTP_ADDR_LENGTH + frame_length);
+ if(ret <= 0)
+ {
+ GTP_ERROR("Read back frame data i2c error.");
+ continue;
+ }
+
+ if(memcmp(&wr_buf[GTP_ADDR_LENGTH], &rd_buf[GTP_ADDR_LENGTH], frame_length))
+ {
+ GTP_ERROR("Check frame data fail,not equal.");
+ GTP_DEBUG("write array:");
+ GTP_DEBUG_ARRAY(&wr_buf[GTP_ADDR_LENGTH], frame_length);
+ GTP_DEBUG("read array:");
+ GTP_DEBUG_ARRAY(&rd_buf[GTP_ADDR_LENGTH], frame_length);
+ continue;
+ }
+ else
+ {
+ //GTP_DEBUG("Check frame data success.");
+ break;
+ }
+ }
+ if(retry >= MAX_FRAME_CHECK_TIME)
+ {
+            GTP_ERROR("Burn frame data timed out, exit.");
+ return FAIL;
+ }
+ burn_length += frame_length;
+ burn_addr += frame_length;
+ }
+ return SUCCESS;
+}
+
+static u8 gup_load_section_file(u8* buf, u16 offset, u16 length)
+{
+ s32 ret = 0;
+
+ if(update_msg.file == NULL)
+ {
+ GTP_ERROR("cannot find update file,load section file fail.");
+ return FAIL;
+ }
+ update_msg.file->f_pos = FW_HEAD_LENGTH + offset;
+
+ ret = update_msg.file->f_op->read(update_msg.file, (char*)buf, length, &update_msg.file->f_pos);
+ if(ret < 0)
+ {
+ GTP_ERROR("Read update file fail.");
+ return FAIL;
+ }
+
+ return SUCCESS;
+}
+
+static u8 gup_recall_check(struct i2c_client *client, u8* chk_src, u16 start_rd_addr, u16 chk_length)
+{
+ u8 rd_buf[PACK_SIZE + GTP_ADDR_LENGTH];
+ s32 ret = 0;
+ u16 recall_addr = start_rd_addr;
+ u16 recall_length = 0;
+ u16 frame_length = 0;
+
+ while(recall_length < chk_length)
+ {
+ frame_length = ((chk_length - recall_length) > PACK_SIZE) ? PACK_SIZE : (chk_length - recall_length);
+ ret = gup_get_ic_msg(client, recall_addr, rd_buf, frame_length);
+ if(ret <= 0)
+ {
+ GTP_ERROR("recall i2c error,exit");
+ return FAIL;
+ }
+
+ if(memcmp(&rd_buf[GTP_ADDR_LENGTH], &chk_src[recall_length], frame_length))
+ {
+ GTP_ERROR("Recall frame data fail,not equal.");
+ GTP_DEBUG("chk_src array:");
+ GTP_DEBUG_ARRAY(&chk_src[recall_length], frame_length);
+ GTP_DEBUG("recall array:");
+ GTP_DEBUG_ARRAY(&rd_buf[GTP_ADDR_LENGTH], frame_length);
+ return FAIL;
+ }
+
+ recall_length += frame_length;
+ recall_addr += frame_length;
+ }
+ GTP_DEBUG("Recall check %dk firmware success.", (chk_length/1024));
+
+ return SUCCESS;
+}
+
+static u8 gup_burn_fw_section(struct i2c_client *client, u8 *fw_section, u16 start_addr, u8 bank_cmd )
+{
+ s32 ret = 0;
+ u8 rd_buf[5];
+
+ //step1:hold ss51 & dsp
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_section]hold ss51 & dsp fail.");
+ return FAIL;
+ }
+
+ //step2:set scramble
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_section]set scramble fail.");
+ return FAIL;
+ }
+
+ //step3:select bank
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, (bank_cmd >> 4)&0x0F);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_section]select bank %d fail.", (bank_cmd >> 4)&0x0F);
+ return FAIL;
+ }
+
+ //step4:enable accessing code
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x01);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_section]enable accessing code fail.");
+ return FAIL;
+ }
+
+ //step5:burn 8k fw section
+ ret = gup_burn_proc(client, fw_section, start_addr, FW_SECTION_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_section]burn fw_section fail.");
+ return FAIL;
+ }
+
+ //step6:hold ss51 & release dsp
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_section]hold ss51 & release dsp fail.");
+ return FAIL;
+ }
+ //must delay
+ msleep(1);
+
+ //step7:send burn cmd to move data to flash from sram
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, bank_cmd&0x0f);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_section]send burn cmd fail.");
+ return FAIL;
+ }
+    GTP_DEBUG("[burn_fw_section]Waiting for the burn to complete...");
+ do{
+ ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_section]Get burn state fail");
+ return FAIL;
+ }
+ msleep(10);
+ //GTP_DEBUG("[burn_fw_section]Get burn state:%d.", rd_buf[GTP_ADDR_LENGTH]);
+ }while(rd_buf[GTP_ADDR_LENGTH]);
+
+ //step8:select bank
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, (bank_cmd >> 4)&0x0F);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_section]select bank %d fail.", (bank_cmd >> 4)&0x0F);
+ return FAIL;
+ }
+
+ //step9:enable accessing code
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x01);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_section]enable accessing code fail.");
+ return FAIL;
+ }
+
+ //step10:recall 8k fw section
+ ret = gup_recall_check(client, fw_section, start_addr, FW_SECTION_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_section]recall check 8k firmware fail.");
+ return FAIL;
+ }
+
+ //step11:disable accessing code
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x00);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_section]disable accessing code fail.");
+ return FAIL;
+ }
+
+ return SUCCESS;
+}
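
bank_cmd packs two fields: the high nibble is the SRAM bank written to _bRW_MISCTL__SRAM_BANK and the low nibble is the burn command written to _rRW_MISCTL__BOOT_CTL_. Decoded below for the values gup_burn_fw_ss51() passes further down (reader's note, not part of the patch):

/*
 * bank_cmd   SRAM bank ((bank_cmd >> 4) & 0x0F)   burn cmd (bank_cmd & 0x0F)   used for
 * 0x01       0                                    1                            ss51 section 1 @ 0xC000
 * 0x02       0                                    2                            ss51 section 2 @ 0xE000
 * 0x13       1                                    3                            ss51 section 3 @ 0xC000
 * 0x14       1                                    4                            ss51 section 4 @ 0xE000
 */
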
+
+static u8 gup_burn_dsp_isp(struct i2c_client *client)
+{
+ s32 ret = 0;
+ u8* fw_dsp_isp = NULL;
+ u8 retry = 0;
+
+ GTP_DEBUG("[burn_dsp_isp]Begin burn dsp isp---->>");
+
+ //step1:alloc memory
+ GTP_DEBUG("[burn_dsp_isp]step1:alloc memory");
+ while(retry++ < 5)
+ {
+ fw_dsp_isp = (u8*)kzalloc(FW_DSP_ISP_LENGTH, GFP_KERNEL);
+ if(fw_dsp_isp == NULL)
+ {
+ continue;
+ }
+ else
+ {
+ GTP_INFO("[burn_dsp_isp]Alloc %dk byte memory success.", (FW_DSP_ISP_LENGTH/1024));
+ break;
+ }
+ }
+ if(retry >= 5)
+ {
+ GTP_ERROR("[burn_dsp_isp]Alloc memory fail,exit.");
+ return FAIL;
+ }
+
+ //step2:load dsp isp file data
+ GTP_DEBUG("[burn_dsp_isp]step2:load dsp isp file data");
+ ret = gup_load_section_file(fw_dsp_isp, (4*FW_SECTION_LENGTH+FW_DSP_LENGTH+FW_BOOT_LENGTH), FW_DSP_ISP_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_dsp_isp]load firmware dsp_isp fail.");
+ goto exit_burn_dsp_isp;
+ }
+
+ //step3:disable wdt,clear cache enable
+ GTP_DEBUG("[burn_dsp_isp]step3:disable wdt,clear cache enable");
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__TMR0_EN, 0x00);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_dsp_isp]disable wdt fail.");
+ ret = FAIL;
+ goto exit_burn_dsp_isp;
+ }
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__CACHE_EN, 0x00);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_dsp_isp]clear cache enable fail.");
+ ret = FAIL;
+ goto exit_burn_dsp_isp;
+ }
+
+ //step4:hold ss51 & dsp
+ GTP_DEBUG("[burn_dsp_isp]step4:hold ss51 & dsp");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_dsp_isp]hold ss51 & dsp fail.");
+ ret = FAIL;
+ goto exit_burn_dsp_isp;
+ }
+
+ //step5:set boot from sram
+ GTP_DEBUG("[burn_dsp_isp]step5:set boot from sram");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOTCTL_B0_, 0x02);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_dsp_isp]set boot from sram fail.");
+ ret = FAIL;
+ goto exit_burn_dsp_isp;
+ }
+
+ //step6:software reboot
+ GTP_DEBUG("[burn_dsp_isp]step6:software reboot");
+ ret = gup_set_ic_msg(client, _bWO_MISCTL__CPU_SWRST_PULSE, 0x01);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_dsp_isp]software reboot fail.");
+ ret = FAIL;
+ goto exit_burn_dsp_isp;
+ }
+
+ //step7:select bank2
+ GTP_DEBUG("[burn_dsp_isp]step7:select bank2");
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x02);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_dsp_isp]select bank2 fail.");
+ ret = FAIL;
+ goto exit_burn_dsp_isp;
+ }
+
+ //step8:enable accessing code
+ GTP_DEBUG("[burn_dsp_isp]step8:enable accessing code");
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x01);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_dsp_isp]enable accessing code fail.");
+ ret = FAIL;
+ goto exit_burn_dsp_isp;
+ }
+
+ //step9:burn 4k dsp_isp
+ GTP_DEBUG("[burn_dsp_isp]step9:burn 4k dsp_isp");
+ ret = gup_burn_proc(client, fw_dsp_isp, 0xC000, FW_DSP_ISP_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_dsp_isp]burn dsp_isp fail.");
+ goto exit_burn_dsp_isp;
+ }
+
+ //step10:set scramble
+ GTP_DEBUG("[burn_dsp_isp]step10:set scramble");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_dsp_isp]set scramble fail.");
+ ret = FAIL;
+ goto exit_burn_dsp_isp;
+ }
+ ret = SUCCESS;
+
+exit_burn_dsp_isp:
+ kfree(fw_dsp_isp);
+ return ret;
+}
+
+static u8 gup_burn_fw_ss51(struct i2c_client *client)
+{
+ u8* fw_ss51 = NULL;
+ u8 retry = 0;
+ s32 ret = 0;
+
+ GTP_DEBUG("[burn_fw_ss51]Begin burn ss51 firmware---->>");
+
+ //step1:alloc memory
+ GTP_DEBUG("[burn_fw_ss51]step1:alloc memory");
+ while(retry++ < 5)
+ {
+ fw_ss51 = (u8*)kzalloc(FW_SECTION_LENGTH, GFP_KERNEL);
+ if(fw_ss51 == NULL)
+ {
+ continue;
+ }
+ else
+ {
+ GTP_INFO("[burn_fw_ss51]Alloc %dk byte memory success.", (FW_SECTION_LENGTH/1024));
+ break;
+ }
+ }
+ if(retry >= 5)
+ {
+ GTP_ERROR("[burn_fw_ss51]Alloc memory fail,exit.");
+ return FAIL;
+ }
+
+ //step2:load ss51 firmware section 1 file data
+ GTP_DEBUG("[burn_fw_ss51]step2:load ss51 firmware section 1 file data");
+ ret = gup_load_section_file(fw_ss51, 0, FW_SECTION_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 1 fail.");
+ goto exit_burn_fw_ss51;
+ }
+
+ //step3:clear control flag
+ GTP_DEBUG("[burn_fw_ss51]step3:clear control flag");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x00);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_ss51]clear control flag fail.");
+ ret = FAIL;
+ goto exit_burn_fw_ss51;
+ }
+
+ //step4:burn ss51 firmware section 1
+ GTP_DEBUG("[burn_fw_ss51]step4:burn ss51 firmware section 1");
+ ret = gup_burn_fw_section(client, fw_ss51, 0xC000, 0x01);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 1 fail.");
+ goto exit_burn_fw_ss51;
+ }
+
+ //step5:load ss51 firmware section 2 file data
+ GTP_DEBUG("[burn_fw_ss51]step5:load ss51 firmware section 2 file data");
+ ret = gup_load_section_file(fw_ss51, FW_SECTION_LENGTH, FW_SECTION_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 2 fail.");
+ goto exit_burn_fw_ss51;
+ }
+
+ //step6:burn ss51 firmware section 2
+ GTP_DEBUG("[burn_fw_ss51]step6:burn ss51 firmware section 2");
+ ret = gup_burn_fw_section(client, fw_ss51, 0xE000, 0x02);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 2 fail.");
+ goto exit_burn_fw_ss51;
+ }
+
+ //step7:load ss51 firmware section 3 file data
+ GTP_DEBUG("[burn_fw_ss51]step7:load ss51 firmware section 3 file data");
+ ret = gup_load_section_file(fw_ss51, 2*FW_SECTION_LENGTH, FW_SECTION_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 3 fail.");
+ goto exit_burn_fw_ss51;
+ }
+
+ //step8:burn ss51 firmware section 3
+ GTP_DEBUG("[burn_fw_ss51]step8:burn ss51 firmware section 3");
+ ret = gup_burn_fw_section(client, fw_ss51, 0xC000, 0x13);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 3 fail.");
+ goto exit_burn_fw_ss51;
+ }
+
+ //step9:load ss51 firmware section 4 file data
+ GTP_DEBUG("[burn_fw_ss51]step9:load ss51 firmware section 4 file data");
+ ret = gup_load_section_file(fw_ss51, 3*FW_SECTION_LENGTH, FW_SECTION_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 4 fail.");
+ goto exit_burn_fw_ss51;
+ }
+
+ //step10:burn ss51 firmware section 4
+ GTP_DEBUG("[burn_fw_ss51]step10:burn ss51 firmware section 4");
+ ret = gup_burn_fw_section(client, fw_ss51, 0xE000, 0x14);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 4 fail.");
+ goto exit_burn_fw_ss51;
+ }
+
+ ret = SUCCESS;
+
+exit_burn_fw_ss51:
+ kfree(fw_ss51);
+ return ret;
+}
+
+static u8 gup_burn_fw_dsp(struct i2c_client *client)
+{
+ s32 ret = 0;
+ u8* fw_dsp = NULL;
+ u8 retry = 0;
+ u8 rd_buf[5];
+
+ GTP_DEBUG("[burn_fw_dsp]Begin burn dsp firmware---->>");
+ //step1:alloc memory
+ GTP_DEBUG("[burn_fw_dsp]step1:alloc memory");
+ while(retry++ < 5)
+ {
+ fw_dsp = (u8*)kzalloc(FW_DSP_LENGTH, GFP_KERNEL);
+ if(fw_dsp == NULL)
+ {
+ continue;
+ }
+ else
+ {
+            GTP_INFO("[burn_fw_dsp]Alloc %dk byte memory success.", (FW_DSP_LENGTH/1024));
+ break;
+ }
+ }
+ if(retry >= 5)
+ {
+ GTP_ERROR("[burn_fw_dsp]Alloc memory fail,exit.");
+ return FAIL;
+ }
+
+ //step2:load firmware dsp
+ GTP_DEBUG("[burn_fw_dsp]step2:load firmware dsp");
+ ret = gup_load_section_file(fw_dsp, 4*FW_SECTION_LENGTH, FW_DSP_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_dsp]load firmware dsp fail.");
+ goto exit_burn_fw_dsp;
+ }
+
+ //step3:select bank3
+ GTP_DEBUG("[burn_fw_dsp]step3:select bank3");
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x03);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_dsp]select bank3 fail.");
+ ret = FAIL;
+ goto exit_burn_fw_dsp;
+ }
+
+ //step4:hold ss51 & dsp
+ GTP_DEBUG("[burn_fw_dsp]step4:hold ss51 & dsp");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_dsp]hold ss51 & dsp fail.");
+ ret = FAIL;
+ goto exit_burn_fw_dsp;
+ }
+
+ //step5:set scramble
+ GTP_DEBUG("[burn_fw_dsp]step5:set scramble");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_dsp]set scramble fail.");
+ ret = FAIL;
+ goto exit_burn_fw_dsp;
+ }
+
+ //step6:release ss51 & dsp
+ GTP_DEBUG("[burn_fw_dsp]step6:release ss51 & dsp");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04); //20121211
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_dsp]release ss51 & dsp fail.");
+ ret = FAIL;
+ goto exit_burn_fw_dsp;
+ }
+ //must delay
+ msleep(1);
+
+ //step7:burn 4k dsp firmware
+ GTP_DEBUG("[burn_fw_dsp]step7:burn 4k dsp firmware");
+ ret = gup_burn_proc(client, fw_dsp, 0x9000, FW_DSP_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_dsp]burn fw_section fail.");
+ goto exit_burn_fw_dsp;
+ }
+
+ //step8:send burn cmd to move data to flash from sram
+ GTP_DEBUG("[burn_fw_dsp]step8:send burn cmd to move data to flash from sram");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x05);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_dsp]send burn cmd fail.");
+ goto exit_burn_fw_dsp;
+ }
+    GTP_DEBUG("[burn_fw_dsp]Waiting for the burn to complete...");
+ do{
+ ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_dsp]Get burn state fail");
+ goto exit_burn_fw_dsp;
+ }
+ msleep(10);
+ //GTP_DEBUG("[burn_fw_dsp]Get burn state:%d.", rd_buf[GTP_ADDR_LENGTH]);
+ }while(rd_buf[GTP_ADDR_LENGTH]);
+
+ //step9:recall check 4k dsp firmware
+ GTP_DEBUG("[burn_fw_dsp]step9:recall check 4k dsp firmware");
+ ret = gup_recall_check(client, fw_dsp, 0x9000, FW_DSP_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_dsp]recall check 4k dsp firmware fail.");
+ goto exit_burn_fw_dsp;
+ }
+
+ ret = SUCCESS;
+
+exit_burn_fw_dsp:
+ kfree(fw_dsp);
+ return ret;
+}
+
+static u8 gup_burn_fw_boot(struct i2c_client *client)
+{
+ s32 ret = 0;
+ u8* fw_boot = NULL;
+ u8 retry = 0;
+ u8 rd_buf[5];
+
+ GTP_DEBUG("[burn_fw_boot]Begin burn bootloader firmware---->>");
+
+ //step1:Alloc memory
+ GTP_DEBUG("[burn_fw_boot]step1:Alloc memory");
+ while(retry++ < 5)
+ {
+ fw_boot = (u8*)kzalloc(FW_BOOT_LENGTH, GFP_KERNEL);
+ if(fw_boot == NULL)
+ {
+ continue;
+ }
+ else
+ {
+ GTP_INFO("[burn_fw_boot]Alloc %dk byte memory success.", (FW_BOOT_LENGTH/1024));
+ break;
+ }
+ }
+ if(retry >= 5)
+ {
+ GTP_ERROR("[burn_fw_boot]Alloc memory fail,exit.");
+ return FAIL;
+ }
+
+ //step2:load firmware bootloader
+ GTP_DEBUG("[burn_fw_boot]step2:load firmware bootloader");
+ ret = gup_load_section_file(fw_boot, (4*FW_SECTION_LENGTH+FW_DSP_LENGTH), FW_BOOT_LENGTH);
+ if(FAIL == ret)
+ {
+        GTP_ERROR("[burn_fw_boot]load firmware bootloader fail.");
+ goto exit_burn_fw_boot;
+ }
+
+ //step3:hold ss51 & dsp
+ GTP_DEBUG("[burn_fw_boot]step3:hold ss51 & dsp");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_boot]hold ss51 & dsp fail.");
+ ret = FAIL;
+ goto exit_burn_fw_boot;
+ }
+
+ //step4:set scramble
+ GTP_DEBUG("[burn_fw_boot]step4:set scramble");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_boot]set scramble fail.");
+ ret = FAIL;
+ goto exit_burn_fw_boot;
+ }
+
+ //step5:release ss51 & dsp
+ GTP_DEBUG("[burn_fw_boot]step5:release ss51 & dsp");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04); //20121211
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_boot]release ss51 & dsp fail.");
+ ret = FAIL;
+ goto exit_burn_fw_boot;
+ }
+ //must delay
+ msleep(1);
+
+ //step6:select bank3
+ GTP_DEBUG("[burn_fw_boot]step6:select bank3");
+ ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x03);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_boot]select bank3 fail.");
+ ret = FAIL;
+ goto exit_burn_fw_boot;
+ }
+
+ //step7:burn 2k bootloader firmware
+ GTP_DEBUG("[burn_fw_boot]step7:burn 2k bootloader firmware");
+ ret = gup_burn_proc(client, fw_boot, 0x9000, FW_BOOT_LENGTH);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[burn_fw_boot]burn fw_section fail.");
+ goto exit_burn_fw_boot;
+ }
+
+    //step8:send burn cmd to move data to flash from sram
+    GTP_DEBUG("[burn_fw_boot]step8:send burn cmd to move data to flash from sram");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x06);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_boot]send burn cmd fail.");
+ goto exit_burn_fw_boot;
+ }
+    GTP_DEBUG("[burn_fw_boot]Waiting for the burn to complete...");
+ do{
+ ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_boot]Get burn state fail");
+ goto exit_burn_fw_boot;
+ }
+ msleep(10);
+ //GTP_DEBUG("[burn_fw_boot]Get burn state:%d.", rd_buf[GTP_ADDR_LENGTH]);
+ }while(rd_buf[GTP_ADDR_LENGTH]);
+
+    //step9:recall check 2k bootloader firmware
+    GTP_DEBUG("[burn_fw_boot]step9:recall check 2k bootloader firmware");
+ ret = gup_recall_check(client, fw_boot, 0x9000, FW_BOOT_LENGTH);
+ if(FAIL == ret)
+ {
+        GTP_ERROR("[burn_fw_boot]recall check 2k bootloader firmware fail.");
+ goto exit_burn_fw_boot;
+ }
+
+    //step10:enable download DSP code
+    GTP_DEBUG("[burn_fw_boot]step10:enable download DSP code");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x99);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_boot]enable download DSP code fail.");
+ ret = FAIL;
+ goto exit_burn_fw_boot;
+ }
+
+    //step11:release ss51 & hold dsp
+    GTP_DEBUG("[burn_fw_boot]step11:release ss51 & hold dsp");
+ ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x08);
+ if(ret <= 0)
+ {
+ GTP_ERROR("[burn_fw_boot]release ss51 & hold dsp fail.");
+ ret = FAIL;
+ goto exit_burn_fw_boot;
+ }
+
+ ret = SUCCESS;
+
+exit_burn_fw_boot:
+ kfree(fw_boot);
+ return ret;
+}
+
+s32 gup_update_proc(void *dir)
+{
+ s32 ret = 0;
+ u8 retry = 0;
+ st_fw_head fw_head;
+ struct goodix_ts_data *ts = NULL;
+
+ GTP_DEBUG("[update_proc]Begin update ......");
+
+ show_len = 1;
+ total_len = 100;
+ if(dir == NULL)
+ {
+        msleep(3000);    //wait for the probe/init thread to complete
+ }
+
+ ts = i2c_get_clientdata(i2c_connect_client);
+
+ if (searching_file)
+ {
+ searching_file = 0; // exit .bin update file searching
+ GTP_INFO("Exiting searching .bin update file...");
+        while ((show_len != 200) && (show_len != 100))    //wait until the auto update has quit completely
+ {
+ msleep(100);
+ }
+ }
+
+ update_msg.file = NULL;
+ ret = gup_check_update_file(i2c_connect_client, &fw_head, (u8*)dir); //20121211
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[update_proc]check update file fail.");
+ goto file_fail;
+ }
+
+ //gtp_reset_guitar(i2c_connect_client, 20);
+ ret = gup_get_ic_fw_msg(i2c_connect_client);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[update_proc]get ic message fail.");
+ goto file_fail;
+ }
+
+ ret = gup_enter_update_judge(&fw_head);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[update_proc]Check *.bin file fail.");
+ goto file_fail;
+ }
+
+ ts->enter_update = 1;
+ gtp_irq_disable(ts);
+#if GTP_ESD_PROTECT
+ gtp_esd_switch(ts->client, SWITCH_OFF);
+#endif
+ ret = gup_enter_update_mode(i2c_connect_client);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[update_proc]enter update mode fail.");
+ goto update_fail;
+ }
+
+ while(retry++ < 5)
+ {
+ show_len = 10;
+ total_len = 100;
+ ret = gup_burn_dsp_isp(i2c_connect_client);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[update_proc]burn dsp isp fail.");
+ continue;
+ }
+
+ show_len += 10;
+ ret = gup_burn_fw_ss51(i2c_connect_client);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[update_proc]burn ss51 firmware fail.");
+ continue;
+ }
+
+ show_len += 40;
+ ret = gup_burn_fw_dsp(i2c_connect_client);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[update_proc]burn dsp firmware fail.");
+ continue;
+ }
+
+ show_len += 20;
+ ret = gup_burn_fw_boot(i2c_connect_client);
+ if(FAIL == ret)
+ {
+ GTP_ERROR("[update_proc]burn bootloader firmware fail.");
+ continue;
+ }
+ show_len += 10;
+ GTP_INFO("[update_proc]UPDATE SUCCESS.");
+ break;
+ }
+ if(retry >= 5)
+ {
+ GTP_ERROR("[update_proc]retry timeout,UPDATE FAIL.");
+ goto update_fail;
+ }
+
+ GTP_DEBUG("[update_proc]leave update mode.");
+ gup_leave_update_mode();
+
+ msleep(100);
+// GTP_DEBUG("[update_proc]send config.");
+// ret = gtp_send_cfg(i2c_connect_client);
+// if(ret < 0)
+// {
+// GTP_ERROR("[update_proc]send config fail.");
+// }
+ if (ts->fw_error)
+ {
+        GTP_INFO("Forced update due to firmware error, resending config!");
+ gup_init_panel(ts);
+ }
+ show_len = 100;
+ total_len = 100;
+ ts->enter_update = 0;
+ gtp_irq_enable(ts);
+
+#if GTP_ESD_PROTECT
+ gtp_esd_switch(ts->client, SWITCH_ON);
+#endif
+ filp_close(update_msg.file, NULL);
+ return SUCCESS;
+
+update_fail:
+ ts->enter_update = 0;
+ gtp_irq_enable(ts);
+
+#if GTP_ESD_PROTECT
+ gtp_esd_switch(ts->client, SWITCH_ON);
+#endif
+
+file_fail:
+ if(update_msg.file && !IS_ERR(update_msg.file))
+ {
+ filp_close(update_msg.file, NULL);
+ }
+ show_len = 200;
+ total_len = 100;
+ return FAIL;
+}
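
show_len/total_len are the progress counters polled near the top of gup_update_proc() and exported for use outside this file (presumably by the tool node built when GTP_CREATE_WR_NODE is enabled). The convention here: total_len stays at 100, show_len climbs as stages complete, 100 means finished and 200 means failed. A hedged sketch of how a consumer could map them to a percentage:

/* Illustrative only; the real consumer lives outside this file. */
static int gup_example_progress_percent(void)
{
	if (show_len == 200)
		return -1;			/* update failed */
	if (total_len == 0)
		return 0;
	return (show_len * 100) / total_len;	/* 100 == complete */
}
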
+
+#if GTP_AUTO_UPDATE
+u8 gup_init_update_proc(struct goodix_ts_data *ts)
+{
+ struct task_struct *thread = NULL;
+
+ GTP_INFO("Ready to run update thread.");
+ thread = kthread_run(gup_update_proc, (void*)NULL, "guitar_update");
+ if (IS_ERR(thread))
+ {
+ GTP_ERROR("Failed to create update thread.\n");
+ return -1;
+ }
+
+ return 0;
+}
+#endif
\ No newline at end of file
diff --git a/drivers/input/touchscreen/it7258_ts_i2c.c b/drivers/input/touchscreen/it7258_ts_i2c.c
index 773ece9eb1d4..c60a2b5a94b0 100644
--- a/drivers/input/touchscreen/it7258_ts_i2c.c
+++ b/drivers/input/touchscreen/it7258_ts_i2c.c
@@ -22,12 +22,30 @@
#include <linux/firmware.h>
#include <linux/gpio.h>
#include <linux/slab.h>
-#include <linux/wakelock.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/fb.h>
+#include <linux/debugfs.h>
+#include <linux/input/mt.h>
+#include <linux/string.h>
#define MAX_BUFFER_SIZE 144
#define DEVICE_NAME "IT7260"
#define SCREEN_X_RESOLUTION 320
#define SCREEN_Y_RESOLUTION 320
+#define DEBUGFS_DIR_NAME "ts_debug"
+#define FW_NAME "it7260_fw.bin"
+#define CFG_NAME "it7260_cfg.bin"
+#define VER_BUFFER_SIZE 4
+#define IT_FW_CHECK(x, y) \
+ (((x)[0] < (y)->data[8]) || ((x)[1] < (y)->data[9]) || \
+ ((x)[2] < (y)->data[10]) || ((x)[3] < (y)->data[11]))
+#define IT_CFG_CHECK(x, y) \
+ (((x)[0] < (y)->data[(y)->size - 8]) || \
+ ((x)[1] < (y)->data[(y)->size - 7]) || \
+ ((x)[2] < (y)->data[(y)->size - 6]) || \
+ ((x)[3] < (y)->data[(y)->size - 5]))
+#define IT7260_COORDS_ARR_SIZE 4
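
IT_FW_CHECK()/IT_CFG_CHECK() compare the four version bytes cached from the chip against version bytes embedded in the images: bytes 8..11 of the firmware blob and the last-8..last-5 bytes of the config blob. A hedged usage sketch with request_firmware(); the exact decision flow in the rest of this patch may differ.

/* Sketch only: decide whether the firmware image is newer than the chip's. */
static bool it7260_example_fw_is_newer(struct device *dev,
				       const u8 chip_fw_ver[VER_BUFFER_SIZE])
{
	const struct firmware *fw;
	bool newer;

	if (request_firmware(&fw, FW_NAME, dev))
		return false;			/* no image available */

	newer = IT_FW_CHECK(chip_fw_ver, fw);	/* any image byte > chip byte */
	release_firmware(fw);
	return newer;
}
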
/* all commands writes go to this idx */
#define BUF_COMMAND 0x20
@@ -50,23 +68,26 @@
#define CMD_IDENT_CHIP 0x00
/* VERSION_LENGTH bytes of data in response */
#define CMD_READ_VERSIONS 0x01
-#define VER_FIRMWARE 0x00
-#define VER_CONFIG 0x06
+#define SUB_CMD_READ_FIRMWARE_VERSION 0x00
+#define SUB_CMD_READ_CONFIG_VERSION 0x06
#define VERSION_LENGTH 10
/* subcommand is zero, next byte is power mode */
#define CMD_PWR_CTL 0x04
+/* active mode */
+#define PWR_CTL_ACTIVE_MODE 0x00
/* idle mode */
#define PWR_CTL_LOW_POWER_MODE 0x01
/* sleep mode */
#define PWR_CTL_SLEEP_MODE 0x02
+#define WAIT_CHANGE_MODE 20
/* command is not documented in the datasheet v1.0.0.7 */
#define CMD_UNKNOWN_7 0x07
#define CMD_FIRMWARE_REINIT_C 0x0C
/* needs to be followed by 4 bytes of zeroes */
#define CMD_CALIBRATE 0x13
#define CMD_FIRMWARE_UPGRADE 0x60
-#define FIRMWARE_MODE_ENTER 0x00
-#define FIRMWARE_MODE_EXIT 0x80
+#define SUB_CMD_ENTER_FW_UPGRADE_MODE 0x00
+#define SUB_CMD_EXIT_FW_UPGRADE_MODE 0x80
/* address for FW read/write */
#define CMD_SET_START_OFFSET 0x61
/* subcommand is number of bytes to write */
@@ -78,11 +99,7 @@
#define FW_WRITE_CHUNK_SIZE 128
#define FW_WRITE_RETRY_COUNT 4
#define CHIP_FLASH_SIZE 0x8000
-#define SYSFS_FW_UPLOAD_MODE_MANUAL 2
-#define SYSFS_RESULT_FAIL (-1)
-#define SYSFS_RESULT_NOT_DONE 0
-#define SYSFS_RESULT_SUCCESS 1
-#define DEVICE_READY_MAX_WAIT 500
+#define DEVICE_READY_MAX_WAIT 10
/* result of reading with BUF_QUERY bits */
#define CMD_STATUS_BITS 0x07
@@ -95,9 +112,28 @@
/* no new data but finder(s) still down */
#define BT_INFO_NONE_BUT_DOWN 0x08
-/* use this to include integers in commands */
-#define CMD_UINT16(v) ((uint8_t)(v)) , ((uint8_t)((v) >> 8))
+#define PD_FLAGS_DATA_TYPE_BITS 0xF0
+/* other types (like chip-detected gestures) exist but we do not care */
+#define PD_FLAGS_DATA_TYPE_TOUCH 0x00
+/* a bit for each finger data that is valid (from lsb to msb) */
+#define PD_FLAGS_HAVE_FINGERS 0x07
+#define PD_PALM_FLAG_BIT 0x01
+#define FD_PRESSURE_BITS 0x0F
+#define FD_PRESSURE_NONE 0x00
+#define FD_PRESSURE_LIGHT 0x02
+
+#define IT_VTG_MIN_UV 1800000
+#define IT_VTG_MAX_UV 1800000
+#define IT_ACTIVE_LOAD_UA 15000
+#define IT_I2C_VTG_MIN_UV 2600000
+#define IT_I2C_VTG_MAX_UV 3300000
+#define IT_I2C_ACTIVE_LOAD_UA 10000
+#define DELAY_VTG_REG_EN 170
+#define PINCTRL_STATE_ACTIVE "pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND "pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE "pmx_ts_release"
+#define IT_I2C_WAIT 1000
struct FingerData {
uint8_t xLo;
@@ -112,80 +148,130 @@ struct PointData {
struct FingerData fd[3];
} __packed;
-#define PD_FLAGS_DATA_TYPE_BITS 0xF0
-/* other types (like chip-detected gestures) exist but we do not care */
-#define PD_FLAGS_DATA_TYPE_TOUCH 0x00
-/* set if pen touched, clear if finger(s) */
-#define PD_FLAGS_NOT_PEN 0x08
-/* a bit for each finger data that is valid (from lsb to msb) */
-#define PD_FLAGS_HAVE_FINGERS 0x07
-#define PD_PALM_FLAG_BIT 0x01
-#define FD_PRESSURE_BITS 0x0F
-#define FD_PRESSURE_NONE 0x00
-#define FD_PRESSURE_HOVER 0x01
-#define FD_PRESSURE_LIGHT 0x02
-#define FD_PRESSURE_NORMAL 0x04
-#define FD_PRESSURE_HIGH 0x08
-#define FD_PRESSURE_HEAVY 0x0F
+struct IT7260_ts_platform_data {
+ u32 irq_gpio;
+ u32 irq_gpio_flags;
+ u32 reset_gpio;
+ u32 reset_gpio_flags;
+ bool wakeup;
+ bool palm_detect_en;
+ u16 palm_detect_keycode;
+ const char *fw_name;
+ const char *cfg_name;
+ unsigned int panel_minx;
+ unsigned int panel_miny;
+ unsigned int panel_maxx;
+ unsigned int panel_maxy;
+ unsigned int disp_minx;
+ unsigned int disp_miny;
+ unsigned int disp_maxx;
+ unsigned int disp_maxy;
+ unsigned num_of_fingers;
+ unsigned int reset_delay;
+ unsigned int avdd_lpm_cur;
+ bool low_reset;
+};
struct IT7260_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
+ const struct IT7260_ts_platform_data *pdata;
+ struct regulator *vdd;
+ struct regulator *avdd;
+ bool device_needs_wakeup;
+ bool suspended;
+ bool fw_upgrade_result;
+ bool cfg_upgrade_result;
+ bool fw_cfg_uploading;
+ struct work_struct work_pm_relax;
+ bool calibration_success;
+ bool had_finger_down;
+ char fw_name[MAX_BUFFER_SIZE];
+ char cfg_name[MAX_BUFFER_SIZE];
+ struct mutex fw_cfg_mutex;
+ u8 fw_ver[VER_BUFFER_SIZE];
+ u8 cfg_ver[VER_BUFFER_SIZE];
+#ifdef CONFIG_FB
+ struct notifier_block fb_notif;
+#endif
+ struct dentry *dir;
+ struct pinctrl *ts_pinctrl;
+ struct pinctrl_state *pinctrl_state_active;
+ struct pinctrl_state *pinctrl_state_suspend;
+ struct pinctrl_state *pinctrl_state_release;
};
-static int8_t fwUploadResult;
-static int8_t calibrationWasSuccessful;
-static bool devicePresent;
-static DEFINE_MUTEX(sleepModeMutex);
-static bool chipAwake;
-static bool hadFingerDown;
-static bool isDeviceSuspend;
-static struct input_dev *input_dev;
+/* Function declarations */
+static int fb_notifier_callback(struct notifier_block *self,
+ unsigned long event, void *data);
+static int IT7260_ts_resume(struct device *dev);
+static int IT7260_ts_suspend(struct device *dev);
+
static struct IT7260_ts_data *gl_ts;
-#define LOGE(...) pr_err(DEVICE_NAME ": " __VA_ARGS__)
-#define LOGI(...) printk(DEVICE_NAME ": " __VA_ARGS__)
+static int IT7260_debug_suspend_set(void *_data, u64 val)
+{
+ if (val)
+ IT7260_ts_suspend(&gl_ts->client->dev);
+ else
+ IT7260_ts_resume(&gl_ts->client->dev);
+
+ return 0;
+}
+
+static int IT7260_debug_suspend_get(void *_data, u64 *val)
+{
+ *val = gl_ts->suspended;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_suspend_fops, IT7260_debug_suspend_get,
+ IT7260_debug_suspend_set, "%lld\n");
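
DEFINE_SIMPLE_ATTRIBUTE() wraps the get/set pair above into debug_suspend_fops; the file itself is presumably registered later in the probe path, outside this excerpt, under the DEBUGFS_DIR_NAME directory. A minimal sketch of the usual wiring, assuming the node is simply named "suspend":

/* Sketch only; the node name "suspend" is an assumption. */
static int it7260_example_debugfs_init(struct IT7260_ts_data *ts)
{
	ts->dir = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
	if (IS_ERR_OR_NULL(ts->dir))
		return -ENOMEM;

	if (!debugfs_create_file("suspend", S_IRUSR | S_IWUSR, ts->dir,
				 ts, &debug_suspend_fops)) {
		debugfs_remove_recursive(ts->dir);
		ts->dir = NULL;
		return -ENOMEM;
	}
	return 0;
}
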
/* internal use func - does not make sure chip is ready before read */
-static bool i2cReadNoReadyCheck(uint8_t bufferIndex, uint8_t *dataBuffer,
- uint16_t dataLength)
+static bool IT7260_i2cReadNoReadyCheck(uint8_t buf_index, uint8_t *buffer,
+ uint16_t buf_len)
{
struct i2c_msg msgs[2] = {
{
.addr = gl_ts->client->addr,
.flags = I2C_M_NOSTART,
.len = 1,
- .buf = &bufferIndex
+ .buf = &buf_index
},
{
.addr = gl_ts->client->addr,
.flags = I2C_M_RD,
- .len = dataLength,
- .buf = dataBuffer
+ .len = buf_len,
+ .buf = buffer
}
};
- memset(dataBuffer, 0xFF, dataLength);
+ memset(buffer, 0xFF, buf_len);
return i2c_transfer(gl_ts->client->adapter, msgs, 2);
}
-static bool i2cWriteNoReadyCheck(uint8_t bufferIndex,
- const uint8_t *dataBuffer, uint16_t dataLength)
+static bool IT7260_i2cWriteNoReadyCheck(uint8_t buf_index,
+ const uint8_t *buffer, uint16_t buf_len)
{
uint8_t txbuf[257];
struct i2c_msg msg = {
.addr = gl_ts->client->addr,
.flags = 0,
- .len = dataLength + 1,
+ .len = buf_len + 1,
.buf = txbuf
};
/* just to be careful */
- BUG_ON(dataLength > sizeof(txbuf) - 1);
+ if (buf_len > sizeof(txbuf) - 1) {
+ dev_err(&gl_ts->client->dev, "buf length is out of limit\n");
+ return false;
+ }
- txbuf[0] = bufferIndex;
- memcpy(txbuf + 1, dataBuffer, dataLength);
+ txbuf[0] = buf_index;
+ memcpy(txbuf + 1, buffer, buf_len);
return i2c_transfer(gl_ts->client->adapter, &msg, 1);
}
@@ -195,81 +281,84 @@ static bool i2cWriteNoReadyCheck(uint8_t bufferIndex,
* register reads/writes. This function ascertains it is ready
* for that too. the results of this call often were ignored.
*/
-static bool waitDeviceReady(bool forever, bool slowly)
+static bool IT7260_waitDeviceReady(bool forever, bool slowly)
{
- uint8_t ucQuery;
+ uint8_t query;
uint32_t count = DEVICE_READY_MAX_WAIT;
do {
- if (!i2cReadNoReadyCheck(BUF_QUERY, &ucQuery, sizeof(ucQuery)))
- ucQuery = CMD_STATUS_BUSY;
+ if (!IT7260_i2cReadNoReadyCheck(BUF_QUERY, &query,
+ sizeof(query)))
+ query = CMD_STATUS_BUSY;
if (slowly)
- mdelay(1000);
+ msleep(IT_I2C_WAIT);
if (!forever)
count--;
- } while ((ucQuery & CMD_STATUS_BUSY) && count);
+ } while ((query & CMD_STATUS_BUSY) && count);
- return !ucQuery;
+ return !query;
}
-static bool i2cRead(uint8_t bufferIndex, uint8_t *dataBuffer,
- uint16_t dataLength)
+static bool IT7260_i2cRead(uint8_t buf_index, uint8_t *buffer,
+ uint16_t buf_len)
{
- waitDeviceReady(false, false);
- return i2cReadNoReadyCheck(bufferIndex, dataBuffer, dataLength);
+ IT7260_waitDeviceReady(false, false);
+ return IT7260_i2cReadNoReadyCheck(buf_index, buffer, buf_len);
}
-static bool i2cWrite(uint8_t bufferIndex, const uint8_t *dataBuffer,
- uint16_t dataLength)
+static bool IT7260_i2cWrite(uint8_t buf_index, const uint8_t *buffer,
+ uint16_t buf_len)
{
- waitDeviceReady(false, false);
- return i2cWriteNoReadyCheck(bufferIndex, dataBuffer, dataLength);
+ IT7260_waitDeviceReady(false, false);
+ return IT7260_i2cWriteNoReadyCheck(buf_index, buffer, buf_len);
}
-static bool chipFirmwareReinitialize(uint8_t cmdOfChoice)
+static bool IT7260_firmware_reinitialize(u8 command)
{
- uint8_t cmd[] = {cmdOfChoice};
+ uint8_t cmd[] = {command};
uint8_t rsp[2];
- if (!i2cWrite(BUF_COMMAND, cmd, sizeof(cmd)))
+ if (!IT7260_i2cWrite(BUF_COMMAND, cmd, sizeof(cmd)))
return false;
- if (!i2cRead(BUF_RESPONSE, rsp, sizeof(rsp)))
+ if (!IT7260_i2cRead(BUF_RESPONSE, rsp, sizeof(rsp)))
return false;
/* a reply of two zero bytes signifies success */
return !rsp[0] && !rsp[1];
}
-static bool chipFirmwareUpgradeModeEnterExit(bool enter)
+static bool IT7260_enter_exit_fw_ugrade_mode(bool enter)
{
uint8_t cmd[] = {CMD_FIRMWARE_UPGRADE, 0, 'I', 'T', '7', '2',
'6', '0', 0x55, 0xAA};
uint8_t resp[2];
- cmd[1] = enter ? FIRMWARE_MODE_ENTER : FIRMWARE_MODE_EXIT;
- if (!i2cWrite(BUF_COMMAND, cmd, sizeof(cmd)))
+ cmd[1] = enter ? SUB_CMD_ENTER_FW_UPGRADE_MODE :
+ SUB_CMD_EXIT_FW_UPGRADE_MODE;
+ if (!IT7260_i2cWrite(BUF_COMMAND, cmd, sizeof(cmd)))
return false;
- if (!i2cRead(BUF_RESPONSE, resp, sizeof(resp)))
+ if (!IT7260_i2cRead(BUF_RESPONSE, resp, sizeof(resp)))
return false;
/* a reply of two zero bytes signifies success */
return !resp[0] && !resp[1];
}
-static bool chipSetStartOffset(uint16_t offset)
+static bool IT7260_chipSetStartOffset(uint16_t offset)
{
- uint8_t cmd[] = {CMD_SET_START_OFFSET, 0, CMD_UINT16(offset)};
+ uint8_t cmd[] = {CMD_SET_START_OFFSET, 0, ((uint8_t)(offset)),
+ ((uint8_t)((offset) >> 8))};
uint8_t resp[2];
- if (!i2cWrite(BUF_COMMAND, cmd, 4))
+ if (!IT7260_i2cWrite(BUF_COMMAND, cmd, 4))
return false;
- if (!i2cRead(BUF_RESPONSE, resp, sizeof(resp)))
+ if (!IT7260_i2cRead(BUF_RESPONSE, resp, sizeof(resp)))
return false;
@@ -278,371 +367,629 @@ static bool chipSetStartOffset(uint16_t offset)
}
-/* write fwLength bytes from fwData at chip offset writeStartOffset */
-static bool chipFlashWriteAndVerify(unsigned int fwLength,
- const uint8_t *fwData, uint16_t writeStartOffset)
+/* write fw_length bytes from fw_data at chip offset wr_start_offset */
+static bool IT7260_fw_flash_write_verify(unsigned int fw_length,
+ const uint8_t *fw_data, uint16_t wr_start_offset)
{
- uint32_t curDataOfst;
+ uint32_t cur_data_off;
- for (curDataOfst = 0; curDataOfst < fwLength;
- curDataOfst += FW_WRITE_CHUNK_SIZE) {
+ for (cur_data_off = 0; cur_data_off < fw_length;
+ cur_data_off += FW_WRITE_CHUNK_SIZE) {
- uint8_t cmdWrite[2 + FW_WRITE_CHUNK_SIZE] = {CMD_FW_WRITE};
- uint8_t bufRead[FW_WRITE_CHUNK_SIZE];
- uint8_t cmdRead[2] = {CMD_FW_READ};
- unsigned i, nRetries;
- uint32_t curWriteSz;
+ uint8_t cmd_write[2 + FW_WRITE_CHUNK_SIZE] = {CMD_FW_WRITE};
+ uint8_t buf_read[FW_WRITE_CHUNK_SIZE];
+ uint8_t cmd_read[2] = {CMD_FW_READ};
+ unsigned i, retries;
+ uint32_t cur_wr_size;
/* figure out how much to write */
- curWriteSz = fwLength - curDataOfst;
- if (curWriteSz > FW_WRITE_CHUNK_SIZE)
- curWriteSz = FW_WRITE_CHUNK_SIZE;
+ cur_wr_size = fw_length - cur_data_off;
+ if (cur_wr_size > FW_WRITE_CHUNK_SIZE)
+ cur_wr_size = FW_WRITE_CHUNK_SIZE;
/* prepare the write command */
- cmdWrite[1] = curWriteSz;
- for (i = 0; i < curWriteSz; i++)
- cmdWrite[i + 2] = fwData[curDataOfst + i];
+ cmd_write[1] = cur_wr_size;
+ for (i = 0; i < cur_wr_size; i++)
+ cmd_write[i + 2] = fw_data[cur_data_off + i];
/* prepare the read command */
- cmdRead[1] = curWriteSz;
+ cmd_read[1] = cur_wr_size;
- for (nRetries = 0; nRetries < FW_WRITE_RETRY_COUNT;
- nRetries++) {
+ for (retries = 0; retries < FW_WRITE_RETRY_COUNT;
+ retries++) {
/* set write offset and write the data */
- chipSetStartOffset(writeStartOffset + curDataOfst);
- i2cWrite(BUF_COMMAND, cmdWrite, 2 + curWriteSz);
+ IT7260_chipSetStartOffset(
+ wr_start_offset + cur_data_off);
+ IT7260_i2cWrite(BUF_COMMAND, cmd_write,
+ cur_wr_size + 2);
/* set offset and read the data back */
- chipSetStartOffset(writeStartOffset + curDataOfst);
- i2cWrite(BUF_COMMAND, cmdRead, sizeof(cmdRead));
- i2cRead(BUF_RESPONSE, bufRead, curWriteSz);
+ IT7260_chipSetStartOffset(
+ wr_start_offset + cur_data_off);
+ IT7260_i2cWrite(BUF_COMMAND, cmd_read,
+ sizeof(cmd_read));
+ IT7260_i2cRead(BUF_RESPONSE, buf_read, cur_wr_size);
/* verify. If success break out of retry loop */
i = 0;
- while (i < curWriteSz && bufRead[i] == cmdWrite[i + 2])
+ while (i < cur_wr_size &&
+ buf_read[i] == cmd_write[i + 2])
i++;
- if (i == curWriteSz)
+ if (i == cur_wr_size)
break;
- pr_err("write of data offset %u failed on try %u at byte %u/%u\n",
- curDataOfst, nRetries, i, curWriteSz);
}
/* if we've failed after all the retries, tell the caller */
- if (nRetries == FW_WRITE_RETRY_COUNT)
+ if (retries == FW_WRITE_RETRY_COUNT) {
+ dev_err(&gl_ts->client->dev,
+ "write of data offset %u failed on try %u at byte %u/%u\n",
+ cur_data_off, retries, i, cur_wr_size);
return false;
+ }
}
return true;
}
-static bool chipFirmwareUpload(uint32_t fwLen, const uint8_t *fwData,
- uint32_t cfgLen, const uint8_t *cfgData)
+/*
+ * Read the firmware and config versions from the chip via i2c
+ * transactions and save them in the driver data structure.
+ */
+static void IT7260_get_chip_versions(struct device *dev)
{
- bool success = false;
+ static const u8 cmd_read_fw_ver[] = {CMD_READ_VERSIONS,
+ SUB_CMD_READ_FIRMWARE_VERSION};
+ static const u8 cmd_read_cfg_ver[] = {CMD_READ_VERSIONS,
+ SUB_CMD_READ_CONFIG_VERSION};
+ u8 ver_fw[VERSION_LENGTH], ver_cfg[VERSION_LENGTH];
+ bool ret = true;
- /* enter fw upload mode */
- if (!chipFirmwareUpgradeModeEnterExit(true))
- return false;
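+ /* the fw version occupies bytes 5..8 of the response, the cfg version bytes 1..4 */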
+ ret = IT7260_i2cWrite(BUF_COMMAND, cmd_read_fw_ver,
+ sizeof(cmd_read_fw_ver));
+ if (ret) {
+ ret = IT7260_i2cRead(BUF_RESPONSE, ver_fw, VERSION_LENGTH);
+ if (ret)
+ memcpy(gl_ts->fw_ver, ver_fw + (5 * sizeof(u8)),
+ VER_BUFFER_SIZE * sizeof(u8));
+ }
+ if (!ret)
+ dev_err(dev, "failed to read fw version from chip\n");
+
+ ret = IT7260_i2cWrite(BUF_COMMAND, cmd_read_cfg_ver,
+ sizeof(cmd_read_cfg_ver));
+ if (ret) {
+ ret = IT7260_i2cRead(BUF_RESPONSE, ver_cfg, VERSION_LENGTH);
+ if (ret)
+ memcpy(gl_ts->cfg_ver, ver_cfg + (1 * sizeof(u8)),
+ VER_BUFFER_SIZE * sizeof(u8));
+ }
+ if (!ret)
+ dev_err(dev, "failed to read cfg version from chip\n");
- /* flash the firmware if requested */
- if (fwLen && fwData && !chipFlashWriteAndVerify(fwLen, fwData, 0)) {
- LOGE("failed to upload touch firmware\n");
- goto out;
+ dev_info(dev, "Current fw{%X.%X.%X.%X} cfg{%X.%X.%X.%X}\n",
+ gl_ts->fw_ver[0], gl_ts->fw_ver[1], gl_ts->fw_ver[2],
+ gl_ts->fw_ver[3], gl_ts->cfg_ver[0], gl_ts->cfg_ver[1],
+ gl_ts->cfg_ver[2], gl_ts->cfg_ver[3]);
+}
+
+static int IT7260_cfg_upload(struct device *dev, bool force)
+{
+ const struct firmware *cfg = NULL;
+ int ret;
+ bool success, cfg_upgrade = false;
+
+ ret = request_firmware(&cfg, gl_ts->cfg_name, dev);
+ if (ret) {
+ dev_err(dev, "failed to get config data %s for it7260 %d\n",
+ gl_ts->cfg_name, ret);
+ return ret;
}
- /* flash config data if requested */
- if (fwLen && fwData && !chipFlashWriteAndVerify(cfgLen, cfgData,
- CHIP_FLASH_SIZE - cfgLen)) {
- LOGE("failed to upload touch cfg data\n");
+ /*
+ * This compares the cfg version number from the chip with the one
+ * in the cfg data file. It flashes only when the version in the cfg
+ * data file is newer than the chip's, or when a force cfg upgrade
+ * is requested.
+ */
+ if (force)
+ cfg_upgrade = true;
+ else if (IT_CFG_CHECK(gl_ts->cfg_ver, cfg))
+ cfg_upgrade = true;
+
+ if (!cfg_upgrade) {
+ dev_err(dev, "CFG upgrade no required ...\n");
+ ret = -EFAULT;
goto out;
- }
+ } else {
+ dev_info(dev, "Config upgrading...\n");
+
+ disable_irq(gl_ts->client->irq);
+ /* enter cfg upload mode */
+ success = IT7260_enter_exit_fw_ugrade_mode(true);
+ if (!success) {
+ dev_err(dev, "Can't enter cfg upgrade mode\n");
+ ret = -EIO;
+ goto out;
+ }
+ /* flash config data if requested */
+ success = IT7260_fw_flash_write_verify(cfg->size, cfg->data,
+ CHIP_FLASH_SIZE - cfg->size);
+ if (!success) {
+ dev_err(dev, "failed to upgrade touch cfg data\n");
+ IT7260_enter_exit_fw_ugrade_mode(false);
+ IT7260_firmware_reinitialize(CMD_FIRMWARE_REINIT_6F);
+ ret = -EIO;
+ goto out;
+ } else {
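+ /* the cfg version is stored 8 bytes from the end of the cfg image */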
+ memcpy(gl_ts->cfg_ver, cfg->data +
+ (cfg->size - 8 * sizeof(u8)),
+ VER_BUFFER_SIZE * sizeof(u8));
+ dev_info(dev, "CFG upgrade is success. New cfg ver: %X.%X.%X.%X\n",
+ gl_ts->cfg_ver[0], gl_ts->cfg_ver[1],
+ gl_ts->cfg_ver[2], gl_ts->cfg_ver[3]);
- success = true;
+ }
+ enable_irq(gl_ts->client->irq);
+ }
out:
- return chipFirmwareUpgradeModeEnterExit(false) &&
- chipFirmwareReinitialize(CMD_FIRMWARE_REINIT_6F) && success;
-}
+ release_firmware(cfg);
+ return ret;
+}
-/*
- * both buffers should be VERSION_LENGTH in size,
- * but only a part of them is significant
- */
-static bool chipGetVersions(uint8_t *verFw, uint8_t *verCfg, bool logIt)
+static int IT7260_fw_upload(struct device *dev, bool force)
{
- /*
- * this code to get versions is reproduced as was written, but it does
- * not make sense. Something here *PROBABLY IS* wrong
- */
- static const uint8_t cmdReadFwVer[] = {CMD_READ_VERSIONS, VER_FIRMWARE};
- static const uint8_t cmdReadCfgVer[] = {CMD_READ_VERSIONS, VER_CONFIG};
- bool ret = true;
+ const struct firmware *fw = NULL;
+ int ret;
+ bool success, fw_upgrade = false;
+
+ ret = request_firmware(&fw, gl_ts->fw_name, dev);
+ if (ret) {
+ dev_err(dev, "failed to get firmware %s for it7260 %d\n",
+ gl_ts->fw_name, ret);
+ return ret;
+ }
/*
- * this structure is so that we definitely do all the calls, but still
- * return a status in case anyone cares
+ * This compares the fw version number from the chip with the one in
+ * the fw data file. It flashes only when the version in the fw data
+ * file is newer than the chip's, or when a force fw upgrade is
+ * requested.
*/
- ret = i2cWrite(BUF_COMMAND, cmdReadFwVer, sizeof(cmdReadFwVer)) && ret;
- ret = i2cRead(BUF_RESPONSE, verFw, VERSION_LENGTH) && ret;
- ret = i2cWrite(BUF_COMMAND, cmdReadCfgVer,
- sizeof(cmdReadCfgVer)) && ret;
- ret = i2cRead(BUF_RESPONSE, verCfg, VERSION_LENGTH) && ret;
+ if (force)
+ fw_upgrade = true;
+ else if (IT_FW_CHECK(gl_ts->fw_ver, fw))
+ fw_upgrade = true;
+
+ if (!fw_upgrade) {
+ dev_err(dev, "FW upgrade not required ...\n");
+ ret = -EFAULT;
+ goto out;
+ } else {
+ dev_info(dev, "Firmware upgrading...\n");
+
+ disable_irq(gl_ts->client->irq);
+ /* enter fw upload mode */
+ success = IT7260_enter_exit_fw_ugrade_mode(true);
+ if (!success) {
+ dev_err(dev, "Can't enter fw upgrade mode\n");
+ ret = -EIO;
+ goto out;
+ }
+ /* flash the firmware if requested */
+ success = IT7260_fw_flash_write_verify(fw->size, fw->data, 0);
+ if (!success) {
+ dev_err(dev, "failed to upgrade touch firmware\n");
+ IT7260_enter_exit_fw_ugrade_mode(false);
+ IT7260_firmware_reinitialize(CMD_FIRMWARE_REINIT_6F);
+ ret = -EIO;
+ goto out;
+ } else {
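+ /* the fw version is stored at byte offset 8 of the fw image */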
+ memcpy(gl_ts->fw_ver, fw->data + (8 * sizeof(u8)),
+ VER_BUFFER_SIZE * sizeof(u8));
+ dev_info(dev, "FW upgrade is success. New fw ver: %X.%X.%X.%X\n",
+ gl_ts->fw_ver[0], gl_ts->fw_ver[1],
+ gl_ts->fw_ver[2], gl_ts->fw_ver[3]);
+ }
+ enable_irq(gl_ts->client->irq);
+ }
- if (logIt)
- LOGI("current versions: fw@{%X,%X,%X,%X}, cfg@{%X,%X,%X,%X}\n",
- verFw[5], verFw[6], verFw[7], verFw[8],
- verCfg[1], verCfg[2], verCfg[3], verCfg[4]);
+out:
+ release_firmware(fw);
return ret;
}
-static ssize_t sysfsUpgradeStore(struct device *dev,
+static int IT7260_ts_chipLowPowerMode(const u8 sleep_type)
+{
+ const uint8_t cmd_sleep[] = {CMD_PWR_CTL, 0x00, sleep_type};
+ uint8_t dummy;
+
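+ /* a non-zero sleep_type issues the power-control command; a plain query read wakes the chip */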
+ if (sleep_type)
+ IT7260_i2cWriteNoReadyCheck(BUF_COMMAND, cmd_sleep,
+ sizeof(cmd_sleep));
+ else
+ IT7260_i2cReadNoReadyCheck(BUF_QUERY, &dummy, sizeof(dummy));
+
+ msleep(WAIT_CHANGE_MODE);
+ return 0;
+}
+
+static ssize_t sysfs_fw_upgrade_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- const struct firmware *fw, *cfg;
- uint8_t verFw[10], verCfg[10];
- unsigned fwLen = 0, cfgLen = 0;
- bool manualUpgrade, success;
int mode = 0, ret;
- ret = request_firmware(&fw, "it7260.fw", dev);
- if (ret)
- LOGE("failed to get firmware for it7260\n");
- else
- fwLen = fw->size;
+ if (gl_ts->suspended) {
+ dev_err(dev, "Device is suspended, can't flash fw!!!\n");
+ return -EBUSY;
+ }
- ret = request_firmware(&cfg, "it7260.cfg", dev);
- if (ret)
- LOGE("failed to get config data for it7260\n");
- else
- cfgLen = cfg->size;
+ ret = kstrtoint(buf, 10, &mode);
+ if (ret) {
+ dev_err(dev, "failed to read input for sysfs\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&gl_ts->fw_cfg_mutex);
+ if (mode == 1) {
+ gl_ts->fw_cfg_uploading = true;
+ ret = IT7260_fw_upload(dev, false);
+ if (ret) {
+ dev_err(dev, "Failed to flash fw: %d", ret);
+ gl_ts->fw_upgrade_result = false;
+ } else {
+ gl_ts->fw_upgrade_result = true;
+ }
+ gl_ts->fw_cfg_uploading = false;
+ }
+ mutex_unlock(&gl_ts->fw_cfg_mutex);
+
+ return count;
+}
+
+static ssize_t sysfs_cfg_upgrade_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int mode = 0, ret;
+
+ if (gl_ts->suspended) {
+ dev_err(dev, "Device is suspended, can't flash cfg!!!\n");
+ return -EBUSY;
+ }
+
+ ret = kstrtoint(buf, 10, &mode);
+ if (ret) {
+ dev_err(dev, "failed to read input for sysfs\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&gl_ts->fw_cfg_mutex);
+ if (mode == 1) {
+ gl_ts->fw_cfg_uploading = true;
+ ret = IT7260_cfg_upload(dev, false);
+ if (ret) {
+ dev_err(dev, "Failed to flash cfg: %d", ret);
+ gl_ts->cfg_upgrade_result = false;
+ } else {
+ gl_ts->cfg_upgrade_result = true;
+ }
+ gl_ts->fw_cfg_uploading = false;
+ }
+ mutex_unlock(&gl_ts->fw_cfg_mutex);
+
+ return count;
+}
+
+static ssize_t sysfs_fw_upgrade_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, MAX_BUFFER_SIZE, "%d\n",
+ gl_ts->fw_upgrade_result);
+}
+
+static ssize_t sysfs_cfg_upgrade_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, MAX_BUFFER_SIZE, "%d\n",
+ gl_ts->cfg_upgrade_result);
+}
+
+static ssize_t sysfs_force_fw_upgrade_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int mode = 0, ret;
+
+ if (gl_ts->suspended) {
+ dev_err(dev, "Device is suspended, can't flash fw!!!\n");
+ return -EBUSY;
+ }
ret = kstrtoint(buf, 10, &mode);
- manualUpgrade = mode == SYSFS_FW_UPLOAD_MODE_MANUAL;
- LOGI("firmware found %ub of fw and %ub of config in %s mode\n",
- fwLen, cfgLen, manualUpgrade ? "manual" : "normal");
-
- chipGetVersions(verFw, verCfg, true);
-
- fwUploadResult = SYSFS_RESULT_NOT_DONE;
- if (fwLen && cfgLen) {
- if (manualUpgrade || (verFw[5] < fw->data[8] || verFw[6] <
- fw->data[9] || verFw[7] < fw->data[10] || verFw[8] <
- fw->data[11]) || (verCfg[1] < cfg->data[cfgLen - 8]
- || verCfg[2] < cfg->data[cfgLen - 7] || verCfg[3] <
- cfg->data[cfgLen - 6] ||
- verCfg[4] < cfg->data[cfgLen - 5])){
- LOGI("firmware/config will be upgraded\n");
- disable_irq(gl_ts->client->irq);
- success = chipFirmwareUpload(fwLen, fw->data, cfgLen,
- cfg->data);
- enable_irq(gl_ts->client->irq);
-
- fwUploadResult = success ?
- SYSFS_RESULT_SUCCESS : SYSFS_RESULT_FAIL;
- LOGI("upload %s\n", success ? "success" : "failed");
+ if (ret) {
+ dev_err(dev, "failed to read input for sysfs\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&gl_ts->fw_cfg_mutex);
+ if (mode == 1) {
+ gl_ts->fw_cfg_uploading = true;
+ ret = IT7260_fw_upload(dev, true);
+ if (ret) {
+ dev_err(dev, "Failed to force flash fw: %d", ret);
+ gl_ts->fw_upgrade_result = false;
} else {
- LOGI("firmware/config upgrade not needed\n");
+ gl_ts->fw_upgrade_result = true;
}
+ gl_ts->fw_cfg_uploading = false;
}
+ mutex_unlock(&gl_ts->fw_cfg_mutex);
- if (fwLen)
- release_firmware(fw);
+ return count;
+}
- if (cfgLen)
- release_firmware(cfg);
+static ssize_t sysfs_force_cfg_upgrade_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int mode = 0, ret;
+
+ if (gl_ts->suspended) {
+ dev_err(dev, "Device is suspended, can't flash cfg!!!\n");
+ return -EBUSY;
+ }
+
+ ret = kstrtoint(buf, 10, &mode);
+ if (ret) {
+ dev_err(dev, "failed to read input for sysfs\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&gl_ts->fw_cfg_mutex);
+ if (mode == 1) {
+ gl_ts->fw_cfg_uploading = true;
+ ret = IT7260_cfg_upload(dev, true);
+ if (ret) {
+ dev_err(dev, "Failed to force flash cfg: %d", ret);
+ gl_ts->cfg_upgrade_result = false;
+ } else {
+ gl_ts->cfg_upgrade_result = true;
+ }
+ gl_ts->fw_cfg_uploading = false;
+ }
+ mutex_unlock(&gl_ts->fw_cfg_mutex);
return count;
}
-static ssize_t sysfsUpgradeShow(struct device *dev,
+static ssize_t sysfs_force_fw_upgrade_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, MAX_BUFFER_SIZE, "%d", gl_ts->fw_upgrade_result);
+}
+
+static ssize_t sysfs_force_cfg_upgrade_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, MAX_BUFFER_SIZE, "%d", fwUploadResult);
+ return snprintf(buf, MAX_BUFFER_SIZE, "%d", gl_ts->cfg_upgrade_result);
}
-static ssize_t sysfsCalibrationShow(struct device *dev,
+static ssize_t sysfs_calibration_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, MAX_BUFFER_SIZE, "%d", calibrationWasSuccessful);
+ return scnprintf(buf, MAX_BUFFER_SIZE, "%d\n",
+ gl_ts->calibration_success);
}
-static bool chipSendCalibrationCmd(bool autoTuneOn)
+static bool IT7260_chipSendCalibrationCmd(bool auto_tune_on)
{
- uint8_t cmdCalibrate[] = {CMD_CALIBRATE, 0, autoTuneOn ? 1 : 0, 0, 0};
- return i2cWrite(BUF_COMMAND, cmdCalibrate, sizeof(cmdCalibrate));
+ uint8_t cmd_calibrate[] = {CMD_CALIBRATE, 0,
+ auto_tune_on ? 1 : 0, 0, 0};
+ return IT7260_i2cWrite(BUF_COMMAND, cmd_calibrate,
+ sizeof(cmd_calibrate));
}
-static ssize_t sysfsCalibrationStore(struct device *dev,
+static ssize_t sysfs_calibration_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
uint8_t resp;
- if (!chipSendCalibrationCmd(false))
- LOGE("failed to send calibration command\n");
- else {
- calibrationWasSuccessful =
- i2cRead(BUF_RESPONSE, &resp, sizeof(resp))
- ? SYSFS_RESULT_SUCCESS : SYSFS_RESULT_FAIL;
+ if (!IT7260_chipSendCalibrationCmd(false)) {
+ dev_err(dev, "failed to send calibration command\n");
+ } else {
+ gl_ts->calibration_success =
+ IT7260_i2cRead(BUF_RESPONSE, &resp, sizeof(resp));
/*
* previous logic that was here never called
- * chipFirmwareReinitialize() due to checking a
+ * IT7260_firmware_reinitialize() due to checking a
* guaranteed-not-null value against null. We now
* call it. Hopefully this is OK
*/
if (!resp)
- LOGI("chipFirmwareReinitialize -> %s\n",
- chipFirmwareReinitialize(CMD_FIRMWARE_REINIT_6F)
+ dev_dbg(dev, "IT7260_firmware_reinitialize-> %s\n",
+ IT7260_firmware_reinitialize(CMD_FIRMWARE_REINIT_6F)
? "success" : "fail");
}
return count;
}
-static ssize_t sysfsPointShow(struct device *dev,
+static ssize_t sysfs_point_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- uint8_t pointData[sizeof(struct PointData)];
+ uint8_t point_data[sizeof(struct PointData)];
bool readSuccess;
ssize_t ret;
- readSuccess = i2cReadNoReadyCheck(BUF_POINT_INFO, pointData,
- sizeof(pointData));
- ret = snprintf(buf, MAX_BUFFER_SIZE,
- "point_show read ret[%d]--point[%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x]=\n",
- readSuccess, pointData[0], pointData[1], pointData[2],
- pointData[3], pointData[4], pointData[5], pointData[6],
- pointData[7], pointData[8], pointData[9], pointData[10],
- pointData[11], pointData[12], pointData[13]);
-
- LOGI("%s", buf);
+ readSuccess = IT7260_i2cReadNoReadyCheck(BUF_POINT_INFO, point_data,
+ sizeof(point_data));
+
+ if (readSuccess) {
+ ret = scnprintf(buf, MAX_BUFFER_SIZE,
+ "point_show read ret[%d]--point[%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x]\n",
+ readSuccess, point_data[0], point_data[1],
+ point_data[2], point_data[3], point_data[4],
+ point_data[5], point_data[6], point_data[7],
+ point_data[8], point_data[9], point_data[10],
+ point_data[11], point_data[12], point_data[13]);
+ } else {
+ ret = scnprintf(buf, MAX_BUFFER_SIZE,
+ "failed to read point data\n");
+ }
+ dev_dbg(dev, "%s", buf);
return ret;
}
-static ssize_t sysfsPointStore(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t sysfs_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return count;
+ return scnprintf(buf, MAX_BUFFER_SIZE,
+ "fw{%X.%X.%X.%X} cfg{%X.%X.%X.%X}\n",
+ gl_ts->fw_ver[0], gl_ts->fw_ver[1], gl_ts->fw_ver[2],
+ gl_ts->fw_ver[3], gl_ts->cfg_ver[0], gl_ts->cfg_ver[1],
+ gl_ts->cfg_ver[2], gl_ts->cfg_ver[3]);
}
-static ssize_t sysfsStatusShow(struct device *dev,
+static ssize_t sysfs_sleep_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, MAX_BUFFER_SIZE, "%d\n", devicePresent ? 1 : 0);
+ /*
+ * The usefulness of this was questionable at best - we were at least
+ * leaking a byte of kernel data (by claiming to return a byte but not
+ * writing to buf). To fix this, we now actually return the sleep status.
+ */
+ *buf = gl_ts->suspended ? '1' : '0';
+ return 1;
}
-static ssize_t sysfsStatusStore(struct device *dev,
+static ssize_t sysfs_sleep_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- uint8_t verFw[10], verCfg[10];
+ int go_to_sleep, ret;
+
+ ret = kstrtoint(buf, 10, &go_to_sleep);
- chipGetVersions(verFw, verCfg, true);
+ /* (gl_ts->suspended == true && go_to_sleep > 0) means the
+ * device is already suspended and you want it to sleep;
+ * (gl_ts->suspended == false && go_to_sleep == 0) means the
+ * device is already active and you want it to stay active.
+ */
+ if ((gl_ts->suspended && go_to_sleep > 0) ||
+ (!gl_ts->suspended && go_to_sleep == 0))
+ dev_err(dev, "duplicate request to %s chip\n",
+ go_to_sleep ? "sleep" : "wake");
+ else if (go_to_sleep) {
+ disable_irq(gl_ts->client->irq);
+ IT7260_ts_chipLowPowerMode(PWR_CTL_SLEEP_MODE);
+ dev_dbg(dev, "touch is going to sleep...\n");
+ } else {
+ IT7260_ts_chipLowPowerMode(PWR_CTL_ACTIVE_MODE);
+ enable_irq(gl_ts->client->irq);
+ dev_dbg(dev, "touch is going to wake!\n");
+ }
+ gl_ts->suspended = go_to_sleep;
return count;
}
-static ssize_t sysfsVersionShow(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t sysfs_cfg_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
- uint8_t verFw[10], verCfg[10];
+ char *strptr;
- chipGetVersions(verFw, verCfg, false);
- return snprintf(buf, MAX_BUFFER_SIZE, "%x,%x,%x,%x # %x,%x,%x,%x\n",
- verFw[5], verFw[6], verFw[7], verFw[8],
- verCfg[1], verCfg[2], verCfg[3], verCfg[4]);
-}
+ if (count >= MAX_BUFFER_SIZE) {
+ dev_err(dev, "Input over %d chars long\n", MAX_BUFFER_SIZE);
+ return -EINVAL;
+ }
+
+ strptr = strnstr(buf, ".bin", count);
+ if (!strptr) {
+ dev_err(dev, "Input is invalid cfg file\n");
+ return -EINVAL;
+ }
+
+ strlcpy(gl_ts->cfg_name, buf, count);
-static ssize_t sysfsVersionStore(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
return count;
}
-static ssize_t sysfsSleepShow(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t sysfs_cfg_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- /*
- * The usefulness of this was questionable at best - we were at least
- * leaking a byte of kernel data (by claiming to return a byte but not
- * writing to buf. To fix this now we actually return the sleep status
- */
- if (!mutex_lock_interruptible(&sleepModeMutex)) {
- *buf = chipAwake ? '1' : '0';
- mutex_unlock(&sleepModeMutex);
- return 1;
- } else {
- return -EINTR;
- }
+ if (strnlen(gl_ts->cfg_name, MAX_BUFFER_SIZE) > 0)
+ return scnprintf(buf, MAX_BUFFER_SIZE, "%s\n",
+ gl_ts->cfg_name);
+ else
+ return scnprintf(buf, MAX_BUFFER_SIZE,
+ "No config file name given\n");
}
-static ssize_t sysfsSleepStore(struct device *dev,
+static ssize_t sysfs_fw_name_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- static const uint8_t cmdGoSleep[] = {CMD_PWR_CTL,
- 0x00, PWR_CTL_SLEEP_MODE};
- int goToSleepVal, ret;
- bool goToWake;
- uint8_t dummy;
+ char *strptr;
- ret = kstrtoint(buf, 10, &goToSleepVal);
- /* convert to bool of proper polarity */
- goToWake = !goToSleepVal;
-
- if (!mutex_lock_interruptible(&sleepModeMutex)) {
- if ((chipAwake && goToWake) || (!chipAwake && !goToWake))
- LOGE("duplicate request to %s chip\n",
- goToWake ? "wake" : "sleep");
- else if (goToWake) {
- i2cReadNoReadyCheck(BUF_QUERY, &dummy, sizeof(dummy));
- enable_irq(gl_ts->client->irq);
- LOGI("touch is going to wake!\n");
- } else {
- disable_irq(gl_ts->client->irq);
- i2cWriteNoReadyCheck(BUF_COMMAND, cmdGoSleep,
- sizeof(cmdGoSleep));
- LOGI("touch is going to sleep...\n");
- }
- chipAwake = goToWake;
- mutex_unlock(&sleepModeMutex);
- return count;
- } else {
- return -EINTR;
+ if (count >= MAX_BUFFER_SIZE) {
+ dev_err(dev, "Input over %d chars long\n", MAX_BUFFER_SIZE);
+ return -EINVAL;
}
-}
-
-static DEVICE_ATTR(status, S_IRUGO|S_IWUSR|S_IWGRP,
- sysfsStatusShow, sysfsStatusStore);
-static DEVICE_ATTR(version, S_IRUGO|S_IWUSR|S_IWGRP,
- sysfsVersionShow, sysfsVersionStore);
-static DEVICE_ATTR(sleep, S_IRUGO|S_IWUSR|S_IWGRP,
- sysfsSleepShow, sysfsSleepStore);
+ strptr = strnstr(buf, ".bin", count);
+ if (!strptr) {
+ dev_err(dev, "Input is invalid fw file\n");
+ return -EINVAL;
+ }
-static struct attribute *it7260_attrstatus[] = {
- &dev_attr_status.attr,
- &dev_attr_version.attr,
- &dev_attr_sleep.attr,
- NULL
-};
+ strlcpy(gl_ts->fw_name, buf, count);
+ return count;
+}
-static const struct attribute_group it7260_attrstatus_group = {
- .attrs = it7260_attrstatus,
-};
+static ssize_t sysfs_fw_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (strnlen(gl_ts->fw_name, MAX_BUFFER_SIZE) > 0)
+ return scnprintf(buf, MAX_BUFFER_SIZE, "%s\n",
+ gl_ts->fw_name);
+ else
+ return scnprintf(buf, MAX_BUFFER_SIZE,
+ "No firmware file name given\n");
+}
-static DEVICE_ATTR(calibration, S_IRUGO|S_IWUSR|S_IWGRP,
- sysfsCalibrationShow, sysfsCalibrationStore);
-static DEVICE_ATTR(upgrade, S_IRUGO|S_IWUSR|S_IWGRP,
- sysfsUpgradeShow, sysfsUpgradeStore);
-static DEVICE_ATTR(point, S_IRUGO|S_IWUSR|S_IWGRP,
- sysfsPointShow, sysfsPointStore);
+static DEVICE_ATTR(version, S_IRUGO | S_IWUSR,
+ sysfs_version_show, NULL);
+static DEVICE_ATTR(sleep, S_IRUGO | S_IWUSR,
+ sysfs_sleep_show, sysfs_sleep_store);
+static DEVICE_ATTR(calibration, S_IRUGO | S_IWUSR,
+ sysfs_calibration_show, sysfs_calibration_store);
+static DEVICE_ATTR(fw_update, S_IRUGO | S_IWUSR,
+ sysfs_fw_upgrade_show, sysfs_fw_upgrade_store);
+static DEVICE_ATTR(cfg_update, S_IRUGO | S_IWUSR,
+ sysfs_cfg_upgrade_show, sysfs_cfg_upgrade_store);
+static DEVICE_ATTR(point, S_IRUGO | S_IWUSR,
+ sysfs_point_show, NULL);
+static DEVICE_ATTR(fw_name, S_IRUGO | S_IWUSR,
+ sysfs_fw_name_show, sysfs_fw_name_store);
+static DEVICE_ATTR(cfg_name, S_IRUGO | S_IWUSR,
+ sysfs_cfg_name_show, sysfs_cfg_name_store);
+static DEVICE_ATTR(force_fw_update, S_IRUGO | S_IWUSR,
+ sysfs_force_fw_upgrade_show,
+ sysfs_force_fw_upgrade_store);
+static DEVICE_ATTR(force_cfg_update, S_IRUGO | S_IWUSR,
+ sysfs_force_cfg_upgrade_show,
+ sysfs_force_cfg_upgrade_store);
static struct attribute *it7260_attributes[] = {
+ &dev_attr_version.attr,
+ &dev_attr_sleep.attr,
&dev_attr_calibration.attr,
- &dev_attr_upgrade.attr,
+ &dev_attr_fw_update.attr,
+ &dev_attr_cfg_update.attr,
&dev_attr_point.attr,
+ &dev_attr_fw_name.attr,
+ &dev_attr_cfg_name.attr,
+ &dev_attr_force_fw_update.attr,
+ &dev_attr_force_cfg_update.attr,
NULL
};
@@ -650,199 +997,714 @@ static const struct attribute_group it7260_attr_group = {
.attrs = it7260_attributes,
};
-static void chipExternalCalibration(bool autoTuneEnabled)
+static void IT7260_chipExternalCalibration(bool autoTuneEnabled)
{
uint8_t resp[2];
- LOGI("sent calibration command -> %d\n",
- chipSendCalibrationCmd(autoTuneEnabled));
- waitDeviceReady(true, true);
- i2cReadNoReadyCheck(BUF_RESPONSE, resp, sizeof(resp));
- chipFirmwareReinitialize(CMD_FIRMWARE_REINIT_C);
+ dev_dbg(&gl_ts->client->dev, "sent calibration command -> %d\n",
+ IT7260_chipSendCalibrationCmd(autoTuneEnabled));
+ IT7260_waitDeviceReady(true, true);
+ IT7260_i2cReadNoReadyCheck(BUF_RESPONSE, resp, sizeof(resp));
+ IT7260_firmware_reinitialize(CMD_FIRMWARE_REINIT_C);
}
-void sendCalibrationCmd(void)
+void IT7260_sendCalibrationCmd(void)
{
- chipExternalCalibration(false);
+ IT7260_chipExternalCalibration(false);
}
-EXPORT_SYMBOL(sendCalibrationCmd);
+EXPORT_SYMBOL(IT7260_sendCalibrationCmd);
-static void readFingerData(uint16_t *xP, uint16_t *yP, uint8_t *pressureP,
- const struct FingerData *fd)
+static void IT7260_ts_release_all(void)
{
- uint16_t x = fd->xLo;
- uint16_t y = fd->yLo;
+ int finger;
- x += ((uint16_t)(fd->hi & 0x0F)) << 8;
- y += ((uint16_t)(fd->hi & 0xF0)) << 4;
+ for (finger = 0; finger < gl_ts->pdata->num_of_fingers; finger++) {
+ input_mt_slot(gl_ts->input_dev, finger);
+ input_mt_report_slot_state(gl_ts->input_dev,
+ MT_TOOL_FINGER, 0);
+ }
- if (xP)
- *xP = x;
- if (yP)
- *yP = y;
- if (pressureP)
- *pressureP = fd->pressure & FD_PRESSURE_BITS;
+ input_report_key(gl_ts->input_dev, BTN_TOUCH, 0);
+ input_sync(gl_ts->input_dev);
}
-static void readTouchDataPoint(void)
+static irqreturn_t IT7260_ts_threaded_handler(int irq, void *devid)
{
- struct PointData pointData;
- uint8_t devStatus;
- uint8_t pressure = FD_PRESSURE_NONE;
- uint16_t x, y;
+ struct PointData point_data;
+ struct input_dev *input_dev = gl_ts->input_dev;
+ u8 dev_status, finger, touch_count = 0, finger_status;
+ u8 pressure = FD_PRESSURE_NONE;
+ u16 x, y;
+ bool palm_detected;
/* verify there is point data to read & it is readable and valid */
- i2cReadNoReadyCheck(BUF_QUERY, &devStatus, sizeof(devStatus));
- if (!((devStatus & PT_INFO_BITS) & PT_INFO_YES)) {
- pr_err("readTouchDataPoint() called when no data available (0x%02X)\n",
- devStatus);
- return;
- }
- if (!i2cReadNoReadyCheck(BUF_POINT_INFO, (void *)&pointData,
- sizeof(pointData))) {
- pr_err("readTouchDataPoint() failed to read point data buffer\n");
- return;
- }
- if ((pointData.flags & PD_FLAGS_DATA_TYPE_BITS) !=
+ IT7260_i2cReadNoReadyCheck(BUF_QUERY, &dev_status, sizeof(dev_status));
+ if (!((dev_status & PT_INFO_BITS) & PT_INFO_YES))
+ return IRQ_HANDLED;
+ if (!IT7260_i2cReadNoReadyCheck(BUF_POINT_INFO, (void *)&point_data,
+ sizeof(point_data))) {
+ dev_err(&gl_ts->client->dev,
+ "failed to read point data buffer\n");
+ return IRQ_HANDLED;
+ }
+
+ /* Check if controller moves from idle to active state */
+ if ((point_data.flags & PD_FLAGS_DATA_TYPE_BITS) !=
PD_FLAGS_DATA_TYPE_TOUCH) {
- pr_err("readTouchDataPoint() dropping non-point data of type 0x%02X\n",
- pointData.flags);
- return;
+ /*
+ * This adds touch-to-wake functionality to the ITE touch driver.
+ * When the user puts a finger on the touch controller while it is
+ * in the idle state, the controller moves to the active state and
+ * the driver sends a KEY_WAKEUP event to wake the device. The
+ * pm_stay_awake() call tells the PM core to stay awake until the
+ * CPU cores are up; the schedule_work() call schedules work that
+ * tells the PM core to relax once they are.
+ */
+ if (gl_ts->device_needs_wakeup) {
+ pm_stay_awake(&gl_ts->client->dev);
+ input_report_key(input_dev, KEY_WAKEUP, 1);
+ input_sync(input_dev);
+ input_report_key(input_dev, KEY_WAKEUP, 0);
+ input_sync(input_dev);
+ schedule_work(&gl_ts->work_pm_relax);
+ return IRQ_HANDLED;
+ }
}
- if ((pointData.flags & PD_FLAGS_HAVE_FINGERS) & 1)
- readFingerData(&x, &y, &pressure, pointData.fd);
+ palm_detected = point_data.palm & PD_PALM_FLAG_BIT;
+ if (palm_detected && gl_ts->pdata->palm_detect_en) {
+ input_report_key(input_dev,
+ gl_ts->pdata->palm_detect_keycode, 1);
+ input_sync(input_dev);
+ input_report_key(input_dev,
+ gl_ts->pdata->palm_detect_keycode, 0);
+ input_sync(input_dev);
+ }
- if (pressure >= FD_PRESSURE_LIGHT) {
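+ /* bit N of the flags byte is set while finger N is in contact */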
+ for (finger = 0; finger < gl_ts->pdata->num_of_fingers; finger++) {
+ finger_status = point_data.flags & (0x01 << finger);
+
+ input_mt_slot(input_dev, finger);
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
+ finger_status != 0);
+
+ x = point_data.fd[finger].xLo +
+ (((u16)(point_data.fd[finger].hi & 0x0F)) << 8);
+ y = point_data.fd[finger].yLo +
+ (((u16)(point_data.fd[finger].hi & 0xF0)) << 4);
+
+ pressure = point_data.fd[finger].pressure & FD_PRESSURE_BITS;
+
+ if (finger_status) {
+ if (pressure >= FD_PRESSURE_LIGHT) {
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ input_report_abs(input_dev,
+ ABS_MT_POSITION_X, x);
+ input_report_abs(input_dev,
+ ABS_MT_POSITION_Y, y);
+ touch_count++;
+ }
+ }
+ }
- if (!hadFingerDown)
- hadFingerDown = true;
+ input_report_key(input_dev, BTN_TOUCH, touch_count > 0);
+ input_sync(input_dev);
- readFingerData(&x, &y, &pressure, pointData.fd);
+ return IRQ_HANDLED;
+}
- input_report_abs(gl_ts->input_dev, ABS_X, x);
- input_report_abs(gl_ts->input_dev, ABS_Y, y);
- input_report_key(gl_ts->input_dev, BTN_TOUCH, 1);
- input_sync(gl_ts->input_dev);
+static void IT7260_ts_work_func(struct work_struct *work)
+{
+ pm_relax(&gl_ts->client->dev);
+}
- } else if (hadFingerDown) {
- hadFingerDown = false;
+static int IT7260_chipIdentify(void)
+{
+ static const uint8_t cmd_ident[] = {CMD_IDENT_CHIP};
+ static const uint8_t expected_id[] = {0x0A, 'I', 'T', 'E', '7',
+ '2', '6', '0'};
+ uint8_t chip_id[10] = {0,};
- input_report_key(gl_ts->input_dev, BTN_TOUCH, 0);
- input_sync(gl_ts->input_dev);
+ IT7260_waitDeviceReady(false, false);
+
+ if (!IT7260_i2cWriteNoReadyCheck(BUF_COMMAND, cmd_ident,
+ sizeof(cmd_ident))) {
+ dev_err(&gl_ts->client->dev, "failed to write CMD_IDENT_CHIP\n");
+ return -ENODEV;
}
+ IT7260_waitDeviceReady(false, false);
+
+ if (!IT7260_i2cReadNoReadyCheck(BUF_RESPONSE, chip_id,
+ sizeof(chip_id))) {
+ dev_err(&gl_ts->client->dev, "failed to read chip-id\n");
+ return -ENODEV;
+ }
+ dev_info(&gl_ts->client->dev,
+ "IT7260_chipIdentify read id: %02X %c%c%c%c%c%c%c %c%c\n",
+ chip_id[0], chip_id[1], chip_id[2], chip_id[3], chip_id[4],
+ chip_id[5], chip_id[6], chip_id[7], chip_id[8], chip_id[9]);
+
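+ /* the chip answers with 0x0A, the ASCII string "ITE7260", then a two-byte revision */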
+ if (memcmp(chip_id, expected_id, sizeof(expected_id)))
+ return -EINVAL;
+
+ if (chip_id[8] == '5' && chip_id[9] == '6')
+ dev_info(&gl_ts->client->dev, "rev BX3 found\n");
+ else if (chip_id[8] == '6' && chip_id[9] == '6')
+ dev_info(&gl_ts->client->dev, "rev BX4 found\n");
+ else
+ dev_info(&gl_ts->client->dev, "unknown revision (0x%02X 0x%02X) found\n",
+ chip_id[8], chip_id[9]);
+
+ return 0;
}
-static irqreturn_t IT7260_ts_threaded_handler(int irq, void *devid)
+static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
{
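+ /* only regulators that expose voltage control accept load (optimum mode) requests */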
- readTouchDataPoint();
- return IRQ_HANDLED;
+ return (regulator_count_voltages(reg) > 0) ?
+ regulator_set_optimum_mode(reg, load_uA) : 0;
}
-static bool chipIdentifyIT7260(void)
+static int IT7260_regulator_configure(bool on)
{
- static const uint8_t cmdIdent[] = {CMD_IDENT_CHIP};
- static const uint8_t expectedID[] = {0x0A, 'I', 'T', 'E', '7',
- '2', '6', '0'};
- uint8_t chipID[10] = {0,};
+ int retval;
- waitDeviceReady(true, false);
+ if (on == false)
+ goto hw_shutdown;
- if (!i2cWriteNoReadyCheck(BUF_COMMAND, cmdIdent, sizeof(cmdIdent))) {
- LOGE("i2cWrite() failed\n");
- return false;
+ gl_ts->vdd = devm_regulator_get(&gl_ts->client->dev, "vdd");
+ if (IS_ERR(gl_ts->vdd)) {
+ dev_err(&gl_ts->client->dev,
+ "%s: Failed to get vdd regulator\n", __func__);
+ return PTR_ERR(gl_ts->vdd);
}
- waitDeviceReady(true, false);
+ if (regulator_count_voltages(gl_ts->vdd) > 0) {
+ retval = regulator_set_voltage(gl_ts->vdd,
+ IT_VTG_MIN_UV, IT_VTG_MAX_UV);
+ if (retval) {
+ dev_err(&gl_ts->client->dev,
+ "regulator set_vtg failed retval =%d\n",
+ retval);
+ goto err_set_vtg_vdd;
+ }
+ }
- if (!i2cReadNoReadyCheck(BUF_RESPONSE, chipID, sizeof(chipID))) {
- LOGE("i2cRead() failed\n");
- return false;
+ gl_ts->avdd = devm_regulator_get(&gl_ts->client->dev, "avdd");
+ if (IS_ERR(gl_ts->avdd)) {
+ dev_err(&gl_ts->client->dev,
+ "%s: Failed to get i2c regulator\n", __func__);
+ retval = PTR_ERR(gl_ts->avdd);
+ goto err_get_vtg_i2c;
}
- LOGI("chipIdentifyIT7260 read id: %02X %c%c%c%c%c%c%ci %c%c\n",
- chipID[0], chipID[1], chipID[2], chipID[3], chipID[4],
- chipID[5], chipID[6], chipID[7], chipID[8], chipID[9]);
- if (memcmp(chipID, expectedID, sizeof(expectedID)))
- return false;
+ if (regulator_count_voltages(gl_ts->avdd) > 0) {
+ retval = regulator_set_voltage(gl_ts->avdd,
+ IT_I2C_VTG_MIN_UV, IT_I2C_VTG_MAX_UV);
+ if (retval) {
+ dev_err(&gl_ts->client->dev,
+ "reg set i2c vtg failed retval =%d\n",
+ retval);
+ goto err_set_vtg_i2c;
+ }
+ }
- if (chipID[8] == '5' && chipID[9] == '6')
- LOGI("rev BX3 found\n");
- else if (chipID[8] == '6' && chipID[9] == '6')
- LOGI("rev BX4 found\n");
- else
- LOGI("unknown revision (0x%02X 0x%02X) found\n",
- chipID[8], chipID[9]);
+ return 0;
- return true;
+err_set_vtg_i2c:
+err_get_vtg_i2c:
+ if (regulator_count_voltages(gl_ts->vdd) > 0)
+ regulator_set_voltage(gl_ts->vdd, 0, IT_VTG_MAX_UV);
+err_set_vtg_vdd:
+ return retval;
+
+hw_shutdown:
+ if (regulator_count_voltages(gl_ts->vdd) > 0)
+ regulator_set_voltage(gl_ts->vdd, 0, IT_VTG_MAX_UV);
+ if (regulator_count_voltages(gl_ts->avdd) > 0)
+ regulator_set_voltage(gl_ts->avdd, 0, IT_I2C_VTG_MAX_UV);
+ return 0;
+};
+
+static int IT7260_power_on(bool on)
+{
+ int retval;
+
+ if (on == false)
+ goto power_off;
+
+ retval = reg_set_optimum_mode_check(gl_ts->vdd,
+ IT_ACTIVE_LOAD_UA);
+ if (retval < 0) {
+ dev_err(&gl_ts->client->dev,
+ "Regulator vdd set_opt failed rc=%d\n",
+ retval);
+ return retval;
+ }
+
+ retval = regulator_enable(gl_ts->vdd);
+ if (retval) {
+ dev_err(&gl_ts->client->dev,
+ "Regulator vdd enable failed rc=%d\n",
+ retval);
+ goto error_reg_en_vdd;
+ }
+
+ retval = reg_set_optimum_mode_check(gl_ts->avdd,
+ IT_I2C_ACTIVE_LOAD_UA);
+ if (retval < 0) {
+ dev_err(&gl_ts->client->dev,
+ "Regulator avdd set_opt failed rc=%d\n",
+ retval);
+ goto error_reg_opt_i2c;
+ }
+
+ retval = regulator_enable(gl_ts->avdd);
+ if (retval) {
+ dev_err(&gl_ts->client->dev,
+ "Regulator avdd enable failed rc=%d\n",
+ retval);
+ goto error_reg_en_avdd;
+ }
+
+ return 0;
+
+error_reg_en_avdd:
+ reg_set_optimum_mode_check(gl_ts->avdd, 0);
+error_reg_opt_i2c:
+ regulator_disable(gl_ts->vdd);
+error_reg_en_vdd:
+ reg_set_optimum_mode_check(gl_ts->vdd, 0);
+ return retval;
+
+power_off:
+ reg_set_optimum_mode_check(gl_ts->vdd, 0);
+ regulator_disable(gl_ts->vdd);
+ reg_set_optimum_mode_check(gl_ts->avdd, 0);
+ regulator_disable(gl_ts->avdd);
+
+ return 0;
+}
+
+static int IT7260_gpio_configure(bool on)
+{
+ int retval = 0;
+
+ if (on) {
+ if (gpio_is_valid(gl_ts->pdata->irq_gpio)) {
+ /* configure touchscreen irq gpio */
+ retval = gpio_request(gl_ts->pdata->irq_gpio,
+ "ite_irq_gpio");
+ if (retval) {
+ dev_err(&gl_ts->client->dev,
+ "unable to request irq gpio [%d]\n",
+ retval);
+ goto err_irq_gpio_req;
+ }
+
+ retval = gpio_direction_input(gl_ts->pdata->irq_gpio);
+ if (retval) {
+ dev_err(&gl_ts->client->dev,
+ "unable to set direction for irq gpio [%d]\n",
+ retval);
+ goto err_irq_gpio_dir;
+ }
+ } else {
+ dev_err(&gl_ts->client->dev,
+ "irq gpio not provided\n");
+ goto err_irq_gpio_req;
+ }
+
+ if (gpio_is_valid(gl_ts->pdata->reset_gpio)) {
+ /* configure touchscreen reset out gpio */
+ retval = gpio_request(gl_ts->pdata->reset_gpio,
+ "ite_reset_gpio");
+ if (retval) {
+ dev_err(&gl_ts->client->dev,
+ "unable to request reset gpio [%d]\n",
+ retval);
+ goto err_reset_gpio_req;
+ }
+
+ retval = gpio_direction_output(
+ gl_ts->pdata->reset_gpio, 1);
+ if (retval) {
+ dev_err(&gl_ts->client->dev,
+ "unable to set direction for reset gpio [%d]\n",
+ retval);
+ goto err_reset_gpio_dir;
+ }
+
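+ /* drive reset to the polarity given by the platform data, then wait out the reset delay */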
+ if (gl_ts->pdata->low_reset)
+ gpio_set_value(gl_ts->pdata->reset_gpio, 0);
+ else
+ gpio_set_value(gl_ts->pdata->reset_gpio, 1);
+
+ msleep(gl_ts->pdata->reset_delay);
+ } else {
+ dev_err(&gl_ts->client->dev,
+ "reset gpio not provided\n");
+ goto err_reset_gpio_req;
+ }
+ } else {
+ if (gpio_is_valid(gl_ts->pdata->irq_gpio))
+ gpio_free(gl_ts->pdata->irq_gpio);
+ if (gpio_is_valid(gl_ts->pdata->reset_gpio)) {
+ /*
+ * This is intended only to save leakage current.
+ * Even if the gpio_direction_input() call fails,
+ * leakage current may be slightly higher, but
+ * functionality is not affected.
+ */
+ retval = gpio_direction_input(gl_ts->pdata->reset_gpio);
+ if (retval) {
+ dev_err(&gl_ts->client->dev,
+ "unable to set direction for gpio reset [%d]\n",
+ retval);
+ }
+ gpio_free(gl_ts->pdata->reset_gpio);
+ }
+ }
+
+ return 0;
+
+err_reset_gpio_dir:
+ if (gpio_is_valid(gl_ts->pdata->reset_gpio))
+ gpio_free(gl_ts->pdata->reset_gpio);
+err_reset_gpio_req:
+err_irq_gpio_dir:
+ if (gpio_is_valid(gl_ts->pdata->irq_gpio))
+ gpio_free(gl_ts->pdata->irq_gpio);
+err_irq_gpio_req:
+ return retval;
+}
+
+#ifdef CONFIG_OF
+static int IT7260_get_dt_coords(struct device *dev, char *name,
+ struct IT7260_ts_platform_data *pdata)
+{
+ u32 coords[IT7260_COORDS_ARR_SIZE];
+ struct property *prop;
+ struct device_node *np = dev->of_node;
+ int coords_size, rc;
+
+ prop = of_find_property(np, name, NULL);
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+
+ coords_size = prop->length / sizeof(u32);
+ if (coords_size != IT7260_COORDS_ARR_SIZE) {
+ dev_err(dev, "invalid %s\n", name);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(np, name, coords, coords_size);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read %s\n", name);
+ return rc;
+ }
+
+ if (strcmp(name, "ite,panel-coords") == 0) {
+ pdata->panel_minx = coords[0];
+ pdata->panel_miny = coords[1];
+ pdata->panel_maxx = coords[2];
+ pdata->panel_maxy = coords[3];
+
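+ /* panel coordinates must start at 0 and have non-zero maxima */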
+ if (pdata->panel_maxx == 0 || pdata->panel_minx > 0)
+ rc = -EINVAL;
+ else if (pdata->panel_maxy == 0 || pdata->panel_miny > 0)
+ rc = -EINVAL;
+
+ if (rc) {
+ dev_err(dev, "Invalid panel resolution %d\n", rc);
+ return rc;
+ }
+ } else if (strcmp(name, "ite,display-coords") == 0) {
+ pdata->disp_minx = coords[0];
+ pdata->disp_miny = coords[1];
+ pdata->disp_maxx = coords[2];
+ pdata->disp_maxy = coords[3];
+ } else {
+ dev_err(dev, "unsupported property %s\n", name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int IT7260_parse_dt(struct device *dev,
+ struct IT7260_ts_platform_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+ u32 temp_val;
+ int rc;
+
+ /* reset, irq gpio info */
+ pdata->reset_gpio = of_get_named_gpio_flags(np,
+ "ite,reset-gpio", 0, &pdata->reset_gpio_flags);
+ pdata->irq_gpio = of_get_named_gpio_flags(np,
+ "ite,irq-gpio", 0, &pdata->irq_gpio_flags);
+
+ rc = of_property_read_u32(np, "ite,num-fingers", &temp_val);
+ if (!rc)
+ pdata->num_of_fingers = temp_val;
+ else if (rc != -EINVAL) {
+ dev_err(dev, "Unable to read reset delay\n");
+ return rc;
+ }
+
+ pdata->wakeup = of_property_read_bool(np, "ite,wakeup");
+ pdata->palm_detect_en = of_property_read_bool(np, "ite,palm-detect-en");
+ if (pdata->palm_detect_en) {
+ rc = of_property_read_u32(np, "ite,palm-detect-keycode",
+ &temp_val);
+ if (!rc) {
+ pdata->palm_detect_keycode = temp_val;
+ } else {
+ dev_err(dev, "Unable to read palm-detect-keycode\n");
+ return rc;
+ }
+ }
+
+ rc = of_property_read_string(np, "ite,fw-name", &pdata->fw_name);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read fw image name %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_string(np, "ite,cfg-name", &pdata->cfg_name);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read cfg image name %d\n", rc);
+ return rc;
+ }
+
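+ /* fall back to the built-in default names when DT does not provide them */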
+ snprintf(gl_ts->fw_name, MAX_BUFFER_SIZE, "%s",
+ (pdata->fw_name != NULL) ? pdata->fw_name : FW_NAME);
+ snprintf(gl_ts->cfg_name, MAX_BUFFER_SIZE, "%s",
+ (pdata->cfg_name != NULL) ? pdata->cfg_name : CFG_NAME);
+
+ rc = of_property_read_u32(np, "ite,reset-delay", &temp_val);
+ if (!rc)
+ pdata->reset_delay = temp_val;
+ else if (rc != -EINVAL) {
+ dev_err(dev, "Unable to read reset delay\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(np, "ite,avdd-lpm-cur", &temp_val);
+ if (!rc) {
+ pdata->avdd_lpm_cur = temp_val;
+ } else if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read avdd lpm current value %d\n", rc);
+ return rc;
+ }
+
+ pdata->low_reset = of_property_read_bool(np, "ite,low-reset");
+
+ rc = IT7260_get_dt_coords(dev, "ite,display-coords", pdata);
+ if (rc && (rc != -EINVAL))
+ return rc;
+
+ rc = IT7260_get_dt_coords(dev, "ite,panel-coords", pdata);
+ if (rc && (rc != -EINVAL))
+ return rc;
+
+ return 0;
+}
+#else
+static inline int IT7260_parse_dt(struct device *dev,
+ struct IT7260_ts_platform_data *pdata)
+{
+ return 0;
+}
+#endif
+
+static int IT7260_ts_pinctrl_init(struct IT7260_ts_data *ts_data)
+{
+ int retval;
+
+ /* Get pinctrl if target uses pinctrl */
+ ts_data->ts_pinctrl = devm_pinctrl_get(&(ts_data->client->dev));
+ if (IS_ERR_OR_NULL(ts_data->ts_pinctrl)) {
+ retval = PTR_ERR(ts_data->ts_pinctrl);
+ dev_dbg(&ts_data->client->dev,
+ "Target does not use pinctrl %d\n", retval);
+ goto err_pinctrl_get;
+ }
+
+ ts_data->pinctrl_state_active
+ = pinctrl_lookup_state(ts_data->ts_pinctrl,
+ PINCTRL_STATE_ACTIVE);
+ if (IS_ERR_OR_NULL(ts_data->pinctrl_state_active)) {
+ retval = PTR_ERR(ts_data->pinctrl_state_active);
+ dev_err(&ts_data->client->dev,
+ "Can not lookup %s pinstate %d\n",
+ PINCTRL_STATE_ACTIVE, retval);
+ goto err_pinctrl_lookup;
+ }
+
+ ts_data->pinctrl_state_suspend
+ = pinctrl_lookup_state(ts_data->ts_pinctrl,
+ PINCTRL_STATE_SUSPEND);
+ if (IS_ERR_OR_NULL(ts_data->pinctrl_state_suspend)) {
+ retval = PTR_ERR(ts_data->pinctrl_state_suspend);
+ dev_err(&ts_data->client->dev,
+ "Can not lookup %s pinstate %d\n",
+ PINCTRL_STATE_SUSPEND, retval);
+ goto err_pinctrl_lookup;
+ }
+
+ ts_data->pinctrl_state_release
+ = pinctrl_lookup_state(ts_data->ts_pinctrl,
+ PINCTRL_STATE_RELEASE);
+ if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
+ retval = PTR_ERR(ts_data->pinctrl_state_release);
+ dev_dbg(&ts_data->client->dev,
+ "Can not lookup %s pinstate %d\n",
+ PINCTRL_STATE_RELEASE, retval);
+ }
+
+ return 0;
+
+err_pinctrl_lookup:
+ devm_pinctrl_put(ts_data->ts_pinctrl);
+err_pinctrl_get:
+ ts_data->ts_pinctrl = NULL;
+ return retval;
}
static int IT7260_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- static const uint8_t cmdStart[] = {CMD_UNKNOWN_7};
- struct IT7260_i2c_platform_data *pdata;
+ static const uint8_t cmd_start[] = {CMD_UNKNOWN_7};
+ struct IT7260_ts_platform_data *pdata;
uint8_t rsp[2];
- int ret = -1;
+ int ret = -1, err;
+ struct dentry *temp;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- LOGE("need I2C_FUNC_I2C\n");
- ret = -ENODEV;
- goto err_out;
+ dev_err(&client->dev, "need I2C_FUNC_I2C\n");
+ return -ENODEV;
}
- if (!client->irq) {
- LOGE("need IRQ\n");
- ret = -ENODEV;
- goto err_out;
- }
- gl_ts = kzalloc(sizeof(*gl_ts), GFP_KERNEL);
- if (!gl_ts) {
- ret = -ENOMEM;
- goto err_out;
- }
+ gl_ts = devm_kzalloc(&client->dev, sizeof(*gl_ts), GFP_KERNEL);
+ if (!gl_ts)
+ return -ENOMEM;
gl_ts->client = client;
i2c_set_clientdata(client, gl_ts);
- pdata = client->dev.platform_data;
- if (sysfs_create_group(&(client->dev.kobj), &it7260_attrstatus_group)) {
- dev_err(&client->dev, "failed to register sysfs #1\n");
- goto err_sysfs_grp_create_1;
+ if (client->dev.platform_data == NULL)
+ return -ENODEV;
+
+ if (client->dev.of_node) {
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = IT7260_parse_dt(&client->dev, pdata);
+ if (ret)
+ return ret;
+ } else {
+ pdata = client->dev.platform_data;
+ }
+
+ if (!pdata)
+ return -ENOMEM;
+
+ gl_ts->pdata = pdata;
+
+ ret = IT7260_regulator_configure(true);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to configure regulators\n");
+ goto err_reg_configure;
}
- if (!chipIdentifyIT7260()) {
- LOGI("chipIdentifyIT7260 FAIL");
- goto err_ident_fail_or_input_alloc;
+ ret = IT7260_power_on(true);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to power on\n");
+ goto err_power_device;
}
- input_dev = input_allocate_device();
- if (!input_dev) {
- LOGE("failed to allocate input device\n");
+ /*
+ * After enabling the regulators, the controller needs a delay to
+ * reach its active state.
+ */
+ msleep(DELAY_VTG_REG_EN);
+
+ ret = IT7260_ts_pinctrl_init(gl_ts);
+ if (!ret && gl_ts->ts_pinctrl) {
+ /*
+ * The pinctrl handle is optional. If one is found, configure the
+ * pins in the active state; if not, continue without error.
+ */
+ ret = pinctrl_select_state(gl_ts->ts_pinctrl,
+ gl_ts->pinctrl_state_active);
+ if (ret < 0) {
+ dev_err(&gl_ts->client->dev,
+ "failed to select pin to active state %d",
+ ret);
+ }
+ } else {
+ ret = IT7260_gpio_configure(true);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to configure gpios\n");
+ goto err_gpio_config;
+ }
+ }
+
+ ret = IT7260_chipIdentify();
+ if (ret) {
+ dev_err(&client->dev, "Failed to identify chip %d!!!", ret);
+ goto err_identification_fail;
+ }
+
+ IT7260_get_chip_versions(&client->dev);
+
+ gl_ts->input_dev = input_allocate_device();
+ if (!gl_ts->input_dev) {
+ dev_err(&client->dev, "failed to allocate input device\n");
ret = -ENOMEM;
- goto err_ident_fail_or_input_alloc;
- }
- gl_ts->input_dev = input_dev;
-
- input_dev->name = DEVICE_NAME;
- input_dev->phys = "I2C";
- input_dev->id.bustype = BUS_I2C;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x7260;
- set_bit(EV_SYN, input_dev->evbit);
- set_bit(EV_KEY, input_dev->evbit);
- set_bit(EV_ABS, input_dev->evbit);
- set_bit(INPUT_PROP_DIRECT,input_dev->propbit);
- set_bit(BTN_TOUCH, input_dev->keybit);
- set_bit(KEY_SLEEP,input_dev->keybit);
- set_bit(KEY_WAKEUP,input_dev->keybit);
- set_bit(KEY_POWER,input_dev->keybit);
- input_set_abs_params(input_dev, ABS_X, 0, SCREEN_X_RESOLUTION, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, SCREEN_Y_RESOLUTION, 0, 0);
-
- if (input_register_device(input_dev)) {
- LOGE("failed to register input device\n");
+ goto err_input_alloc;
+ }
+
+ /* Initialize mutex for fw and cfg upgrade */
+ mutex_init(&gl_ts->fw_cfg_mutex);
+
+ gl_ts->input_dev->name = DEVICE_NAME;
+ gl_ts->input_dev->phys = "I2C";
+ gl_ts->input_dev->id.bustype = BUS_I2C;
+ gl_ts->input_dev->id.vendor = 0x0001;
+ gl_ts->input_dev->id.product = 0x7260;
+ set_bit(EV_SYN, gl_ts->input_dev->evbit);
+ set_bit(EV_KEY, gl_ts->input_dev->evbit);
+ set_bit(EV_ABS, gl_ts->input_dev->evbit);
+ set_bit(INPUT_PROP_DIRECT, gl_ts->input_dev->propbit);
+ set_bit(BTN_TOUCH, gl_ts->input_dev->keybit);
+ input_set_abs_params(gl_ts->input_dev, ABS_MT_POSITION_X,
+ gl_ts->pdata->disp_minx, gl_ts->pdata->disp_maxx, 0, 0);
+ input_set_abs_params(gl_ts->input_dev, ABS_MT_POSITION_Y,
+ gl_ts->pdata->disp_miny, gl_ts->pdata->disp_maxy, 0, 0);
+ input_mt_init_slots(gl_ts->input_dev, gl_ts->pdata->num_of_fingers, 0);
+
+ input_set_drvdata(gl_ts->input_dev, gl_ts);
+
+ if (pdata->wakeup) {
+ set_bit(KEY_WAKEUP, gl_ts->input_dev->keybit);
+ INIT_WORK(&gl_ts->work_pm_relax, IT7260_ts_work_func);
+ device_init_wakeup(&client->dev, pdata->wakeup);
+ }
+
+ if (pdata->palm_detect_en)
+ set_bit(gl_ts->pdata->palm_detect_keycode,
+ gl_ts->input_dev->keybit);
+
+ if (input_register_device(gl_ts->input_dev)) {
+ dev_err(&client->dev, "failed to register input device\n");
goto err_input_register;
}
@@ -854,45 +1716,271 @@ static int IT7260_ts_probe(struct i2c_client *client,
if (sysfs_create_group(&(client->dev.kobj), &it7260_attr_group)) {
dev_err(&client->dev, "failed to register sysfs #2\n");
- goto err_sysfs_grp_create_2;
+ goto err_sysfs_grp_create;
}
+
+#if defined(CONFIG_FB)
+ gl_ts->fb_notif.notifier_call = fb_notifier_callback;
+
+ ret = fb_register_client(&gl_ts->fb_notif);
+ if (ret)
+ dev_err(&client->dev, "Unable to register fb_notifier %d\n",
+ ret);
+#endif
- devicePresent = true;
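+ /* kick the controller with the start command and discard its response */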
+ IT7260_i2cWriteNoReadyCheck(BUF_COMMAND, cmd_start, sizeof(cmd_start));
+ msleep(pdata->reset_delay);
+ IT7260_i2cReadNoReadyCheck(BUF_RESPONSE, rsp, sizeof(rsp));
+ msleep(pdata->reset_delay);
+
+ gl_ts->dir = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+ if (gl_ts->dir == NULL || IS_ERR(gl_ts->dir)) {
+ dev_err(&client->dev,
+ "%s: Failed to create debugfs directory, ret = %ld\n",
+ __func__, PTR_ERR(gl_ts->dir));
+ ret = PTR_ERR(gl_ts->dir);
+ goto err_create_debugfs_dir;
+ }
- i2cWriteNoReadyCheck(BUF_COMMAND, cmdStart, sizeof(cmdStart));
- mdelay(10);
- i2cReadNoReadyCheck(BUF_RESPONSE, rsp, sizeof(rsp));
- mdelay(10);
+ temp = debugfs_create_file("suspend", S_IRUSR | S_IWUSR, gl_ts->dir,
+ gl_ts, &debug_suspend_fops);
+ if (temp == NULL || IS_ERR(temp)) {
+ dev_err(&client->dev,
+ "%s: Failed to create suspend debugfs file, ret = %ld\n",
+ __func__, PTR_ERR(temp));
+ ret = PTR_ERR(temp);
+ goto err_create_debugfs_file;
+ }
return 0;
-err_sysfs_grp_create_2:
+err_create_debugfs_file:
+ debugfs_remove_recursive(gl_ts->dir);
+err_create_debugfs_dir:
+#if defined(CONFIG_FB)
+ if (fb_unregister_client(&gl_ts->fb_notif))
+ dev_err(&client->dev, "Error occurred while unregistering fb_notifier.\n");
+#endif
+ sysfs_remove_group(&(client->dev.kobj), &it7260_attr_group);
+
+err_sysfs_grp_create:
free_irq(client->irq, gl_ts);
err_irq_reg:
- input_unregister_device(input_dev);
- input_dev = NULL;
+ input_unregister_device(gl_ts->input_dev);
err_input_register:
- if (input_dev)
- input_free_device(input_dev);
+ if (pdata->wakeup) {
+ cancel_work_sync(&gl_ts->work_pm_relax);
+ device_init_wakeup(&client->dev, false);
+ }
+ if (gl_ts->input_dev)
+ input_free_device(gl_ts->input_dev);
+ gl_ts->input_dev = NULL;
+
+err_input_alloc:
+err_identification_fail:
+ if (gl_ts->ts_pinctrl) {
+ if (IS_ERR_OR_NULL(gl_ts->pinctrl_state_release)) {
+ devm_pinctrl_put(gl_ts->ts_pinctrl);
+ gl_ts->ts_pinctrl = NULL;
+ } else {
+ err = pinctrl_select_state(gl_ts->ts_pinctrl,
+ gl_ts->pinctrl_state_release);
+ if (err)
+ dev_err(&gl_ts->client->dev,
+ "failed to select relase pinctrl state %d\n",
+ err);
+ }
+ } else {
+ if (gpio_is_valid(pdata->reset_gpio))
+ gpio_free(pdata->reset_gpio);
+ if (gpio_is_valid(pdata->irq_gpio))
+ gpio_free(pdata->irq_gpio);
+ }
-err_ident_fail_or_input_alloc:
- sysfs_remove_group(&(client->dev.kobj), &it7260_attrstatus_group);
+err_gpio_config:
+ IT7260_power_on(false);
-err_sysfs_grp_create_1:
- kfree(gl_ts);
+err_power_device:
+ IT7260_regulator_configure(false);
-err_out:
+err_reg_configure:
return ret;
}
static int IT7260_ts_remove(struct i2c_client *client)
{
- devicePresent = false;
+ int ret;
+
+ debugfs_remove_recursive(gl_ts->dir);
+#if defined(CONFIG_FB)
+ if (fb_unregister_client(&gl_ts->fb_notif))
+ dev_err(&client->dev, "Error occurred while unregistering fb_notifier.\n");
+#endif
+ sysfs_remove_group(&(client->dev.kobj), &it7260_attr_group);
+ free_irq(client->irq, gl_ts);
+ input_unregister_device(gl_ts->input_dev);
+ if (gl_ts->input_dev)
+ input_free_device(gl_ts->input_dev);
+ gl_ts->input_dev = NULL;
+ if (gl_ts->pdata->wakeup) {
+ cancel_work_sync(&gl_ts->work_pm_relax);
+ device_init_wakeup(&client->dev, false);
+ }
+ if (gl_ts->ts_pinctrl) {
+ if (IS_ERR_OR_NULL(gl_ts->pinctrl_state_release)) {
+ devm_pinctrl_put(gl_ts->ts_pinctrl);
+ gl_ts->ts_pinctrl = NULL;
+ } else {
+ ret = pinctrl_select_state(gl_ts->ts_pinctrl,
+ gl_ts->pinctrl_state_release);
+ if (ret)
+ dev_err(&gl_ts->client->dev,
+ "failed to select relase pinctrl state %d\n",
+ ret);
+ }
+ } else {
+ if (gpio_is_valid(gl_ts->pdata->reset_gpio))
+ gpio_free(gl_ts->pdata->reset_gpio);
+ if (gpio_is_valid(gl_ts->pdata->irq_gpio))
+ gpio_free(gl_ts->pdata->irq_gpio);
+ }
+ IT7260_power_on(false);
+ IT7260_regulator_configure(false);
+
return 0;
}
+#if defined(CONFIG_FB)
+static int fb_notifier_callback(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ struct fb_event *evdata = data;
+ int *blank;
+
+ if (evdata && evdata->data && gl_ts && gl_ts->client) {
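+ /* resume on unblank; suspend on powerdown or vsync-suspend blank */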
+ if (event == FB_EVENT_BLANK) {
+ blank = evdata->data;
+ if (*blank == FB_BLANK_UNBLANK)
+ IT7260_ts_resume(&(gl_ts->client->dev));
+ else if (*blank == FB_BLANK_POWERDOWN ||
+ *blank == FB_BLANK_VSYNC_SUSPEND)
+ IT7260_ts_suspend(&(gl_ts->client->dev));
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int IT7260_ts_resume(struct device *dev)
+{
+ int retval;
+
+ if (device_may_wakeup(dev)) {
+ if (gl_ts->device_needs_wakeup) {
+ /* Set active current for the avdd regulator */
+ if (gl_ts->pdata->avdd_lpm_cur) {
+ retval = reg_set_optimum_mode_check(gl_ts->avdd,
+ IT_I2C_ACTIVE_LOAD_UA);
+ if (retval < 0)
+ dev_err(dev, "Regulator avdd set_opt failed at resume rc=%d\n",
+ retval);
+ }
+
+ gl_ts->device_needs_wakeup = false;
+ disable_irq_wake(gl_ts->client->irq);
+ }
+ return 0;
+ }
+
+ if (gl_ts->ts_pinctrl) {
+ retval = pinctrl_select_state(gl_ts->ts_pinctrl,
+ gl_ts->pinctrl_state_active);
+ if (retval < 0) {
+ dev_err(dev, "Cannot get default pinctrl state %d\n",
+ retval);
+ goto err_pinctrl_select_suspend;
+ }
+ }
+
+ enable_irq(gl_ts->client->irq);
+ gl_ts->suspended = false;
+ return 0;
+
+err_pinctrl_select_suspend:
+ return retval;
+}
+
+static int IT7260_ts_suspend(struct device *dev)
+{
+ int retval;
+
+ if (gl_ts->fw_cfg_uploading) {
+ dev_dbg(dev, "Fw/cfg uploading. Can't go to suspend.\n");
+ return -EBUSY;
+ }
+
+ if (device_may_wakeup(dev)) {
+ if (!gl_ts->device_needs_wakeup) {
+ /* put the device in low power idle mode */
+ IT7260_ts_chipLowPowerMode(PWR_CTL_LOW_POWER_MODE);
+
+ /* Set lpm current for avdd regulator */
+ if (gl_ts->pdata->avdd_lpm_cur) {
+ retval = reg_set_optimum_mode_check(gl_ts->avdd,
+ gl_ts->pdata->avdd_lpm_cur);
+ if (retval < 0)
+ dev_err(dev, "Regulator avdd set_opt failed at suspend rc=%d\n",
+ retval);
+ }
+
+ gl_ts->device_needs_wakeup = true;
+ enable_irq_wake(gl_ts->client->irq);
+ }
+ return 0;
+ }
+
+ disable_irq(gl_ts->client->irq);
+
+ IT7260_ts_release_all();
+
+ if (gl_ts->ts_pinctrl) {
+ retval = pinctrl_select_state(gl_ts->ts_pinctrl,
+ gl_ts->pinctrl_state_suspend);
+ if (retval < 0) {
+ dev_err(dev, "Cannot get idle pinctrl state %d\n",
+ retval);
+ goto err_pinctrl_select_suspend;
+ }
+ }
+
+ gl_ts->suspended = true;
+
+ return 0;
+
+err_pinctrl_select_suspend:
+ return retval;
+}
+
+static const struct dev_pm_ops IT7260_ts_dev_pm_ops = {
+ .suspend = IT7260_ts_suspend,
+ .resume = IT7260_ts_resume,
+};
+#else
+static int IT7260_ts_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int IT7260_ts_suspend(struct device *dev)
+{
+ return 0;
+}
+#endif
+
static const struct i2c_device_id IT7260_ts_id[] = {
{ DEVICE_NAME, 0},
{}
@@ -901,33 +1989,22 @@ static const struct i2c_device_id IT7260_ts_id[] = {
MODULE_DEVICE_TABLE(i2c, IT7260_ts_id);
static const struct of_device_id IT7260_match_table[] = {
- { .compatible = "ITE,IT7260_ts",},
+ { .compatible = "ite,it7260_ts",},
{},
};
-static int IT7260_ts_resume(struct i2c_client *i2cdev)
-{
- isDeviceSuspend = false;
- return 0;
-}
-
-static int IT7260_ts_suspend(struct i2c_client *i2cdev, pm_message_t pmesg)
-{
- isDeviceSuspend = true;
- return 0;
-}
-
static struct i2c_driver IT7260_ts_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DEVICE_NAME,
.of_match_table = IT7260_match_table,
+#ifdef CONFIG_PM
+ .pm = &IT7260_ts_dev_pm_ops,
+#endif
},
.probe = IT7260_ts_probe,
.remove = IT7260_ts_remove,
.id_table = IT7260_ts_id,
- .resume = IT7260_ts_resume,
- .suspend = IT7260_ts_suspend,
};
module_i2c_driver(IT7260_ts_driver);
diff --git a/drivers/input/touchscreen/msg21xx_ts.c b/drivers/input/touchscreen/msg21xx_ts.c
new file mode 100644
index 000000000000..4eb7fd4b1cc9
--- /dev/null
+++ b/drivers/input/touchscreen/msg21xx_ts.c
@@ -0,0 +1,1757 @@
+/*
+ * MStar MSG21XX touchscreen driver
+ *
+ * Copyright (c) 2006-2012 MStar Semiconductor, Inc.
+ *
+ * Copyright (C) 2012 Bruce Ding <bruce.ding@mstarsemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/timer.h>
+#include <linux/gpio.h>
+
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <mach/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <asm/unistd.h>
+#include <linux/cdev.h>
+#include <asm/uaccess.h>
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif
+#include <linux/input.h>
+#if defined(CONFIG_FB)
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#endif
+#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+#include <linux/input/vir_ps.h>
+#endif
+
+/*=============================================================*/
+// Macro Definition
+/*=============================================================*/
+
+#define TOUCH_DRIVER_DEBUG 0
+#if (TOUCH_DRIVER_DEBUG == 1)
+#define DBG(fmt, arg...) pr_err(fmt, ##arg) //pr_info(fmt, ##arg)
+#else
+#define DBG(fmt, arg...)
+#endif
+
+/*=============================================================*/
+// Constant Value & Variable Definition
+/*=============================================================*/
+
+#define U8 unsigned char
+#define U16 unsigned short
+#define U32 unsigned int
+#define S8 signed char
+#define S16 signed short
+#define S32 signed int
+
+#define TOUCH_SCREEN_X_MIN (0)
+#define TOUCH_SCREEN_Y_MIN (0)
+/*
+ * Note: adjust the touch screen resolution below to match the panel in use.
+ */
+#define TOUCH_SCREEN_X_MAX (480)
+#define TOUCH_SCREEN_Y_MAX (800)
+/*
+ * Note.
+ * Please do not change the below setting.
+ */
+#define TPD_WIDTH (2048)
+#define TPD_HEIGHT (2048)
+
+/*
+ * Note: adjust the GPIO pin settings below to match the target platform.
+ */
+static int int_gpio = 1;
+static int reset_gpio = 0;
+#define MS_TS_MSG21XX_GPIO_RST reset_gpio
+#define MS_TS_MSG21XX_GPIO_INT int_gpio
+//---------------------------------------------------------------------//
+
+//#define SYSFS_AUTHORITY_CHANGE_FOR_CTS_TEST
+
+#ifdef SYSFS_AUTHORITY_CHANGE_FOR_CTS_TEST
+#define SYSFS_AUTHORITY (0644)
+#else
+#define SYSFS_AUTHORITY (0777)
+#endif
+
+#define FIRMWARE_AUTOUPDATE
+#ifdef FIRMWARE_AUTOUPDATE
+typedef enum {
+ SWID_START = 1,
+ SWID_TRULY = SWID_START,
+ SWID_NULL,
+} SWID_ENUM;
+
+unsigned char MSG_FIRMWARE[1][33*1024] =
+{
+ {
+ #include "msg21xx_truly_update_bin.h"
+ }
+};
+#endif
+
+#define CONFIG_TP_HAVE_KEY
+
+/*
+ * Note: if the virtual key values below do not match those defined in the
+ * platform's key layout file, adjust them to match the platform in use.
+ */
+#ifdef CONFIG_TP_HAVE_KEY
+#define TOUCH_KEY_MENU (139) //229
+#define TOUCH_KEY_HOME (172) //102
+#define TOUCH_KEY_BACK (158)
+#define TOUCH_KEY_SEARCH (217)
+
+const U16 tp_key_array[] = {TOUCH_KEY_MENU, TOUCH_KEY_HOME, TOUCH_KEY_BACK, TOUCH_KEY_SEARCH};
+#define MAX_KEY_NUM (sizeof(tp_key_array)/sizeof(tp_key_array[0]))
+#endif
+
+#define SLAVE_I2C_ID_DBBUS (0xC4>>1)
+#define SLAVE_I2C_ID_DWI2C (0x4C>>1)
+
+#define DEMO_MODE_PACKET_LENGTH (8)
+#define MAX_TOUCH_NUM (2) //5
+
+#define TP_PRINT
+#ifdef TP_PRINT
+static int tp_print_proc_read(void);
+static void tp_print_create_entry(void);
+#endif
+
+static char *fw_version = NULL; // customer firmware version
+static U16 fw_version_major = 0;
+static U16 fw_version_minor = 0;
+static U8 temp[94][1024];
+static U32 crc32_table[256];
+static int FwDataCnt = 0;
+static U8 bFwUpdating = 0;
+static struct class *firmware_class = NULL;
+static struct device *firmware_cmd_dev = NULL;
+
+static struct i2c_client *i2c_client = NULL;
+
+#if defined(CONFIG_FB)
+static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data);
+static struct notifier_block msg21xx_fb_notif;
+#elif defined (CONFIG_HAS_EARLYSUSPEND)
+static struct early_suspend mstar_ts_early_suspend;
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+static U8 bEnableTpProximity = 0;
+static U8 bFaceClosingTp = 0;
+#endif
+static U8 bTpInSuspend = 0;
+
+static int irq_msg21xx = -1;
+static struct work_struct msg21xx_wk;
+static struct mutex msg21xx_mutex;
+static struct input_dev *input_dev = NULL;
+
+/*=============================================================*/
+// Data Type Definition
+/*=============================================================*/
+
+typedef struct
+{
+ U16 x;
+ U16 y;
+} touchPoint_t;
+
+/// max 80+1+1 = 82 bytes
+typedef struct
+{
+ touchPoint_t point[MAX_TOUCH_NUM];
+ U8 count;
+ U8 keycode;
+} touchInfo_t;
+
+enum i2c_speed
+{
+ I2C_SLOW = 0,
+	I2C_NORMAL = 1,
+	I2C_FAST = 2,
+};
+
+typedef enum
+{
+ EMEM_ALL = 0,
+ EMEM_MAIN,
+ EMEM_INFO,
+} EMEM_TYPE_t;
+
+/*=============================================================*/
+// Function Definition
+/*=============================================================*/
+
+/// CRC
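+/*
+ * Table-driven CRC-32 (reflected form, polynomial 0x04C11DB7). The
+ * resulting CRC is compared with the value the touch controller reports
+ * after a firmware update to verify the flashed image.
+ */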
+static U32 _CRC_doReflect(U32 ref, S8 ch)
+{
+ U32 value = 0;
+ U32 i = 0;
+
+ for (i = 1; i < (ch + 1); i ++)
+ {
+ if (ref & 1)
+ {
+ value |= 1 << (ch - i);
+ }
+ ref >>= 1;
+ }
+
+ return value;
+}
+
+U32 _CRC_getValue(U32 text, U32 prevCRC)
+{
+ U32 ulCRC = prevCRC;
+
+ ulCRC = (ulCRC >> 8) ^ crc32_table[(ulCRC & 0xFF) ^ text];
+
+ return ulCRC;
+}
+
+static void _CRC_initTable(void)
+{
+ U32 magic_number = 0x04c11db7;
+ U32 i, j;
+
+ for (i = 0; i <= 0xFF; i ++)
+ {
+ crc32_table[i] = _CRC_doReflect (i, 8) << 24;
+ for (j = 0; j < 8; j ++)
+ {
+ crc32_table[i] = (crc32_table[i] << 1) ^ (crc32_table[i] & (0x80000000L) ? magic_number : 0);
+ }
+ crc32_table[i] = _CRC_doReflect(crc32_table[i], 32);
+ }
+}
+
+static void reset_hw(void)
+{
+ DBG("reset_hw()\n");
+
+ gpio_direction_output(MS_TS_MSG21XX_GPIO_RST, 1);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+	mdelay(100); /* the RST line must stay low for at least 10 ms */
+	gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 1);
+	mdelay(100); /* wait at least 50 ms before servicing INT interrupts */
+}
+
+static int read_i2c_seq(U8 addr, U8* buf, U16 size)
+{
+ int rc = 0;
+ struct i2c_msg msgs[] =
+ {
+ {
+ .addr = addr,
+ .flags = I2C_M_RD, // read flag
+ .len = size,
+ .buf = buf,
+ },
+ };
+
+	/* i2c_transfer() returns the number of messages transferred on
+	   success (1 here), or a negative error code. */
+ if (i2c_client != NULL)
+ {
+ rc = i2c_transfer(i2c_client->adapter, msgs, 1);
+ if (rc < 0)
+ {
+ DBG("read_i2c_seq() error %d\n", rc);
+ }
+ }
+ else
+ {
+ DBG("i2c_client is NULL\n");
+ }
+
+ return rc;
+}
+
+static int write_i2c_seq(U8 addr, U8* buf, U16 size)
+{
+ int rc = 0;
+ struct i2c_msg msgs[] =
+ {
+ {
+ .addr = addr,
+			.flags = 0, // a zero flags field means a write transfer
+ .len = size,
+ .buf = buf,
+ },
+ };
+
+	/* i2c_transfer() returns the number of messages transferred on
+	   success (1 here), or a negative error code. */
+ if (i2c_client != NULL)
+ {
+ rc = i2c_transfer(i2c_client->adapter, msgs, 1);
+ if ( rc < 0 )
+ {
+ DBG("write_i2c_seq() error %d\n", rc);
+ }
+ }
+ else
+ {
+ DBG("i2c_client is NULL\n");
+ }
+
+ return rc;
+}
+
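+/*
+ * DBBUS register access: command byte 0x10 followed by the bank and
+ * register offset addresses a 16-bit register; register data is
+ * transferred low byte first.
+ */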
+static U16 read_reg(U8 bank, U8 addr)
+{
+ U8 tx_data[3] = {0x10, bank, addr};
+ U8 rx_data[2] = {0};
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, &tx_data[0], 3);
+ read_i2c_seq(SLAVE_I2C_ID_DBBUS, &rx_data[0], 2);
+
+ return (rx_data[1] << 8 | rx_data[0]);
+}
+
+static void write_reg(U8 bank, U8 addr, U16 data)
+{
+ U8 tx_data[5] = {0x10, bank, addr, data & 0xFF, data >> 8};
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, &tx_data[0], 5);
+}
+
+static void write_reg_8bit(U8 bank, U8 addr, U8 data)
+{
+ U8 tx_data[4] = {0x10, bank, addr, data};
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, &tx_data[0], 4);
+}
+
+void dbbusDWIICEnterSerialDebugMode(void)
+{
+ U8 data[5];
+
+ // Enter the Serial Debug Mode
+ data[0] = 0x53;
+ data[1] = 0x45;
+ data[2] = 0x52;
+ data[3] = 0x44;
+ data[4] = 0x42;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 5);
+}
+
+void dbbusDWIICStopMCU(void)
+{
+ U8 data[1];
+
+ // Stop the MCU
+ data[0] = 0x37;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+}
+
+void dbbusDWIICIICUseBus(void)
+{
+ U8 data[1];
+
+ // IIC Use Bus
+ data[0] = 0x35;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+}
+
+void dbbusDWIICIICReshape(void)
+{
+ U8 data[1];
+
+ // IIC Re-shape
+ data[0] = 0x71;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+}
+
+void dbbusDWIICIICNotUseBus(void)
+{
+ U8 data[1];
+
+ // IIC Not Use Bus
+ data[0] = 0x34;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+}
+
+void dbbusDWIICNotStopMCU(void)
+{
+ U8 data[1];
+
+ // Not Stop the MCU
+ data[0] = 0x36;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+}
+
+void dbbusDWIICExitSerialDebugMode(void)
+{
+ U8 data[1];
+
+ // Exit the Serial Debug Mode
+ data[0] = 0x45;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+
+ // Delay some interval to guard the next transaction
+ //udelay ( 200 ); // delay about 0.2ms
+}
+
+//---------------------------------------------------------------------//
+
+static U8 get_ic_type(void)
+{
+ U8 ic_type = 0;
+
+ reset_hw();
+ dbbusDWIICEnterSerialDebugMode();
+ dbbusDWIICStopMCU();
+ dbbusDWIICIICUseBus();
+ dbbusDWIICIICReshape();
+ mdelay ( 300 );
+
+ // stop mcu
+ write_reg_8bit ( 0x0F, 0xE6, 0x01 );
+ // disable watch dog
+ write_reg ( 0x3C, 0x60, 0xAA55 );
+ // get ic type
+ ic_type = (0xff)&(read_reg(0x1E, 0xCC));
+
+ if (ic_type != 1 //msg2133
+ && ic_type != 2 //msg21xxA
+ && ic_type != 3) //msg26xxM
+ {
+ ic_type = 0;
+ }
+
+ reset_hw();
+
+ return ic_type;
+}
+
+static int get_customer_firmware_version(void)
+{
+ U8 dbbus_tx_data[3] = {0};
+ U8 dbbus_rx_data[4] = {0};
+ int ret = 0;
+
+ DBG("get_customer_firmware_version()\n");
+
+ dbbus_tx_data[0] = 0x53;
+ dbbus_tx_data[1] = 0x00;
+ dbbus_tx_data[2] = 0x2A;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 3);
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 4);
+ mutex_unlock(&msg21xx_mutex);
+ fw_version_major = (dbbus_rx_data[1]<<8) + dbbus_rx_data[0];
+ fw_version_minor = (dbbus_rx_data[3]<<8) + dbbus_rx_data[2];
+
+ DBG("*** major = %d ***\n", fw_version_major);
+ DBG("*** minor = %d ***\n", fw_version_minor);
+
+	if (fw_version == NULL)
+	{
+		/* "%03d%03d" needs 6 digits plus a terminating NUL */
+		fw_version = kzalloc(8, GFP_KERNEL);
+	}
+
+	if (fw_version != NULL)
+		snprintf(fw_version, 8, "%03d%03d", fw_version_major, fw_version_minor);
+
+ return ret;
+}
+
+static int firmware_erase_c33 ( EMEM_TYPE_t emem_type )
+{
+ // stop mcu
+ write_reg ( 0x0F, 0xE6, 0x0001 );
+
+ //disable watch dog
+ write_reg_8bit ( 0x3C, 0x60, 0x55 );
+ write_reg_8bit ( 0x3C, 0x61, 0xAA );
+
+ // set PROGRAM password
+ write_reg_8bit ( 0x16, 0x1A, 0xBA );
+ write_reg_8bit ( 0x16, 0x1B, 0xAB );
+
+ write_reg_8bit ( 0x16, 0x18, 0x80 );
+
+ if ( emem_type == EMEM_ALL )
+ {
+ write_reg_8bit ( 0x16, 0x08, 0x10 ); //mark
+ }
+
+ write_reg_8bit ( 0x16, 0x18, 0x40 );
+ mdelay ( 10 );
+
+ // clear pce
+ write_reg_8bit ( 0x16, 0x18, 0x80 );
+
+ // erase trigger
+ if ( emem_type == EMEM_MAIN )
+ {
+ write_reg_8bit ( 0x16, 0x0E, 0x04 ); //erase main
+ }
+ else
+ {
+ write_reg_8bit ( 0x16, 0x0E, 0x08 ); //erase all block
+ }
+
+ return ( 1 );
+}
+
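+/*
+ * Firmware update sequence, as implemented below: erase the main
+ * e-flash block, re-enter serial debug mode, then stream the 32 KB
+ * image in 128-byte I2C bursts while handshaking with the controller
+ * through register 0x3CE4. The locally computed CRC-32 is finally
+ * compared with the CRC the controller reports at 0x3C80/0x3C82.
+ */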
+static ssize_t firmware_update_c33 ( struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size, EMEM_TYPE_t emem_type )
+{
+ U32 i, j;
+ U32 crc_main, crc_main_tp;
+ U32 crc_info, crc_info_tp;
+ U16 reg_data = 0;
+ int update_pass = 1;
+
+ crc_main = 0xffffffff;
+ crc_info = 0xffffffff;
+
+ reset_hw();
+ dbbusDWIICEnterSerialDebugMode();
+ dbbusDWIICStopMCU();
+ dbbusDWIICIICUseBus();
+ dbbusDWIICIICReshape();
+ mdelay ( 300 );
+
+ //erase main
+ firmware_erase_c33 ( EMEM_MAIN );
+ mdelay ( 1000 );
+
+ reset_hw();
+ dbbusDWIICEnterSerialDebugMode();
+ dbbusDWIICStopMCU();
+ dbbusDWIICIICUseBus();
+ dbbusDWIICIICReshape();
+ mdelay ( 300 );
+
+ /////////////////////////
+ // Program
+ /////////////////////////
+
+ //polling 0x3CE4 is 0x1C70
+ if ( ( emem_type == EMEM_ALL ) || ( emem_type == EMEM_MAIN ) )
+ {
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }
+ while ( reg_data != 0x1C70 );
+ }
+
+ switch ( emem_type )
+ {
+ case EMEM_ALL:
+ write_reg ( 0x3C, 0xE4, 0xE38F ); // for all-blocks
+ break;
+ case EMEM_MAIN:
+ write_reg ( 0x3C, 0xE4, 0x7731 ); // for main block
+ break;
+ case EMEM_INFO:
+ write_reg ( 0x3C, 0xE4, 0x7731 ); // for info block
+
+ write_reg_8bit ( 0x0F, 0xE6, 0x01 );
+
+ write_reg_8bit ( 0x3C, 0xE4, 0xC5 );
+ write_reg_8bit ( 0x3C, 0xE5, 0x78 );
+
+ write_reg_8bit ( 0x1E, 0x04, 0x9F );
+ write_reg_8bit ( 0x1E, 0x05, 0x82 );
+
+ write_reg_8bit ( 0x0F, 0xE6, 0x00 );
+ mdelay ( 100 );
+ break;
+ }
+
+ // polling 0x3CE4 is 0x2F43
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }
+ while ( reg_data != 0x2F43 );
+
+ // calculate CRC 32
+ _CRC_initTable ();
+
+ for ( i = 0; i < 32; i++ ) // total 32 KB : 2 byte per R/W
+ {
+ if ( i == 31 )
+ {
+ temp[i][1014] = 0x5A;
+ temp[i][1015] = 0xA5;
+
+ for ( j = 0; j < 1016; j++ )
+ {
+ crc_main = _CRC_getValue ( temp[i][j], crc_main);
+ }
+ }
+ else
+ {
+ for ( j = 0; j < 1024; j++ )
+ {
+ crc_main = _CRC_getValue ( temp[i][j], crc_main);
+ }
+ }
+
+ //write_i2c_seq(SLAVE_I2C_ID_DWI2C, temp[i], 1024);
+ for (j = 0; j < 8; j++)
+ {
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &temp[i][j*128], 128 );
+ }
+ msleep (100);
+
+ // polling 0x3CE4 is 0xD0BC
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }
+ while ( reg_data != 0xD0BC );
+
+ write_reg ( 0x3C, 0xE4, 0x2F43 );
+ }
+
+ if ( ( emem_type == EMEM_ALL ) || ( emem_type == EMEM_MAIN ) )
+ {
+ // write file done and check crc
+ write_reg ( 0x3C, 0xE4, 0x1380 );
+ }
+ mdelay ( 10 );
+
+ if ( ( emem_type == EMEM_ALL ) || ( emem_type == EMEM_MAIN ) )
+ {
+ // polling 0x3CE4 is 0x9432
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }while ( reg_data != 0x9432 );
+ }
+
+ crc_main = crc_main ^ 0xffffffff;
+ crc_info = crc_info ^ 0xffffffff;
+
+ if ( ( emem_type == EMEM_ALL ) || ( emem_type == EMEM_MAIN ) )
+ {
+ // CRC Main from TP
+ crc_main_tp = read_reg ( 0x3C, 0x80 );
+ crc_main_tp = ( crc_main_tp << 16 ) | read_reg ( 0x3C, 0x82 );
+
+ // CRC Info from TP
+ crc_info_tp = read_reg ( 0x3C, 0xA0 );
+ crc_info_tp = ( crc_info_tp << 16 ) | read_reg ( 0x3C, 0xA2 );
+ }
+
+ update_pass = 1;
+ if ( ( emem_type == EMEM_ALL ) || ( emem_type == EMEM_MAIN ) )
+ {
+ if ( crc_main_tp != crc_main )
+ update_pass = 0;
+
+ /*
+ if ( crc_info_tp != crc_info )
+ update_pass = 0;
+ */
+ }
+
+ if ( !update_pass )
+ {
+ DBG( "update_C33 failed\n" );
+ reset_hw();
+ FwDataCnt = 0;
+ return 0;
+ }
+
+ DBG( "update_C33 OK\n" );
+ reset_hw();
+ FwDataCnt = 0;
+ return size;
+}
+
+#ifdef FIRMWARE_AUTOUPDATE
+unsigned short main_sw_id = 0x7FF, info_sw_id = 0x7FF;
+U32 bin_conf_crc32 = 0;
+
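+/*
+ * Ask the firmware to calculate the CRC-32 of its main e-flash block
+ * and read the result back from registers 0x3C80/0x3C82. Together with
+ * _ReadBinConfig() this decides whether the bundled image is flashed.
+ */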
+static U32 _CalMainCRC32(void)
+{
+ U32 ret=0;
+ U16 reg_data=0;
+
+ reset_hw();
+
+ dbbusDWIICEnterSerialDebugMode();
+ dbbusDWIICStopMCU();
+ dbbusDWIICIICUseBus();
+ dbbusDWIICIICReshape();
+ msleep ( 100 );
+
+ //Stop MCU
+ write_reg ( 0x0F, 0xE6, 0x0001 );
+
+ // Stop Watchdog
+ write_reg_8bit ( 0x3C, 0x60, 0x55 );
+ write_reg_8bit ( 0x3C, 0x61, 0xAA );
+
+ //cmd
+ write_reg ( 0x3C, 0xE4, 0xDF4C );
+ write_reg ( 0x1E, 0x04, 0x7d60 );
+ // TP SW reset
+ write_reg ( 0x1E, 0x04, 0x829F );
+
+ //MCU run
+ write_reg ( 0x0F, 0xE6, 0x0000 );
+
+ //polling 0x3CE4
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }while ( reg_data != 0x9432 );
+
+ // Cal CRC Main from TP
+ ret = read_reg ( 0x3C, 0x80 );
+ ret = ( ret << 16 ) | read_reg ( 0x3C, 0x82 );
+
+ DBG("[21xxA]:Current main crc32=0x%x\n",ret);
+ return (ret);
+}
+
+static void _ReadBinConfig ( void )
+{
+ U8 dbbus_tx_data[5]={0};
+ U8 dbbus_rx_data[4]={0};
+ U16 reg_data=0;
+
+ reset_hw();
+
+ dbbusDWIICEnterSerialDebugMode();
+ dbbusDWIICStopMCU();
+ dbbusDWIICIICUseBus();
+ dbbusDWIICIICReshape();
+ msleep ( 100 );
+
+ //Stop MCU
+ write_reg ( 0x0F, 0xE6, 0x0001 );
+
+ // Stop Watchdog
+ write_reg_8bit ( 0x3C, 0x60, 0x55 );
+ write_reg_8bit ( 0x3C, 0x61, 0xAA );
+
+ //cmd
+ write_reg ( 0x3C, 0xE4, 0xA4AB );
+ write_reg ( 0x1E, 0x04, 0x7d60 );
+
+ // TP SW reset
+ write_reg ( 0x1E, 0x04, 0x829F );
+
+ //MCU run
+ write_reg ( 0x0F, 0xE6, 0x0000 );
+
+ //polling 0x3CE4
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }
+ while ( reg_data != 0x5B58 );
+
+ dbbus_tx_data[0] = 0x72;
+ dbbus_tx_data[1] = 0x7F;
+ dbbus_tx_data[2] = 0x55;
+ dbbus_tx_data[3] = 0x00;
+ dbbus_tx_data[4] = 0x04;
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 5 );
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 4 );
+ if ((dbbus_rx_data[0]>=0x30 && dbbus_rx_data[0]<=0x39)
+ &&(dbbus_rx_data[1]>=0x30 && dbbus_rx_data[1]<=0x39)
+ &&(dbbus_rx_data[2]>=0x31 && dbbus_rx_data[2]<=0x39))
+ {
+ main_sw_id = (dbbus_rx_data[0]-0x30)*100+(dbbus_rx_data[1]-0x30)*10+(dbbus_rx_data[2]-0x30);
+ }
+
+ dbbus_tx_data[0] = 0x72;
+ dbbus_tx_data[1] = 0x7F;
+ dbbus_tx_data[2] = 0xFC;
+ dbbus_tx_data[3] = 0x00;
+ dbbus_tx_data[4] = 0x04;
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 5 );
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 4 );
+ bin_conf_crc32 = dbbus_rx_data[0];
+ bin_conf_crc32 = (bin_conf_crc32<<8)|dbbus_rx_data[1];
+ bin_conf_crc32 = (bin_conf_crc32<<8)|dbbus_rx_data[2];
+ bin_conf_crc32 = (bin_conf_crc32<<8)|dbbus_rx_data[3];
+
+ dbbus_tx_data[0] = 0x72;
+ dbbus_tx_data[1] = 0x83;
+ dbbus_tx_data[2] = 0x00;
+ dbbus_tx_data[3] = 0x00;
+ dbbus_tx_data[4] = 0x04;
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 5 );
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 4 );
+ if ((dbbus_rx_data[0]>=0x30 && dbbus_rx_data[0]<=0x39)
+ &&(dbbus_rx_data[1]>=0x30 && dbbus_rx_data[1]<=0x39)
+ &&(dbbus_rx_data[2]>=0x31 && dbbus_rx_data[2]<=0x39))
+ {
+ info_sw_id = (dbbus_rx_data[0]-0x30)*100+(dbbus_rx_data[1]-0x30)*10+(dbbus_rx_data[2]-0x30);
+ }
+
+ DBG("[21xxA]:main_sw_id = %d, info_sw_id = %d, bin_conf_crc32=0x%x\n", main_sw_id, info_sw_id, bin_conf_crc32);
+}
+
+static int fwAutoUpdate(void *unused)
+{
+ int time = 0;
+ ssize_t ret = 0;
+
+ for (time = 0; time < 5; time++)
+ {
+ DBG("fwAutoUpdate time = %d\n",time);
+ ret = firmware_update_c33(NULL, NULL, NULL, 1, EMEM_MAIN);
+ if (ret == 1)
+ {
+ DBG("AUTO_UPDATE OK!!!");
+ break;
+ }
+ }
+ if (time == 5)
+ {
+ DBG("AUTO_UPDATE failed!!!");
+ }
+ enable_irq(irq_msg21xx);
+ return 0;
+}
+#endif
+
+//------------------------------------------------------------------------------//
+static ssize_t firmware_update_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ DBG("*** firmware_update_show() fw_version = %s ***\n", fw_version);
+
+ return sprintf(buf, "%s\n", fw_version);
+}
+
+static ssize_t firmware_update_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ bFwUpdating = 1;
+ disable_irq(irq_msg21xx);
+
+ DBG("*** update fw size = %d ***\n", FwDataCnt);
+ size = firmware_update_c33 (dev, attr, buf, size, EMEM_MAIN);
+
+ enable_irq(irq_msg21xx);
+ bFwUpdating = 0;
+
+ return size;
+}
+
+static DEVICE_ATTR(update, SYSFS_AUTHORITY, firmware_update_show, firmware_update_store);
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ DBG("*** firmware_version_show() fw_version = %s ***\n", fw_version);
+
+ return sprintf(buf, "%s\n", fw_version);
+}
+
+static ssize_t firmware_version_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ get_customer_firmware_version();
+
+ DBG("*** firmware_version_store() fw_version = %s ***\n", fw_version);
+
+ return size;
+}
+
+static DEVICE_ATTR(version, SYSFS_AUTHORITY, firmware_version_show, firmware_version_store);
+
+static ssize_t firmware_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ DBG("*** firmware_data_show() FwDataCnt = %d ***\n", FwDataCnt);
+
+	return sprintf(buf, "%d\n", FwDataCnt);
+}
+
+static ssize_t firmware_data_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ int count = size / 1024;
+ int i;
+
+ for (i = 0; i < count; i ++)
+ {
+ memcpy(temp[FwDataCnt], buf+(i*1024), 1024);
+
+ FwDataCnt ++;
+ }
+
+ DBG("***FwDataCnt = %d ***\n", FwDataCnt);
+
+ if (buf != NULL)
+ {
+ DBG("*** buf[0] = %c ***\n", buf[0]);
+ }
+
+ return size;
+}
+
+static DEVICE_ATTR(data, SYSFS_AUTHORITY, firmware_data_show, firmware_data_store);
+
+#ifdef TP_PRINT
+static ssize_t tp_print_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ tp_print_proc_read();
+
+ return sprintf(buf, "%d\n", bTpInSuspend);
+}
+
+static ssize_t tp_print_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ DBG("*** tp_print_store() ***\n");
+
+ return size;
+}
+
+static DEVICE_ATTR(tpp, SYSFS_AUTHORITY, tp_print_show, tp_print_store);
+#endif
+
+//------------------------------------------------------------------------------//
+#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+static void _msg_enable_proximity(void)
+{
+ U8 tx_data[4] = {0};
+
+ DBG("_msg_enable_proximity!");
+ tx_data[0] = 0x52;
+ tx_data[1] = 0x00;
+ tx_data[2] = 0x47;
+ tx_data[3] = 0xa0;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &tx_data[0], 4);
+ mutex_unlock(&msg21xx_mutex);
+
+ bEnableTpProximity = 1;
+}
+
+static void _msg_disable_proximity(void)
+{
+ U8 tx_data[4] = {0};
+
+ DBG("_msg_disable_proximity!");
+ tx_data[0] = 0x52;
+ tx_data[1] = 0x00;
+ tx_data[2] = 0x47;
+ tx_data[3] = 0xa1;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &tx_data[0], 4);
+ mutex_unlock(&msg21xx_mutex);
+
+ bEnableTpProximity = 0;
+ bFaceClosingTp = 0;
+}
+
+void tsps_msg21xx_enable(int en)
+{
+ if (en)
+ {
+ _msg_enable_proximity();
+ }
+ else
+ {
+ _msg_disable_proximity();
+ }
+}
+
+int tsps_msg21xx_data(void)
+{
+ return bFaceClosingTp;
+}
+#endif
+
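+/*
+ * Demo-mode packets carry a two's-complement checksum: the last byte is
+ * chosen so that the byte sum of the whole packet is zero (mod 256).
+ */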
+static U8 calculate_checksum(U8 *msg, S32 length)
+{
+ S32 Checksum = 0;
+ S32 i;
+
+ for (i = 0; i < length; i ++)
+ {
+ Checksum += msg[i];
+ }
+
+ return (U8)((-Checksum) & 0xFF);
+}
+
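+/*
+ * Decode one 8-byte demo-mode packet. The first touch point is sent as
+ * absolute 12-bit coordinates in data[1..3]; a second point, when
+ * present, is encoded as a signed delta in data[4..6]. Coordinates are
+ * in the fixed 2048x2048 panel space and are scaled to the configured
+ * screen resolution. Key and proximity events reuse the same packet
+ * format with the coordinate fields set to 0xFF.
+ */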
+static S32 parse_info(touchInfo_t *info)
+{
+ U8 data[DEMO_MODE_PACKET_LENGTH] = {0};
+ U8 checksum = 0;
+ U32 x = 0, y = 0;
+ U32 x2 = 0, y2 = 0;
+ U32 delta_x = 0, delta_y = 0;
+
+ mutex_lock(&msg21xx_mutex);
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &data[0], DEMO_MODE_PACKET_LENGTH);
+ mutex_unlock(&msg21xx_mutex);
+ checksum = calculate_checksum(&data[0], (DEMO_MODE_PACKET_LENGTH-1));
+ DBG("check sum: [%x] == [%x]? \n", data[DEMO_MODE_PACKET_LENGTH-1], checksum);
+
+ if(data[DEMO_MODE_PACKET_LENGTH-1] != checksum)
+ {
+ DBG("WRONG CHECKSUM\n");
+ return -1;
+ }
+
+ if(data[0] != 0x52)
+ {
+ DBG("WRONG HEADER\n");
+ return -1;
+ }
+
+ info->keycode = 0xFF;
+ if ((data[1] == 0xFF) && (data[2] == 0xFF) && (data[3] == 0xFF) && (data[4] == 0xFF) && (data[6] == 0xFF))
+ {
+ if ((data[5] == 0xFF) || (data[5] == 0))
+ {
+ info->keycode = 0xFF;
+ }
+ else if ((data[5] == 1) || (data[5] == 2) || (data[5] == 4) || (data[5] == 8))
+ {
+ if (data[5] == 1)
+ {
+ info->keycode = 0;
+ }
+ else if (data[5] == 2)
+ {
+ info->keycode = 1;
+ }
+ else if (data[5] == 4)
+ {
+ info->keycode = 2;
+ }
+ else if (data[5] == 8)
+ {
+ info->keycode = 3;
+ }
+ }
+ #ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+ else if (bEnableTpProximity &&((data[5] == 0x80) || (data[5] == 0x40)))
+ {
+ if (data[5] == 0x80)
+ {
+ bFaceClosingTp = 1;
+ }
+ else if (data[5] == 0x40)
+ {
+ bFaceClosingTp = 0;
+ }
+ DBG("bEnableTpProximity=%d; bFaceClosingTp=%d; data[5]=%x;\n", bEnableTpProximity, bFaceClosingTp, data[5]);
+ return -1;
+ }
+ #endif
+ else
+ {
+ DBG("WRONG KEY\n");
+ return -1;
+ }
+ }
+ else
+ {
+ x = (((data[1] & 0xF0 ) << 4) | data[2]);
+ y = ((( data[1] & 0x0F) << 8) | data[3]);
+ delta_x = (((data[4] & 0xF0) << 4 ) | data[5]);
+ delta_y = (((data[4] & 0x0F) << 8 ) | data[6]);
+
+ if ((delta_x == 0) && (delta_y == 0))
+ {
+ info->point[0].x = x * TOUCH_SCREEN_X_MAX / TPD_WIDTH;
+ info->point[0].y = y * TOUCH_SCREEN_Y_MAX/ TPD_HEIGHT;
+ info->count = 1;
+ }
+ else
+ {
+ if (delta_x > 2048)
+ {
+ delta_x -= 4096;
+ }
+ if (delta_y > 2048)
+ {
+ delta_y -= 4096;
+ }
+ x2 = (U32)((S16)x + (S16)delta_x);
+ y2 = (U32)((S16)y + (S16)delta_y);
+ info->point[0].x = x * TOUCH_SCREEN_X_MAX / TPD_WIDTH;
+ info->point[0].y = y * TOUCH_SCREEN_Y_MAX/ TPD_HEIGHT;
+ info->point[1].x = x2 * TOUCH_SCREEN_X_MAX / TPD_WIDTH;
+ info->point[1].y = y2 * TOUCH_SCREEN_Y_MAX/ TPD_HEIGHT;
+ info->count = 2;
+ }
+ }
+
+ return 0;
+}
+
+static void touch_driver_touch_pressed(int x, int y)
+{
+ DBG("point touch pressed");
+
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, 1);
+ input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ input_mt_sync(input_dev);
+}
+
+static void touch_driver_touch_released(void)
+{
+ DBG("point touch released");
+
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_mt_sync(input_dev);
+}
+
+/* Read touch data over I2C and report it to the input subsystem when an interrupt occurs */
+void touch_driver_do_work(struct work_struct *work)
+{
+ touchInfo_t info;
+ int i = 0;
+ static int last_keycode = 0xFF;
+ static int last_count = 0;
+
+ DBG("touch_driver_do_work()\n");
+
+ memset(&info, 0x0, sizeof(info));
+ if (0 == parse_info(&info))
+ {
+ #ifdef CONFIG_TP_HAVE_KEY
+ if (info.keycode != 0xFF) //key touch pressed
+ {
+ DBG("touch_driver_do_work() info.keycode=%x, last_keycode=%x, tp_key_array[%d]=%d\n", info.keycode, last_keycode, info.keycode, tp_key_array[info.keycode]);
+ if (info.keycode < MAX_KEY_NUM)
+ {
+ if (info.keycode != last_keycode)
+ {
+ DBG("key touch pressed");
+
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ input_report_key(input_dev, tp_key_array[info.keycode], 1);
+
+ last_keycode = info.keycode;
+ }
+ else
+ {
+ /// pass duplicate key-pressing
+ DBG("REPEATED KEY\n");
+ }
+ }
+ else
+ {
+ DBG("WRONG KEY\n");
+ }
+ }
+ else //key touch released
+ {
+ if (last_keycode != 0xFF)
+ {
+ DBG("key touch released");
+
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_report_key(input_dev, tp_key_array[last_keycode], 0);
+
+ last_keycode = 0xFF;
+ }
+ }
+ #endif //CONFIG_TP_HAVE_KEY
+
+ if (info.count > 0) //point touch pressed
+ {
+ for (i = 0; i < info.count; i ++)
+ {
+ touch_driver_touch_pressed(info.point[i].x, info.point[i].y);
+ }
+ last_count = info.count;
+ }
+ else if (last_count > 0) //point touch released
+ {
+ touch_driver_touch_released();
+ last_count = 0;
+ }
+
+ input_sync(input_dev);
+ }
+
+ enable_irq(irq_msg21xx);
+}
+
+/* The interrupt service routine is triggered when an interrupt occurs; it defers the I2C work to the workqueue */
+irqreturn_t touch_driver_isr(int irq, void *dev_id)
+{
+ DBG("touch_driver_isr()\n");
+
+ disable_irq_nosync(irq_msg21xx);
+ schedule_work(&msg21xx_wk);
+
+ return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_FB)
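+/*
+ * Framebuffer blank events drive suspend/resume: on unblank the
+ * controller is reset through the RST GPIO and its IRQ re-enabled; on
+ * powerdown the IRQ is disabled and RST is held low, unless a firmware
+ * update or TP proximity sensing is in progress.
+ */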
+static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data)
+{
+ struct fb_event *evdata = data;
+ int *blank;
+
+ if (evdata && evdata->data && event == FB_EVENT_BLANK )
+ {
+ blank = evdata->data;
+ if (*blank == FB_BLANK_UNBLANK)
+ {
+ if (bTpInSuspend)
+ {
+ gpio_direction_output(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(10);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+ mdelay(10);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(200);
+
+ touch_driver_touch_released();
+ input_sync(input_dev);
+
+ enable_irq(irq_msg21xx);
+ }
+ bTpInSuspend = 0;
+ }
+ else if (*blank == FB_BLANK_POWERDOWN)
+ {
+ if (bFwUpdating)
+ {
+ DBG("suspend bFwUpdating=%d\n", bFwUpdating);
+ return 0;
+ }
+
+ #ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+ if (bEnableTpProximity)
+ {
+ DBG("suspend bEnableTpProximity=%d\n", bEnableTpProximity);
+ return 0;
+ }
+ #endif
+
+ if (bTpInSuspend == 0)
+ {
+ disable_irq(irq_msg21xx);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+ }
+ bTpInSuspend = 1;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void touch_driver_early_suspend(struct early_suspend *p)
+{
+ DBG("touch_driver_early_suspend()\n");
+
+ if (bFwUpdating)
+ {
+ DBG("suspend bFwUpdating=%d\n", bFwUpdating);
+ return;
+ }
+
+#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+ if (bEnableTpProximity)
+ {
+ DBG("suspend bEnableTpProximity=%d\n", bEnableTpProximity);
+ return;
+ }
+#endif
+
+ if (bTpInSuspend == 0)
+ {
+ disable_irq(irq_msg21xx);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+ }
+ bTpInSuspend = 1;
+}
+
+void touch_driver_early_resume(struct early_suspend *p)
+{
+ DBG("touch_driver_early_resume() bTpInSuspend=%d\n", bTpInSuspend);
+
+ if (bTpInSuspend)
+ {
+ gpio_direction_output(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(10);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+ mdelay(10);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(200);
+
+ touch_driver_touch_released();
+ input_sync(input_dev);
+
+ enable_irq(irq_msg21xx);
+ }
+ bTpInSuspend = 0;
+}
+#endif
+
+/* The probe function matches the device and initializes the input device */
+static int touch_driver_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+#ifdef FIRMWARE_AUTOUPDATE
+ unsigned short update_bin_major = 0, update_bin_minor = 0;
+ int i, update_flag = 0;
+#endif
+ int ret = 0;
+
+ if (input_dev != NULL)
+ {
+		DBG("input device already exists\n");
+ return -1;
+ }
+
+ DBG("*** %s ***\n", __FUNCTION__);
+
+ i2c_client = client;
+
+ ret = gpio_request(MS_TS_MSG21XX_GPIO_RST, "reset");
+ if (ret < 0)
+ {
+ pr_err("*** Failed to request GPIO %d, error %d ***\n", MS_TS_MSG21XX_GPIO_RST, ret);
+ goto err0;
+ }
+
+ // power on TP
+ gpio_direction_output(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(100);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+ mdelay(10);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(200);
+ if (0 == get_ic_type())
+ {
+		pr_err("the current IC is not an MStar device\n");
+ ret = -1;
+ goto err0;
+ }
+
+ mutex_init(&msg21xx_mutex);
+
+ /* allocate an input device */
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ {
+ ret = -ENOMEM;
+ pr_err("*** input device allocation failed ***\n");
+ goto err1;
+ }
+
+ input_dev->name = client->name;
+ input_dev->phys = "I2C";
+ input_dev->dev.parent = &client->dev;
+ input_dev->id.bustype = BUS_I2C;
+
+ /* set the supported event type for input device */
+ set_bit(EV_ABS, input_dev->evbit);
+ set_bit(EV_SYN, input_dev->evbit);
+ set_bit(EV_KEY, input_dev->evbit);
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
+#ifdef CONFIG_TP_HAVE_KEY
+ {
+ int i;
+ for (i = 0; i < MAX_KEY_NUM; i ++)
+ {
+ input_set_capability(input_dev, EV_KEY, tp_key_array[i]);
+ }
+ }
+#endif
+
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 2, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, TOUCH_SCREEN_X_MIN, TOUCH_SCREEN_X_MAX, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, TOUCH_SCREEN_Y_MIN, TOUCH_SCREEN_Y_MAX, 0, 0);
+
+ /* register the input device to input sub-system */
+ ret = input_register_device(input_dev);
+ if (ret < 0)
+ {
+ pr_err("*** Unable to register ms-touchscreen input device ***\n");
+ goto err1;
+ }
+
+ /* set sysfs for firmware */
+ firmware_class = class_create(THIS_MODULE, "ms-touchscreen-msg20xx"); //client->name
+ if (IS_ERR(firmware_class))
+ pr_err("Failed to create class(firmware)!\n");
+
+ firmware_cmd_dev = device_create(firmware_class, NULL, 0, NULL, "device");
+ if (IS_ERR(firmware_cmd_dev))
+ pr_err("Failed to create device(firmware_cmd_dev)!\n");
+
+ // version
+ if (device_create_file(firmware_cmd_dev, &dev_attr_version) < 0)
+ pr_err("Failed to create device file(%s)!\n", dev_attr_version.attr.name);
+ // update
+ if (device_create_file(firmware_cmd_dev, &dev_attr_update) < 0)
+ pr_err("Failed to create device file(%s)!\n", dev_attr_update.attr.name);
+ // data
+ if (device_create_file(firmware_cmd_dev, &dev_attr_data) < 0)
+ pr_err("Failed to create device file(%s)!\n", dev_attr_data.attr.name);
+
+#ifdef TP_PRINT
+ tp_print_create_entry();
+#endif
+
+ dev_set_drvdata(firmware_cmd_dev, NULL);
+
+ /* initialize the work queue */
+ INIT_WORK(&msg21xx_wk, touch_driver_do_work);
+
+ ret = gpio_request(MS_TS_MSG21XX_GPIO_INT, "interrupt");
+ if (ret < 0)
+ {
+ pr_err("*** Failed to request GPIO %d, error %d ***\n", MS_TS_MSG21XX_GPIO_INT, ret);
+ goto err2;
+ }
+ gpio_direction_input(MS_TS_MSG21XX_GPIO_INT);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_INT, 1);
+
+ irq_msg21xx = gpio_to_irq(MS_TS_MSG21XX_GPIO_INT);
+
+ /* request an irq and register the isr */
+	/* use input_dev as dev_id so it matches free_irq() in remove() */
+	ret = request_irq(irq_msg21xx, touch_driver_isr, IRQF_TRIGGER_RISING, "msg21xx", input_dev);
+ if (ret != 0)
+ {
+		pr_err("*** Unable to claim irq %d; error %d ***\n", irq_msg21xx, ret);
+ goto err3;
+ }
+
+ disable_irq(irq_msg21xx);
+
+#if defined(CONFIG_FB)
+ msg21xx_fb_notif.notifier_call = fb_notifier_callback;
+ ret = fb_register_client(&msg21xx_fb_notif);
+#elif defined (CONFIG_HAS_EARLYSUSPEND)
+ mstar_ts_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN;
+ mstar_ts_early_suspend.suspend = touch_driver_early_suspend;
+ mstar_ts_early_suspend.resume = touch_driver_early_resume;
+ register_early_suspend(&mstar_ts_early_suspend);
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+ tsps_assist_register_callback("msg21xx", &tsps_msg21xx_enable, &tsps_msg21xx_data);
+#endif
+
+#ifdef FIRMWARE_AUTOUPDATE
+ get_customer_firmware_version();
+ _ReadBinConfig();
+
+ if (main_sw_id == info_sw_id)
+ {
+ if (_CalMainCRC32() == bin_conf_crc32)
+ {
+ if ((main_sw_id >= SWID_START) && (main_sw_id < SWID_NULL))
+ {
+ update_bin_major= (MSG_FIRMWARE[main_sw_id-SWID_START][0x7f4f] << 8) + MSG_FIRMWARE[main_sw_id-SWID_START][0x7f4e];
+ update_bin_minor= (MSG_FIRMWARE[main_sw_id-SWID_START][0x7f51] << 8) + MSG_FIRMWARE[main_sw_id-SWID_START][0x7f50];
+
+ //check upgrading
+ if ((update_bin_major == fw_version_major) && (update_bin_minor > fw_version_minor))
+ {
+ update_flag = 1;
+ }
+ }
+ DBG("MAIN sw_id=%d,update_flag=%d,update_bin_major=%d,update_bin_minor=%d\n",main_sw_id,update_flag,update_bin_major,update_bin_minor);
+ }
+ else
+ {
+ if ((info_sw_id >= SWID_START) && (info_sw_id < SWID_NULL))
+ {
+ update_bin_major= (MSG_FIRMWARE[info_sw_id-SWID_START][0x7f4f] << 8) + MSG_FIRMWARE[info_sw_id-SWID_START][0x7f4e];
+ update_bin_minor= (MSG_FIRMWARE[info_sw_id-SWID_START][0x7f51] << 8) + MSG_FIRMWARE[info_sw_id-SWID_START][0x7f50];
+ update_flag = 1;
+ }
+ DBG("INFO1 sw_id=%d,update_flag=%d,update_bin_major=%d,update_bin_minor=%d\n",info_sw_id,update_flag,update_bin_major,update_bin_minor);
+ }
+ }
+ else
+ {
+ if ((info_sw_id >= SWID_START) && (info_sw_id < SWID_NULL))
+ {
+ update_bin_major= (MSG_FIRMWARE[info_sw_id-SWID_START][0x7f4f] << 8) + MSG_FIRMWARE[info_sw_id-SWID_START][0x7f4e];
+ update_bin_minor= (MSG_FIRMWARE[info_sw_id-SWID_START][0x7f51] << 8) + MSG_FIRMWARE[info_sw_id-SWID_START][0x7f50];
+ update_flag = 1;
+ }
+ DBG("INFO2 sw_id=%d,update_flag=%d,update_bin_major=%d,update_bin_minor=%d\n",info_sw_id,update_flag,update_bin_major,update_bin_minor);
+ }
+
+ if (update_flag == 1)
+ {
+ DBG("MSG21XX_fw_auto_update begin....\n");
+ //transfer data
+ for (i = 0; i < 33; i++)
+ {
+ firmware_data_store(NULL, NULL, &(MSG_FIRMWARE[info_sw_id-SWID_START][i*1024]), 1024);
+ }
+
+ kthread_run(fwAutoUpdate, 0, "MSG21XX_fw_auto_update");
+ DBG("*** mstar touch screen registered ***\n");
+ return 0;
+ }
+
+ reset_hw();
+#endif
+
+ DBG("*** mstar touch screen registered ***\n");
+ enable_irq(irq_msg21xx);
+ return 0;
+
+err3:
+	/* request_irq() failed, so there is no registered handler to free */
+
+err2:
+ gpio_free(MS_TS_MSG21XX_GPIO_INT);
+
+err1:
+ mutex_destroy(&msg21xx_mutex);
+ input_unregister_device(input_dev);
+ input_free_device(input_dev);
+ input_dev = NULL;
+
+err0:
+ gpio_free(MS_TS_MSG21XX_GPIO_RST);
+
+ return ret;
+}
+
+/* The remove function is called when the I2C device is unbound from the driver */
+static int touch_driver_remove(struct i2c_client *client)
+{
+ DBG("touch_driver_remove()\n");
+
+ free_irq(irq_msg21xx, input_dev);
+ gpio_free(MS_TS_MSG21XX_GPIO_INT);
+ gpio_free(MS_TS_MSG21XX_GPIO_RST);
+ input_unregister_device(input_dev);
+ mutex_destroy(&msg21xx_mutex);
+
+ return 0;
+}
+
+/* The I2C device ID table is used to match the I2C device with this driver. */
+static const struct i2c_device_id touch_device_id[] =
+{
+ {"msg21xx", 0},
+	{}, /* must not be omitted */
+};
+
+MODULE_DEVICE_TABLE(i2c, touch_device_id);
+
+static struct i2c_driver touch_device_driver =
+{
+ .driver = {
+ .name = "msg21xx",
+ .owner = THIS_MODULE,
+ },
+ .probe = touch_driver_probe,
+ .remove = touch_driver_remove,
+ .id_table = touch_device_id,
+};
+
+static int __init touch_driver_init(void)
+{
+ int ret;
+
+ /* register driver */
+ ret = i2c_add_driver(&touch_device_driver);
+ if (ret < 0)
+ {
+		DBG("adding touch_device_driver i2c driver failed\n");
+ return -ENODEV;
+ }
+	DBG("added touch_device_driver i2c driver\n");
+
+ return ret;
+}
+
+static void __exit touch_driver_exit(void)
+{
+ DBG("remove touch_device_driver i2c driver.\n");
+
+ i2c_del_driver(&touch_device_driver);
+}
+
+#ifdef TP_PRINT
+#include <linux/proc_fs.h>
+
+static U16 InfoAddr = 0x0F, PoolAddr = 0x10, TransLen = 256;
+static U8 row, units, cnt;
+
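+/*
+ * Dump 'cnt' rows of 'row' samples ('units' bytes each) from the
+ * firmware debug pool at PoolAddr, reading at most TransLen bytes per
+ * I2C transfer, and print them to the kernel log.
+ */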
+static int tp_print_proc_read(void)
+{
+ U16 i, j;
+ U16 left, offset = 0;
+ U8 dbbus_tx_data[3] = {0};
+ U8 u8Data;
+ S16 s16Data;
+ S32 s32Data;
+ char *buf = NULL;
+
+ left = cnt*row*units;
+ if ((bTpInSuspend == 0) && (InfoAddr != 0x0F) && (PoolAddr != 0x10) && (left > 0))
+ {
+ buf = kmalloc(left, GFP_KERNEL);
+ if (buf != NULL)
+ {
+ printk("tpp: \n");
+
+ while (left > 0)
+ {
+ dbbus_tx_data[0] = 0x53;
+ dbbus_tx_data[1] = ((PoolAddr + offset) >> 8) & 0xFF;
+ dbbus_tx_data[2] = (PoolAddr + offset) & 0xFF;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 3);
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &buf[offset], left > TransLen ? TransLen : left);
+ mutex_unlock(&msg21xx_mutex);
+
+ if (left > TransLen)
+ {
+ left -= TransLen;
+ offset += TransLen;
+ }
+ else
+ {
+ left = 0;
+ }
+ }
+
+ for (i = 0; i < cnt; i++)
+ {
+ printk("tpp: ");
+ for (j = 0; j < row; j++)
+ {
+ if (units == 1)
+ {
+ u8Data = buf[i*row*units + j*units];
+ printk("%d\t", u8Data);
+ }
+ else if (units == 2)
+ {
+ s16Data = buf[i*row*units + j*units] + (buf[i*row*units + j*units + 1] << 8);
+ printk("%d\t", s16Data);
+ }
+ else if (units == 4)
+ {
+						s32Data = buf[i*row*units + j*units]
+							+ (buf[i*row*units + j*units + 1] << 8)
+							+ (buf[i*row*units + j*units + 2] << 16)
+							+ (buf[i*row*units + j*units + 3] << 24);
+ printk("%d\t", s32Data);
+ }
+ }
+ printk("\n");
+ }
+
+ kfree(buf);
+ }
+ }
+
+ return 0;
+}
+
+static void tp_print_create_entry(void)
+{
+ U8 dbbus_tx_data[3] = {0};
+ U8 dbbus_rx_data[8] = {0};
+
+ dbbus_tx_data[0] = 0x53;
+ dbbus_tx_data[1] = 0x00;
+ dbbus_tx_data[2] = 0x58;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 3);
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 4);
+ mutex_unlock(&msg21xx_mutex);
+ InfoAddr = (dbbus_rx_data[1]<<8) + dbbus_rx_data[0];
+ PoolAddr = (dbbus_rx_data[3]<<8) + dbbus_rx_data[2];
+ printk("InfoAddr=0x%X\n", InfoAddr);
+ printk("PoolAddr=0x%X\n", PoolAddr);
+
+ if ((InfoAddr != 0x0F) && (PoolAddr != 0x10))
+ {
+ msleep(10);
+ dbbus_tx_data[0] = 0x53;
+ dbbus_tx_data[1] = (InfoAddr >> 8) & 0xFF;
+ dbbus_tx_data[2] = InfoAddr & 0xFF;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 3);
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 8);
+ mutex_unlock(&msg21xx_mutex);
+
+ units = dbbus_rx_data[0];
+ row = dbbus_rx_data[1];
+ cnt = dbbus_rx_data[2];
+ TransLen = (dbbus_rx_data[7]<<8) + dbbus_rx_data[6];
+ printk("tpp: row=%d, units=%d\n", row, units);
+ printk("tpp: cnt=%d, TransLen=%d\n", cnt, TransLen);
+
+ // tpp
+ if (device_create_file(firmware_cmd_dev, &dev_attr_tpp) < 0)
+ {
+ pr_err("Failed to create device file(%s)!\n", dev_attr_tpp.attr.name);
+ }
+ }
+}
+#endif
+
+module_init(touch_driver_init);
+module_exit(touch_driver_exit);
+MODULE_AUTHOR("MStar Semiconductor, Inc.");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index db4b66bb18ed..55eff5ae04e4 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -2848,10 +2848,16 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
struct iommu_group *group;
int ret;
- if (dev_is_pci(dev))
- group = pci_device_group(dev);
- else
- group = generic_device_group(dev);
+ /*
+ * We used to call pci_device_group here for dev_is_pci(dev)
+ * devices. However, that causes the root complex device to be
+ * placed in the same group as endpoint devices (and probably puts
+ * all endpoint devices in the same group as well), which makes
+ * things tricky in the DMA layer since we don't actually want to
+ * attach *everybody* in the group when one client calls attach.
+ * Instead, we'll just allocate a new group for everybody here.
+ */
+ group = generic_device_group(dev);
if (IS_ERR(group))
return group;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 5118c2f32a4c..8ab502d80270 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -851,7 +851,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (!group->default_domain) {
group->default_domain = __iommu_domain_alloc(dev->bus,
IOMMU_DOMAIN_DMA);
- group->domain = group->default_domain;
+ if (!group->domain)
+ group->domain = group->default_domain;
}
ret = iommu_group_add_device(group, dev);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 022473473971..190d294197a7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -588,9 +588,6 @@ static int gic_populate_rdist(void)
u64 offset = ptr - gic_data.redist_regions[i].redist_base;
gic_data_rdist_rd_base() = ptr;
gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
- pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
- smp_processor_id(), mpidr, i,
- &gic_data_rdist()->phys_base);
return 0;
}
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 786ffa822851..291720db72ce 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -16,13 +16,21 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/regmap.h>
+#include <linux/power_supply.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
+#include <linux/leds-qpnp-flash.h>
#include <linux/leds-qpnp-flash-v2.h>
+#include "leds.h"
+#define FLASH_LED_REG_LED_STATUS1(base) (base + 0x08)
+#define FLASH_LED_REG_LED_STATUS2(base) (base + 0x09)
+#define FLASH_LED_REG_INT_RT_STS(base) (base + 0x10)
#define FLASH_LED_REG_SAFETY_TMR(base) (base + 0x40)
#define FLASH_LED_REG_TGR_CURRENT(base) (base + 0x43)
#define FLASH_LED_REG_MOD_CTRL(base) (base + 0x46)
@@ -32,20 +40,40 @@
#define FLASH_LED_EN_LED_CTRL(base) (base + 0x4C)
#define FLASH_LED_REG_HDRM_PRGM(base) (base + 0x4D)
#define FLASH_LED_REG_HDRM_AUTO_MODE_CTRL(base) (base + 0x50)
+#define FLASH_LED_REG_WARMUP_DELAY(base) (base + 0x51)
#define FLASH_LED_REG_ISC_DELAY(base) (base + 0x52)
+#define FLASH_LED_REG_VPH_DROOP_THRESHOLD(base) (base + 0x61)
+#define FLASH_LED_REG_VPH_DROOP_DEBOUNCE(base) (base + 0x62)
+#define FLASH_LED_REG_CURRENT_DERATE_EN(base) (base + 0x76)
#define FLASH_LED_HDRM_MODE_PRGM_MASK GENMASK(7, 0)
#define FLASH_LED_HDRM_VOL_MASK GENMASK(7, 4)
#define FLASH_LED_CURRENT_MASK GENMASK(6, 0)
#define FLASH_LED_ENABLE_MASK GENMASK(2, 0)
#define FLASH_LED_SAFETY_TMR_MASK GENMASK(7, 0)
-#define FLASH_LED_ISC_DELAY_MASK GENMASK(1, 0)
+#define FLASH_LED_INT_RT_STS_MASK GENMASK(7, 0)
+#define FLASH_LED_ISC_WARMUP_DELAY_MASK GENMASK(1, 0)
+#define FLASH_LED_CURRENT_DERATE_EN_MASK GENMASK(2, 0)
+#define FLASH_LED_VPH_DROOP_DEBOUNCE_MASK GENMASK(1, 0)
+#define FLASH_LED_VPH_DROOP_HYSTERESIS_MASK GENMASK(5, 4)
+#define FLASH_LED_VPH_DROOP_THRESHOLD_MASK GENMASK(2, 0)
#define FLASH_LED_MOD_CTRL_MASK BIT(7)
#define FLASH_LED_HW_SW_STROBE_SEL_MASK BIT(2)
-
-#define FLASH_LED_HEADROOM_AUTO_MODE_ENABLED true
-#define FLASH_LED_ISC_DELAY_SHIFT 6
-#define FLASH_LED_ISC_DELAY_DEFAULT_US 3
+#define FLASH_LED_VPH_DROOP_FAULT_MASK BIT(4)
+
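+/*
+ * Register encodings implied by the conversions below: droop debounce
+ * is programmed in 8 us steps, droop hysteresis in 25 mV steps and the
+ * droop threshold in 100 mV steps starting from 2500 mV.
+ */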
+#define VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us) (val_us / 8)
+#define VPH_DROOP_HYST_MV_TO_VAL(val_mv) (val_mv / 25)
+#define VPH_DROOP_THRESH_MV_TO_VAL(val_mv) ((val_mv / 100) - 25)
+
+#define FLASH_LED_ISC_WARMUP_DELAY_SHIFT 6
+#define FLASH_LED_WARMUP_DELAY_DEFAULT 2
+#define FLASH_LED_ISC_DELAY_DEFAULT 3
+#define FLASH_LED_VPH_DROOP_DEBOUNCE_DEFAULT 2
+#define FLASH_LED_VPH_DROOP_HYST_DEFAULT 2
+#define FLASH_LED_VPH_DROOP_THRESH_DEFAULT 5
+#define FLASH_LED_VPH_DROOP_DEBOUNCE_MAX 3
+#define FLASH_LED_VPH_DROOP_HYST_MAX 3
+#define FLASH_LED_VPH_DROOP_THRESH_MAX 7
#define FLASH_LED_SAFETY_TMR_VAL_OFFSET 1
#define FLASH_LED_SAFETY_TMR_VAL_DIVISOR 10
#define FLASH_LED_SAFETY_TMR_ENABLE BIT(7)
@@ -69,6 +97,9 @@
#define FLASH_LED_SAFETY_TMR_DISABLED 0x13
#define FLASH_LED_MIN_CURRENT_MA 25
+/* notifier call chain for flash-led irqs */
+static ATOMIC_NOTIFIER_HEAD(irq_notifier_list);
+
enum flash_led_type {
FLASH_LED_TYPE_FLASH,
FLASH_LED_TYPE_TORCH,
@@ -125,9 +156,17 @@ struct flash_switch_data {
* Flash LED configuration read from device tree
*/
struct flash_led_platform_data {
- u8 isc_delay_us;
- u8 hw_strobe_option;
- bool hdrm_auto_mode_en;
+ int all_ramp_up_done_irq;
+ int all_ramp_down_done_irq;
+ int led_fault_irq;
+ u8 isc_delay;
+ u8 warmup_delay;
+ u8 current_derate_en_cfg;
+ u8 vph_droop_threshold;
+ u8 vph_droop_hysteresis;
+ u8 vph_droop_debounce;
+ u8 hw_strobe_option;
+ bool hdrm_auto_mode_en;
};
/*
@@ -139,6 +178,8 @@ struct qpnp_flash_led {
struct regmap *regmap;
struct flash_node_data *fnode;
struct flash_switch_data *snode;
+ struct power_supply *bms_psy;
+ struct notifier_block nb;
spinlock_t lock;
int num_fnodes;
int num_snodes;
@@ -147,19 +188,36 @@ struct qpnp_flash_led {
};
static int
+qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data)
+{
+ int rc;
+ uint val;
+
+ rc = regmap_read(led->regmap, addr, &val);
+ if (rc < 0)
+ dev_err(&led->pdev->dev, "Unable to read from 0x%04X rc = %d\n",
+ addr, rc);
+ else
+ dev_dbg(&led->pdev->dev, "Read 0x%02X from addr 0x%04X\n",
+ val, addr);
+
+ *data = (u8)val;
+ return rc;
+}
+
+static int
qpnp_flash_led_masked_write(struct qpnp_flash_led *led, u16 addr, u8 mask,
- u8 val)
+ u8 val)
{
int rc;
rc = regmap_update_bits(led->regmap, addr, mask, val);
if (rc < 0)
- dev_err(&led->pdev->dev,
- "Unable to update bits from 0x%02X, rc = %d\n",
- addr, rc);
+ dev_err(&led->pdev->dev, "Unable to update bits from 0x%04X, rc = %d\n",
+ addr, rc);
else
- dev_dbg(&led->pdev->dev, "Wrote 0x%02X to addr 0x%02X\n",
- val, addr);
+ dev_dbg(&led->pdev->dev, "Wrote 0x%02X to addr 0x%04X\n",
+ val, addr);
return rc;
}
@@ -195,7 +253,43 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_ISC_DELAY(led->base),
- FLASH_LED_ISC_DELAY_MASK, led->pdata->isc_delay_us);
+ FLASH_LED_ISC_WARMUP_DELAY_MASK,
+ led->pdata->isc_delay);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_WARMUP_DELAY(led->base),
+ FLASH_LED_ISC_WARMUP_DELAY_MASK,
+ led->pdata->warmup_delay);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_CURRENT_DERATE_EN(led->base),
+ FLASH_LED_CURRENT_DERATE_EN_MASK,
+ led->pdata->current_derate_en_cfg);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_VPH_DROOP_DEBOUNCE(led->base),
+ FLASH_LED_VPH_DROOP_DEBOUNCE_MASK,
+ led->pdata->vph_droop_debounce);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_VPH_DROOP_THRESHOLD(led->base),
+ FLASH_LED_VPH_DROOP_THRESHOLD_MASK,
+ led->pdata->vph_droop_threshold);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_VPH_DROOP_THRESHOLD(led->base),
+ FLASH_LED_VPH_DROOP_HYSTERESIS_MASK,
+ led->pdata->vph_droop_hysteresis);
if (rc < 0)
return rc;
@@ -459,13 +553,21 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
return 0;
}
-int qpnp_flash_led_prepare(struct led_classdev *led_cdev, int options)
+int qpnp_flash_led_prepare(struct led_trigger *trig, int options)
{
- struct flash_switch_data *snode =
- container_of(led_cdev, struct flash_switch_data, cdev);
- struct qpnp_flash_led *led = dev_get_drvdata(&snode->pdev->dev);
+ struct led_classdev *led_cdev = trigger_to_lcdev(trig);
+ struct flash_switch_data *snode;
+ struct qpnp_flash_led *led;
int rc, val = 0;
+ if (!led_cdev) {
+ pr_err("Invalid led_trigger provided\n");
+ return -EINVAL;
+ }
+
+ snode = container_of(led_cdev, struct flash_switch_data, cdev);
+ led = dev_get_drvdata(&snode->pdev->dev);
+
if (!(options & (ENABLE_REGULATOR | QUERY_MAX_CURRENT))) {
dev_err(&led->pdev->dev, "Invalid options %d\n", options);
return -EINVAL;
@@ -521,6 +623,112 @@ static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
spin_unlock(&led->lock);
}
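+/*
+ * The "bms" power supply may not have registered by the time this
+ * driver probes; this notifier waits for it to appear, caches the
+ * handle and then unregisters itself.
+ */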
+static int flash_led_psy_notifier_call(struct notifier_block *nb,
+ unsigned long ev, void *v)
+{
+ struct power_supply *psy = v;
+ struct qpnp_flash_led *led =
+ container_of(nb, struct qpnp_flash_led, nb);
+
+ if (ev != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if (!strcmp(psy->desc->name, "bms")) {
+ led->bms_psy = power_supply_get_by_name("bms");
+ if (!led->bms_psy)
+ dev_err(&led->pdev->dev, "Failed to get bms power_supply\n");
+ else
+ power_supply_unreg_notifier(&led->nb);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int flash_led_psy_register_notifier(struct qpnp_flash_led *led)
+{
+ int rc;
+
+ led->nb.notifier_call = flash_led_psy_notifier_call;
+ rc = power_supply_reg_notifier(&led->nb);
+ if (rc < 0) {
+ pr_err("Couldn't register psy notifier, rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Shared IRQ handler: reads INT_RT_STS, notifies listeners on ramp-up completion, and logs LED short/open and VPH droop faults */
+static irqreturn_t qpnp_flash_led_irq_handler(int irq, void *_led)
+{
+ struct qpnp_flash_led *led = _led;
+ enum flash_led_irq_type irq_type = INVALID_IRQ;
+ int rc;
+ u8 irq_status, led_status1, led_status2;
+
+ dev_dbg(&led->pdev->dev, "irq received, irq=%d\n", irq);
+
+ rc = qpnp_flash_led_read(led,
+ FLASH_LED_REG_INT_RT_STS(led->base), &irq_status);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev, "Failed to read interrupt status reg, rc=%d\n",
+ rc);
+ goto exit;
+ }
+
+ if (irq == led->pdata->all_ramp_up_done_irq)
+ irq_type = ALL_RAMP_UP_DONE_IRQ;
+ else if (irq == led->pdata->all_ramp_down_done_irq)
+ irq_type = ALL_RAMP_DOWN_DONE_IRQ;
+ else if (irq == led->pdata->led_fault_irq)
+ irq_type = LED_FAULT_IRQ;
+
+ if (irq_type == ALL_RAMP_UP_DONE_IRQ)
+ atomic_notifier_call_chain(&irq_notifier_list,
+ irq_type, NULL);
+
+ if (irq_type == LED_FAULT_IRQ) {
+ rc = qpnp_flash_led_read(led,
+ FLASH_LED_REG_LED_STATUS1(led->base), &led_status1);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev, "Failed to read led_status1 reg, rc=%d\n",
+ rc);
+ goto exit;
+ }
+
+ rc = qpnp_flash_led_read(led,
+ FLASH_LED_REG_LED_STATUS2(led->base), &led_status2);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev, "Failed to read led_status2 reg, rc=%d\n",
+ rc);
+ goto exit;
+ }
+
+ if (led_status1)
+ dev_emerg(&led->pdev->dev, "led short/open fault detected! led_status1=%x\n",
+ led_status1);
+
+ if (led_status2 & FLASH_LED_VPH_DROOP_FAULT_MASK)
+ dev_emerg(&led->pdev->dev, "led vph_droop fault detected!\n");
+ }
+
+ dev_dbg(&led->pdev->dev, "irq handled, irq_type=%x, irq_status=%x\n",
+ irq_type, irq_status);
+
+exit:
+ return IRQ_HANDLED;
+}
+
+int qpnp_flash_led_register_irq_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&irq_notifier_list, nb);
+}
+
+int qpnp_flash_led_unregister_irq_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&irq_notifier_list, nb);
+}
+
static int qpnp_flash_led_regulator_setup(struct qpnp_flash_led *led,
struct flash_switch_data *snode, bool on)
{
@@ -901,28 +1109,116 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
{
int rc;
u32 val;
+ bool short_circuit_det, open_circuit_det, vph_droop_det;
- led->pdata->hdrm_auto_mode_en = FLASH_LED_HEADROOM_AUTO_MODE_ENABLED;
led->pdata->hdrm_auto_mode_en = of_property_read_bool(node,
"qcom,hdrm-auto-mode");
- led->pdata->isc_delay_us = FLASH_LED_ISC_DELAY_DEFAULT_US;
- rc = of_property_read_u32(node, "qcom,isc-delay", &val);
+ led->pdata->isc_delay = FLASH_LED_ISC_DELAY_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,isc-delay-us", &val);
+ if (!rc) {
+ led->pdata->isc_delay =
+ val >> FLASH_LED_ISC_WARMUP_DELAY_SHIFT;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read ISC delay, rc=%d\n", rc);
+ return rc;
+ }
+
+ led->pdata->warmup_delay = FLASH_LED_WARMUP_DELAY_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,warmup-delay-us", &val);
+ if (!rc) {
+ led->pdata->warmup_delay =
+ val >> FLASH_LED_ISC_WARMUP_DELAY_SHIFT;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read WARMUP delay, rc=%d\n", rc);
+ return rc;
+ }
+
+ short_circuit_det =
+ of_property_read_bool(node, "qcom,short-circuit-det");
+ open_circuit_det = of_property_read_bool(node, "qcom,open-circuit-det");
+ vph_droop_det = of_property_read_bool(node, "qcom,vph-droop-det");
+ led->pdata->current_derate_en_cfg = (vph_droop_det << 2) |
+ (open_circuit_det << 1) | short_circuit_det;
+
+ led->pdata->vph_droop_debounce = FLASH_LED_VPH_DROOP_DEBOUNCE_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,vph-droop-debounce-us", &val);
if (!rc) {
- led->pdata->isc_delay_us = val >> FLASH_LED_ISC_DELAY_SHIFT;
+ led->pdata->vph_droop_debounce =
+ VPH_DROOP_DEBOUNCE_US_TO_VAL(val);
} else if (rc != -EINVAL) {
- dev_err(&led->pdev->dev, "Unable to read ISC delay\n");
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH droop debounce, rc=%d\n", rc);
return rc;
}
+ if (led->pdata->vph_droop_debounce > FLASH_LED_VPH_DROOP_DEBOUNCE_MAX) {
+ dev_err(&led->pdev->dev,
+ "Invalid VPH droop debounce specified");
+ return -EINVAL;
+ }
+
+ led->pdata->vph_droop_threshold = FLASH_LED_VPH_DROOP_THRESH_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,vph-droop-threshold-mv", &val);
+ if (!rc) {
+ led->pdata->vph_droop_threshold =
+ VPH_DROOP_THRESH_MV_TO_VAL(val);
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH droop threshold, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (led->pdata->vph_droop_threshold > FLASH_LED_VPH_DROOP_THRESH_MAX) {
+ dev_err(&led->pdev->dev,
+ "Invalid VPH droop threshold specified");
+ return -EINVAL;
+ }
+
+ led->pdata->vph_droop_hysteresis =
+ FLASH_LED_VPH_DROOP_HYST_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,vph-droop-hysteresis-mv", &val);
+ if (!rc) {
+ led->pdata->vph_droop_hysteresis =
+ VPH_DROOP_HYST_MV_TO_VAL(val);
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH droop hysteresis, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (led->pdata->vph_droop_hysteresis > FLASH_LED_VPH_DROOP_HYST_MAX) {
+ dev_err(&led->pdev->dev,
+ "Invalid VPH droop hysteresis specified");
+ return -EINVAL;
+ }
+
rc = of_property_read_u32(node, "qcom,hw-strobe-option", &val);
if (!rc) {
led->pdata->hw_strobe_option = (u8)val;
} else if (rc != -EINVAL) {
- dev_err(&led->pdev->dev, "Unable to parse hw strobe option\n");
+ dev_err(&led->pdev->dev,
+ "Unable to parse hw strobe option, rc=%d\n", rc);
return rc;
}
+ led->pdata->all_ramp_up_done_irq =
+ of_irq_get_byname(node, "all-ramp-up-done-irq");
+ if (led->pdata->all_ramp_up_done_irq < 0)
+ dev_dbg(&led->pdev->dev, "all-ramp-up-done-irq not used\n");
+
+ led->pdata->all_ramp_down_done_irq =
+ of_irq_get_byname(node, "all-ramp-down-done-irq");
+ if (led->pdata->all_ramp_down_done_irq < 0)
+ dev_dbg(&led->pdev->dev, "all-ramp-down-done-irq not used\n");
+
+ led->pdata->led_fault_irq =
+ of_irq_get_byname(node, "led-fault-irq");
+ if (led->pdata->led_fault_irq < 0)
+ dev_dbg(&led->pdev->dev, "led-fault-irq not used\n");
+
return 0;
}
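To make the bit packing above concrete, here is a small worked sketch of how the three boolean DT properties fold into current_derate_en_cfg; the property values are chosen arbitrarily for illustration.

	bool short_circuit_det = true;   /* qcom,short-circuit-det present */
	bool open_circuit_det  = false;  /* qcom,open-circuit-det absent   */
	bool vph_droop_det     = true;   /* qcom,vph-droop-det present     */

	/* bit 0: short circuit, bit 1: open circuit, bit 2: VPH droop */
	u8 cfg = (vph_droop_det << 2) | (open_circuit_det << 1) | short_circuit_det;
	/* cfg == 0x5: droop and short-circuit detection enabled */

The numeric properties follow the usual optional-property convention: when a property is absent (-EINVAL) the FLASH_LED_*_DEFAULT value is kept, and only a genuine read error aborts the probe.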
@@ -1033,11 +1329,64 @@ static int qpnp_flash_led_probe(struct platform_device *pdev)
}
}
+ /* setup irqs */
+ if (led->pdata->all_ramp_up_done_irq >= 0) {
+ rc = devm_request_threaded_irq(&led->pdev->dev,
+ led->pdata->all_ramp_up_done_irq,
+ NULL, qpnp_flash_led_irq_handler,
+ IRQF_ONESHOT,
+ "qpnp_flash_led_all_ramp_up_done_irq", led);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Unable to request all_ramp_up_done(%d) IRQ(err:%d)\n",
+ led->pdata->all_ramp_up_done_irq, rc);
+ goto error_switch_register;
+ }
+ }
+
+ if (led->pdata->all_ramp_down_done_irq >= 0) {
+ rc = devm_request_threaded_irq(&led->pdev->dev,
+ led->pdata->all_ramp_down_done_irq,
+ NULL, qpnp_flash_led_irq_handler,
+ IRQF_ONESHOT,
+ "qpnp_flash_led_all_ramp_down_done_irq", led);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Unable to request all_ramp_down_done(%d) IRQ(err:%d)\n",
+ led->pdata->all_ramp_down_done_irq, rc);
+ goto error_switch_register;
+ }
+ }
+
+ if (led->pdata->led_fault_irq >= 0) {
+ rc = devm_request_threaded_irq(&led->pdev->dev,
+ led->pdata->led_fault_irq,
+ NULL, qpnp_flash_led_irq_handler,
+ IRQF_ONESHOT,
+ "qpnp_flash_led_fault_irq", led);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Unable to request led_fault(%d) IRQ(err:%d)\n",
+ led->pdata->led_fault_irq, rc);
+ goto error_switch_register;
+ }
+ }
+
+ led->bms_psy = power_supply_get_by_name("bms");
+ if (!led->bms_psy) {
+ rc = flash_led_psy_register_notifier(led);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Couldn't register psy notifier, rc = %d\n",
+ rc);
+ goto error_switch_register;
+ }
+ }
+
rc = qpnp_flash_led_init_settings(led);
if (rc < 0) {
dev_err(&pdev->dev,
"Failed to initialize flash LED, rc=%d\n", rc);
- goto error_switch_register;
+ goto unreg_notifier;
}
spin_lock_init(&led->lock);
@@ -1046,6 +1395,8 @@ static int qpnp_flash_led_probe(struct platform_device *pdev)
return 0;
+unreg_notifier:
+ power_supply_unreg_notifier(&led->nb);
error_switch_register:
while (i > 0)
led_classdev_unregister(&led->snode[--i].cdev);
@@ -1078,6 +1429,7 @@ static int qpnp_flash_led_remove(struct platform_device *pdev)
while (i > 0)
led_classdev_unregister(&led->fnode[--i].cdev);
+ power_supply_unreg_notifier(&led->nb);
return 0;
}
diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c
index c45b217ee754..3e19cf6796a3 100644
--- a/drivers/leds/leds-qpnp-flash.c
+++ b/drivers/leds/leds-qpnp-flash.c
@@ -26,11 +26,12 @@
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/power_supply.h>
+#include <linux/leds-qpnp-flash.h>
#include <linux/qpnp/qpnp-adc.h>
#include <linux/qpnp/qpnp-revid.h>
-#include "leds.h"
#include <linux/debugfs.h>
#include <linux/uaccess.h>
+#include "leds.h"
#define FLASH_LED_PERIPHERAL_SUBTYPE(base) (base + 0x05)
#define FLASH_SAFETY_TIMER(base) (base + 0x40)
@@ -1154,6 +1155,47 @@ error_regulator_enable:
return rc;
}
+int qpnp_flash_led_prepare(struct led_trigger *trig, int options)
+{
+ struct led_classdev *led_cdev = trigger_to_lcdev(trig);
+ struct flash_node_data *flash_node;
+ struct qpnp_flash_led *led;
+ int rc, val = 0;
+
+ if (!led_cdev) {
+ pr_err("Invalid led_trigger provided\n");
+ return -EINVAL;
+ }
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (!(options & (ENABLE_REGULATOR | QUERY_MAX_CURRENT))) {
+ dev_err(&led->pdev->dev, "Invalid options %d\n", options);
+ return -EINVAL;
+ }
+
+ if (options & ENABLE_REGULATOR) {
+ rc = flash_regulator_enable(led, flash_node, true);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "enable regulator failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (options & QUERY_MAX_CURRENT) {
+ val = qpnp_flash_led_get_max_avail_current(flash_node, led);
+ if (val < 0) {
+ dev_err(&led->pdev->dev,
+ "query max current failed, rc=%d\n", val);
+ return val;
+ }
+ }
+
+ return val;
+}
+
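As a usage sketch, a camera-flash client could drive the new hook roughly as below; flash_trigger stands in for whatever struct led_trigger the client obtained for the flash node, which is not shown here.

	int rc, max_ma;

	/* bring up the boost regulator before strobing */
	rc = qpnp_flash_led_prepare(flash_trigger, ENABLE_REGULATOR);
	if (rc < 0)
		return rc;

	/* ask the driver how much flash current is currently available */
	max_ma = qpnp_flash_led_prepare(flash_trigger, QUERY_MAX_CURRENT);
	if (max_ma < 0)
		return max_ma;

	/* clamp the requested flash current to max_ma before firing */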
static void qpnp_flash_led_work(struct work_struct *work)
{
struct flash_node_data *flash_node = container_of(work,
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 4238fbc31d35..61de87e2ad80 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -44,6 +44,22 @@ static inline int led_get_brightness(struct led_classdev *led_cdev)
return led_cdev->brightness;
}
+static inline struct led_classdev *trigger_to_lcdev(struct led_trigger *trig)
+{
+ struct led_classdev *led_cdev;
+
+ read_lock(&trig->leddev_list_lock);
+ list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
+ if (!strcmp(led_cdev->default_trigger, trig->name)) {
+ read_unlock(&trig->leddev_list_lock);
+ return led_cdev;
+ }
+ }
+
+ read_unlock(&trig->leddev_list_lock);
+ return NULL;
+}
+
void led_init_core(struct led_classdev *led_cdev);
void led_stop_software_blink(struct led_classdev *led_cdev);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8d0ead98eb6e..a296425a7270 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1015,8 +1015,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
*/
atomic_set(&dc->count, 1);
- if (bch_cached_dev_writeback_start(dc))
+ /* Block writeback thread, but spawn it */
+ down_write(&dc->writeback_lock);
+ if (bch_cached_dev_writeback_start(dc)) {
+ up_write(&dc->writeback_lock);
return -ENOMEM;
+ }
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(dc);
@@ -1028,6 +1032,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
+ /* Allow the writeback thread to proceed */
+ up_write(&dc->writeback_lock);
+
pr_info("Caching %s as %s on set %pU",
bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
dc->disk.c->sb.set_uuid);
@@ -1366,6 +1373,9 @@ static void cache_set_flush(struct closure *cl)
struct btree *b;
unsigned i;
+ if (!c)
+ closure_return(cl);
+
bch_cache_accounting_destroy(&c->accounting);
kobject_put(&c->internal);
@@ -1828,11 +1838,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
return 0;
}
-static void register_cache(struct cache_sb *sb, struct page *sb_page,
+static int register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
- const char *err = "cannot allocate memory";
+ const char *err = NULL;
+ int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
@@ -1847,27 +1858,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
- if (cache_alloc(sb, ca) != 0)
+ ret = cache_alloc(sb, ca);
+ if (ret != 0)
goto err;
- err = "error creating kobject";
- if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
- goto err;
+ if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
+ err = "error calling kobject_add";
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_lock(&bch_register_lock);
err = register_cache_set(ca);
mutex_unlock(&bch_register_lock);
- if (err)
- goto err;
+ if (err) {
+ ret = -ENODEV;
+ goto out;
+ }
pr_info("registered cache device %s", bdevname(bdev, name));
+
out:
kobject_put(&ca->kobj);
- return;
+
err:
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
- goto out;
+ if (err)
+ pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+
+ return ret;
}
/* Global interfaces/init */
@@ -1965,7 +1984,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!ca)
goto err_close;
- register_cache(sb, sb_page, bdev, ca);
+ if (register_cache(sb, sb_page, bdev, ca) != 0)
+ goto err_close;
}
out:
if (sb_page)
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index f6543f3a970f..27f2ef300f8b 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,19 +867,40 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
-#define WRITE_LOCK(cmd) \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
+#define WRITE_LOCK(cmd) \
+ down_write(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_write(&cmd->root_lock); \
return -EINVAL; \
- down_write(&cmd->root_lock)
+ }
#define WRITE_LOCK_VOID(cmd) \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
+ down_write(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_write(&cmd->root_lock); \
return; \
- down_write(&cmd->root_lock)
+ }
#define WRITE_UNLOCK(cmd) \
up_write(&cmd->root_lock)
+#define READ_LOCK(cmd) \
+ down_read(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_read(&cmd->root_lock); \
+ return -EINVAL; \
+ }
+
+#define READ_LOCK_VOID(cmd) \
+ down_read(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_read(&cmd->root_lock); \
+ return; \
+ }
+
+#define READ_UNLOCK(cmd) \
+ up_read(&cmd->root_lock)
+
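Because these macros now take the rwsem before testing fail_io and drop it again on the early return, a caller written against them stays balanced on both paths. A minimal sketch (dm_cache_example_count is a hypothetical helper, not part of this patch):

static int dm_cache_example_count(struct dm_cache_metadata *cmd,
				  dm_cblock_t *result)
{
	READ_LOCK(cmd);		/* may return -EINVAL with the lock already dropped */
	*result = cmd->cache_blocks;
	READ_UNLOCK(cmd);

	return 0;
}

The embedded early return is also why the accessors converted below switch from returning the value directly to returning an int error code with an out parameter.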
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
int r;
@@ -1015,22 +1036,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = __load_discards(cmd, fn, context);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
{
- dm_cblock_t r;
+ READ_LOCK(cmd);
+ *result = cmd->cache_blocks;
+ READ_UNLOCK(cmd);
- down_read(&cmd->root_lock);
- r = cmd->cache_blocks;
- up_read(&cmd->root_lock);
-
- return r;
+ return 0;
}
static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
@@ -1188,9 +1207,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = __load_mappings(cmd, policy, fn, context);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1215,18 +1234,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
void dm_cache_dump(struct dm_cache_metadata *cmd)
{
- down_read(&cmd->root_lock);
+ READ_LOCK_VOID(cmd);
__dump_mappings(cmd);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
}
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = cmd->changed;
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1276,9 +1295,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
- down_read(&cmd->root_lock);
+ READ_LOCK_VOID(cmd);
*stats = cmd->stats;
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
}
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
@@ -1312,9 +1331,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
{
int r = -EINVAL;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = dm_sm_get_nr_free(cmd->metadata_sm, result);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1324,9 +1343,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
{
int r = -EINVAL;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1417,7 +1436,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
- return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+ int r;
+
+ READ_LOCK(cmd);
+ r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+ READ_UNLOCK(cmd);
+
+ return r;
}
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
@@ -1440,10 +1465,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
- /*
- * We ignore fail_io for this function.
- */
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
set_bit(NEEDS_CHECK, &cmd->flags);
r = superblock_lock(cmd, &sblock);
@@ -1458,19 +1480,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
dm_bm_unlock(sblock);
out:
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
{
- bool needs_check;
+ READ_LOCK(cmd);
+ *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
+ READ_UNLOCK(cmd);
- down_read(&cmd->root_lock);
- needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
- up_read(&cmd->root_lock);
-
- return needs_check;
+ return 0;
}
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 2ffee21f318d..8528744195e5 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
* origin blocks to map to.
*/
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
sector_t discard_block_size,
@@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
*/
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 2fd4c8296144..515f83e7d9ab 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -987,9 +987,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{
- bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
+ bool needs_check;
enum cache_metadata_mode old_mode = get_cache_mode(cache);
+ if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
+ DMERR("unable to read needs_check flag, setting failure mode");
+ new_mode = CM_FAIL;
+ }
+
if (new_mode == CM_WRITE && needs_check) {
DMERR("%s: unable to switch cache to write mode until repaired.",
cache_device_name(cache));
@@ -3513,6 +3518,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
char buf[BDEVNAME_SIZE];
struct cache *cache = ti->private;
dm_cblock_t residency;
+ bool needs_check;
switch (type) {
case STATUSTYPE_INFO:
@@ -3586,7 +3592,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("rw ");
- if (dm_cache_metadata_needs_check(cache->cmd))
+ r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
+
+ if (r || needs_check)
DMEMIT("needs_check ");
else
DMEMIT("- ");
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 61f184ad081c..e108deebbaaa 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1106,6 +1106,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
+ dev_t origin_dev, cow_dev;
unsigned args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
@@ -1136,11 +1137,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Cannot get origin device";
goto bad_origin;
}
+ origin_dev = s->origin->bdev->bd_dev;
cow_path = argv[0];
argv++;
argc--;
+ cow_dev = dm_get_dev_t(cow_path);
+ if (cow_dev && cow_dev == origin_dev) {
+ ti->error = "COW device cannot be the same as origin device";
+ r = -EINVAL;
+ goto bad_cow;
+ }
+
r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
if (r) {
ti->error = "Cannot get COW device";
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 061152a43730..cb5d0daf53bb 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
}
/*
+ * Convert the path to a device
+ */
+dev_t dm_get_dev_t(const char *path)
+{
+ dev_t uninitialized_var(dev);
+ struct block_device *bdev;
+
+ bdev = lookup_bdev(path);
+ if (IS_ERR(bdev))
+ dev = name_to_dev_t(path);
+ else {
+ dev = bdev->bd_dev;
+ bdput(bdev);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(dm_get_dev_t);
+
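A brief illustration of the helper's two lookup paths (the device names are arbitrary examples): an existing /dev node resolves through lookup_bdev(), while a not-yet-created node or a major:minor string falls back to name_to_dev_t().

	dev_t a = dm_get_dev_t("/dev/vdb");	/* via lookup_bdev()   */
	dev_t b = dm_get_dev_t("253:16");	/* via name_to_dev_t() */

	if (a && a == b)
		pr_info("both strings name the same device\n");

The dm-snap change earlier in this series relies on exactly this: it compares the COW path against the origin device before opening it.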
+/*
* Add a device to the list, or just increment the usage count if
* it's already present.
*/
@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
struct dm_dev **result)
{
int r;
- dev_t uninitialized_var(dev);
+ dev_t dev;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
- struct block_device *bdev;
BUG_ON(!t);
- /* convert the path to a device */
- bdev = lookup_bdev(path);
- if (IS_ERR(bdev)) {
- dev = name_to_dev_t(path);
- if (!dev)
- return -ENODEV;
- } else {
- dev = bdev->bd_dev;
- bdput(bdev);
- }
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
dd = find_device(&t->devices, dev);
if (!dd) {
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index c219a053c7f6..911ada643364 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1943,5 +1943,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
- dm_tm_issue_prefetches(pmd->tm);
+ down_read(&pmd->root_lock);
+ if (!pmd->fail_io)
+ dm_tm_issue_prefetches(pmd->tm);
+ up_read(&pmd->root_lock);
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c46c9dc9b667..479fdbb3dcb2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1109,12 +1109,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (run_queue) {
- if (md->queue->mq_ops)
- blk_mq_run_hw_queues(md->queue, true);
- else
- blk_run_queue_async(md->queue);
- }
+ if (!md->queue->mq_ops && run_queue)
+ blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
@@ -1214,9 +1210,9 @@ static void dm_requeue_original_request(struct mapped_device *md,
{
int rw = rq_data_dir(rq);
+ rq_end_stats(md, rq);
dm_unprep_request(rq);
- rq_end_stats(md, rq);
if (!rq->q->mq_ops)
old_requeue_request(rq);
else {
@@ -1336,7 +1332,10 @@ static void dm_complete_request(struct request *rq, int error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- blk_complete_request(rq);
+ if (!rq->q->mq_ops)
+ blk_complete_request(rq);
+ else
+ blk_mq_complete_request(rq, error);
}
/*
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 0a72ab6e6c20..dd483bb2e111 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- mp_bh->bio = *bio;
+ bio_init(&mp_bh->bio);
+ __bio_clone_fast(&mp_bh->bio, bio);
+
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c4b913409226..515554c7365b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2274,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+ conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -2391,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
- list_add(&tmp, &conf->bio_end_io_list);
- list_del_init(&conf->bio_end_io_list);
+ while (!list_empty(&conf->bio_end_io_list)) {
+ list_move(conf->bio_end_io_list.prev, &tmp);
+ conf->nr_queued--;
+ }
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ce959b4ae4df..ebb0dd612ebd 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2664,6 +2664,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
+ conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -2691,8 +2692,10 @@ static void raid10d(struct md_thread *thread)
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
- list_add(&tmp, &conf->bio_end_io_list);
- list_del_init(&conf->bio_end_io_list);
+ while (!list_empty(&conf->bio_end_io_list)) {
+ list_move(conf->bio_end_io_list.prev, &tmp);
+ conf->nr_queued--;
+ }
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 704ef7fcfbf8..10ce885445f6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -340,8 +340,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
int hash)
{
int size;
- unsigned long do_wakeup = 0;
- int i = 0;
+ bool do_wakeup = false;
unsigned long flags;
if (hash == NR_STRIPE_HASH_LOCKS) {
@@ -362,19 +361,15 @@ static void release_inactive_stripe_list(struct r5conf *conf,
!list_empty(list))
atomic_dec(&conf->empty_inactive_list_nr);
list_splice_tail_init(list, conf->inactive_list + hash);
- do_wakeup |= 1 << hash;
+ do_wakeup = true;
spin_unlock_irqrestore(conf->hash_locks + hash, flags);
}
size--;
hash--;
}
- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
- if (do_wakeup & (1 << i))
- wake_up(&conf->wait_for_stripe[i]);
- }
-
if (do_wakeup) {
+ wake_up(&conf->wait_for_stripe);
if (atomic_read(&conf->active_stripes) == 0)
wake_up(&conf->wait_for_quiescent);
if (conf->retry_read_aligned)
@@ -687,15 +682,14 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
if (!sh) {
set_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
- wait_event_exclusive_cmd(
- conf->wait_for_stripe[hash],
+ wait_event_lock_irq(
+ conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes * 3 / 4)
|| !test_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state)),
- spin_unlock_irq(conf->hash_locks + hash),
- spin_lock_irq(conf->hash_locks + hash));
+ *(conf->hash_locks + hash));
clear_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
} else {
@@ -720,9 +714,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
}
} while (sh == NULL);
- if (!list_empty(conf->inactive_list + hash))
- wake_up(&conf->wait_for_stripe[hash]);
-
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -2091,6 +2082,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
unsigned long cpu;
int err = 0;
+ /*
+ * Never shrink. And mddev_suspend() could deadlock if this is called
+ * from raid5d. In that case, scribble_disks and scribble_sectors
+ * should equal to new_disks and new_sectors
+ */
+ if (conf->scribble_disks >= new_disks &&
+ conf->scribble_sectors >= new_sectors)
+ return 0;
mddev_suspend(conf->mddev);
get_online_cpus();
for_each_present_cpu(cpu) {
@@ -2112,6 +2111,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
}
put_online_cpus();
mddev_resume(conf->mddev);
+ if (!err) {
+ conf->scribble_disks = new_disks;
+ conf->scribble_sectors = new_sectors;
+ }
return err;
}
@@ -2192,7 +2195,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
cnt = 0;
list_for_each_entry(nsh, &newstripes, lru) {
lock_device_hash_lock(conf, hash);
- wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
+ wait_event_cmd(conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash),
unlock_device_hash_lock(conf, hash),
lock_device_hash_lock(conf, hash));
@@ -4238,7 +4241,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
(1 << STRIPE_SYNCING) |
(1 << STRIPE_REPLACED) |
- (1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DELAYED) |
(1 << STRIPE_BIT_DELAY) |
(1 << STRIPE_FULL_WRITE) |
@@ -4253,6 +4255,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
(1 << STRIPE_REPLACED)));
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ (1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DEGRADED)),
head_sh->state & (1 << STRIPE_INSYNC));
@@ -6414,6 +6417,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
}
put_online_cpus();
+ if (!err) {
+ conf->scribble_disks = max(conf->raid_disks,
+ conf->previous_raid_disks);
+ conf->scribble_sectors = max(conf->chunk_sectors,
+ conf->prev_chunk_sectors);
+ }
return err;
}
@@ -6504,9 +6513,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
seqcount_init(&conf->gen_lock);
mutex_init(&conf->cache_size_mutex);
init_waitqueue_head(&conf->wait_for_quiescent);
- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
- init_waitqueue_head(&conf->wait_for_stripe[i]);
- }
+ init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
INIT_LIST_HEAD(&conf->hold_list);
@@ -7015,8 +7022,8 @@ static int run(struct mddev *mddev)
}
if (discard_supported &&
- mddev->queue->limits.max_discard_sectors >= stripe &&
- mddev->queue->limits.discard_granularity >= stripe)
+ mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
+ mddev->queue->limits.discard_granularity >= stripe)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
mddev->queue);
else
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index a415e1cd39b8..517d4b68a1be 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -510,6 +510,8 @@ struct r5conf {
* conversions
*/
} __percpu *percpu;
+ int scribble_disks;
+ int scribble_sectors;
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
@@ -522,7 +524,7 @@ struct r5conf {
atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent;
- wait_queue_head_t wait_for_stripe[NR_STRIPE_HASH_LOCKS];
+ wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
unsigned long cache_state;
#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index e4900df1140b..c24839cfcc35 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1161,12 +1161,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
}
}
+static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_edid_detect ed;
+
+ /* We failed to read the EDID, so send an event for this. */
+ ed.present = false;
+ ed.segment = adv7511_rd(sd, 0xc4);
+ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
+}
+
static void adv7511_edid_handler(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
struct v4l2_subdev *sd = &state->sd;
- struct adv7511_edid_detect ed;
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
@@ -1191,9 +1202,7 @@ static void adv7511_edid_handler(struct work_struct *work)
}
/* We failed to read the EDID, so send an event for this. */
- ed.present = false;
- ed.segment = adv7511_rd(sd, 0xc4);
- v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ adv7511_notify_no_edid(sd);
v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
}
@@ -1264,7 +1273,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
/* update read only ctrls */
v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
@@ -1294,6 +1302,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
}
adv7511_s_power(sd, false);
memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
+ adv7511_notify_no_edid(sd);
}
}
@@ -1370,6 +1379,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
}
/* one more segment read ok */
state->edid.segments = segment + 1;
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
/* Request next EDID segment */
v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
@@ -1389,7 +1399,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
ed.present = true;
ed.segment = 0;
state->edid_detect_counter++;
- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
return ed.present;
}
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 15a4ebc2844d..51dbef2f9a48 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2334,6 +2334,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
return 0;
}
+static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
+ unsigned int *width_mask,
+ unsigned int *width_bias)
+{
+ if (fmt->flags & FORMAT_FLAGS_PLANAR) {
+ *width_mask = ~15; /* width must be a multiple of 16 pixels */
+ *width_bias = 8; /* nearest */
+ } else {
+ *width_mask = ~3; /* width must be a multiple of 4 pixels */
+ *width_bias = 2; /* nearest */
+ }
+}
+
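A worked example of the rounding this enables, assuming limit_scaled_size_lock() applies the mask/bias as (width + bias) & mask, which is what the old inline constants (~3, 2) expressed:

	/* requested width 703 */
	unsigned int w;

	/* planar format: mask = ~15, bias = 8 */
	w = (703 + 8) & ~15;	/* = 704, nearest multiple of 16 */

	/* packed format: mask = ~3, bias = 2 */
	w = (703 + 2) & ~3;	/* = 704, nearest multiple of 4  */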
static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
@@ -2343,6 +2356,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
enum v4l2_field field;
__s32 width, height;
__s32 height2;
+ unsigned int width_mask, width_bias;
int rc;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -2375,9 +2389,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
width = f->fmt.pix.width;
height = f->fmt.pix.height;
+ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
rc = limit_scaled_size_lock(fh, &width, &height, field,
- /* width_mask: 4 pixels */ ~3,
- /* width_bias: nearest */ 2,
+ width_mask, width_bias,
/* adjust_size */ 1,
/* adjust_crop */ 0);
if (0 != rc)
@@ -2410,6 +2424,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
__s32 width, height;
+ unsigned int width_mask, width_bias;
enum v4l2_field field;
retval = bttv_switch_type(fh, f->type);
@@ -2424,9 +2439,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
height = f->fmt.pix.height;
field = f->fmt.pix.field;
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
- /* width_mask: 4 pixels */ ~3,
- /* width_bias: nearest */ 2,
+ width_mask, width_bias,
/* adjust_size */ 1,
/* adjust_crop */ 1);
if (0 != retval)
@@ -2434,8 +2450,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = field;
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
-
 /* update our state information */
fh->fmt = fmt;
fh->cap.field = f->fmt.pix.field;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 518086c7aed5..15e56c07b217 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1219,10 +1219,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.height = dev->height;
f->fmt.pix.field = dev->field;
f->fmt.pix.pixelformat = dev->fmt->fourcc;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * dev->fmt->depth) >> 3;
+ if (dev->fmt->planar)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * dev->fmt->depth) / 8;
f->fmt.pix.sizeimage =
- f->fmt.pix.height * f->fmt.pix.bytesperline;
+ (f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
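A worked example with assumed values shows why planar formats are special-cased: for a 720x576 capture, bytesperline now describes only the luma stride, while sizeimage still covers all planes.

	unsigned int width = 720, height = 576;

	/* planar YUV 4:2:0, depth == 12 */
	unsigned int bpl_planar = width;			/* 720    */
	unsigned int img_planar = width * height * 12 / 8;	/* 622080 */

	/* packed YUYV, depth == 16 */
	unsigned int bpl_packed = width * 16 / 8;		/* 1440   */
	unsigned int img_packed = height * width * 16 / 8;	/* 829440 */

The old code reported 1080 bytes per line for the planar case, which userspace would misread as the luma stride.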
@@ -1298,10 +1301,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
if (f->fmt.pix.height > maxh)
f->fmt.pix.height = maxh;
f->fmt.pix.width &= ~0x03;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fmt->depth) >> 3;
+ if (fmt->planar)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fmt->depth) / 8;
f->fmt.pix.sizeimage =
- f->fmt.pix.height * f->fmt.pix.bytesperline;
+ (f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 654e964f84a2..d76511c1c1e3 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
/* Calculate bytesused field */
if (dst_buf->sequence == 0) {
- vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
ctx->vpu_header_size[0] +
ctx->vpu_header_size[1] +
ctx->vpu_header_size[2]);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 15516a6e3a39..323aad3c89de 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -2119,14 +2119,12 @@ static int coda_probe(struct platform_device *pdev)
pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
- if (of_id) {
+ if (of_id)
dev->devtype = of_id->data;
- } else if (pdev_id) {
+ else if (pdev_id)
dev->devtype = &coda_devdata[pdev_id->driver_data];
- } else {
- ret = -EINVAL;
- goto err_v4l2_register;
- }
+ else
+ return -EINVAL;
spin_lock_init(&dev->irqlock);
INIT_LIST_HEAD(&dev->instances);
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
index 6b43efce453f..03a61407aef8 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
@@ -788,8 +788,9 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
rc = msm_dma_map_sg_lazy(iommu_cb_set.cb_info[idx].dev, table->sgl,
table->nents, dma_dir, buf);
- if (!rc) {
+ if (rc != table->nents) {
pr_err("Error: msm_dma_map_sg_lazy failed\n");
+ rc = -ENOMEM;
goto err_unmap_sg;
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
index 84bd3fe3fb85..c12e95d3310a 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
@@ -756,9 +756,14 @@ static int32_t msm_flash_get_pmic_source_info(
"qcom,current",
&fctrl->flash_op_current[i]);
if (rc < 0) {
- pr_err("current: read failed\n");
- of_node_put(flash_src_node);
- continue;
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,current-ma",
+ &fctrl->flash_op_current[i]);
+ if (rc < 0) {
+ pr_err("current: read failed\n");
+ of_node_put(flash_src_node);
+ continue;
+ }
}
/* Read max-current */
@@ -776,8 +781,13 @@ static int32_t msm_flash_get_pmic_source_info(
"qcom,duration",
&fctrl->flash_max_duration[i]);
if (rc < 0) {
- pr_err("duration: read failed\n");
- of_node_put(flash_src_node);
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,duration-ms",
+ &fctrl->flash_max_duration[i]);
+ if (rc < 0) {
+ pr_err("duration: read failed\n");
+ of_node_put(flash_src_node);
+ }
/* Non-fatal; this property is optional */
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 9bf5738d838c..02b83c969958 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -283,6 +283,49 @@ static int32_t msm_sensor_fill_actuator_subdevid_by_name(
return rc;
}
+static int32_t msm_sensor_fill_flash_subdevid_by_name(
+ struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ struct device_node *src_node = NULL;
+ uint32_t val = 0, flash_name_len;
+ int32_t *flash_subdev_id;
+ struct msm_sensor_info_t *sensor_info;
+ struct device_node *of_node = s_ctrl->of_node;
+
+ if (!of_node || !s_ctrl->sensordata->flash_name)
+ return -EINVAL;
+
+ sensor_info = s_ctrl->sensordata->sensor_info;
+ flash_subdev_id = &sensor_info->subdev_id[SUB_MODULE_LED_FLASH];
+
+ *flash_subdev_id = -1;
+
+ flash_name_len = strlen(s_ctrl->sensordata->flash_name);
+ if (flash_name_len >= MAX_SENSOR_NAME)
+ return -EINVAL;
+
+ if (flash_name_len == 0)
+ return 0;
+
+ src_node = of_parse_phandle(of_node, "qcom,led-flash-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,flash cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ *flash_subdev_id = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+ return rc;
+}
+
static int32_t msm_sensor_fill_ois_subdevid_by_name(
struct msm_sensor_ctrl_t *s_ctrl)
{
@@ -872,6 +915,7 @@ CSID_TG:
s_ctrl->sensordata->eeprom_name = slave_info->eeprom_name;
s_ctrl->sensordata->actuator_name = slave_info->actuator_name;
s_ctrl->sensordata->ois_name = slave_info->ois_name;
+ s_ctrl->sensordata->flash_name = slave_info->flash_name;
/*
* Update eeporm subdevice Id by input eeprom name
*/
@@ -895,6 +939,12 @@ CSID_TG:
goto free_camera_info;
}
+ rc = msm_sensor_fill_flash_subdevid_by_name(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_camera_info;
+ }
+
/* Power up and probe sensor */
rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
if (rc < 0) {
@@ -911,14 +961,6 @@ CSID_TG:
s_ctrl->is_probe_succeed = 1;
/*
- * Update the subdevice id of flash-src based on availability in kernel.
- */
- if (strlen(slave_info->flash_name) == 0) {
- s_ctrl->sensordata->sensor_info->
- subdev_id[SUB_MODULE_LED_FLASH] = -1;
- }
-
- /*
* Create /dev/videoX node, comment for now until dummy /dev/videoX
* node is created and used by HAL
*/
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index c141797bcd3c..c8d14a6d253b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -77,6 +77,142 @@
#define SDE_ROTREG_READ(base, off) \
readl_relaxed(base + (off))
+/* Invalid software timestamp value for initialization */
+#define SDE_REGDMA_SWTS_INVALID (~0)
+
+/**
+ * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
+ * @ts_curr: current software timestamp
+ * @ts_prev: previous software timestamp
+ * @return: the amount ts_curr is ahead of ts_prev
+ */
+static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
+{
+ u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
+
+ return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
+}
+
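The subtract-and-sign-extend trick keeps the comparison correct across timestamp wrap-around. As a worked example, assume for illustration a 16-bit timestamp field (SDE_REGDMA_SWTS_MASK == 0xFFFF, SDE_REGDMA_SWTS_SHIFT == 16; the real values may differ):

/*
 *   ts_curr = 0x0002, ts_prev = 0xFFFD:
 *     diff = (0x0002 - 0xFFFD) & 0xFFFF = 0x0005
 *     sign_extend32(0x0005, 15)         = +5   (curr is 5 ahead)
 *
 *   ts_curr = 0xFFFD, ts_prev = 0x0002:
 *     diff = (0xFFFD - 0x0002) & 0xFFFF = 0xFFFB
 *     sign_extend32(0xFFFB, 15)         = -5   (curr is 5 behind)
 */

sde_hw_rotator_pending_swts() below then treats a positive result as "still pending".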
+/**
+ * sde_hw_rotator_pending_swts - Check if the given context is still pending
+ * @rot: Pointer to hw rotator
+ * @ctx: Pointer to rotator context
+ * @pswts: Pointer to returned reference software timestamp, optional
+ * @return: true if context has pending requests
+ */
+static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
+ struct sde_hw_rotator_context *ctx, u32 *pswts)
+{
+ u32 swts;
+ int ts_diff;
+ bool pending;
+
+ if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
+ swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
+ else
+ swts = ctx->last_regdma_timestamp;
+
+ if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
+ swts >>= SDE_REGDMA_SWTS_SHIFT;
+
+ swts &= SDE_REGDMA_SWTS_MASK;
+
+ ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);
+
+ if (pswts)
+ *pswts = swts;
+
+ pending = (ts_diff > 0) ? true : false;
+
+ SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
+ ctx->timestamp, ctx->q_id, swts, pending);
+ return pending;
+}
+
+/**
+ * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
+ * Also, clear rotator/regdma irq status.
+ * @rot: Pointer to hw rotator
+ */
+static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
+{
+ SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
+ atomic_read(&rot->irq_enabled));
+
+ if (!atomic_read(&rot->irq_enabled)) {
+ if (rot->mode == ROT_REGDMA_OFF)
+ SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
+ ROT_DONE_MASK);
+ else
+ SDE_ROTREG_WRITE(rot->mdss_base,
+ REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);
+
+ enable_irq(rot->irq_num);
+ }
+ atomic_inc(&rot->irq_enabled);
+}
+
+/**
+ * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
+ * Also, clear rotator/regdma irq enable masks.
+ * @rot: Pointer to hw rotator
+ */
+static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
+{
+ SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
+ atomic_read(&rot->irq_enabled));
+
+ if (!atomic_read(&rot->irq_enabled)) {
+ SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
+ return;
+ }
+
+ if (!atomic_dec_return(&rot->irq_enabled)) {
+ if (rot->mode == ROT_REGDMA_OFF)
+ SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
+ else
+ SDE_ROTREG_WRITE(rot->mdss_base,
+ REGDMA_CSR_REGDMA_INT_EN, 0);
+ /* disable irq after last pending irq is handled, if any */
+ synchronize_irq(rot->irq_num);
+ disable_irq_nosync(rot->irq_num);
+ }
+}
+
+/**
+ * sde_hw_rotator_dump_status - Dump hw rotator status on error
+ * @rot: Pointer to hw rotator
+ */
+static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
+{
+ SDEROT_ERR(
+ "op_mode = %x, int_en = %x, int_status = %x\n",
+ SDE_ROTREG_READ(rot->mdss_base,
+ REGDMA_CSR_REGDMA_OP_MODE),
+ SDE_ROTREG_READ(rot->mdss_base,
+ REGDMA_CSR_REGDMA_INT_EN),
+ SDE_ROTREG_READ(rot->mdss_base,
+ REGDMA_CSR_REGDMA_INT_STATUS));
+
+ SDEROT_ERR(
+ "ts = %x, q0_status = %x, q1_status = %x, block_status = %x\n",
+ SDE_ROTREG_READ(rot->mdss_base,
+ REGDMA_TIMESTAMP_REG),
+ SDE_ROTREG_READ(rot->mdss_base,
+ REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
+ SDE_ROTREG_READ(rot->mdss_base,
+ REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
+ SDE_ROTREG_READ(rot->mdss_base,
+ REGDMA_CSR_REGDMA_BLOCK_STATUS));
+
+ SDEROT_ERR(
+ "invalid_cmd_offset = %x, fsm_state = %x\n",
+ SDE_ROTREG_READ(rot->mdss_base,
+ REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
+ SDE_ROTREG_READ(rot->mdss_base,
+ REGDMA_CSR_REGDMA_FSM_STATE));
+}
+
/**
* sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
* on provided session_id. Each rotator has a different session_id.
@@ -476,7 +612,7 @@ static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
reinit_completion(&ctx->rot_comp);
- enable_irq(rot->irq_num);
+ sde_hw_rotator_enable_irq(rot);
}
SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
@@ -572,9 +708,6 @@ static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
wrptr = sde_hw_rotator_get_regdma_segment(ctx);
- if (rot->irq_num >= 0)
- reinit_completion(&ctx->regdma_comp);
-
/*
* Last ROT command must be ROT_START before REGDMA start
*/
@@ -676,7 +809,7 @@ static u32 sde_hw_rotator_wait_done_no_regdma(
SDEROT_WARN(
"Timeout waiting, but rotator job is done!!\n");
- disable_irq_nosync(rot->irq_num);
+ sde_hw_rotator_disable_irq(rot);
}
spin_unlock_irqrestore(&rot->rotisr_lock, flags);
} else {
@@ -719,13 +852,15 @@ static u32 sde_hw_rotator_wait_done_regdma(
u32 last_isr;
u32 last_ts;
u32 int_id;
+ u32 swts;
u32 sts = 0;
unsigned long flags;
if (rot->irq_num >= 0) {
SDEROT_DBG("Wait for REGDMA completion, ctx:%p, ts:%X\n",
ctx, ctx->timestamp);
- rc = wait_for_completion_timeout(&ctx->regdma_comp,
+ rc = wait_event_timeout(ctx->regdma_waitq,
+ !sde_hw_rotator_pending_swts(rot, ctx, &swts),
KOFF_TIMEOUT);
spin_lock_irqsave(&rot->rotisr_lock, flags);
@@ -738,11 +873,12 @@ static u32 sde_hw_rotator_wait_done_regdma(
status, int_id, last_ts);
if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
+ bool pending;
+
+ pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
SDEROT_ERR(
- "Timeout wait for regdma interrupt status, ts:%X\n",
- ctx->timestamp);
- SDEROT_ERR("last_isr:0x%X, last_ts:0x%X, rc=%d\n",
- last_isr, last_ts, rc);
+ "Timeout wait for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
+ ctx->timestamp, swts, pending);
if (status & REGDMA_WATCHDOG_INT)
SDEROT_ERR("REGDMA watchdog interrupt\n");
@@ -753,24 +889,13 @@ static u32 sde_hw_rotator_wait_done_regdma(
else if (status & REGDMA_INVALID_CMD)
SDEROT_ERR("REGDMA invalid command\n");
+ sde_hw_rotator_dump_status(rot);
status = ROT_ERROR_BIT;
- } else if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
- /* Got to match exactly with interrupt ID */
- int_id = REGDMA_QUEUE0_INT0 << int_id;
-
- SDE_ROTREG_WRITE(rot->mdss_base,
- REGDMA_CSR_REGDMA_INT_CLEAR,
- int_id);
-
- status = 0;
- } else if (queue_id == ROT_QUEUE_LOW_PRIORITY) {
- /* Matching interrupt ID */
- int_id = REGDMA_QUEUE1_INT0 << int_id;
-
- SDE_ROTREG_WRITE(rot->mdss_base,
- REGDMA_CSR_REGDMA_INT_CLEAR,
- int_id);
-
+ } else {
+ if (rc == 1)
+ SDEROT_WARN(
+ "REGDMA done but no irq, ts:0x%X/0x%X\n",
+ ctx->timestamp, swts);
status = 0;
}
@@ -1007,7 +1132,7 @@ static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
}
if (resinfo->rot->irq_num >= 0)
- enable_irq(resinfo->rot->irq_num);
+ sde_hw_rotator_enable_irq(resinfo->rot);
SDEROT_DBG("New rotator resource:%p, priority:%d\n",
resinfo, wb_id);
@@ -1036,7 +1161,7 @@ static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
hw->pending_count);
if (resinfo->rot->irq_num >= 0)
- disable_irq(resinfo->rot->irq_num);
+ sde_hw_rotator_disable_irq(resinfo->rot);
devm_kfree(&mgr->pdev->dev, resinfo);
}
@@ -1078,8 +1203,10 @@ static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
sde_hw_rotator_get_regdma_ctxidx(ctx));
+ ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
+
init_completion(&ctx->rot_comp);
- init_completion(&ctx->regdma_comp);
+ init_waitqueue_head(&ctx->regdma_waitq);
/* Store rotator context for lookup purpose */
sde_hw_rotator_put_ctx(ctx);
@@ -1419,7 +1546,7 @@ static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
if (isr & ROT_DONE_MASK) {
if (rot->irq_num >= 0)
- disable_irq_nosync(rot->irq_num);
+ sde_hw_rotator_disable_irq(rot);
SDEROT_DBG("Notify rotator complete\n");
/* Normal rotator only 1 session, no need to lookup */
@@ -1456,6 +1583,8 @@ static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
u32 q_id;
isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
+ /* acknowledge interrupt before reading latest timestamp */
+ SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
SDEROT_DBG("intr_status = %8.8x, sw_TS:%X\n", isr, ts);
@@ -1480,30 +1609,23 @@ static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
}
ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
- WARN_ON(ctx == NULL);
/*
* Wake up all waiting context from the current and previous
* SW Timestamp.
*/
- do {
+ while (ctx &&
+ sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
ctx->last_regdma_isr_status = isr;
ctx->last_regdma_timestamp = ts;
SDEROT_DBG(
"regdma complete: ctx:%p, ts:%X\n", ctx, ts);
- complete_all(&ctx->regdma_comp);
+ wake_up_all(&ctx->regdma_waitq);
ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
ctx = rot->rotCtx[q_id]
[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
- } while (ctx && (ctx->last_regdma_timestamp == 0));
-
- /*
- * Clear corresponding regdma interrupt because it is a level
- * interrupt
- */
- SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
- isr);
+ }
spin_unlock(&rot->rotisr_lock);
ret = IRQ_HANDLED;
@@ -1526,16 +1648,13 @@ static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
if (ctx && ctx->last_regdma_isr_status == 0) {
ctx->last_regdma_isr_status = isr;
ctx->last_regdma_timestamp = ts;
- complete_all(&ctx->regdma_comp);
+ wake_up_all(&ctx->regdma_waitq);
SDEROT_DBG("Wakeup rotctx[%d][%d]:%p\n",
i, j, ctx);
}
}
}
- SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
- isr);
-
spin_unlock(&rot->rotisr_lock);
ret = IRQ_HANDLED;
}
@@ -1810,6 +1929,7 @@ int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
disable_irq(rot->irq_num);
}
}
+ atomic_set(&rot->irq_enabled, 0);
setup_rotator_ops(&rot->ops, rot->mode);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index 610caf16c764..272b15e01e8b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -202,7 +202,7 @@ struct sde_hw_rotator_context {
u32 *regdma_wrptr;
u32 timestamp;
struct completion rot_comp;
- struct completion regdma_comp;
+ wait_queue_head_t regdma_waitq;
struct sde_dbg_buf src_dbgbuf;
struct sde_dbg_buf dst_dbgbuf;
u32 last_regdma_isr_status;
@@ -253,6 +253,7 @@ struct sde_hw_rotator {
/* logical interrupt number */
int irq_num;
+ atomic_t irq_enabled;
/* internal ION memory for SW timestamp */
struct ion_client *iclient;
@@ -260,8 +261,6 @@ struct sde_hw_rotator {
void *swts_buffer;
u32 highest_bank;
- struct completion rot_comp;
- struct completion regdma_comp;
spinlock_t rotctx_lock;
spinlock_t rotisr_lock;
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 4a26ab920016..9b9b74436d88 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -672,6 +672,15 @@ static int get_hfi_extradata_index(enum hal_extradata_id index)
case HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
ret = HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA;
break;
+ case HAL_EXTRADATA_PQ_INFO:
+ ret = HFI_PROPERTY_PARAM_VENC_OVERRIDE_QP_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_VUI_DISPLAY_INFO:
+ ret = HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_VPX_COLORSPACE:
+ ret = HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA;
+ break;
default:
dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
break;
@@ -2124,6 +2133,26 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
break;
}
+ case HAL_PARAM_VENC_VIDEO_SIGNAL_INFO:
+ {
+ struct hal_video_signal_info *hal = pdata;
+ struct hfi_video_signal_metadata *signal_info =
+ (struct hfi_video_signal_metadata *)
+ &pkt->rg_property_data[1];
+
+ signal_info->enable = true;
+ signal_info->video_format = MSM_VIDC_NTSC;
+ signal_info->video_full_range = hal->full_range;
+ signal_info->color_description = MSM_VIDC_COLOR_DESC_PRESENT;
+ signal_info->color_primaries = hal->color_space;
+ signal_info->transfer_characteristics = hal->transfer_chars;
+ signal_info->matrix_coeffs = hal->matrix_coeffs;
+
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO;
+ pkt->size += sizeof(u32) + sizeof(*signal_info);
+ break;
+ }
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
case HAL_CONFIG_BUFFER_REQUIREMENTS:
case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index d8c6e30204d1..96fefea39241 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -247,7 +247,7 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = {
.name = "Extradata Type",
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
- .maximum = V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI,
+ .maximum = V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE,
.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
.menu_skip_mask = ~(
(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -273,7 +273,10 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = {
(1 << V4L2_MPEG_VIDC_EXTRADATA_VQZIP_SEI) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI) |
- (1 << V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI)
+ (1 <<
+ V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE)
),
.qmenu = mpeg_video_vidc_extradata,
},
@@ -1086,13 +1089,6 @@ int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
return -EINVAL;
}
- rc = msm_comm_try_get_bufreqs(inst);
- if (rc) {
- dprintk(VIDC_ERR, "Getting buffer requirements failed: %d\n",
- rc);
- return rc;
- }
-
hdev = inst->core->device;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
fmt = inst->fmts[CAPTURE_PORT];
@@ -1135,13 +1131,6 @@ int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
stride = inst->prop.width[CAPTURE_PORT];
scanlines = inst->prop.height[CAPTURE_PORT];
- rc = msm_comm_try_get_bufreqs(inst);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s: Failed : Buffer requirements\n", __func__);
- goto exit;
- }
-
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
plane_sizes = &inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[0];
for (i = 0; i < fmt->num_planes; ++i) {
@@ -1176,10 +1165,10 @@ int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
scanlines = VENUS_Y_SCANLINES(color_format,
inst->prop.height[CAPTURE_PORT]);
- bufreq = get_buff_req_buffer(inst,
- msm_comm_get_hal_output_buffer(inst));
f->fmt.pix_mp.plane_fmt[0].sizeimage =
- bufreq ? bufreq->buffer_size : 0;
+ fmt->get_frame_size(0,
+ inst->prop.height[CAPTURE_PORT],
+ inst->prop.width[CAPTURE_PORT]);
extra_idx = EXTRADATA_IDX(fmt->num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
@@ -1316,28 +1305,6 @@ static int update_output_buffer_size(struct msm_vidc_inst *inst,
goto exit;
}
- /* Query buffer requirements from firmware */
- rc = msm_comm_try_get_bufreqs(inst);
- if (rc)
- dprintk(VIDC_WARN,
- "Failed to get buf req, %d\n", rc);
-
- /* Read back updated firmware size */
- for (i = 0; i < num_planes; ++i) {
- enum hal_buffer type = msm_comm_get_hal_output_buffer(inst);
-
- if (EXTRADATA_IDX(num_planes) &&
- i == EXTRADATA_IDX(num_planes)) {
- type = HAL_BUFFER_EXTRADATA_OUTPUT;
- }
-
- bufreq = get_buff_req_buffer(inst, type);
- f->fmt.pix_mp.plane_fmt[i].sizeimage = bufreq ?
- bufreq->buffer_size : 0;
- dprintk(VIDC_DBG,
- "updated buffer size for plane[%d] = %d\n",
- i, f->fmt.pix_mp.plane_fmt[i].sizeimage);
- }
exit:
return rc;
}
@@ -1377,10 +1344,12 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
{
struct msm_vidc_format *fmt = NULL;
struct hal_frame_size frame_sz;
+ unsigned int extra_idx = 0;
int rc = 0;
int ret = 0;
int i;
int max_input_size = 0;
+ struct hal_buffer_requirements *bufreq;
if (!inst || !f) {
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -1425,23 +1394,22 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
HAL_PARAM_FRAME_SIZE, &frame_sz);
}
- ret = ret || msm_comm_try_get_bufreqs(inst);
- if (ret) {
- for (i = 0; i < fmt->num_planes; ++i) {
- f->fmt.pix_mp.plane_fmt[i].sizeimage =
- get_frame_size(inst, fmt, f->type, i);
- }
- } else {
- rc = update_output_buffer_size(inst, f,
- fmt->num_planes);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s - failed to update buffer size: %d\n",
- __func__, rc);
- goto err_invalid_fmt;
- }
+ f->fmt.pix_mp.plane_fmt[0].sizeimage =
+ fmt->get_frame_size(0,
+ f->fmt.pix_mp.height, f->fmt.pix_mp.width);
+
+ extra_idx = EXTRADATA_IDX(fmt->num_planes);
+ if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_EXTRADATA_OUTPUT);
+ f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+ bufreq ? bufreq->buffer_size : 0;
}
+ for (i = 0; i < fmt->num_planes; ++i)
+ inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
+ f->fmt.pix_mp.plane_fmt[i].sizeimage;
+
f->fmt.pix_mp.num_planes = fmt->num_planes;
for (i = 0; i < fmt->num_planes; ++i) {
inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
@@ -1619,6 +1587,13 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
return -EINVAL;
}
+ rc = msm_comm_try_get_bufreqs(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: Failed : Buffer requirements\n", __func__);
+ goto exit;
+ }
+
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
*num_planes = inst->fmts[OUTPUT_PORT]->num_planes;
@@ -1693,7 +1668,7 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
inst->buff_req.buffer[1].buffer_count_actual,
inst->buff_req.buffer[1].buffer_size,
inst->buff_req.buffer[1].buffer_alignment);
- sizes[0] = bufreq->buffer_size;
+ sizes[0] = inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[0];
/*
* Set actual buffer count to firmware for DPB buffers.
@@ -1734,6 +1709,7 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
rc = -EINVAL;
break;
}
+exit:
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 5c7408740e95..c08084a54e86 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -725,7 +725,7 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.name = "Extradata Type",
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
- .maximum = V4L2_MPEG_VIDC_EXTRADATA_ROI_QP,
+ .maximum = V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO,
.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
.menu_skip_mask = ~(
(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -747,7 +747,8 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
(1 << V4L2_MPEG_VIDC_EXTRADATA_LTR) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
- (1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP)
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
),
.qmenu = mpeg_video_vidc_extradata,
},
@@ -1241,6 +1242,46 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.default_value = V4L2_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8_ENABLE,
.step = 1,
},
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE,
+ .name = "Set Color space",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = MSM_VIDC_BT709_5,
+ .maximum = MSM_VIDC_BT2020,
+ .default_value = MSM_VIDC_BT601_6_625,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE,
+ .name = "Set Color space range",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE_DISABLE,
+ .maximum = V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE_ENABLE,
+ .default_value = V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE_DISABLE,
+ .step = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS,
+ .name = "Set Color space transfer characterstics",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = MSM_VIDC_TRANSFER_BT709_5,
+ .maximum = MSM_VIDC_TRANSFER_BT_2020_12,
+ .default_value = MSM_VIDC_TRANSFER_601_6_625,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS,
+ .name = "Set Color space matrix coefficients",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = MSM_VIDC_MATRIX_BT_709_5,
+ .maximum = MSM_VIDC_MATRIX_BT_2020_CONST,
+ .default_value = MSM_VIDC_MATRIX_601_6_625,
+ .step = 1,
+ .qmenu = NULL,
+ },
+
};
#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -1512,6 +1553,7 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
+ case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
*num_planes = *num_planes + 1;
break;
default:
@@ -2103,6 +2145,7 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
int baselayerid = 0;
int frameqp = 0;
int pic_order_cnt = 0;
+ struct hal_video_signal_info signal_info = {0};
if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -3091,6 +3134,64 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
pdata = &pic_order_cnt;
break;
}
+ case V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE:
+ {
+ signal_info.color_space = ctrl->val;
+ temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE);
+ signal_info.full_range = temp_ctrl ? temp_ctrl->val : 0;
+ temp_ctrl =
+ TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS);
+ signal_info.transfer_chars = temp_ctrl ? temp_ctrl->val : 0;
+ temp_ctrl =
+ TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS);
+ signal_info.matrix_coeffs = temp_ctrl ? temp_ctrl->val : 0;
+ property_id = HAL_PARAM_VENC_VIDEO_SIGNAL_INFO;
+ pdata = &signal_info;
+ break;
+ }
+ case V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE:
+ {
+ signal_info.full_range = ctrl->val;
+ temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
+ signal_info.color_space = temp_ctrl ? temp_ctrl->val : 0;
+ temp_ctrl =
+ TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS);
+ signal_info.transfer_chars = temp_ctrl ? temp_ctrl->val : 0;
+ temp_ctrl =
+ TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS);
+ signal_info.matrix_coeffs = temp_ctrl ? temp_ctrl->val : 0;
+ property_id = HAL_PARAM_VENC_VIDEO_SIGNAL_INFO;
+ pdata = &signal_info;
+ break;
+ }
+ case V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS:
+ {
+ signal_info.transfer_chars = ctrl->val;
+ temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE);
+ signal_info.full_range = temp_ctrl ? temp_ctrl->val : 0;
+ temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
+ signal_info.color_space = temp_ctrl ? temp_ctrl->val : 0;
+ temp_ctrl =
+ TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS);
+ signal_info.matrix_coeffs = temp_ctrl ? temp_ctrl->val : 0;
+ property_id = HAL_PARAM_VENC_VIDEO_SIGNAL_INFO;
+ pdata = &signal_info;
+ break;
+ }
+ case V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS:
+ {
+ signal_info.matrix_coeffs = ctrl->val;
+ temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE);
+ signal_info.full_range = temp_ctrl ? temp_ctrl->val : 0;
+ temp_ctrl =
+ TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS);
+ signal_info.transfer_chars = temp_ctrl ? temp_ctrl->val : 0;
+ temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
+ signal_info.color_space = temp_ctrl ? temp_ctrl->val : 0;
+ property_id = HAL_PARAM_VENC_VIDEO_SIGNAL_INFO;
+ pdata = &signal_info;
+ break;
+ }
case V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC:
if (ctrl->val == V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_ENABLE) {
rc = msm_venc_set_csc(inst);
@@ -3514,10 +3615,6 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
}
hdev = inst->core->device;
- if (msm_vidc_vpe_csc_601_to_709) {
- msm_venc_set_csc(inst);
- }
-
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
ARRAY_SIZE(venc_formats), f->fmt.pix_mp.pixelformat,
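Each of the four new colour controls handled in try_set_ctrl() above writes its own field of hal_video_signal_info and pulls the other three from their current control values, so the firmware always receives a complete HAL_PARAM_VENC_VIDEO_SIGNAL_INFO payload no matter which control userspace sets last. A condensed sketch of that aggregation; the helper name and the get_ctrl_val() accessor are illustrative stand-ins for TRY_GET_CTRL():

/* struct hal_video_signal_info is the one added to vidc_hfi_api.h below */
static void demo_fill_signal_info(struct hal_video_signal_info *info,
				  u32 changed_id, s32 new_val,
				  s32 (*get_ctrl_val)(u32 id))
{
	/* start from the cached values of all four controls */
	info->color_space    = get_ctrl_val(V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
	info->full_range     = get_ctrl_val(V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE);
	info->transfer_chars = get_ctrl_val(V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS);
	info->matrix_coeffs  = get_ctrl_val(V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS);

	/* the control currently being applied overrides its cached value */
	switch (changed_id) {
	case V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE:
		info->color_space = new_val;
		break;
	case V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE:
		info->full_range = new_val;
		break;
	case V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS:
		info->transfer_chars = new_val;
		break;
	case V4L2_CID_MPEG_VIDC_VIDEO_MATRIX_COEFFS:
		info->matrix_coeffs = new_val;
		break;
	}
}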
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 05bfabce2bb2..1f071ba36ec1 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -76,6 +76,9 @@ const char *const mpeg_video_vidc_extradata[] = {
"Extradata output crop",
"Extradata display colour SEI",
"Extradata light level SEI",
+ "Extradata PQ Info",
+ "Extradata display VUI",
+ "Extradata vpx color space",
};
struct getprop_buf {
@@ -86,6 +89,7 @@ struct getprop_buf {
static void msm_comm_generate_session_error(struct msm_vidc_inst *inst);
static void msm_comm_generate_sys_error(struct msm_vidc_inst *inst);
static void handle_session_error(enum hal_command_response cmd, void *data);
+static void msm_vidc_print_running_insts(struct msm_vidc_core *core);
bool msm_comm_turbo_session(struct msm_vidc_inst *inst)
{
@@ -208,7 +212,8 @@ int msm_comm_ctrl_init(struct msm_vidc_inst *inst,
}
if (!ctrl) {
- dprintk(VIDC_ERR, "%s - invalid ctrl\n", __func__);
+ dprintk(VIDC_ERR, "%s - invalid ctrl %s\n", __func__,
+ drv_ctrls[idx].name);
return -EINVAL;
}
@@ -879,11 +884,13 @@ static int wait_for_sess_signal_receipt(struct msm_vidc_inst *inst,
enum hal_command_response cmd)
{
int rc = 0;
+ struct hfi_device *hdev;
if (!IS_HAL_SESSION_CMD(cmd)) {
dprintk(VIDC_ERR, "Invalid inst cmd response: %d\n", cmd);
return -EINVAL;
}
+ hdev = (struct hfi_device *)(inst->core->device);
rc = wait_for_completion_timeout(
&inst->completions[SESSION_MSG_INDEX(cmd)],
msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
@@ -891,7 +898,11 @@ static int wait_for_sess_signal_receipt(struct msm_vidc_inst *inst,
dprintk(VIDC_ERR, "Wait interrupted or timed out: %d\n",
SESSION_MSG_INDEX(cmd));
msm_comm_kill_session(inst);
- BUG_ON(msm_vidc_debug_timeout);
+ call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+ dprintk(VIDC_ERR,
+ "sess resp timeout can potentially crash the system\n");
+
+ BUG_ON(inst->core->resources.debug_timeout);
rc = -EIO;
} else {
rc = 0;
@@ -1623,6 +1634,13 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
core->state = VIDC_CORE_UNINIT;
}
mutex_unlock(&core->lock);
+
+ msm_vidc_print_running_insts(core);
+ call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+ dprintk(VIDC_ERR,
+ "SYS_ERROR can potentially crash the system\n");
+
+ BUG_ON(core->resources.debug_timeout);
}
void msm_comm_session_clean(struct msm_vidc_inst *inst)
@@ -1704,6 +1722,19 @@ static struct vb2_buffer *get_vb_from_device_addr(struct buf_queue *bufq,
return vb;
}
+static void msm_vidc_try_suspend(struct msm_vidc_inst *inst)
+{
+ bool batch_mode;
+
+ batch_mode = msm_comm_g_ctrl_for_id(inst, V4L2_CID_VIDC_QBUF_MODE)
+ == V4L2_VIDC_QBUF_BATCHED;
+ if (batch_mode) {
+ dprintk(VIDC_DBG,
+ "Trying to suspend Venus after finishing Batch\n");
+ msm_comm_suspend(inst->core->id);
+ }
+}
+
static void handle_ebd(enum hal_command_response cmd, void *data)
{
struct msm_vidc_cb_data_done *response = data;
@@ -1775,6 +1806,8 @@ static void handle_ebd(enum hal_command_response cmd, void *data)
msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_EBD);
}
+ msm_vidc_try_suspend(inst);
+
put_inst(inst);
}
@@ -2074,6 +2107,7 @@ static void handle_fbd(enum hal_command_response cmd, void *data)
msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD);
}
+ msm_vidc_try_suspend(inst);
err_handle_fbd:
put_inst(inst);
}
@@ -2393,7 +2427,11 @@ static int msm_comm_session_abort(struct msm_vidc_inst *inst)
dprintk(VIDC_ERR,
"%s: Wait interrupted or timed out [%p]: %d\n",
__func__, inst, abort_completion);
- BUG_ON(msm_vidc_debug_timeout);
+ call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+ dprintk(VIDC_ERR,
+ "ABORT timeout can potentially crash the system\n");
+
+ BUG_ON(inst->core->resources.debug_timeout);
rc = -EBUSY;
} else {
rc = 0;
@@ -2463,6 +2501,7 @@ void msm_comm_handle_thermal_event()
int msm_comm_check_core_init(struct msm_vidc_core *core)
{
int rc = 0;
+ struct hfi_device *hdev;
mutex_lock(&core->lock);
if (core->state >= VIDC_CORE_INIT_DONE) {
@@ -2471,13 +2510,18 @@ int msm_comm_check_core_init(struct msm_vidc_core *core)
goto exit;
}
dprintk(VIDC_DBG, "Waiting for SYS_INIT_DONE\n");
+ hdev = (struct hfi_device *)core->device;
rc = wait_for_completion_timeout(
&core->completions[SYS_MSG_INDEX(HAL_SYS_INIT_DONE)],
msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
if (!rc) {
dprintk(VIDC_ERR, "%s: Wait interrupted or timed out: %d\n",
__func__, SYS_MSG_INDEX(HAL_SYS_INIT_DONE));
- BUG_ON(msm_vidc_debug_timeout);
+ call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+ dprintk(VIDC_ERR,
+ "SYS_INIT timeout can potentially crash the system\n");
+
+ BUG_ON(core->resources.debug_timeout);
rc = -EIO;
goto exit;
} else {
@@ -3944,7 +3988,11 @@ int msm_comm_try_get_prop(struct msm_vidc_inst *inst, enum hal_property ptype,
SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO));
inst->state = MSM_VIDC_CORE_INVALID;
msm_comm_kill_session(inst);
- BUG_ON(msm_vidc_debug_timeout);
+ call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+ dprintk(VIDC_ERR,
+ "SESS_PROP timeout can potentially crash the system\n");
+
+ BUG_ON(inst->core->resources.debug_timeout);
rc = -ETIMEDOUT;
goto exit;
} else {
@@ -4660,6 +4708,16 @@ enum hal_extradata_id msm_comm_get_hal_extradata_index(
case V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
ret = HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI;
break;
+ case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
+ ret = HAL_EXTRADATA_PQ_INFO;
+ break;
+
+ case V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY:
+ ret = HAL_EXTRADATA_VUI_DISPLAY_INFO;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE:
+ ret = HAL_EXTRADATA_VPX_COLORSPACE;
+ break;
default:
dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
break;
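A recurring pattern in the msm_vidc_common.c hunks above: on a firmware response timeout the driver now flushes the firmware debug queue so its last messages land in the kernel log, prints a loud error, and only crashes via BUG_ON() when the debug_timeout knob (DT property or debugfs) is set. A minimal sketch of that flow, with the helper name illustrative and the HFI op used as in the hunks:

static int demo_wait_for_fw_response(struct msm_vidc_inst *inst,
				     struct completion *done)
{
	struct hfi_device *hdev = inst->core->device;
	int rc;

	rc = wait_for_completion_timeout(done,
			msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
	if (!rc) {
		/* dump whatever the firmware last logged before deciding to crash */
		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
		dprintk(VIDC_ERR, "response timeout, debug_timeout=%d\n",
			inst->core->resources.debug_timeout);
		BUG_ON(inst->core->resources.debug_timeout);
		return -ETIMEDOUT;
	}
	return 0;
}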
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index fb79661dd2d7..7976d6e8a603 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -171,8 +171,6 @@ struct dentry *msm_vidc_debugfs_init_drv(void)
&msm_vidc_fw_low_power_mode) &&
__debugfs_create(u32, "debug_output", &msm_vidc_debug_out) &&
__debugfs_create(u32, "hw_rsp_timeout", &msm_vidc_hw_rsp_timeout) &&
- __debugfs_create(bool, "enable_vpe_csc_601_709",
- &msm_vidc_vpe_csc_601_to_709) &&
__debugfs_create(bool, "sys_idle_indicator",
&msm_vidc_sys_idle_indicator) &&
__debugfs_create(u32, "firmware_unload_delay",
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 4503e46b044d..2bb91ccc6c26 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -1103,6 +1103,11 @@ int read_platform_resources_from_dt(
res->never_unload_fw = of_property_read_bool(pdev->dev.of_node,
"qcom,never-unload-fw");
+ res->debug_timeout = of_property_read_bool(pdev->dev.of_node,
+ "qcom,debug-timeout");
+
+ res->debug_timeout |= msm_vidc_debug_timeout;
+
of_property_read_u32(pdev->dev.of_node,
"qcom,pm-qos-latency-us", &res->pm_qos_latency_us);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 1fc1888e81c6..c61605c7e405 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -185,6 +185,7 @@ struct msm_vidc_platform_resources {
const char *fw_name;
const char *hfi_version;
bool never_unload_fw;
+ bool debug_timeout;
uint32_t pm_qos_latency_us;
uint32_t max_inst_count;
uint32_t max_secure_inst_count;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index ac53b3bcb4ed..50c0eb351d4f 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1297,16 +1297,31 @@ static int venus_hfi_suspend(void *dev)
return -ENOTSUPP;
}
+ dprintk(VIDC_DBG, "Suspending Venus\n");
+ rc = flush_delayed_work(&venus_hfi_pm_work);
+
+ return rc;
+}
+
+static int venus_hfi_flush_debug_queue(void *dev)
+{
+ int rc = 0;
+ struct venus_hfi_device *device = (struct venus_hfi_device *) dev;
+
+ if (!device) {
+ dprintk(VIDC_ERR, "%s invalid device\n", __func__);
+ return -EINVAL;
+ }
+
mutex_lock(&device->lock);
if (device->power_enabled) {
dprintk(VIDC_DBG, "Venus is busy\n");
rc = -EBUSY;
- } else {
- dprintk(VIDC_DBG, "Venus is power suspended\n");
- rc = 0;
+ goto exit;
}
-
+ __flush_debug_queue(device, NULL);
+exit:
mutex_unlock(&device->lock);
return rc;
}
@@ -3322,6 +3337,7 @@ static void __process_sys_error(struct venus_hfi_device *device)
static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
{
bool local_packet = false;
+ enum vidc_msg_prio log_level = VIDC_FW;
if (!device) {
dprintk(VIDC_ERR, "%s: Invalid params\n", __func__);
@@ -3337,6 +3353,13 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
}
local_packet = true;
+
+ /*
+ * A local packet is used when something FATAL occurred.
+ * It is good to print these logs by default.
+ */
+
+ log_level = VIDC_ERR;
}
while (!__iface_dbgq_read(device, packet)) {
@@ -3353,7 +3376,7 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
} else {
struct hfi_msg_sys_debug_packet *pkt =
(struct hfi_msg_sys_debug_packet *) packet;
- dprintk(VIDC_FW, "%s", pkt->rg_msg_data);
+ dprintk(log_level, "%s", pkt->rg_msg_data);
}
}
@@ -4629,6 +4652,7 @@ static void venus_init_hfi_callbacks(struct hfi_device *hdev)
hdev->get_fw_info = venus_hfi_get_fw_info;
hdev->get_core_capabilities = venus_hfi_get_core_capabilities;
hdev->suspend = venus_hfi_suspend;
+ hdev->flush_debug_queue = venus_hfi_flush_debug_queue;
hdev->get_core_clock_rate = venus_hfi_get_core_clock_rate;
hdev->get_default_properties = venus_hfi_get_default_properties;
}
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index cbb4e3569b13..4cbb59d12f92 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -85,6 +85,7 @@
#define HFI_EXTRADATA_STREAM_USERDATA 0x0000000E
#define HFI_EXTRADATA_FRAME_QP 0x0000000F
#define HFI_EXTRADATA_FRAME_BITS_INFO 0x00000010
+#define HFI_EXTRADATA_VPX_COLORSPACE 0x00000014
#define HFI_EXTRADATA_MULTISLICE_INFO 0x7F100000
#define HFI_EXTRADATA_NUM_CONCEALED_MB 0x7F100001
#define HFI_EXTRADATA_INDEX 0x7F100002
@@ -253,6 +254,8 @@ struct hfi_extradata_header {
(HFI_PROPERTY_PARAM_VENC_OX_START + 0x007)
#define HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA \
(HFI_PROPERTY_PARAM_VENC_OX_START + 0x008)
+#define HFI_PROPERTY_PARAM_VENC_OVERRIDE_QP_EXTRADATA \
+ (HFI_PROPERTY_PARAM_VENC_OX_START + 0x009)
#define HFI_PROPERTY_CONFIG_VENC_OX_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index b2231869c499..34ab36a4647b 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -124,6 +124,9 @@ enum hal_extradata_id {
HAL_EXTRADATA_OUTPUT_CROP,
HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI,
HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI,
+ HAL_EXTRADATA_PQ_INFO,
+ HAL_EXTRADATA_VUI_DISPLAY_INFO,
+ HAL_EXTRADATA_VPX_COLORSPACE,
};
enum hal_property {
@@ -238,6 +241,7 @@ enum hal_property {
HAL_CONFIG_VENC_BLUR_RESOLUTION,
HAL_PARAM_VENC_SESSION_QP_RANGE_PACKED,
HAL_PARAM_VENC_H264_TRANSFORM_8x8,
+ HAL_PARAM_VENC_VIDEO_SIGNAL_INFO,
};
enum hal_domain {
@@ -991,6 +995,13 @@ struct hal_vpe_color_space_conversion {
u32 csc_limit[HAL_MAX_LIMIT_COEFFS];
};
+struct hal_video_signal_info {
+ u32 color_space;
+ u32 transfer_chars;
+ u32 matrix_coeffs;
+ bool full_range;
+};
+
enum vidc_resource_id {
VIDC_RESOURCE_NONE,
VIDC_RESOURCE_OCMEM,
@@ -1499,6 +1510,7 @@ struct hfi_device {
int (*session_clean)(void *sess);
int (*get_core_capabilities)(void *dev);
int (*suspend)(void *dev);
+ int (*flush_debug_queue)(void *dev);
unsigned long (*get_core_clock_rate)(void *dev, bool actual_rate);
enum hal_default_properties (*get_default_properties)(void *dev);
};
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index ff043e9a819b..23240746baf1 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -350,7 +350,7 @@ struct hfi_buffer_info {
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01B)
#define HFI_PROPERTY_PARAM_VENC_LTRMODE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01C)
-#define HFI_PROPERTY_PARAM_VENC_VIDEO_FULL_RANGE \
+#define HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01D)
#define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
@@ -671,6 +671,16 @@ struct hfi_frame_size {
u32 height;
};
+struct hfi_video_signal_metadata {
+ u32 enable;
+ u32 video_format;
+ u32 video_full_range;
+ u32 color_description;
+ u32 color_primaries;
+ u32 transfer_characteristics;
+ u32 matrix_coeffs;
+};
+
struct hfi_h264_vui_timing_info {
u32 enable;
u32 fixed_frame_rate;
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index 6310acab60e7..d41ae950d1a1 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -154,6 +154,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
mutex_lock(sru->ctrls.lock);
ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
& (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
+ vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
mutex_unlock(sru->ctrls.lock);
vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 0934024fb89d..d91ded795c93 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -159,7 +159,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
Set the status so poll routines can check and avoid
access after disconnect.
*/
- dev->dev_state = DEV_DISCONNECTED;
+ set_bit(DEV_DISCONNECTED, &dev->dev_state);
au0828_rc_unregister(dev);
/* Digital TV */
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index b0f067971979..3d6687f0407d 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
bool first = true;
/* do nothing if device is disconnected */
- if (ir->dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
return 0;
/* Check IR int */
@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
/* do nothing if device is disconnected */
- if (ir->dev->dev_state != DEV_DISCONNECTED) {
+ if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
/* Disable IR */
au8522_rc_clear(ir, 0xe0, 1 << 4);
}
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 45c622e234f7..7b2fe1b56039 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -104,14 +104,13 @@ static inline void print_err_status(struct au0828_dev *dev,
static int check_dev(struct au0828_dev *dev)
{
- if (dev->dev_state & DEV_DISCONNECTED) {
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
pr_info("v4l2 ioctl: device not present\n");
return -ENODEV;
}
- if (dev->dev_state & DEV_MISCONFIGURED) {
- pr_info("v4l2 ioctl: device is misconfigured; "
- "close and open it again\n");
+ if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
+ pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
return -EIO;
}
return 0;
@@ -519,8 +518,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
if (!dev)
return 0;
- if ((dev->dev_state & DEV_DISCONNECTED) ||
- (dev->dev_state & DEV_MISCONFIGURED))
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
+ test_bit(DEV_MISCONFIGURED, &dev->dev_state))
return 0;
if (urb->status < 0) {
@@ -766,10 +765,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
int ret = 0;
dev->stream_state = STREAM_INTERRUPT;
- if (dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
return -ENODEV;
else if (ret) {
- dev->dev_state = DEV_MISCONFIGURED;
+ set_bit(DEV_MISCONFIGURED, &dev->dev_state);
dprintk(1, "%s device is misconfigured!\n", __func__);
return ret;
}
@@ -958,7 +957,7 @@ static int au0828_v4l2_open(struct file *filp)
int ret;
dprintk(1,
- "%s called std_set %d dev_state %d stream users %d users %d\n",
+ "%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@@ -977,7 +976,7 @@ static int au0828_v4l2_open(struct file *filp)
au0828_analog_stream_enable(dev);
au0828_analog_stream_reset(dev);
dev->stream_state = STREAM_OFF;
- dev->dev_state |= DEV_INITIALIZED;
+ set_bit(DEV_INITIALIZED, &dev->dev_state);
}
dev->users++;
mutex_unlock(&dev->lock);
@@ -991,7 +990,7 @@ static int au0828_v4l2_close(struct file *filp)
struct video_device *vdev = video_devdata(filp);
dprintk(1,
- "%s called std_set %d dev_state %d stream users %d users %d\n",
+ "%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@@ -1007,7 +1006,7 @@ static int au0828_v4l2_close(struct file *filp)
del_timer_sync(&dev->vbi_timeout);
}
- if (dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
goto end;
if (dev->users == 1) {
@@ -1036,7 +1035,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
.type = V4L2_TUNER_ANALOG_TV,
};
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (dev->std_set_in_tuner_core)
@@ -1108,7 +1107,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1151,7 +1150,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
f->fmt.pix.width = dev->width;
@@ -1170,7 +1169,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1182,7 +1181,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct au0828_dev *dev = video_drvdata(file);
int rc;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
rc = check_dev(dev);
@@ -1204,7 +1203,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (norm == dev->std)
@@ -1236,7 +1235,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*norm = dev->std;
@@ -1259,7 +1258,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
[AU0828_VMUX_DEBUG] = "tv debug"
};
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
tmp = input->index;
@@ -1289,7 +1288,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*i = dev->ctrl_input;
@@ -1300,7 +1299,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
{
int i;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
switch (AUVI_INPUT(index).type) {
@@ -1385,7 +1384,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
a->index = dev->ctrl_ainput;
@@ -1405,7 +1404,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
if (a->index != dev->ctrl_ainput)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return 0;
}
@@ -1417,7 +1416,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
if (t->index != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strcpy(t->name, "Auvitek tuner");
@@ -1437,7 +1436,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
if (t->index != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@@ -1459,7 +1458,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
freq->frequency = dev->ctrl_freq;
return 0;
@@ -1474,7 +1473,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@@ -1500,7 +1499,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1526,7 +1525,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
cc->bounds.left = 0;
@@ -1548,7 +1547,7 @@ static int vidioc_g_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
reg->val = au0828_read(dev, reg->reg);
@@ -1561,7 +1560,7 @@ static int vidioc_s_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_writereg(dev, reg->reg, reg->val);
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 60b59391ea2a..d1b6405a05a4 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -122,9 +123,9 @@ enum au0828_stream_state {
/* device state */
enum au0828_dev_state {
- DEV_INITIALIZED = 0x01,
- DEV_DISCONNECTED = 0x02,
- DEV_MISCONFIGURED = 0x04
+ DEV_INITIALIZED = 0,
+ DEV_DISCONNECTED = 1,
+ DEV_MISCONFIGURED = 2
};
struct au0828_dev;
@@ -248,7 +249,7 @@ struct au0828_dev {
int input_type;
int std_set_in_tuner_core;
unsigned int ctrl_input;
- enum au0828_dev_state dev_state;
+ long unsigned int dev_state; /* defined at enum au0828_dev_state */;
enum au0828_stream_state stream_state;
wait_queue_head_t open;
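The au0828 hunks switch dev_state from mutually exclusive enum flag values to bit numbers updated with set_bit()/test_bit(), so DISCONNECTED and MISCONFIGURED can be tracked independently and changed atomically without a lock. The same pattern in isolation; the demo_* struct and names are illustrative, not from the driver:

#include <linux/bitops.h>
#include <linux/errno.h>

enum demo_dev_state {		/* bit numbers, not masks */
	DEMO_INITIALIZED = 0,
	DEMO_DISCONNECTED = 1,
	DEMO_MISCONFIGURED = 2,
};

struct demo_dev {
	unsigned long dev_state;	/* bitmap of demo_dev_state bits */
};

static void demo_mark_disconnected(struct demo_dev *dev)
{
	/* atomic with respect to concurrent test_bit() readers */
	set_bit(DEMO_DISCONNECTED, &dev->dev_state);
}

static int demo_check_dev(struct demo_dev *dev)
{
	if (test_bit(DEMO_DISCONNECTED, &dev->dev_state))
		return -ENODEV;
	if (test_bit(DEMO_MISCONFIGURED, &dev->dev_state))
		return -EIO;
	return 0;
}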
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index b79c36fd8cd2..58f23bcfe94e 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
{ USB_DEVICE(0x0471, 0x0312) },
{ USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
{ USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
+ { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
{ USB_DEVICE(0x069A, 0x0001) }, /* Askey */
{ USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
@@ -811,6 +812,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
name = "Philips SPC 900NC webcam";
type_id = 740;
break;
+ case 0x032C:
+ PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
+ name = "Philips SPC 880NC webcam";
+ type_id = 740;
+ break;
default:
return -ENODEV;
break;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index b693206f66dd..d1dc1a198e3e 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1463,9 +1463,23 @@ static int usbvision_probe(struct usb_interface *intf,
if (usbvision_device_data[model].interface >= 0)
interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
- else
+ else if (ifnum < dev->actconfig->desc.bNumInterfaces)
interface = &dev->actconfig->interface[ifnum]->altsetting[0];
+ else {
+ dev_err(&intf->dev, "interface %d is invalid, max is %d\n",
+ ifnum, dev->actconfig->desc.bNumInterfaces - 1);
+ ret = -ENODEV;
+ goto err_usb;
+ }
+
+ if (interface->desc.bNumEndpoints < 2) {
+ dev_err(&intf->dev, "interface %d has %d endpoints, but must"
+ " have minimum 2\n", ifnum, interface->desc.bNumEndpoints);
+ ret = -ENODEV;
+ goto err_usb;
+ }
endpoint = &interface->endpoint[1].desc;
+
if (!usb_endpoint_xfer_isoc(endpoint)) {
dev_err(&intf->dev, "%s: interface %d. has non-ISO endpoint!\n",
__func__, ifnum);
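The usbvision probe fix above is the usual USB probe hardening: validate the interface index against bNumInterfaces and the interface's bNumEndpoints before dereferencing endpoint[1]. The endpoint guard on its own, as a hedged sketch (the function is illustrative; the structures are the stock USB core ones):

#include <linux/usb.h>

/* Return the second endpoint descriptor only if the interface has one */
static const struct usb_endpoint_descriptor *
demo_second_endpoint(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	if (alt->desc.bNumEndpoints < 2)
		return NULL;	/* caller should fail probe with -ENODEV */

	return &alt->endpoint[1].desc;
}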
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index fc9e4395c21d..2da7fd7deacd 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -425,7 +425,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
get_user(kp->index, &up->index) ||
get_user(kp->type, &up->type) ||
get_user(kp->flags, &up->flags) ||
- get_user(kp->memory, &up->memory))
+ get_user(kp->memory, &up->memory) ||
+ get_user(kp->length, &up->length))
return -EFAULT;
if (V4L2_TYPE_IS_OUTPUT(kp->type))
@@ -437,9 +438,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- if (get_user(kp->length, &up->length))
- return -EFAULT;
-
num_planes = kp->length;
if (num_planes == 0) {
kp->m.planes = NULL;
@@ -472,16 +470,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
} else {
switch (kp->memory) {
case V4L2_MEMORY_MMAP:
- if (get_user(kp->length, &up->length) ||
- get_user(kp->m.offset, &up->m.offset))
+ if (get_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
{
compat_long_t tmp;
- if (get_user(kp->length, &up->length) ||
- get_user(tmp, &up->m.userptr))
+ if (get_user(tmp, &up->m.userptr))
return -EFAULT;
kp->m.userptr = (unsigned long)compat_ptr(tmp);
@@ -523,7 +519,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
put_user(kp->sequence, &up->sequence) ||
put_user(kp->reserved2, &up->reserved2) ||
- put_user(kp->reserved, &up->reserved))
+ put_user(kp->reserved, &up->reserved) ||
+ put_user(kp->length, &up->length))
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
@@ -546,13 +543,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
} else {
switch (kp->memory) {
case V4L2_MEMORY_MMAP:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.offset, &up->m.offset))
+ if (put_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.userptr, &up->m.userptr))
+ if (put_user(kp->m.userptr, &up->m.userptr))
return -EFAULT;
break;
case V4L2_MEMORY_OVERLAY:
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index 7f16f1f3f417..398f0086537a 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -1815,6 +1815,8 @@ static bool wcd934x_is_readable_register(struct device *dev, unsigned int reg)
pg_num = reg >> 0x8;
if (pg_num == 0x80)
pg_num = WCD934X_PAGE_0X80;
+ else if (pg_num == 0x50)
+ pg_num = WCD934X_PAGE_0x50;
else if (pg_num > 0xF)
return false;
@@ -1835,6 +1837,8 @@ static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
pg_num = reg >> 0x8;
if (pg_num == 0x80)
pg_num = WCD934X_PAGE_0X80;
+ else if (pg_num == 0x50)
+ pg_num = WCD934X_PAGE_0x50;
else if (pg_num > 0xF)
return false;
@@ -1844,6 +1848,15 @@ static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
if (reg_tbl && reg_tbl[reg_offset] == WCD934X_READ)
return true;
+ /*
+ * Registers that are writable but have a few read-only bits
+ * need to be marked volatile
+ */
+ switch (reg) {
+ case WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL:
+ return true;
+ }
+
return false;
}
diff --git a/drivers/mfd/wcd934x-tables.c b/drivers/mfd/wcd934x-tables.c
index ab5d18cd8493..db963d08b66e 100644
--- a/drivers/mfd/wcd934x-tables.c
+++ b/drivers/mfd/wcd934x-tables.c
@@ -1983,6 +1983,40 @@ const u8 wcd934x_page15_reg_access[WCD934X_PAGE_SIZE] = {
WCD934X_READ_WRITE,
};
+const u8 wcd934x_page_0x50_reg_access[WCD934X_PAGE_SIZE] = {
+ [WCD934X_REG(WCD934X_PAGE80_PAGE_REGISTER)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_0)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_1)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_2)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_3)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_0)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_1)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_2)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_WR_ADDR_3)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_0)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_1)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_2)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_RD_ADDR_3)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_0)] = WCD934X_READ,
+ [WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_1)] = WCD934X_READ,
+ [WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_2)] = WCD934X_READ,
+ [WCD934X_REG(WCD934X_CODEC_CPR_RD_DATA_3)] = WCD934X_READ,
+ [WCD934X_REG(WCD934X_CODEC_CPR_ACCESS_CFG)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_ACCESS_STATUS)] = WCD934X_READ,
+ [WCD934X_REG(WCD934X_CODEC_CPR_NOM_CX_VDD)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_SVS_CX_VDD)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_SVS2_CX_VDD)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_NOM_MX_VDD)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_SVS_MX_VDD)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_SVS2_MX_VDD)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_MAX_SVS2_STEP)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_CTL)] = WCD934X_READ_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_SW_MODECHNG_STATUS)] = WCD934X_READ,
+ [WCD934X_REG(WCD934X_CODEC_CPR_SW_MODECHNG_START)] = WCD934X_WRITE,
+ [WCD934X_REG(WCD934X_CODEC_CPR_CPR_STATUS)] = WCD934X_READ_WRITE,
+};
+
const u8 wcd934x_page_0x80_reg_access[WCD934X_PAGE_SIZE] = {
[WCD934X_REG(WCD934X_PAGE80_PAGE_REGISTER)] = WCD934X_READ_WRITE,
[WCD934X_REG(WCD934X_CODEC_CPR_WR_DATA_0)] = WCD934X_READ_WRITE,
@@ -2116,5 +2150,6 @@ const u8 * const wcd934x_reg[WCD934X_NUM_PAGES] = {
[WCD934X_PAGE_13] = wcd934x_page13_reg_access,
[WCD934X_PAGE_14] = wcd934x_page14_reg_access,
[WCD934X_PAGE_15] = wcd934x_page15_reg_access,
+ [WCD934X_PAGE_0x50] = wcd934x_page_0x50_reg_access,
[WCD934X_PAGE_0X80] = wcd934x_page_0x80_reg_access,
};
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 31ac6624b8b4..981d372277ee 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -1339,7 +1339,6 @@ static int wcd9xxx_slim_probe(struct slim_device *slim)
if (ret) {
dev_err(&slim->dev, "%s: failed to get slimbus %s logical address: %d\n",
__func__, wcd9xxx->slim->name, ret);
- ret = -EPROBE_DEFER;
goto err_reset;
}
wcd9xxx->read_dev = wcd9xxx_slim_read_device;
@@ -1364,7 +1363,6 @@ static int wcd9xxx_slim_probe(struct slim_device *slim)
if (ret) {
dev_err(&slim->dev, "%s: failed to get slimbus %s logical address: %d\n",
__func__, wcd9xxx->slim->name, ret);
- ret = -EPROBE_DEFER;
goto err_slim_add;
}
wcd9xxx_inf_la = wcd9xxx->slim_slave->laddr;
diff --git a/drivers/mfd/wcd9xxx-utils.c b/drivers/mfd/wcd9xxx-utils.c
index 22d61d96a11d..38286831a02c 100644
--- a/drivers/mfd/wcd9xxx-utils.c
+++ b/drivers/mfd/wcd9xxx-utils.c
@@ -298,6 +298,7 @@ struct wcd9xxx_pdata *wcd9xxx_populate_dt_data(struct device *dev)
struct wcd9xxx_pdata *pdata;
u32 dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
u32 mad_dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
+ u32 ecpp_dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
u32 dmic_clk_drive = WCD9XXX_DMIC_CLK_DRIVE_UNDEFINED;
u32 prop_val;
@@ -358,6 +359,15 @@ struct wcd9xxx_pdata *wcd9xxx_populate_dt_data(struct device *dev)
pdata->mclk_rate,
"mad_dmic_rate");
+ if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-ecpp-dmic-rate",
+ &prop_val)))
+ ecpp_dmic_sample_rate = prop_val;
+
+ pdata->ecpp_dmic_sample_rate = wcd9xxx_validate_dmic_sample_rate(dev,
+ ecpp_dmic_sample_rate,
+ pdata->mclk_rate,
+ "ecpp_dmic_rate");
+
if (!(of_property_read_u32(dev->of_node,
"qcom,cdc-dmic-clk-drv-strength",
&prop_val)))
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 055b19432e7c..8a08ca61062a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -412,10 +412,6 @@ config TI_DAC7512
This driver can also be built as a module. If so, the module
will be called ti_dac7512.
-config UID_STAT
- bool "UID based statistics tracking exported to /proc/uid_stat"
- default n
-
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index fa36baace850..6acb70964fb8 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_ISL29020) += isl29020.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
-obj-$(CONFIG_UID_STAT) += uid_stat.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index 7d1b9e9121a9..0c6f1de2465b 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -456,6 +456,7 @@ struct hdcp_lib_handle {
atomic_t hdcp_off;
uint32_t session_id;
bool legacy_app;
+ enum hdcp_device_type device_type;
struct task_struct *thread;
struct completion topo_wait;
@@ -901,7 +902,7 @@ static int hdcp_lib_session_init(struct hdcp_lib_handle *handle)
req_buf =
(struct hdcp_lib_session_init_req *)handle->qseecom_handle->sbuf;
req_buf->commandid = HDCP_SESSION_INIT;
- req_buf->deviceid = HDCP_TXMTR_HDMI;
+ req_buf->deviceid = handle->device_type;
rsp_buf = (struct hdcp_lib_session_init_rsp *)
(handle->qseecom_handle->sbuf +
QSEECOM_ALIGN(sizeof(struct hdcp_lib_session_init_req)));
@@ -2060,6 +2061,7 @@ int hdcp_library_register(struct hdcp_register_data *data)
handle->tethered = data->tethered;
handle->hdcp_app_init = NULL;
handle->hdcp_txmtr_init = NULL;
+ handle->device_type = data->device_type;
pr_debug("tethered %d\n", handle->tethered);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 0b05aa938799..1a173d0af694 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bus = cl->dev;
mutex_lock(&bus->device_lock);
+ if (bus->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto out;
+ }
+
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
@@ -109,6 +114,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
bus = cl->dev;
mutex_lock(&bus->device_lock);
+ if (bus->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto out;
+ }
cb = mei_cl_read_cb(cl, NULL);
if (cb)
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 273728482227..779994a1c9dd 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1371,7 +1371,7 @@ static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
(req.sb_len == 0)) {
- pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%p)\n",
+ pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
req.ifd_data_fd, req.sb_len, req.virt_sb_base);
return -EFAULT;
}
@@ -2100,7 +2100,7 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
pr_debug("Do not unload keymaster app from tz\n");
- return 0;
+ goto unload_exit;
}
__qseecom_cleanup_app(data);
@@ -2152,7 +2152,7 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
pr_err("scm_call to unload app (id = %d) failed\n",
req.app_id);
ret = -EFAULT;
- goto not_release_exit;
+ goto unload_exit;
} else {
pr_warn("App id %d now unloaded\n", req.app_id);
}
@@ -2160,7 +2160,7 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
pr_err("app (%d) unload_failed!!\n",
data->client.app_id);
ret = -EFAULT;
- goto not_release_exit;
+ goto unload_exit;
}
if (resp.result == QSEOS_RESULT_SUCCESS)
pr_debug("App (%d) is unloaded!!\n",
@@ -2170,7 +2170,7 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
if (ret) {
pr_err("process_incomplete_cmd fail err: %d\n",
ret);
- goto not_release_exit;
+ goto unload_exit;
}
}
}
@@ -2200,7 +2200,6 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
unload_exit:
qseecom_unmap_ion_allocated_memory(data);
data->released = true;
-not_release_exit:
return ret;
}
@@ -2225,7 +2224,7 @@ int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
void *req_buf = NULL;
if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
- pr_err("Error with pointer: req_ptr = %p, send_svc_ptr = %p\n",
+ pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
req_ptr, send_svc_ireq_ptr);
return -EINVAL;
}
@@ -2272,7 +2271,7 @@ int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
uint32_t reqd_len_sb_in = 0;
if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
- pr_err("Error with pointer: req_ptr = %p, send_svc_ptr = %p\n",
+ pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
req_ptr, send_svc_ireq_ptr);
return -EINVAL;
}
@@ -2877,7 +2876,7 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
(!cleanup) &&
((uint64_t)sg_dma_address(sg_ptr->sgl)
>= PHY_ADDR_4G - sg->length)) {
- pr_err("App %s sgl PA exceeds 4G: phy_addr=%pad, len=%x\n",
+ pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
data->client.app_name,
&(sg_dma_address(sg_ptr->sgl)),
sg->length);
@@ -2934,7 +2933,7 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
(!cleanup) &&
((uint64_t)(sg_dma_address(sg))
>= PHY_ADDR_4G - sg->length)) {
- pr_err("App %s sgl PA exceeds 4G: phy_addr=%pad, len=%x\n",
+ pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
data->client.app_name,
&(sg_dma_address(sg)),
sg->length);
@@ -4092,7 +4091,7 @@ int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
if (ret)
return ret;
- pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%p\n",
+ pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
req.resp_len, req.resp_buf);
return ret;
}
@@ -4152,41 +4151,80 @@ static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
return 0;
}
-static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
- void __user *argp, bool is_64bit_addr)
+static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
+ struct qseecom_send_modfd_listener_resp *resp,
+ struct qseecom_registered_listener_list *this_lstnr)
{
- struct qseecom_send_modfd_listener_resp resp;
int i;
- struct qseecom_registered_listener_list *this_lstnr = NULL;
- if (copy_from_user(&resp, argp, sizeof(resp))) {
- pr_err("copy_from_user failed");
+ if (!data || !resp || !this_lstnr) {
+ pr_err("listener handle or resp msg is null\n");
return -EINVAL;
}
- this_lstnr = __qseecom_find_svc(data->listener.id);
- if (this_lstnr == NULL)
+
+ if (resp->resp_buf_ptr == NULL) {
+ pr_err("resp buffer is null\n");
+ return -EINVAL;
+ }
+ /* validate resp buf length */
+ if ((resp->resp_len == 0) ||
+ (resp->resp_len > this_lstnr->sb_length)) {
+ pr_err("resp buf length %d not valid\n", resp->resp_len);
return -EINVAL;
+ }
- if (resp.resp_buf_ptr == NULL) {
- pr_err("Invalid resp_buf_ptr\n");
+ if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
+ pr_err("Integer overflow in resp_len & resp_buf\n");
return -EINVAL;
}
+ if ((uintptr_t)this_lstnr->user_virt_sb_base >
+ (ULONG_MAX - this_lstnr->sb_length)) {
+ pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+ return -EINVAL;
+ }
+ /* validate resp buf */
+ if (((uintptr_t)resp->resp_buf_ptr <
+ (uintptr_t)this_lstnr->user_virt_sb_base) ||
+ ((uintptr_t)resp->resp_buf_ptr >=
+ ((uintptr_t)this_lstnr->user_virt_sb_base +
+ this_lstnr->sb_length)) ||
+ (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
+ ((uintptr_t)this_lstnr->user_virt_sb_base +
+ this_lstnr->sb_length))) {
+ pr_err("resp buf is out of shared buffer region\n");
+ return -EINVAL;
+ }
+
/* validate offsets */
for (i = 0; i < MAX_ION_FD; i++) {
- if (resp.ifd_data[i].cmd_buf_offset >= resp.resp_len) {
+ if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
pr_err("Invalid offset %d = 0x%x\n",
- i, resp.ifd_data[i].cmd_buf_offset);
+ i, resp->ifd_data[i].cmd_buf_offset);
return -EINVAL;
}
}
- if ((resp.resp_buf_ptr < this_lstnr->user_virt_sb_base) ||
- ((uintptr_t)resp.resp_buf_ptr >=
- ((uintptr_t)this_lstnr->user_virt_sb_base +
- this_lstnr->sb_length))) {
- pr_err("resp_buf_ptr address not within shared buffer\n");
+ return 0;
+}
+
+static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+ void __user *argp, bool is_64bit_addr)
+{
+ struct qseecom_send_modfd_listener_resp resp;
+ struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+ if (copy_from_user(&resp, argp, sizeof(resp))) {
+ pr_err("copy_from_user failed");
return -EINVAL;
}
+
+ this_lstnr = __qseecom_find_svc(data->listener.id);
+ if (this_lstnr == NULL)
+ return -EINVAL;
+
+ if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
+ return -EINVAL;
+
resp.resp_buf_ptr = this_lstnr->sb_virt +
(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
@@ -6339,7 +6377,7 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
ret = -EINVAL;
break;
}
- pr_debug("SET_MEM_PARAM: qseecom addr = 0x%p\n", data);
+ pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
ret = qseecom_set_client_mem_param(data, argp);
if (ret)
pr_err("failed Qqseecom_set_mem_param request: %d\n",
@@ -6355,7 +6393,7 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
break;
}
data->type = QSEECOM_CLIENT_APP;
- pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%p\n", data);
+ pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
mutex_lock(&app_access_lock);
atomic_inc(&data->ioctl_count);
ret = qseecom_load_app(data, argp);
@@ -6373,7 +6411,7 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
ret = -EINVAL;
break;
}
- pr_debug("UNLOAD_APP: qseecom_addr = 0x%p\n", data);
+ pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
mutex_lock(&app_access_lock);
atomic_inc(&data->ioctl_count);
ret = qseecom_unload_app(data, false);
@@ -6504,7 +6542,7 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
data->type = QSEECOM_CLIENT_APP;
mutex_lock(&app_access_lock);
atomic_inc(&data->ioctl_count);
- pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%p\n", data);
+ pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
ret = qseecom_query_app_loaded(data, argp);
atomic_dec(&data->ioctl_count);
mutex_unlock(&app_access_lock);
@@ -6823,7 +6861,7 @@ static int qseecom_release(struct inode *inode, struct file *file)
int ret = 0;
if (data->released == false) {
- pr_debug("data: released=false, type=%d, mode=%d, data=0x%p\n",
+ pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
data->type, data->mode, data);
switch (data->type) {
case QSEECOM_LISTENER_SERVICE:
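__validate_send_modfd_resp_inputs() above gathers the listener-response checks in one place: a non-NULL buffer, a length that fits the shared buffer, no pointer-arithmetic overflow, and the whole [ptr, ptr + len) range inside [sb_base, sb_base + sb_len). The overflow-safe range check reduced to its core, with the function name and parameters illustrative:

#include <linux/types.h>
#include <linux/kernel.h>	/* ULONG_MAX */

/* Does [ptr, ptr + len) fit entirely inside [base, base + size)? */
static bool demo_range_in_shared_buf(uintptr_t ptr, size_t len,
				     uintptr_t base, size_t size)
{
	if (!len || len > size)
		return false;
	/* guard the additions below against wrap-around */
	if (ptr > ULONG_MAX - len || base > ULONG_MAX - size)
		return false;
	return ptr >= base && (ptr + len) <= (base + size);
}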
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
deleted file mode 100644
index 2141124a6c12..000000000000
--- a/drivers/misc/uid_stat.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/* drivers/misc/uid_stat.c
- *
- * Copyright (C) 2008 - 2009 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <asm/atomic.h>
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/proc_fs.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/stat.h>
-#include <linux/uid_stat.h>
-#include <net/activity_stats.h>
-
-static DEFINE_SPINLOCK(uid_lock);
-static LIST_HEAD(uid_list);
-static struct proc_dir_entry *parent;
-
-struct uid_stat {
- struct list_head link;
- uid_t uid;
- atomic_t tcp_rcv;
- atomic_t tcp_snd;
-};
-
-static struct uid_stat *find_uid_stat(uid_t uid) {
- unsigned long flags;
- struct uid_stat *entry;
-
- spin_lock_irqsave(&uid_lock, flags);
- list_for_each_entry(entry, &uid_list, link) {
- if (entry->uid == uid) {
- spin_unlock_irqrestore(&uid_lock, flags);
- return entry;
- }
- }
- spin_unlock_irqrestore(&uid_lock, flags);
- return NULL;
-}
-
-static int tcp_snd_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len;
- unsigned int bytes;
- char *p = page;
- struct uid_stat *uid_entry = (struct uid_stat *) data;
- if (!data)
- return 0;
-
- bytes = (unsigned int) (atomic_read(&uid_entry->tcp_snd) + INT_MIN);
- p += sprintf(p, "%u\n", bytes);
- len = (p - page) - off;
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
- return len;
-}
-
-static int tcp_rcv_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len;
- unsigned int bytes;
- char *p = page;
- struct uid_stat *uid_entry = (struct uid_stat *) data;
- if (!data)
- return 0;
-
- bytes = (unsigned int) (atomic_read(&uid_entry->tcp_rcv) + INT_MIN);
- p += sprintf(p, "%u\n", bytes);
- len = (p - page) - off;
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
- return len;
-}
-
-/* Create a new entry for tracking the specified uid. */
-static struct uid_stat *create_stat(uid_t uid) {
- unsigned long flags;
- char uid_s[32];
- struct uid_stat *new_uid;
- struct proc_dir_entry *entry;
-
- /* Create the uid stat struct and append it to the list. */
- if ((new_uid = kmalloc(sizeof(struct uid_stat), GFP_KERNEL)) == NULL)
- return NULL;
-
- new_uid->uid = uid;
- /* Counters start at INT_MIN, so we can track 4GB of network traffic. */
- atomic_set(&new_uid->tcp_rcv, INT_MIN);
- atomic_set(&new_uid->tcp_snd, INT_MIN);
-
- spin_lock_irqsave(&uid_lock, flags);
- list_add_tail(&new_uid->link, &uid_list);
- spin_unlock_irqrestore(&uid_lock, flags);
-
- sprintf(uid_s, "%d", uid);
- entry = proc_mkdir(uid_s, parent);
-
- /* Keep reference to uid_stat so we know what uid to read stats from. */
- create_proc_read_entry("tcp_snd", S_IRUGO, entry , tcp_snd_read_proc,
- (void *) new_uid);
-
- create_proc_read_entry("tcp_rcv", S_IRUGO, entry, tcp_rcv_read_proc,
- (void *) new_uid);
-
- return new_uid;
-}
-
-int uid_stat_tcp_snd(uid_t uid, int size) {
- struct uid_stat *entry;
- activity_stats_update();
- if ((entry = find_uid_stat(uid)) == NULL &&
- ((entry = create_stat(uid)) == NULL)) {
- return -1;
- }
- atomic_add(size, &entry->tcp_snd);
- return 0;
-}
-
-int uid_stat_tcp_rcv(uid_t uid, int size) {
- struct uid_stat *entry;
- activity_stats_update();
- if ((entry = find_uid_stat(uid)) == NULL &&
- ((entry = create_stat(uid)) == NULL)) {
- return -1;
- }
- atomic_add(size, &entry->tcp_rcv);
- return 0;
-}
-
-static int __init uid_stat_init(void)
-{
- parent = proc_mkdir("uid_stat", NULL);
- if (!parent) {
- pr_err("uid_stat: failed to create proc entry\n");
- return -1;
- }
- return 0;
-}
-
-__initcall(uid_stat_init);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dafdbd99ce71..b116122c5767 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -935,6 +935,14 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_card *card;
int err = 0, ioc_err = 0;
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
if (IS_ERR_OR_NULL(idata))
return PTR_ERR(idata);
@@ -993,6 +1001,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
int i, err = 0, ioc_err = 0;
__u64 num_of_cmds;
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
sizeof(num_of_cmds)))
return -EFAULT;
@@ -1048,14 +1064,6 @@ cmd_err:
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- /*
- * The caller must have CAP_SYS_RAWIO, and must be calling this on the
- * whole block device, not on a partition. This prevents overspray
- * between sibling partitions.
- */
- if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
- return -EPERM;
-
switch (cmd) {
case MMC_IOC_CMD:
return mmc_blk_ioctl_cmd(bdev,
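
The mmc/card/block.c hunks above move the CAP_SYS_RAWIO plus whole-device guard out of the ioctl dispatcher and into each command helper, so any path that reaches the helpers directly still hits the check. A minimal userspace sketch of that pattern, assuming hypothetical stand-ins (struct fake_bdev, fake_capable_rawio) for the block device and the capability test:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_bdev { struct fake_bdev *bd_contains; };	/* hypothetical */

static bool fake_capable_rawio;				/* hypothetical */

static int ioctl_cmd(struct fake_bdev *bdev)
{
	/* guard lives in the helper, not only in the dispatcher */
	if (!fake_capable_rawio || bdev != bdev->bd_contains)
		return -EPERM;
	return 0;	/* ... issue the command ... */
}

static int ioctl_dispatch(struct fake_bdev *bdev, unsigned int cmd)
{
	(void)cmd;	/* dispatcher no longer gates access itself */
	return ioctl_cmd(bdev);
}

int main(void)
{
	struct fake_bdev whole = { .bd_contains = &whole };
	struct fake_bdev part  = { .bd_contains = &whole };

	fake_capable_rawio = true;
	printf("whole: %d, partition: %d\n",
	       ioctl_dispatch(&whole, 0), ioctl_dispatch(&part, 0));
	return 0;
}
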
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 1c1b45ef3faf..aad3243a48fc 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1436,6 +1436,12 @@ static int mmc_spi_probe(struct spi_device *spi)
host->pdata->cd_debounce);
if (status != 0)
goto fail_add_host;
+
+ /* The platform has a CD GPIO signal that may support
+ * interrupts, so let mmc_gpiod_request_cd_irq() decide
+ * if polling is needed or not.
+ */
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 45ee07d3a761..610154836d79 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->cd_idx = 0;
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
slot->host->mmc_host_ops.get_cd = bxt_get_cd;
@@ -1173,6 +1174,30 @@ static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_APL_EMMC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d1a0b4db60db..89e7151684a1 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -28,6 +28,9 @@
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
+#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca
+#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc
+#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0
#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 36b75f048e76..5f7eac922c54 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -754,9 +754,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
if (!data)
target_timeout = cmd->busy_timeout * 1000;
else {
- target_timeout = data->timeout_ns / 1000;
- if (host->clock)
- target_timeout += data->timeout_clks / host->clock;
+ target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
+ if (host->clock && data->timeout_clks) {
+ unsigned long long val;
+
+ /*
+ * data->timeout_clks is in units of clock cycles.
+ * host->clock is in Hz. target_timeout is in us.
+ * Hence, us = 1000000 * cycles / Hz. Round up.
+ */
+ val = 1000000 * data->timeout_clks;
+ if (do_div(val, host->clock))
+ target_timeout++;
+ target_timeout += val;
+ }
}
/*
@@ -3921,14 +3932,14 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
+ if (override_timeout_clk)
+ host->timeout_clk = override_timeout_clk;
+
mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
host->ops->get_max_timeout_count(host) : 1 << 27;
mmc->max_busy_timeout /= host->timeout_clk;
}
- if (override_timeout_clk)
- host->timeout_clk = override_timeout_clk;
-
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
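
The sdhci.c timeout hunk converts a nanosecond part and a clock-cycle part to microseconds, rounding both contributions up so the programmed timeout is never shorter than requested. A userspace sketch of the same arithmetic, with plain 64-bit division standing in for the kernel's do_div():

#include <stdint.h>
#include <stdio.h>

static uint64_t timeout_us(uint32_t timeout_ns, uint32_t timeout_clks,
			   uint32_t clock_hz)
{
	/* DIV_ROUND_UP(ns, 1000) */
	uint64_t us = ((uint64_t)timeout_ns + 999) / 1000;

	if (clock_hz && timeout_clks) {
		/* cycles -> us: us = 1000000 * cycles / Hz, rounded up */
		uint64_t val = 1000000ULL * timeout_clks;

		us += val / clock_hz;
		if (val % clock_hz)
			us++;
	}
	return us;
}

int main(void)
{
	/* e.g. 100 ms data timeout plus 65535 cycles at 400 kHz */
	printf("%llu us\n",
	       (unsigned long long)timeout_us(100000000, 65535, 400000));
	return 0;
}
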
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index ad9ffea7d659..6234eab38ff3 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -397,38 +397,26 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
}
static struct dma_chan *
-sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
- struct sh_mmcif_plat_data *pdata,
- enum dma_transfer_direction direction)
+sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
- struct dma_slave_config cfg = { 0, };
- struct dma_chan *chan;
- void *slave_data = NULL;
- struct resource *res;
- struct device *dev = sh_mmcif_host_to_dev(host);
dma_cap_mask_t mask;
- int ret;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
+ if (slave_id <= 0)
+ return NULL;
- if (pdata)
- slave_data = direction == DMA_MEM_TO_DEV ?
- (void *)pdata->slave_id_tx :
- (void *)pdata->slave_id_rx;
-
- chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
- slave_data, dev,
- direction == DMA_MEM_TO_DEV ? "tx" : "rx");
-
- dev_dbg(dev, "%s: %s: got channel %p\n", __func__,
- direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);
+ return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
+}
- if (!chan)
- return NULL;
+static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
+ struct dma_chan *chan,
+ enum dma_transfer_direction direction)
+{
+ struct resource *res;
+ struct dma_slave_config cfg = { 0, };
res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
-
cfg.direction = direction;
if (direction == DMA_DEV_TO_MEM) {
@@ -439,38 +427,42 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
}
- ret = dmaengine_slave_config(chan, &cfg);
- if (ret < 0) {
- dma_release_channel(chan);
- return NULL;
- }
-
- return chan;
+ return dmaengine_slave_config(chan, &cfg);
}
-static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
- struct sh_mmcif_plat_data *pdata)
+static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
struct device *dev = sh_mmcif_host_to_dev(host);
host->dma_active = false;
- if (pdata) {
- if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
- return;
- } else if (!dev->of_node) {
- return;
+ /* We can only either use DMA for both Tx and Rx or not use it at all */
+ if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
+ struct sh_mmcif_plat_data *pdata = dev->platform_data;
+
+ host->chan_tx = sh_mmcif_request_dma_pdata(host,
+ pdata->slave_id_tx);
+ host->chan_rx = sh_mmcif_request_dma_pdata(host,
+ pdata->slave_id_rx);
+ } else {
+ host->chan_tx = dma_request_slave_channel(dev, "tx");
+ host->chan_rx = dma_request_slave_channel(dev, "rx");
}
+ dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
+ host->chan_rx);
- /* We can only either use DMA for both Tx and Rx or not use it at all */
- host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
- if (!host->chan_tx)
- return;
+ if (!host->chan_tx || !host->chan_rx ||
+ sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
+ sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
+ goto error;
- host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
- if (!host->chan_rx) {
+ return;
+
+error:
+ if (host->chan_tx)
dma_release_channel(host->chan_tx);
- host->chan_tx = NULL;
- }
+ if (host->chan_rx)
+ dma_release_channel(host->chan_rx);
+ host->chan_tx = host->chan_rx = NULL;
}
static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
@@ -1102,7 +1094,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode == MMC_POWER_UP) {
if (!host->card_present) {
/* See if we also get DMA */
- sh_mmcif_request_dma(host, dev->platform_data);
+ sh_mmcif_request_dma(host);
host->card_present = true;
}
sh_mmcif_set_power(host, ios);
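
The sh_mmcif rework above adopts an all-or-nothing channel setup: request both directions, configure both, and on any failure release whatever was obtained and fall back to PIO with both pointers NULL. A compact sketch of that unwind pattern, where request_chan(), config_chan() and release_chan() are hypothetical stand-ins for the dmaengine calls:

#include <stdio.h>
#include <stdlib.h>

struct chan { int dir; };

static struct chan *request_chan(int dir, int available)
{
	struct chan *c;

	if (!available)
		return NULL;
	c = malloc(sizeof(*c));
	if (c)
		c->dir = dir;
	return c;
}

static int config_chan(struct chan *c) { return c ? 0 : -1; }
static void release_chan(struct chan *c) { free(c); }

static void request_dma(struct chan **tx, struct chan **rx, int available)
{
	*tx = request_chan(0, available);
	*rx = request_chan(1, available);

	/* only use DMA when both directions are usable */
	if (!*tx || !*rx || config_chan(*tx) || config_chan(*rx))
		goto error;
	return;

error:
	release_chan(*tx);
	release_chan(*rx);
	*tx = *rx = NULL;
}

int main(void)
{
	struct chan *tx, *rx;

	request_dma(&tx, &rx, 1);
	printf("DMA %s\n", tx && rx ? "enabled" : "disabled (PIO)");
	request_dma(&tx, &rx, 0);
	printf("DMA %s\n", tx && rx ? "enabled" : "disabled (PIO)");
	return 0;
}
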
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 43b3392ffee7..652d01832873 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2599,6 +2599,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
*/
static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
+ struct onenand_chip *this = mtd->priv;
int ret;
ret = onenand_block_isbad(mtd, ofs);
@@ -2610,7 +2611,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
onenand_get_device(mtd, FL_WRITING);
- ret = mtd_block_markbad(mtd, ofs);
+ ret = this->block_markbad(mtd, ofs);
onenand_release_device(mtd);
return ret;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 28bbca0af238..b3d70a7a5262 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3260,6 +3260,30 @@ static int bond_close(struct net_device *bond_dev)
return 0;
}
+/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
+ * that some drivers can provide 32bit values only.
+ */
+static void bond_fold_stats(struct rtnl_link_stats64 *_res,
+ const struct rtnl_link_stats64 *_new,
+ const struct rtnl_link_stats64 *_old)
+{
+ const u64 *new = (const u64 *)_new;
+ const u64 *old = (const u64 *)_old;
+ u64 *res = (u64 *)_res;
+ int i;
+
+ for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
+ u64 nv = new[i];
+ u64 ov = old[i];
+
+ /* detects if this particular field is 32bit only */
+ if (((nv | ov) >> 32) == 0)
+ res[i] += (u32)nv - (u32)ov;
+ else
+ res[i] += nv - ov;
+ }
+}
+
static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
@@ -3268,43 +3292,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct list_head *iter;
struct slave *slave;
+ spin_lock(&bond->stats_lock);
memcpy(stats, &bond->bond_stats, sizeof(*stats));
- bond_for_each_slave(bond, slave, iter) {
- const struct rtnl_link_stats64 *sstats =
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ const struct rtnl_link_stats64 *new =
dev_get_stats(slave->dev, &temp);
- struct rtnl_link_stats64 *pstats = &slave->slave_stats;
-
- stats->rx_packets += sstats->rx_packets - pstats->rx_packets;
- stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
- stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
- stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
-
- stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
- stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
- stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
- stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;
-
- stats->multicast += sstats->multicast - pstats->multicast;
- stats->collisions += sstats->collisions - pstats->collisions;
-
- stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
- stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
- stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
- stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
- stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
- stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;
-
- stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
- stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
- stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
- stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
- stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;
+
+ bond_fold_stats(stats, new, &slave->slave_stats);
/* save off the slave stats for the next run */
- memcpy(pstats, sstats, sizeof(*sstats));
+ memcpy(&slave->slave_stats, new, sizeof(*new));
}
+ rcu_read_unlock();
+
memcpy(&bond->bond_stats, stats, sizeof(*stats));
+ spin_unlock(&bond->stats_lock);
return stats;
}
@@ -4118,6 +4122,7 @@ void bond_setup(struct net_device *bond_dev)
struct bonding *bond = netdev_priv(bond_dev);
spin_lock_init(&bond->mode_lock);
+ spin_lock_init(&bond->stats_lock);
bond->params = bonding_defaults;
/* Initialize pointers */
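
The bond_fold_stats() helper added above handles slave drivers whose counters are effectively 32-bit: when both the old and new samples fit in 32 bits, the delta is computed in 32-bit arithmetic so a counter that wrapped (new < old) still yields the correct positive difference. A userspace sketch of that folding step:

#include <stdint.h>
#include <stdio.h>

static void fold(uint64_t *res, uint64_t nv, uint64_t ov)
{
	/* detects if this particular field is 32bit only */
	if (((nv | ov) >> 32) == 0)
		*res += (uint32_t)nv - (uint32_t)ov;	/* wraps correctly */
	else
		*res += nv - ov;
}

int main(void)
{
	uint64_t total = 0;

	fold(&total, 10, 0xfffffff0);	/* 32-bit counter wrapped by 26 */
	printf("delta after wrap: %llu\n", (unsigned long long)total);
	return 0;
}
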
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 17f017ab4dac..0fb3f8de88e9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1197,7 +1197,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
- tx_cb_ptr->skb->len,
+ dma_unmap_len(tx_cb_ptr, dma_len),
DMA_TO_DEVICE);
bcmgenet_free_cb(tx_cb_ptr);
} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
@@ -1308,7 +1308,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
}
dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
DMA_TX_APPEND_CRC;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 060dd3922974..973dade2d07f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -3312,13 +3312,14 @@ jme_resume(struct device *dev)
jme_reset_phy_processor(jme);
jme_phy_calibration(jme);
jme_phy_setEA(jme);
- jme_start_irq(jme);
netif_device_attach(netdev);
atomic_inc(&jme->link_changing);
jme_reset_link(jme);
+ jme_start_irq(jme);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ed622fa29dfa..a4ac6fedac75 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3404,7 +3404,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index cad6c44df91c..d314d96dcb1c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3132,7 +3132,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
case QP_TRANS_RTS2RTS:
case QP_TRANS_SQD2SQD:
case QP_TRANS_SQD2RTS:
- if (slave != mlx4_master_func_num(dev))
+ if (slave != mlx4_master_func_num(dev)) {
if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
@@ -3151,6 +3151,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
if (qp_ctx->alt_path.mgid_index >= num_gids)
return -EINVAL;
}
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4365c8bccc6d..605f6410f867 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -61,6 +61,8 @@ struct mlxsw_sp {
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
unsigned int interval; /* ms */
} fdb_notify;
+#define MLXSW_SP_MIN_AGEING_TIME 10
+#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7dbeafa65934..d4c4c2b5156c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -232,8 +232,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
+ ageing_time > MLXSW_SP_MAX_AGEING_TIME)
+ return -ERANGE;
+ else
+ return 0;
+ }
return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
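
The mlxsw change above validates the bridge ageing time during the switchdev prepare phase so the later commit phase can no longer fail. A small sketch of that prepare/commit split, with a plain boolean standing in for the switchdev transaction state:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MIN_AGEING_TIME 10
#define MAX_AGEING_TIME 1000000

static int set_ageing(unsigned int ageing_time, bool prepare)
{
	if (prepare) {
		/* reject out-of-range values before anything is committed */
		if (ageing_time < MIN_AGEING_TIME ||
		    ageing_time > MAX_AGEING_TIME)
			return -ERANGE;
		return 0;
	}
	/* commit phase: program the hardware here */
	return 0;
}

int main(void)
{
	printf("prepare(5): %d, prepare(300): %d\n",
	       set_ageing(5, true), set_ageing(300, true));
	return 0;
}
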
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 46bbea8e023c..55007f1e6bbc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -566,6 +566,7 @@ struct qlcnic_adapter_stats {
u64 tx_dma_map_error;
u64 spurious_intr;
u64 mac_filter_limit_overrun;
+ u64 mbx_spurious_intr;
};
/*
@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
unsigned long status;
spinlock_t queue_lock; /* Mailbox queue lock */
spinlock_t aen_lock; /* Mailbox response/AEN lock */
- atomic_t rsp_status;
+ u32 rsp_status;
u32 num_cmds;
};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 37a731be7d39..f9640d5ce6ba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
{
- atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
complete(&mbx->completion);
}
@@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
} else {
- if (atomic_read(&mbx->rsp_status) != rsp_status)
+ if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
}
out:
@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
} else {
- if (atomic_read(&mbx->rsp_status) != rsp_status)
+ if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
}
}
@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
+ u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
struct qlcnic_adapter *adapter = data;
struct qlcnic_mailbox *mbx;
- u32 mask, resp, event;
unsigned long flags;
mbx = adapter->ahw->mailbox;
@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
goto out;
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
- else
- qlcnic_83xx_notify_mbx_response(mbx);
+ } else {
+ if (mbx->rsp_status != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ else
+ adapter->stats.mbx_spurious_intr++;
+ }
out:
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
@@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
struct qlcnic_adapter *adapter = mbx->adapter;
const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
struct device *dev = &adapter->pdev->dev;
- atomic_t *rsp_status = &mbx->rsp_status;
struct list_head *head = &mbx->cmd_q;
struct qlcnic_hardware_context *ahw;
struct qlcnic_cmd_args *cmd = NULL;
+ unsigned long flags;
ahw = adapter->ahw;
@@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
return;
}
- atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
spin_lock(&mbx->queue_lock);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 494e8105adee..0a2318cad34d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_OFF(stats.mac_filter_limit_overrun)},
{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
QLC_OFF(stats.spurious_intr)},
-
+ {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
+ QLC_OFF(stats.mbx_spurious_intr)},
};
static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 997976426799..b28e73ea2c25 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
return;
}
skb_reserve(new_skb, NET_IP_ALIGN);
+
+ pci_dma_sync_single_for_cpu(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+
memcpy(skb_put(new_skb, length), skb->data, length);
+
+ pci_dma_sync_single_for_device(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
skb = new_skb;
/* Frame error, so drop the packet. */
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 689a4a5c8dcf..1ef03939d25f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
dev->netdev_ops = &qcaspi_netdev_ops;
qcaspi_set_ethtool_ops(dev);
dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
- dev->flags = IFF_MULTICAST;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->tx_queue_len = 100;
qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6a8fc0f341ff..36fc9427418f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1185,11 +1185,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
break;
sh_eth_set_receive_align(skb);
- /* RX descriptor */
- rxdesc = &mdp->rx_ring[i];
/* The size of the buffer is a multiple of 32 bytes. */
buf_len = ALIGN(mdp->rx_buf_sz, 32);
- rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr)) {
@@ -1197,6 +1194,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
break;
}
mdp->rx_skbuff[i] = skb;
+
+ /* RX descriptor */
+ rxdesc = &mdp->rx_ring[i];
+ rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
@@ -1212,7 +1213,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
- rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
+ if (rxdesc)
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
memset(mdp->tx_ring, 0, tx_ringsize);
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 52ec3d6e056a..2b34622a4bfe 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -239,6 +239,7 @@ struct rocker {
struct {
u64 id;
} hw;
+ unsigned long ageing_time;
spinlock_t cmd_ring_lock; /* for cmd ring accesses */
struct rocker_dma_ring_info cmd_ring;
struct rocker_dma_ring_info event_ring;
@@ -3704,7 +3705,7 @@ static void rocker_fdb_cleanup(unsigned long data)
struct rocker_port *rocker_port;
struct rocker_fdb_tbl_entry *entry;
struct hlist_node *tmp;
- unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
+ unsigned long next_timer = jiffies + rocker->ageing_time;
unsigned long expires;
unsigned long lock_flags;
int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
@@ -4367,8 +4368,12 @@ static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
struct switchdev_trans *trans,
u32 ageing_time)
{
+ struct rocker *rocker = rocker_port->rocker;
+
if (!switchdev_trans_ph_prepare(trans)) {
rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
+ if (rocker_port->ageing_time < rocker->ageing_time)
+ rocker->ageing_time = rocker_port->ageing_time;
mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
}
@@ -5206,10 +5211,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_tbls;
}
+ rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
(unsigned long) rocker);
mod_timer(&rocker->fdb_cleanup_timer, jiffies);
+ rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
+
err = rocker_probe_ports(rocker);
if (err) {
dev_err(&pdev->dev, "failed to probe ports\n");
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 696852eb23c3..7a3f990c1935 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
/* Module stuff handled via irda_ldisc.owner - Jean II */
- /* First make sure we're not already connected. */
- if (tty->disc_data != NULL) {
- priv = tty->disc_data;
- if (priv && priv->magic == IRTTY_MAGIC) {
- ret = -EEXIST;
- goto out;
- }
- tty->disc_data = NULL; /* ### */
- }
-
/* stop the underlying driver */
irtty_stop_receiver(tty, TRUE);
if (tty->ops->stop)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0fc521941c71..159a68782bec 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -760,6 +760,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
if (copylen > good_linear)
copylen = good_linear;
+ else if (copylen < ETH_HLEN)
+ copylen = ETH_HLEN;
linear = copylen;
i = *from;
iov_iter_advance(&i, copylen);
@@ -769,10 +771,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (!zerocopy) {
copylen = len;
- if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
+ linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
+ if (linear > good_linear)
linear = good_linear;
- else
- linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
+ else if (linear < ETH_HLEN)
+ linear = ETH_HLEN;
}
skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
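
The macvtap hunks above bound the linear part of the allocation from both sides: never larger than good_linear, never smaller than an Ethernet header. A sketch of that clamp, using the standard ETH_HLEN value of 14 from <linux/if_ether.h>:

#include <stdio.h>

#define ETH_HLEN 14

static unsigned int clamp_linear(unsigned int requested,
				 unsigned int good_linear)
{
	if (requested > good_linear)
		return good_linear;
	if (requested < ETH_HLEN)
		return ETH_HLEN;
	return requested;
}

int main(void)
{
	printf("%u %u %u\n",
	       clamp_linear(0, 65536),	/* too small -> 14    */
	       clamp_linear(200, 65536),	/* in range  -> 200   */
	       clamp_linear(200000, 65536));	/* too big   -> 65536 */
	return 0;
}
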
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9a863c6a6a33..174e06ec7c2f 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -567,7 +567,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct ppp_file *pf = file->private_data;
+ struct ppp_file *pf;
struct ppp *ppp;
int err = -EFAULT, val, val2, i;
struct ppp_idle idle;
@@ -577,9 +577,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
void __user *argp = (void __user *)arg;
int __user *p = argp;
- if (!pf)
- return ppp_unattached_ioctl(current->nsproxy->net_ns,
- pf, file, cmd, arg);
+ mutex_lock(&ppp_mutex);
+
+ pf = file->private_data;
+ if (!pf) {
+ err = ppp_unattached_ioctl(current->nsproxy->net_ns,
+ pf, file, cmd, arg);
+ goto out;
+ }
if (cmd == PPPIOCDETACH) {
/*
@@ -594,7 +599,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* this fd and reopening /dev/ppp.
*/
err = -EINVAL;
- mutex_lock(&ppp_mutex);
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
rtnl_lock();
@@ -608,15 +612,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else
pr_warn("PPPIOCDETACH file->f_count=%ld\n",
atomic_long_read(&file->f_count));
- mutex_unlock(&ppp_mutex);
- return err;
+ goto out;
}
if (pf->kind == CHANNEL) {
struct channel *pch;
struct ppp_channel *chan;
- mutex_lock(&ppp_mutex);
pch = PF_TO_CHANNEL(pf);
switch (cmd) {
@@ -638,17 +640,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = chan->ops->ioctl(chan, cmd, arg);
up_read(&pch->chan_sem);
}
- mutex_unlock(&ppp_mutex);
- return err;
+ goto out;
}
if (pf->kind != INTERFACE) {
/* can't happen */
pr_err("PPP: not interface or channel??\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
- mutex_lock(&ppp_mutex);
ppp = PF_TO_PPP(pf);
switch (cmd) {
case PPPIOCSMRU:
@@ -823,7 +824,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
default:
err = -ENOTTY;
}
+
+out:
mutex_unlock(&ppp_mutex);
+
return err;
}
@@ -836,7 +840,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct ppp_net *pn;
int __user *p = (int __user *)arg;
- mutex_lock(&ppp_mutex);
switch (cmd) {
case PPPIOCNEWUNIT:
/* Create a new ppp unit */
@@ -886,7 +889,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
default:
err = -ENOTTY;
}
- mutex_unlock(&ppp_mutex);
+
return err;
}
@@ -2290,7 +2293,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
pch->ppp = NULL;
pch->chan = chan;
- pch->chan_net = net;
+ pch->chan_net = get_net(net);
chan->ppp = pch;
init_ppp_file(&pch->file, CHANNEL);
pch->file.hdrlen = chan->hdrlen;
@@ -2387,6 +2390,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
@@ -2803,6 +2808,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
out2:
mutex_unlock(&pn->all_ppp_mutex);
+ rtnl_unlock();
free_netdev(dev);
out1:
*retp = ret;
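
The ppp_generic rework above moves to a single-exit locking pattern: ppp_mutex is taken once at the top of the ioctl, file->private_data is only read under the lock, and every path leaves through one "out" label that drops the lock. A userspace sketch of that shape, with a pthread mutex standing in for the kernel mutex and hypothetical per-file state:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int private_data;		/* hypothetical per-file state */

static long do_ioctl(unsigned int cmd)
{
	long err;

	pthread_mutex_lock(&lock);

	if (!private_data) {		/* "unattached" path */
		err = -ENXIO;
		goto out;
	}

	switch (cmd) {
	case 1:
		err = 0;
		break;
	default:
		err = -ENOTTY;
	}
out:
	pthread_mutex_unlock(&lock);	/* single unlock for all paths */
	return err;
}

int main(void)
{
	private_data = 1;
	printf("cmd 1: %ld, cmd 99: %ld\n", do_ioctl(1), do_ioctl(99));
	return 0;
}
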
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 01f08a7751f7..e7034c55e796 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -280,7 +280,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
struct net_device *ndev = dev_id;
struct rionet_private *rnet = netdev_priv(ndev);
- spin_lock(&rnet->lock);
+ spin_lock(&rnet->tx_lock);
if (netif_msg_intr(rnet))
printk(KERN_INFO
@@ -299,7 +299,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
netif_wake_queue(ndev);
- spin_unlock(&rnet->lock);
+ spin_unlock(&rnet->tx_lock);
}
static int rionet_open(struct net_device *ndev)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6d398f13e1e6..4b15d9ee5a54 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -621,7 +621,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
/* Re-attach the filter to persist device */
if (!skip_filter && (tun->filter_attached == true)) {
- err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+ lockdep_rtnl_is_held());
if (!err)
goto out;
}
@@ -1000,7 +1001,6 @@ static void tun_net_init(struct net_device *dev)
/* Zero header length */
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
- dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
case IFF_TAP:
@@ -1012,7 +1012,6 @@ static void tun_net_init(struct net_device *dev)
eth_hw_addr_random(dev);
- dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
}
}
@@ -1467,6 +1466,8 @@ static void tun_setup(struct net_device *dev)
dev->ethtool_ops = &tun_ethtool_ops;
dev->destructor = tun_free_netdev;
+ /* We prefer our own queue length */
+ dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
@@ -1808,7 +1809,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- sk_detach_filter(tfile->socket.sk);
+ __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
}
tun->filter_attached = false;
@@ -1821,7 +1822,8 @@ static int tun_attach_filter(struct tun_struct *tun)
for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+ lockdep_rtnl_is_held());
if (ret) {
tun_detach_filter(tun, i);
return ret;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3da70bf9936a..7cba2c3759df 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -160,6 +160,12 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
info->u = header.usb_cdc_union_desc;
info->header = header.usb_cdc_header_desc;
info->ether = header.usb_cdc_ether_desc;
+ if (!info->u) {
+ if (rndis)
+ goto skip;
+ else /* in that case a quirk is mandatory */
+ goto bad_desc;
+ }
/* we need a master/control interface (what we're
* probed with) and a slave/data interface; union
* descriptors sort this all out.
@@ -256,7 +262,7 @@ skip:
goto bad_desc;
}
- } else if (!info->header || !info->u || (!rndis && !info->ether)) {
+ } else if (!info->header || (!rndis && !info->ether)) {
dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n",
info->header ? "" : "header ",
info->u ? "" : "union ",
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e8a1144c5a8b..8c2bb77db049 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -794,7 +794,11 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
- /* reset data interface */
+ /* Reset data interface. Some devices will not reset properly
+ * unless they are configured first. Toggle the altsetting to
+ * force a reset
+ */
+ usb_set_interface(dev->udev, iface_no, data_altsetting);
temp = usb_set_interface(dev->udev, iface_no, 0);
if (temp) {
dev_dbg(&intf->dev, "set interface failed\n");
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 982e0acd1a36..a34f491224c1 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -699,6 +699,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -718,8 +719,10 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 0744bf2ef2d6..c2ea4e5666fb 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1766,6 +1766,13 @@ out3:
if (info->unbind)
info->unbind (dev, udev);
out1:
+ /* subdrivers must undo all they did in bind() if they
+ * fail it, but we may fail later and a deferred kevent
+ * may trigger an error resubmitting itself and, worse,
+ * schedule a timer. So we kill it all just in case.
+ */
+ cancel_work_sync(&dev->kevent);
+ del_timer_sync(&dev->delay);
free_netdev(net);
out:
return status;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 0a242b200df4..903bda437839 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -114,20 +114,23 @@ static struct dst_ops vrf_dst_ops = {
#if IS_ENABLED(CONFIG_IPV6)
static bool check_ipv6_frame(const struct sk_buff *skb)
{
- const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
- size_t hlen = sizeof(*ipv6h);
+ const struct ipv6hdr *ipv6h;
+ struct ipv6hdr _ipv6h;
bool rc = true;
- if (skb->len < hlen)
+ ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
+ if (!ipv6h)
goto out;
if (ipv6h->nexthdr == NEXTHDR_ICMP) {
const struct icmp6hdr *icmph;
+ struct icmp6hdr _icmph;
- if (skb->len < hlen + sizeof(*icmph))
+ icmph = skb_header_pointer(skb, sizeof(_ipv6h),
+ sizeof(_icmph), &_icmph);
+ if (!icmph)
goto out;
- icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
switch (icmph->icmp6_type) {
case NDISC_ROUTER_SOLICITATION:
case NDISC_ROUTER_ADVERTISEMENT:
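
The vrf change above replaces raw pointer casts into skb->data with skb_header_pointer(), which refuses to return a header that would run past the end of the packet. A userspace sketch of that access pattern, where the buffer/length pair and header_pointer() are hypothetical stand-ins for the sk_buff and its helper:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static const void *header_pointer(const void *pkt, size_t pkt_len,
				  size_t offset, size_t len, void *copy)
{
	/* NULL when the requested span is not fully inside the packet */
	if (offset + len > pkt_len)
		return NULL;
	memcpy(copy, (const char *)pkt + offset, len);
	return copy;
}

struct fake_ipv6hdr { unsigned char data[40]; };	/* hypothetical */

int main(void)
{
	unsigned char runt[8] = { 0 };
	struct fake_ipv6hdr hdr;

	if (!header_pointer(runt, sizeof(runt), 0, sizeof(hdr), &hdr))
		printf("runt packet rejected\n");
	return 0;
}
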
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e0fcda4ddd55..3c0df70e2f53 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1306,8 +1306,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
gbp = (struct vxlanhdr_gbp *)vxh;
md->gbp = ntohs(gbp->policy_id);
- if (tun_dst)
+ if (tun_dst) {
tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
+ tun_dst->u.tun_info.options_len = sizeof(*md);
+ }
if (gbp->dont_learn)
md->gbp |= VXLAN_GBP_DONT_LEARN;
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 44541dbc5c28..69b994f3b8c5 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->mem_start = card->phys_mem
+ BUF_OFFSET ( txBuffer[i][0][0]);
dev->mem_end = card->phys_mem
- + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
+ + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
dev->base_addr = card->pci_conf;
dev->irq = card->irq;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index cc81482c934d..113a43fca9cf 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -403,10 +403,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
if (match) {
if (AR_SREV_9287(ah)) {
- /* FIXME: array overrun? */
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
- maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
+ maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_9287[idxL].pwrPdg[i],
data_9287[idxL].vpdPdg[i],
@@ -416,7 +415,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
} else if (eeprom_4k) {
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
- maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
+ maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_4k[idxL].pwrPdg[i],
data_4k[idxL].vpdPdg[i],
@@ -426,7 +425,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
} else {
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
- maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
+ maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_def[idxL].pwrPdg[i],
data_def[idxL].vpdPdg[i],
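
The ath9k hunks above address the last calibration intercept as intercepts - 1 instead of a hard-coded 4, so the code stays correct if the per-gain intercept count differs between EEPROM layouts. A tiny sketch of indexing from the array bound rather than a magic number:

#include <stdio.h>

int main(void)
{
	int pwr[5] = { 1, 2, 3, 4, 5 };
	int intercepts = sizeof(pwr) / sizeof(pwr[0]);

	/* min is the first intercept, max is the last one */
	printf("min %d max %d\n", pwr[0], pwr[intercepts - 1]);
	return 0;
}
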
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index d906fa13ba97..610c442c7ab2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -106,7 +106,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
sizeof(tx_ant_cmd), &tx_ant_cmd);
}
-static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
int i;
@@ -126,6 +126,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm)
get_order(mvm->fw_paging_db[i].fw_paging_size));
}
kfree(mvm->trans->paging_download_buf);
+ mvm->trans->paging_download_buf = NULL;
+
memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 4bde2d027dcd..244e26c26821 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1190,6 +1190,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+/* Paging */
+void iwl_free_fw_paging(struct iwl_mvm *mvm);
+
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 13c97f665ba8..c3adf2bcdc85 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -645,6 +645,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ iwl_free_fw_paging(mvm);
+
iwl_mvm_tof_clean(mvm);
ieee80211_free_hw(mvm->hw);
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 918f8c82acdd..9b962a63c3d8 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -107,36 +107,14 @@ static void nqx_disable_irq(struct nqx_dev *nqx_dev)
spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
}
-static void nqx_enable_irq(struct nqx_dev *nqx_dev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
- if (!nqx_dev->irq_enabled) {
- nqx_dev->irq_enabled = true;
- enable_irq(nqx_dev->client->irq);
- }
- spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
-}
-
static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
{
struct nqx_dev *nqx_dev = dev_id;
unsigned long flags;
- int ret;
if (device_may_wakeup(&nqx_dev->client->dev))
pm_wakeup_event(&nqx_dev->client->dev, WAKEUP_SRC_TIMEOUT);
- ret = gpio_get_value(nqx_dev->irq_gpio);
- if (!ret) {
-#ifdef NFC_KERNEL_BU
- dev_info(&nqx_dev->client->dev,
- "nqx nfc : nqx_dev_irq_handler error = %d\n", ret);
-#endif
- return IRQ_HANDLED;
- }
-
nqx_disable_irq(nqx_dev);
spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
nqx_dev->count_irq++;
@@ -175,15 +153,24 @@ static ssize_t nfc_read(struct file *filp, char __user *buf,
ret = -EAGAIN;
goto err;
}
- if (!nqx_dev->irq_enabled) {
- enable_irq(nqx_dev->client->irq);
- nqx_dev->irq_enabled = true;
+ while (1) {
+ ret = 0;
+ if (!nqx_dev->irq_enabled) {
+ nqx_dev->irq_enabled = true;
+ enable_irq(nqx_dev->client->irq);
+ }
+ if (!gpio_get_value(nqx_dev->irq_gpio)) {
+ ret = wait_event_interruptible(nqx_dev->read_wq,
+ !nqx_dev->irq_enabled);
+ }
+ if (ret)
+ goto err;
+ nqx_disable_irq(nqx_dev);
+
+ if (gpio_get_value(nqx_dev->irq_gpio))
+ break;
+ dev_err_ratelimited(&nqx_dev->client->dev, "gpio is low, no need to read data\n");
}
- ret = wait_event_interruptible(nqx_dev->read_wq,
- gpio_get_value(nqx_dev->irq_gpio));
- if (ret)
- goto err;
- nqx_disable_irq(nqx_dev);
}
tmp = nqx_dev->kbuf;
@@ -393,7 +380,6 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
/* hardware dependent delay */
msleep(100);
} else if (arg == 1) {
- nqx_enable_irq(nqx_dev);
dev_dbg(&nqx_dev->client->dev,
"gpio_set_value enable: %s: info: %p\n",
__func__, nqx_dev);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7e2c43f701bc..5f47356d6942 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -335,7 +335,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
[ND_CMD_IMPLEMENTED] = { },
[ND_CMD_SMART] = {
.out_num = 2,
- .out_sizes = { 4, 8, },
+ .out_sizes = { 4, 128, },
},
[ND_CMD_SMART_THRESHOLD] = {
.out_num = 2,
@@ -513,10 +513,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
/* fail write commands (when read-only) */
if (read_only)
- switch (ioctl_cmd) {
- case ND_IOCTL_VENDOR:
- case ND_IOCTL_SET_CONFIG_DATA:
- case ND_IOCTL_ARS_START:
+ switch (cmd) {
+ case ND_CMD_VENDOR:
+ case ND_CMD_SET_CONFIG_DATA:
+ case ND_CMD_ARS_START:
dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 71805a1aa0f3..9d3974591cd6 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -275,7 +275,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
} else {
/* from init we validate */
if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
- return -EINVAL;
+ return -ENODEV;
}
/*
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index edb1984201e9..7aafb5fb9336 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -179,6 +179,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
u16 orig_cmd;
struct pci_bus_region region, inverted_region;
+ if (dev->non_compliant_bars)
+ return 0;
+
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
/* No printks while decoding is disabled! */
@@ -1174,6 +1177,7 @@ void pci_msi_setup_pci_dev(struct pci_dev *dev)
int pci_setup_device(struct pci_dev *dev)
{
u32 class;
+ u16 cmd;
u8 hdr_type;
int pos = 0;
struct pci_bus_region region;
@@ -1219,6 +1223,16 @@ int pci_setup_device(struct pci_dev *dev)
/* device class may be changed after fixup */
class = dev->class >> 8;
+ if (dev->non_compliant_bars) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+ dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
+ cmd &= ~PCI_COMMAND_IO;
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ }
+
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
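
The pci/probe.c hunk above disables I/O and memory decoding for devices flagged with non-compliant BARs by clearing the corresponding command-register bits. A sketch of that masking, using the standard PCI command bit values (bit 0 = I/O space enable, bit 1 = memory space enable) and a plain variable in place of the config-space read/write:

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND_IO		0x1
#define PCI_COMMAND_MEMORY	0x2

int main(void)
{
	uint16_t cmd = 0x0107;	/* hypothetical value with IO+MEM set */

	if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
		cmd &= ~PCI_COMMAND_IO;
		cmd &= ~PCI_COMMAND_MEMORY;
	}
	printf("command after masking: 0x%04x\n", cmd);
	return 0;
}
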
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 4c2fa05b4589..944674ee3464 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
int stschg_irq; /* card-status-change irq */
int card_irq; /* card irq */
int eject_irq; /* db1200/pb1200 have these */
+ int insert_gpio; /* db1000 carddetect gpio */
#define BOARD_TYPE_DEFAULT 0 /* most boards */
#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
/* carddetect gpio: low-active */
static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
{
- return !gpio_get_value(irq_to_gpio(sock->insert_irq));
+ return !gpio_get_value(sock->insert_gpio);
}
static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
sock->card_irq = r ? r->start : 0;
- /* insert: irq which triggers on card insertion/ejection */
+ /* insert: irq which triggers on card insertion/ejection
+ * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
+ */
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
sock->insert_irq = r ? r->start : -1;
+ if (sock->board_type == BOARD_TYPE_DEFAULT) {
+ sock->insert_gpio = r ? r->start : -1;
+ sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
+ }
/* stschg: irq which trigger on card status change (optional) */
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 2e6ca69635aa..17dd8fe12b54 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -779,7 +779,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
}
if (num_pulls) {
err = of_property_read_u32_index(np, "brcm,pull",
- (num_funcs > 1) ? i : 0, &pull);
+ (num_pulls > 1) ? i : 0, &pull);
if (err)
goto out;
err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index a5bb93987378..1029aa7889b5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -726,19 +726,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
if (of_property_read_bool(dev_np, "fsl,input-sel")) {
np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
- if (np) {
- ipctl->input_sel_base = of_iomap(np, 0);
- if (IS_ERR(ipctl->input_sel_base)) {
- of_node_put(np);
- dev_err(&pdev->dev,
- "iomuxc input select base address not found\n");
- return PTR_ERR(ipctl->input_sel_base);
- }
- } else {
+ if (!np) {
dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
return -EINVAL;
}
+
+ ipctl->input_sel_base = of_iomap(np, 0);
of_node_put(np);
+ if (!ipctl->input_sel_base) {
+ dev_err(&pdev->dev,
+ "iomuxc input select base address not found\n");
+ return -ENOMEM;
+ }
}
imx_pinctrl_desc.name = dev_name(&pdev->dev);
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index eebfae0c9b7c..f844b4ae7f79 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -995,7 +995,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
int val;
if (pull)
- pullidx = data_out ? 1 : 2;
+ pullidx = data_out ? 2 : 1;
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
gpio,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 85c9046c690e..6b1a47f8c096 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
"mfio83",
};
-static const char * const pistachio_sys_pll_lock_groups[] = {
+static const char * const pistachio_audio_pll_lock_groups[] = {
"mfio84",
};
-static const char * const pistachio_wifi_pll_lock_groups[] = {
+static const char * const pistachio_rpu_v_pll_lock_groups[] = {
"mfio85",
};
-static const char * const pistachio_bt_pll_lock_groups[] = {
+static const char * const pistachio_rpu_l_pll_lock_groups[] = {
"mfio86",
};
-static const char * const pistachio_rpu_v_pll_lock_groups[] = {
+static const char * const pistachio_sys_pll_lock_groups[] = {
"mfio87",
};
-static const char * const pistachio_rpu_l_pll_lock_groups[] = {
+static const char * const pistachio_wifi_pll_lock_groups[] = {
"mfio88",
};
-static const char * const pistachio_audio_pll_lock_groups[] = {
+static const char * const pistachio_bt_pll_lock_groups[] = {
"mfio89",
};
@@ -559,12 +559,12 @@ enum pistachio_mux_option {
PISTACHIO_FUNCTION_DREQ4,
PISTACHIO_FUNCTION_DREQ5,
PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
+ PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
+ PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
+ PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
PISTACHIO_FUNCTION_SYS_PLL_LOCK,
PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
PISTACHIO_FUNCTION_BT_PLL_LOCK,
- PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
- PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
- PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
FUNCTION(dreq4),
FUNCTION(dreq5),
FUNCTION(mips_pll_lock),
+ FUNCTION(audio_pll_lock),
+ FUNCTION(rpu_v_pll_lock),
+ FUNCTION(rpu_l_pll_lock),
FUNCTION(sys_pll_lock),
FUNCTION(wifi_pll_lock),
FUNCTION(bt_pll_lock),
- FUNCTION(rpu_v_pll_lock),
- FUNCTION(rpu_l_pll_lock),
- FUNCTION(audio_pll_lock),
FUNCTION(debug_raw_cca_ind),
FUNCTION(debug_ed_sec20_cca_ind),
FUNCTION(debug_ed_sec40_cca_ind),
diff --git a/drivers/pinctrl/qcom/pinctrl-msmfalcon.c b/drivers/pinctrl/qcom/pinctrl-msmfalcon.c
index 14abb75fffe0..45db409eb7c1 100644
--- a/drivers/pinctrl/qcom/pinctrl-msmfalcon.c
+++ b/drivers/pinctrl/qcom/pinctrl-msmfalcon.c
@@ -25,9 +25,9 @@
.ngroups = ARRAY_SIZE(fname##_groups), \
}
-#define SOUTH 0x00500000
-#define WEST 0x00100000
-#define EAST 0x00900000
+#define NORTH 0x00900000
+#define CENTER 0x00500000
+#define SOUTH 0x00100000
#define REG_SIZE 0x1000
#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
{ \
@@ -204,9 +204,16 @@ static const struct pinctrl_pin_desc msmfalcon_pins[] = {
PINCTRL_PIN(108, "GPIO_108"),
PINCTRL_PIN(109, "GPIO_109"),
PINCTRL_PIN(110, "GPIO_110"),
- PINCTRL_PIN(111, "SDC2_CLK"),
- PINCTRL_PIN(112, "SDC2_CMD"),
- PINCTRL_PIN(113, "SDC2_DATA"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "SDC1_CLK"),
+ PINCTRL_PIN(115, "SDC1_CMD"),
+ PINCTRL_PIN(116, "SDC1_DATA"),
+ PINCTRL_PIN(117, "SDC2_CLK"),
+ PINCTRL_PIN(118, "SDC2_CMD"),
+ PINCTRL_PIN(119, "SDC2_DATA"),
+ PINCTRL_PIN(120, "SDC1_RCLK"),
};
#define DECLARE_MSM_GPIO_PINS(pin) \
@@ -322,88 +329,101 @@ DECLARE_MSM_GPIO_PINS(107);
DECLARE_MSM_GPIO_PINS(108);
DECLARE_MSM_GPIO_PINS(109);
DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
-static const unsigned int sdc2_clk_pins[] = { 111 };
-static const unsigned int sdc2_cmd_pins[] = { 112 };
-static const unsigned int sdc2_data_pins[] = { 113 };
+static const unsigned int sdc1_clk_pins[] = { 114 };
+static const unsigned int sdc1_cmd_pins[] = { 115 };
+static const unsigned int sdc1_data_pins[] = { 116 };
+static const unsigned int sdc2_clk_pins[] = { 117 };
+static const unsigned int sdc2_cmd_pins[] = { 118 };
+static const unsigned int sdc2_data_pins[] = { 119 };
+static const unsigned int sdc1_rclk_pins[] = { 120 };
enum msmfalcon_functions {
msm_mux_blsp_spi1,
msm_mux_gpio,
+ msm_mux_blsp_uim1,
msm_mux_tgu_ch0,
- msm_mux_tgu_ch1,
+ msm_mux_qdss_gpio4,
+ msm_mux_atest_gpsadc1,
msm_mux_blsp_uart1,
+ msm_mux_SMB_STAT,
+ msm_mux_phase_flag14,
+ msm_mux_blsp_i2c2,
+ msm_mux_phase_flag31,
msm_mux_blsp_spi3,
msm_mux_wlan1_adc1,
msm_mux_atest_usb13,
- msm_mux_bimc_dte1,
- msm_mux_wlan1_adc0,
- msm_mux_atest_usb12,
- msm_mux_bimc_dte0,
+ msm_mux_tgu_ch1,
+ msm_mux_qdss_gpio5,
+ msm_mux_atest_gpsadc0,
msm_mux_blsp_i2c1,
- msm_mux_blsp_uim1,
msm_mux_ddr_bist,
msm_mux_atest_tsens2,
msm_mux_atest_usb1,
msm_mux_blsp_spi2,
- msm_mux_phase_flag3,
- msm_mux_phase_flag14,
- msm_mux_blsp_i2c2,
msm_mux_blsp_uim2,
- msm_mux_phase_flag31,
+ msm_mux_phase_flag3,
+ msm_mux_bimc_dte1,
+ msm_mux_wlan1_adc0,
+ msm_mux_atest_usb12,
+ msm_mux_bimc_dte0,
msm_mux_blsp_i2c3,
- msm_mux_atest_gpsadc1,
msm_mux_wlan2_adc1,
msm_mux_atest_usb11,
msm_mux_dbg_out,
- msm_mux_atest_gpsadc0,
msm_mux_wlan2_adc0,
msm_mux_atest_usb10,
+ msm_mux_RCM_MARKER,
msm_mux_blsp_spi4,
msm_mux_pri_mi2s,
msm_mux_phase_flag26,
- msm_mux_qdss_gpio4,
+ msm_mux_qdss_cti,
+ msm_mux_DP_HOT,
msm_mux_pri_mi2s_ws,
msm_mux_phase_flag27,
- msm_mux_qdss_gpio5,
msm_mux_blsp_i2c4,
msm_mux_phase_flag28,
msm_mux_blsp_uart5,
msm_mux_blsp_spi5,
+ msm_mux_blsp_uim5,
msm_mux_phase_flag5,
msm_mux_blsp_i2c5,
- msm_mux_blsp_uim5,
msm_mux_blsp_spi6,
msm_mux_blsp_uart2,
- msm_mux_qdss_cti,
- msm_mux_sec_mi2s,
- msm_mux_sndwire_clk,
- msm_mux_phase_flag17,
- msm_mux_vsense_clkout,
- msm_mux_sndwire_data,
- msm_mux_phase_flag18,
- msm_mux_blsp_i2c7,
- msm_mux_wsa_en1,
- msm_mux_phase_flag19,
+ msm_mux_blsp_uim6,
msm_mux_phase_flag11,
msm_mux_vsense_data0,
msm_mux_blsp_i2c6,
- msm_mux_blsp_uim6,
msm_mux_phase_flag12,
msm_mux_vsense_data1,
msm_mux_phase_flag13,
msm_mux_vsense_mode,
msm_mux_blsp_spi7,
msm_mux_BLSP_UART,
+ msm_mux_sec_mi2s,
+ msm_mux_sndwire_clk,
+ msm_mux_phase_flag17,
+ msm_mux_vsense_clkout,
+ msm_mux_sndwire_data,
+ msm_mux_phase_flag18,
+ msm_mux_WSA_SPKR,
+ msm_mux_blsp_i2c7,
+ msm_mux_phase_flag19,
msm_mux_vfr_1,
- msm_mux_wsa_en2,
msm_mux_phase_flag20,
+ msm_mux_NFC_INT,
msm_mux_blsp_spi,
msm_mux_m_voc,
msm_mux_phase_flag21,
+ msm_mux_NFC_EN,
msm_mux_phase_flag22,
+ msm_mux_NFC_DWL,
msm_mux_BLSP_I2C,
msm_mux_phase_flag23,
+ msm_mux_NFC_ESE,
msm_mux_pwr_modem,
msm_mux_phase_flag24,
msm_mux_qdss_gpio,
@@ -419,88 +439,92 @@ enum msmfalcon_functions {
msm_mux_qspi_data2,
msm_mux_jitter_bist,
msm_mux_qdss_gpio3,
+ msm_mux_qdss_gpio7,
+ msm_mux_FL_R3LED,
+ msm_mux_CCI_TIMER0,
+ msm_mux_FL_STROBE,
+ msm_mux_CCI_TIMER1,
+ msm_mux_CAM_LDO1,
+ msm_mux_mdss_vsync0,
+ msm_mux_mdss_vsync1,
+ msm_mux_mdss_vsync2,
+ msm_mux_mdss_vsync3,
+ msm_mux_qdss_gpio9,
+ msm_mux_CAM_IRQ,
+ msm_mux_atest_usb2,
msm_mux_cci_i2c,
msm_mux_pll_bypassnl,
msm_mux_atest_tsens,
+ msm_mux_atest_usb21,
msm_mux_pll_reset,
- msm_mux_qdss_gpio9,
- msm_mux_CAM_IRQ,
+ msm_mux_atest_usb23,
+ msm_mux_qdss_gpio6,
msm_mux_CCI_TIMER3,
msm_mux_CCI_ASYNC,
msm_mux_qspi_cs,
msm_mux_qdss_gpio10,
- msm_mux_CAM4_STANDBY,
+ msm_mux_CAM3_STANDBY,
msm_mux_CCI_TIMER4,
msm_mux_qdss_gpio11,
- msm_mux_bt_reset,
+ msm_mux_CAM_LDO2,
msm_mux_cci_async,
msm_mux_qdss_gpio12,
- msm_mux_CAM1_RST,
- msm_mux_qdss_gpio6,
- msm_mux_qdss_gpio7,
- msm_mux_FL_FRONT,
- msm_mux_CCI_TIMER0,
- msm_mux_qdss_gpio8,
- msm_mux_FL_STROBE,
- msm_mux_CCI_TIMER1,
- msm_mux_LASER_CE,
- msm_mux_mdss_vsync0,
- msm_mux_mdss_vsync1,
- msm_mux_mdss_vsync2,
- msm_mux_mdss_vsync3,
+ msm_mux_CAM0_RST,
msm_mux_qdss_gpio13,
- msm_mux_CAM2_RST,
+ msm_mux_CAM1_RST,
msm_mux_qspi_clk,
msm_mux_phase_flag30,
msm_mux_qdss_gpio14,
- msm_mux_CAM3_RST,
msm_mux_qspi_resetn,
msm_mux_phase_flag1,
msm_mux_qdss_gpio15,
- msm_mux_CAM1_STANDBY,
+ msm_mux_CAM0_STANDBY,
msm_mux_phase_flag2,
- msm_mux_CAM2_STANDBY,
+ msm_mux_CAM1_STANDBY,
msm_mux_phase_flag9,
- msm_mux_CAM3_STANDBY,
+ msm_mux_CAM2_STANDBY,
msm_mux_qspi_data3,
msm_mux_phase_flag15,
- msm_mux_CAM4_RST,
+ msm_mux_qdss_gpio8,
+ msm_mux_CAM3_RST,
msm_mux_CCI_TIMER2,
msm_mux_phase_flag16,
+ msm_mux_LCD0_RESET,
msm_mux_phase_flag6,
- msm_mux_RCM_MARKER2,
+ msm_mux_SD_CARD,
msm_mux_phase_flag29,
- msm_mux_SS_SWITCH,
+ msm_mux_DP_EN,
msm_mux_phase_flag25,
+ msm_mux_USBC_ORIENTATION,
msm_mux_phase_flag10,
+ msm_mux_atest_usb20,
msm_mux_gcc_gp1,
msm_mux_phase_flag4,
- msm_mux_USB_DIR,
+ msm_mux_atest_usb22,
msm_mux_USB_PHY,
msm_mux_gcc_gp2,
msm_mux_atest_char,
msm_mux_mdp_vsync,
msm_mux_gcc_gp3,
msm_mux_atest_char3,
- msm_mux_Lcd_mode,
- msm_mux_EDP_HOT,
+ msm_mux_FORCE_TOUCH,
msm_mux_cri_trng0,
msm_mux_atest_char2,
msm_mux_cri_trng1,
msm_mux_atest_char1,
+ msm_mux_AUDIO_USBC,
msm_mux_audio_ref,
msm_mux_MDP_VSYNC,
msm_mux_cri_trng,
msm_mux_atest_char0,
msm_mux_US_EURO,
- msm_mux_KEY_FOCUS,
- msm_mux_NAV_PPS,
+ msm_mux_LCD_BACKLIGHT,
msm_mux_blsp_spi8,
msm_mux_sp_cmu,
- msm_mux_SLT_PWR,
+ msm_mux_NAV_PPS,
+ msm_mux_GPS_TX,
msm_mux_adsp_ext,
msm_mux_TS_RESET,
- msm_mux_TS_INT,
msm_mux_ssc_irq,
msm_mux_isense_dbg,
msm_mux_phase_flag0,
@@ -508,8 +532,10 @@ enum msmfalcon_functions {
msm_mux_phase_flag8,
msm_mux_tsense_pwm1,
msm_mux_tsense_pwm2,
+ msm_mux_SENSOR_RST,
+ msm_mux_WMSS_RESETN,
msm_mux_HAPTICS_PWM,
- msm_mux_wmss_reset,
+ msm_mux_GPS_eLNA,
msm_mux_mss_lte,
msm_mux_uim2_data,
msm_mux_uim2_clk,
@@ -521,12 +547,12 @@ enum msmfalcon_functions {
msm_mux_uim1_present,
msm_mux_uim_batt,
msm_mux_pa_indicator,
- msm_mux_ssbi_gnss,
msm_mux_ldo_en,
msm_mux_ldo_update,
msm_mux_qlink_request,
msm_mux_qlink_enable,
msm_mux_prng_rosc,
+ msm_mux_LCD_PWR,
msm_mux_NA,
};
@@ -534,27 +560,44 @@ static const char * const blsp_spi1_groups[] = {
"gpio0", "gpio1", "gpio2", "gpio3", "gpio46",
};
static const char * const gpio_groups[] = {
- "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
- "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
- "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
- "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
- "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
- "gpio36", "gpio37", "gpio38", "gpio39", "gpio53", "gpio57", "gpio59",
- "gpio61", "gpio62", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
- "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
- "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
- "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
- "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio6", "gpio7", "gpio8",
+ "gpio9", "gpio10", "gpio11", "gpio14", "gpio15", "gpio16", "gpio17",
+ "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", "gpio23", "gpio24",
+ "gpio25", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36", "gpio37",
+ "gpio38", "gpio39", "gpio57", "gpio58", "gpio59", "gpio61", "gpio65",
+ "gpio81", "gpio82", "gpio83", "gpio84", "gpio85", "gpio86", "gpio87",
+ "gpio88", "gpio89", "gpio90", "gpio91", "gpio92", "gpio93", "gpio94",
+ "gpio95", "gpio96", "gpio97", "gpio98", "gpio99", "gpio100", "gpio101",
+ "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113",
+};
+static const char * const blsp_uim1_groups[] = {
+ "gpio0", "gpio1",
};
static const char * const tgu_ch0_groups[] = {
"gpio0",
};
-static const char * const tgu_ch1_groups[] = {
- "gpio1",
+static const char * const qdss_gpio4_groups[] = {
+ "gpio0", "gpio36",
+};
+static const char * const atest_gpsadc1_groups[] = {
+ "gpio0",
};
static const char * const blsp_uart1_groups[] = {
"gpio0", "gpio1", "gpio2", "gpio3",
};
+static const char * const SMB_STAT_groups[] = {
+ "gpio5",
+};
+static const char * const phase_flag14_groups[] = {
+ "gpio5",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const phase_flag31_groups[] = {
+ "gpio6",
+};
static const char * const blsp_spi3_groups[] = {
"gpio8", "gpio9", "gpio10", "gpio11", "gpio30", "gpio65",
};
@@ -564,24 +607,18 @@ static const char * const wlan1_adc1_groups[] = {
static const char * const atest_usb13_groups[] = {
"gpio8",
};
-static const char * const bimc_dte1_groups[] = {
- "gpio8", "gpio10",
-};
-static const char * const wlan1_adc0_groups[] = {
- "gpio9",
+static const char * const tgu_ch1_groups[] = {
+ "gpio1",
};
-static const char * const atest_usb12_groups[] = {
- "gpio9",
+static const char * const qdss_gpio5_groups[] = {
+ "gpio1", "gpio37",
};
-static const char * const bimc_dte0_groups[] = {
- "gpio9", "gpio11",
+static const char * const atest_gpsadc0_groups[] = {
+ "gpio1",
};
static const char * const blsp_i2c1_groups[] = {
"gpio2", "gpio3",
};
-static const char * const blsp_uim1_groups[] = {
- "gpio2", "gpio3",
-};
static const char * const ddr_bist_groups[] = {
"gpio3", "gpio8", "gpio9", "gpio10",
};
@@ -594,27 +631,27 @@ static const char * const atest_usb1_groups[] = {
static const char * const blsp_spi2_groups[] = {
"gpio4", "gpio5", "gpio6", "gpio7",
};
+static const char * const blsp_uim2_groups[] = {
+ "gpio4", "gpio5",
+};
static const char * const phase_flag3_groups[] = {
"gpio4",
};
-static const char * const phase_flag14_groups[] = {
- "gpio5",
+static const char * const bimc_dte1_groups[] = {
+ "gpio8", "gpio10",
};
-static const char * const blsp_i2c2_groups[] = {
- "gpio6", "gpio7",
+static const char * const wlan1_adc0_groups[] = {
+ "gpio9",
};
-static const char * const blsp_uim2_groups[] = {
- "gpio6", "gpio7",
+static const char * const atest_usb12_groups[] = {
+ "gpio9",
};
-static const char * const phase_flag31_groups[] = {
- "gpio6",
+static const char * const bimc_dte0_groups[] = {
+ "gpio9", "gpio11",
};
static const char * const blsp_i2c3_groups[] = {
"gpio10", "gpio11",
};
-static const char * const atest_gpsadc1_groups[] = {
- "gpio10",
-};
static const char * const wlan2_adc1_groups[] = {
"gpio10",
};
@@ -624,15 +661,15 @@ static const char * const atest_usb11_groups[] = {
static const char * const dbg_out_groups[] = {
"gpio11",
};
-static const char * const atest_gpsadc0_groups[] = {
- "gpio11",
-};
static const char * const wlan2_adc0_groups[] = {
"gpio11",
};
static const char * const atest_usb10_groups[] = {
"gpio11",
};
+static const char * const RCM_MARKER_groups[] = {
+ "gpio12", "gpio13",
+};
static const char * const blsp_spi4_groups[] = {
"gpio12", "gpio13", "gpio14", "gpio15",
};
@@ -642,8 +679,12 @@ static const char * const pri_mi2s_groups[] = {
static const char * const phase_flag26_groups[] = {
"gpio12",
};
-static const char * const qdss_gpio4_groups[] = {
- "gpio12", "gpio36",
+static const char * const qdss_cti_groups[] = {
+ "gpio12", "gpio13", "gpio21", "gpio49", "gpio50", "gpio53", "gpio55",
+ "gpio66",
+};
+static const char * const DP_HOT_groups[] = {
+ "gpio13",
};
static const char * const pri_mi2s_ws_groups[] = {
"gpio13",
@@ -651,9 +692,6 @@ static const char * const pri_mi2s_ws_groups[] = {
static const char * const phase_flag27_groups[] = {
"gpio13",
};
-static const char * const qdss_gpio5_groups[] = {
- "gpio13", "gpio37",
-};
static const char * const blsp_i2c4_groups[] = {
"gpio14", "gpio15",
};
@@ -666,51 +704,23 @@ static const char * const blsp_uart5_groups[] = {
static const char * const blsp_spi5_groups[] = {
"gpio16", "gpio17", "gpio18", "gpio19",
};
+static const char * const blsp_uim5_groups[] = {
+ "gpio16", "gpio17",
+};
static const char * const phase_flag5_groups[] = {
"gpio17",
};
static const char * const blsp_i2c5_groups[] = {
"gpio18", "gpio19",
};
-static const char * const blsp_uim5_groups[] = {
- "gpio18", "gpio19",
-};
static const char * const blsp_spi6_groups[] = {
"gpio20", "gpio21", "gpio22", "gpio23",
};
static const char * const blsp_uart2_groups[] = {
"gpio20", "gpio21", "gpio22", "gpio23",
};
-static const char * const qdss_cti_groups[] = {
- "gpio20", "gpio21", "gpio24", "gpio25", "gpio26", "gpio49", "gpio50",
- "gpio61",
-};
-static const char * const sec_mi2s_groups[] = {
- "gpio24", "gpio25", "gpio26", "gpio27", "gpio62",
-};
-static const char * const sndwire_clk_groups[] = {
- "gpio24",
-};
-static const char * const phase_flag17_groups[] = {
- "gpio24",
-};
-static const char * const vsense_clkout_groups[] = {
- "gpio24",
-};
-static const char * const sndwire_data_groups[] = {
- "gpio25",
-};
-static const char * const phase_flag18_groups[] = {
- "gpio25",
-};
-static const char * const blsp_i2c7_groups[] = {
- "gpio26", "gpio27",
-};
-static const char * const wsa_en1_groups[] = {
- "gpio26",
-};
-static const char * const phase_flag19_groups[] = {
- "gpio26",
+static const char * const blsp_uim6_groups[] = {
+ "gpio20", "gpio21",
};
static const char * const phase_flag11_groups[] = {
"gpio21",
@@ -721,9 +731,6 @@ static const char * const vsense_data0_groups[] = {
static const char * const blsp_i2c6_groups[] = {
"gpio22", "gpio23",
};
-static const char * const blsp_uim6_groups[] = {
- "gpio22", "gpio23",
-};
static const char * const phase_flag12_groups[] = {
"gpio22",
};
@@ -743,15 +750,42 @@ static const char * const BLSP_UART_groups[] = {
"gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29", "gpio30",
"gpio31",
};
-static const char * const vfr_1_groups[] = {
- "gpio27",
+static const char * const sec_mi2s_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27", "gpio62",
+};
+static const char * const sndwire_clk_groups[] = {
+ "gpio24",
+};
+static const char * const phase_flag17_groups[] = {
+ "gpio24",
+};
+static const char * const vsense_clkout_groups[] = {
+ "gpio24",
+};
+static const char * const sndwire_data_groups[] = {
+ "gpio25",
+};
+static const char * const phase_flag18_groups[] = {
+ "gpio25",
+};
+static const char * const WSA_SPKR_groups[] = {
+ "gpio26", "gpio27",
};
-static const char * const wsa_en2_groups[] = {
+static const char * const blsp_i2c7_groups[] = {
+ "gpio26", "gpio27",
+};
+static const char * const phase_flag19_groups[] = {
+ "gpio26",
+};
+static const char * const vfr_1_groups[] = {
"gpio27",
};
static const char * const phase_flag20_groups[] = {
"gpio27",
};
+static const char * const NFC_INT_groups[] = {
+ "gpio28",
+};
static const char * const blsp_spi_groups[] = {
"gpio28", "gpio29", "gpio30", "gpio31", "gpio40", "gpio41", "gpio44",
"gpio52",
@@ -762,15 +796,24 @@ static const char * const m_voc_groups[] = {
static const char * const phase_flag21_groups[] = {
"gpio28",
};
+static const char * const NFC_EN_groups[] = {
+ "gpio29",
+};
static const char * const phase_flag22_groups[] = {
"gpio29",
};
+static const char * const NFC_DWL_groups[] = {
+ "gpio30",
+};
static const char * const BLSP_I2C_groups[] = {
"gpio30", "gpio31", "gpio44", "gpio52",
};
static const char * const phase_flag23_groups[] = {
"gpio30",
};
+static const char * const NFC_ESE_groups[] = {
+ "gpio31",
+};
static const char * const pwr_modem_groups[] = {
"gpio31",
};
@@ -778,7 +821,7 @@ static const char * const phase_flag24_groups[] = {
"gpio31",
};
static const char * const qdss_gpio_groups[] = {
- "gpio31", "gpio41", "gpio68", "gpio69",
+ "gpio31", "gpio52", "gpio68", "gpio69",
};
static const char * const cam_mclk_groups[] = {
"gpio32", "gpio33", "gpio34", "gpio35",
@@ -787,7 +830,7 @@ static const char * const pwr_nav_groups[] = {
"gpio32",
};
static const char * const qdss_gpio0_groups[] = {
- "gpio32", "gpio62",
+ "gpio32", "gpio67",
};
static const char * const qspi_data0_groups[] = {
"gpio33",
@@ -814,7 +857,46 @@ static const char * const jitter_bist_groups[] = {
"gpio35",
};
static const char * const qdss_gpio3_groups[] = {
- "gpio35", "gpio65",
+ "gpio35", "gpio56",
+};
+static const char * const qdss_gpio7_groups[] = {
+ "gpio39", "gpio71",
+};
+static const char * const FL_R3LED_groups[] = {
+ "gpio40",
+};
+static const char * const CCI_TIMER0_groups[] = {
+ "gpio40",
+};
+static const char * const FL_STROBE_groups[] = {
+ "gpio41",
+};
+static const char * const CCI_TIMER1_groups[] = {
+ "gpio41",
+};
+static const char * const CAM_LDO1_groups[] = {
+ "gpio42",
+};
+static const char * const mdss_vsync0_groups[] = {
+ "gpio42",
+};
+static const char * const mdss_vsync1_groups[] = {
+ "gpio42",
+};
+static const char * const mdss_vsync2_groups[] = {
+ "gpio42",
+};
+static const char * const mdss_vsync3_groups[] = {
+ "gpio42",
+};
+static const char * const qdss_gpio9_groups[] = {
+ "gpio42", "gpio76",
+};
+static const char * const CAM_IRQ_groups[] = {
+ "gpio43",
+};
+static const char * const atest_usb2_groups[] = {
+ "gpio35",
};
static const char * const cci_i2c_groups[] = {
"gpio36", "gpio37", "gpio38", "gpio39",
@@ -825,14 +907,17 @@ static const char * const pll_bypassnl_groups[] = {
static const char * const atest_tsens_groups[] = {
"gpio36",
};
+static const char * const atest_usb21_groups[] = {
+ "gpio36",
+};
static const char * const pll_reset_groups[] = {
"gpio37",
};
-static const char * const qdss_gpio9_groups[] = {
- "gpio42", "gpio76",
+static const char * const atest_usb23_groups[] = {
+ "gpio37",
};
-static const char * const CAM_IRQ_groups[] = {
- "gpio43",
+static const char * const qdss_gpio6_groups[] = {
+ "gpio38", "gpio70",
};
static const char * const CCI_TIMER3_groups[] = {
"gpio43",
@@ -846,7 +931,7 @@ static const char * const qspi_cs_groups[] = {
static const char * const qdss_gpio10_groups[] = {
"gpio43", "gpio77",
};
-static const char * const CAM4_STANDBY_groups[] = {
+static const char * const CAM3_STANDBY_groups[] = {
"gpio44",
};
static const char * const CCI_TIMER4_groups[] = {
@@ -855,7 +940,7 @@ static const char * const CCI_TIMER4_groups[] = {
static const char * const qdss_gpio11_groups[] = {
"gpio44", "gpio79",
};
-static const char * const bt_reset_groups[] = {
+static const char * const CAM_LDO2_groups[] = {
"gpio45",
};
static const char * const cci_async_groups[] = {
@@ -864,49 +949,13 @@ static const char * const cci_async_groups[] = {
static const char * const qdss_gpio12_groups[] = {
"gpio45", "gpio80",
};
-static const char * const CAM1_RST_groups[] = {
+static const char * const CAM0_RST_groups[] = {
"gpio46",
};
-static const char * const qdss_gpio6_groups[] = {
- "gpio38", "gpio70",
-};
-static const char * const qdss_gpio7_groups[] = {
- "gpio39", "gpio71",
-};
-static const char * const FL_FRONT_groups[] = {
- "gpio40",
-};
-static const char * const CCI_TIMER0_groups[] = {
- "gpio40",
-};
-static const char * const qdss_gpio8_groups[] = {
- "gpio40", "gpio75",
-};
-static const char * const FL_STROBE_groups[] = {
- "gpio41",
-};
-static const char * const CCI_TIMER1_groups[] = {
- "gpio41",
-};
-static const char * const LASER_CE_groups[] = {
- "gpio42",
-};
-static const char * const mdss_vsync0_groups[] = {
- "gpio42",
-};
-static const char * const mdss_vsync1_groups[] = {
- "gpio42",
-};
-static const char * const mdss_vsync2_groups[] = {
- "gpio42",
-};
-static const char * const mdss_vsync3_groups[] = {
- "gpio42",
-};
static const char * const qdss_gpio13_groups[] = {
"gpio46", "gpio78",
};
-static const char * const CAM2_RST_groups[] = {
+static const char * const CAM1_RST_groups[] = {
"gpio47",
};
static const char * const qspi_clk_groups[] = {
@@ -918,9 +967,6 @@ static const char * const phase_flag30_groups[] = {
static const char * const qdss_gpio14_groups[] = {
"gpio47", "gpio72",
};
-static const char * const CAM3_RST_groups[] = {
- "gpio48",
-};
static const char * const qspi_resetn_groups[] = {
"gpio48",
};
@@ -930,19 +976,19 @@ static const char * const phase_flag1_groups[] = {
static const char * const qdss_gpio15_groups[] = {
"gpio48", "gpio73",
};
-static const char * const CAM1_STANDBY_groups[] = {
+static const char * const CAM0_STANDBY_groups[] = {
"gpio49",
};
static const char * const phase_flag2_groups[] = {
"gpio49",
};
-static const char * const CAM2_STANDBY_groups[] = {
+static const char * const CAM1_STANDBY_groups[] = {
"gpio50",
};
static const char * const phase_flag9_groups[] = {
"gpio50",
};
-static const char * const CAM3_STANDBY_groups[] = {
+static const char * const CAM2_STANDBY_groups[] = {
"gpio51",
};
static const char * const qspi_data3_groups[] = {
@@ -951,7 +997,10 @@ static const char * const qspi_data3_groups[] = {
static const char * const phase_flag15_groups[] = {
"gpio51",
};
-static const char * const CAM4_RST_groups[] = {
+static const char * const qdss_gpio8_groups[] = {
+ "gpio51", "gpio75",
+};
+static const char * const CAM3_RST_groups[] = {
"gpio52",
};
static const char * const CCI_TIMER2_groups[] = {
@@ -960,32 +1009,41 @@ static const char * const CCI_TIMER2_groups[] = {
static const char * const phase_flag16_groups[] = {
"gpio52",
};
+static const char * const LCD0_RESET_groups[] = {
+ "gpio53",
+};
static const char * const phase_flag6_groups[] = {
"gpio53",
};
-static const char * const RCM_MARKER2_groups[] = {
+static const char * const SD_CARD_groups[] = {
"gpio54",
};
static const char * const phase_flag29_groups[] = {
"gpio54",
};
-static const char * const SS_SWITCH_groups[] = {
- "gpio55", "gpio56",
+static const char * const DP_EN_groups[] = {
+ "gpio55",
};
static const char * const phase_flag25_groups[] = {
"gpio55",
};
+static const char * const USBC_ORIENTATION_groups[] = {
+ "gpio56",
+};
static const char * const phase_flag10_groups[] = {
"gpio56",
};
+static const char * const atest_usb20_groups[] = {
+ "gpio56",
+};
static const char * const gcc_gp1_groups[] = {
"gpio57", "gpio78",
};
static const char * const phase_flag4_groups[] = {
"gpio57",
};
-static const char * const USB_DIR_groups[] = {
- "gpio58",
+static const char * const atest_usb22_groups[] = {
+ "gpio57",
};
static const char * const USB_PHY_groups[] = {
"gpio58",
@@ -1005,11 +1063,8 @@ static const char * const gcc_gp3_groups[] = {
static const char * const atest_char3_groups[] = {
"gpio59",
};
-static const char * const Lcd_mode_groups[] = {
- "gpio60",
-};
-static const char * const EDP_HOT_groups[] = {
- "gpio60",
+static const char * const FORCE_TOUCH_groups[] = {
+ "gpio60", "gpio73",
};
static const char * const cri_trng0_groups[] = {
"gpio60",
@@ -1023,6 +1078,9 @@ static const char * const cri_trng1_groups[] = {
static const char * const atest_char1_groups[] = {
"gpio61",
};
+static const char * const AUDIO_USBC_groups[] = {
+ "gpio62",
+};
static const char * const audio_ref_groups[] = {
"gpio62",
};
@@ -1038,20 +1096,20 @@ static const char * const atest_char0_groups[] = {
static const char * const US_EURO_groups[] = {
"gpio63",
};
-static const char * const KEY_FOCUS_groups[] = {
+static const char * const LCD_BACKLIGHT_groups[] = {
"gpio64",
};
-static const char * const NAV_PPS_groups[] = {
- "gpio64", "gpio65", "gpio98", "gpio98",
-};
static const char * const blsp_spi8_groups[] = {
"gpio64", "gpio76",
};
static const char * const sp_cmu_groups[] = {
"gpio64",
};
-static const char * const SLT_PWR_groups[] = {
- "gpio65",
+static const char * const NAV_PPS_groups[] = {
+ "gpio65", "gpio65", "gpio80", "gpio80", "gpio98", "gpio98",
+};
+static const char * const GPS_TX_groups[] = {
+ "gpio65", "gpio80", "gpio98",
};
static const char * const adsp_ext_groups[] = {
"gpio65",
@@ -1059,12 +1117,9 @@ static const char * const adsp_ext_groups[] = {
static const char * const TS_RESET_groups[] = {
"gpio66",
};
-static const char * const TS_INT_groups[] = {
- "gpio67",
-};
static const char * const ssc_irq_groups[] = {
- "gpio68", "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74",
- "gpio75", "gpio76", "gpio77",
+ "gpio67", "gpio68", "gpio69", "gpio70", "gpio71", "gpio72", "gpio74",
+ "gpio75", "gpio76",
};
static const char * const isense_dbg_groups[] = {
"gpio68",
@@ -1084,12 +1139,18 @@ static const char * const tsense_pwm1_groups[] = {
static const char * const tsense_pwm2_groups[] = {
"gpio71",
};
-static const char * const HAPTICS_PWM_groups[] = {
+static const char * const SENSOR_RST_groups[] = {
+ "gpio77",
+};
+static const char * const WMSS_RESETN_groups[] = {
"gpio78",
};
-static const char * const wmss_reset_groups[] = {
+static const char * const HAPTICS_PWM_groups[] = {
"gpio79",
};
+static const char * const GPS_eLNA_groups[] = {
+ "gpio80",
+};
static const char * const mss_lte_groups[] = {
"gpio81", "gpio82",
};
@@ -1123,9 +1184,6 @@ static const char * const uim_batt_groups[] = {
static const char * const pa_indicator_groups[] = {
"gpio92",
};
-static const char * const ssbi_gnss_groups[] = {
- "gpio94",
-};
static const char * const ldo_en_groups[] = {
"gpio97",
};
@@ -1141,84 +1199,93 @@ static const char * const qlink_enable_groups[] = {
static const char * const prng_rosc_groups[] = {
"gpio102",
};
+static const char * const LCD_PWR_groups[] = {
+ "gpio113",
+};
static const struct msm_function msmfalcon_functions[] = {
FUNCTION(blsp_spi1),
FUNCTION(gpio),
+ FUNCTION(blsp_uim1),
FUNCTION(tgu_ch0),
- FUNCTION(tgu_ch1),
+ FUNCTION(qdss_gpio4),
+ FUNCTION(atest_gpsadc1),
FUNCTION(blsp_uart1),
+ FUNCTION(SMB_STAT),
+ FUNCTION(phase_flag14),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(phase_flag31),
FUNCTION(blsp_spi3),
FUNCTION(wlan1_adc1),
FUNCTION(atest_usb13),
- FUNCTION(bimc_dte1),
- FUNCTION(wlan1_adc0),
- FUNCTION(atest_usb12),
- FUNCTION(bimc_dte0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(qdss_gpio5),
+ FUNCTION(atest_gpsadc0),
FUNCTION(blsp_i2c1),
- FUNCTION(blsp_uim1),
FUNCTION(ddr_bist),
FUNCTION(atest_tsens2),
FUNCTION(atest_usb1),
FUNCTION(blsp_spi2),
- FUNCTION(phase_flag3),
- FUNCTION(phase_flag14),
- FUNCTION(blsp_i2c2),
FUNCTION(blsp_uim2),
- FUNCTION(phase_flag31),
+ FUNCTION(phase_flag3),
+ FUNCTION(bimc_dte1),
+ FUNCTION(wlan1_adc0),
+ FUNCTION(atest_usb12),
+ FUNCTION(bimc_dte0),
FUNCTION(blsp_i2c3),
- FUNCTION(atest_gpsadc1),
FUNCTION(wlan2_adc1),
FUNCTION(atest_usb11),
FUNCTION(dbg_out),
- FUNCTION(atest_gpsadc0),
FUNCTION(wlan2_adc0),
FUNCTION(atest_usb10),
+ FUNCTION(RCM_MARKER),
FUNCTION(blsp_spi4),
FUNCTION(pri_mi2s),
FUNCTION(phase_flag26),
- FUNCTION(qdss_gpio4),
+ FUNCTION(qdss_cti),
+ FUNCTION(DP_HOT),
FUNCTION(pri_mi2s_ws),
FUNCTION(phase_flag27),
- FUNCTION(qdss_gpio5),
FUNCTION(blsp_i2c4),
FUNCTION(phase_flag28),
FUNCTION(blsp_uart5),
FUNCTION(blsp_spi5),
+ FUNCTION(blsp_uim5),
FUNCTION(phase_flag5),
FUNCTION(blsp_i2c5),
- FUNCTION(blsp_uim5),
FUNCTION(blsp_spi6),
FUNCTION(blsp_uart2),
- FUNCTION(qdss_cti),
- FUNCTION(sec_mi2s),
- FUNCTION(sndwire_clk),
- FUNCTION(phase_flag17),
- FUNCTION(vsense_clkout),
- FUNCTION(sndwire_data),
- FUNCTION(phase_flag18),
- FUNCTION(blsp_i2c7),
- FUNCTION(wsa_en1),
- FUNCTION(phase_flag19),
+ FUNCTION(blsp_uim6),
FUNCTION(phase_flag11),
FUNCTION(vsense_data0),
FUNCTION(blsp_i2c6),
- FUNCTION(blsp_uim6),
FUNCTION(phase_flag12),
FUNCTION(vsense_data1),
FUNCTION(phase_flag13),
FUNCTION(vsense_mode),
FUNCTION(blsp_spi7),
FUNCTION(BLSP_UART),
+ FUNCTION(sec_mi2s),
+ FUNCTION(sndwire_clk),
+ FUNCTION(phase_flag17),
+ FUNCTION(vsense_clkout),
+ FUNCTION(sndwire_data),
+ FUNCTION(phase_flag18),
+ FUNCTION(WSA_SPKR),
+ FUNCTION(blsp_i2c7),
+ FUNCTION(phase_flag19),
FUNCTION(vfr_1),
- FUNCTION(wsa_en2),
FUNCTION(phase_flag20),
+ FUNCTION(NFC_INT),
FUNCTION(blsp_spi),
FUNCTION(m_voc),
FUNCTION(phase_flag21),
+ FUNCTION(NFC_EN),
FUNCTION(phase_flag22),
+ FUNCTION(NFC_DWL),
FUNCTION(BLSP_I2C),
FUNCTION(phase_flag23),
+ FUNCTION(NFC_ESE),
FUNCTION(pwr_modem),
FUNCTION(phase_flag24),
FUNCTION(qdss_gpio),
@@ -1234,88 +1301,92 @@ static const struct msm_function msmfalcon_functions[] = {
FUNCTION(qspi_data2),
FUNCTION(jitter_bist),
FUNCTION(qdss_gpio3),
+ FUNCTION(qdss_gpio7),
+ FUNCTION(FL_R3LED),
+ FUNCTION(CCI_TIMER0),
+ FUNCTION(FL_STROBE),
+ FUNCTION(CCI_TIMER1),
+ FUNCTION(CAM_LDO1),
+ FUNCTION(mdss_vsync0),
+ FUNCTION(mdss_vsync1),
+ FUNCTION(mdss_vsync2),
+ FUNCTION(mdss_vsync3),
+ FUNCTION(qdss_gpio9),
+ FUNCTION(CAM_IRQ),
+ FUNCTION(atest_usb2),
FUNCTION(cci_i2c),
FUNCTION(pll_bypassnl),
FUNCTION(atest_tsens),
+ FUNCTION(atest_usb21),
FUNCTION(pll_reset),
- FUNCTION(qdss_gpio9),
- FUNCTION(CAM_IRQ),
+ FUNCTION(atest_usb23),
+ FUNCTION(qdss_gpio6),
FUNCTION(CCI_TIMER3),
FUNCTION(CCI_ASYNC),
FUNCTION(qspi_cs),
FUNCTION(qdss_gpio10),
- FUNCTION(CAM4_STANDBY),
+ FUNCTION(CAM3_STANDBY),
FUNCTION(CCI_TIMER4),
FUNCTION(qdss_gpio11),
- FUNCTION(bt_reset),
+ FUNCTION(CAM_LDO2),
FUNCTION(cci_async),
FUNCTION(qdss_gpio12),
- FUNCTION(CAM1_RST),
- FUNCTION(qdss_gpio6),
- FUNCTION(qdss_gpio7),
- FUNCTION(FL_FRONT),
- FUNCTION(CCI_TIMER0),
- FUNCTION(qdss_gpio8),
- FUNCTION(FL_STROBE),
- FUNCTION(CCI_TIMER1),
- FUNCTION(LASER_CE),
- FUNCTION(mdss_vsync0),
- FUNCTION(mdss_vsync1),
- FUNCTION(mdss_vsync2),
- FUNCTION(mdss_vsync3),
+ FUNCTION(CAM0_RST),
FUNCTION(qdss_gpio13),
- FUNCTION(CAM2_RST),
+ FUNCTION(CAM1_RST),
FUNCTION(qspi_clk),
FUNCTION(phase_flag30),
FUNCTION(qdss_gpio14),
- FUNCTION(CAM3_RST),
FUNCTION(qspi_resetn),
FUNCTION(phase_flag1),
FUNCTION(qdss_gpio15),
- FUNCTION(CAM1_STANDBY),
+ FUNCTION(CAM0_STANDBY),
FUNCTION(phase_flag2),
- FUNCTION(CAM2_STANDBY),
+ FUNCTION(CAM1_STANDBY),
FUNCTION(phase_flag9),
- FUNCTION(CAM3_STANDBY),
+ FUNCTION(CAM2_STANDBY),
FUNCTION(qspi_data3),
FUNCTION(phase_flag15),
- FUNCTION(CAM4_RST),
+ FUNCTION(qdss_gpio8),
+ FUNCTION(CAM3_RST),
FUNCTION(CCI_TIMER2),
FUNCTION(phase_flag16),
+ FUNCTION(LCD0_RESET),
FUNCTION(phase_flag6),
- FUNCTION(RCM_MARKER2),
+ FUNCTION(SD_CARD),
FUNCTION(phase_flag29),
- FUNCTION(SS_SWITCH),
+ FUNCTION(DP_EN),
FUNCTION(phase_flag25),
+ FUNCTION(USBC_ORIENTATION),
FUNCTION(phase_flag10),
+ FUNCTION(atest_usb20),
FUNCTION(gcc_gp1),
FUNCTION(phase_flag4),
- FUNCTION(USB_DIR),
+ FUNCTION(atest_usb22),
FUNCTION(USB_PHY),
FUNCTION(gcc_gp2),
FUNCTION(atest_char),
FUNCTION(mdp_vsync),
FUNCTION(gcc_gp3),
FUNCTION(atest_char3),
- FUNCTION(Lcd_mode),
- FUNCTION(EDP_HOT),
+ FUNCTION(FORCE_TOUCH),
FUNCTION(cri_trng0),
FUNCTION(atest_char2),
FUNCTION(cri_trng1),
FUNCTION(atest_char1),
+ FUNCTION(AUDIO_USBC),
FUNCTION(audio_ref),
FUNCTION(MDP_VSYNC),
FUNCTION(cri_trng),
FUNCTION(atest_char0),
FUNCTION(US_EURO),
- FUNCTION(KEY_FOCUS),
- FUNCTION(NAV_PPS),
+ FUNCTION(LCD_BACKLIGHT),
FUNCTION(blsp_spi8),
FUNCTION(sp_cmu),
- FUNCTION(SLT_PWR),
+ FUNCTION(NAV_PPS),
+ FUNCTION(GPS_TX),
FUNCTION(adsp_ext),
FUNCTION(TS_RESET),
- FUNCTION(TS_INT),
FUNCTION(ssc_irq),
FUNCTION(isense_dbg),
FUNCTION(phase_flag0),
@@ -1323,8 +1394,10 @@ static const struct msm_function msmfalcon_functions[] = {
FUNCTION(phase_flag8),
FUNCTION(tsense_pwm1),
FUNCTION(tsense_pwm2),
+ FUNCTION(SENSOR_RST),
+ FUNCTION(WMSS_RESETN),
FUNCTION(HAPTICS_PWM),
- FUNCTION(wmss_reset),
+ FUNCTION(GPS_eLNA),
FUNCTION(mss_lte),
FUNCTION(uim2_data),
FUNCTION(uim2_clk),
@@ -1336,153 +1409,155 @@ static const struct msm_function msmfalcon_functions[] = {
FUNCTION(uim1_present),
FUNCTION(uim_batt),
FUNCTION(pa_indicator),
- FUNCTION(ssbi_gnss),
FUNCTION(ldo_en),
FUNCTION(ldo_update),
FUNCTION(qlink_request),
FUNCTION(qlink_enable),
FUNCTION(prng_rosc),
+ FUNCTION(LCD_PWR),
};
static const struct msm_pingroup msmfalcon_groups[] = {
- PINGROUP(0, SOUTH, blsp_spi1, blsp_uart1, tgu_ch0, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(1, SOUTH, blsp_spi1, blsp_uart1, tgu_ch1, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(2, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, blsp_uim1, NA, NA,
- NA, NA, NA),
- PINGROUP(3, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, blsp_uim1,
- ddr_bist, NA, atest_tsens2, atest_usb1, NA),
- PINGROUP(4, WEST, blsp_spi2, phase_flag3, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(5, WEST, blsp_spi2, phase_flag14, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(6, WEST, blsp_spi2, blsp_i2c2, blsp_uim2, phase_flag31, NA,
- NA, NA, NA, NA),
- PINGROUP(7, WEST, blsp_spi2, blsp_i2c2, blsp_uim2, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(8, WEST, blsp_spi3, ddr_bist, NA, NA, wlan1_adc1, atest_usb13,
- bimc_dte1, NA, NA),
- PINGROUP(9, WEST, blsp_spi3, ddr_bist, NA, NA, wlan1_adc0, atest_usb12,
- bimc_dte0, NA, NA),
- PINGROUP(10, WEST, blsp_spi3, blsp_i2c3, ddr_bist, NA, atest_gpsadc1,
- wlan2_adc1, atest_usb11, bimc_dte1, NA),
- PINGROUP(11, WEST, blsp_spi3, blsp_i2c3, dbg_out, atest_gpsadc0,
- wlan2_adc0, atest_usb10, bimc_dte0, NA, NA),
- PINGROUP(12, SOUTH, blsp_spi4, pri_mi2s, phase_flag26, qdss_gpio4, NA,
+ PINGROUP(0, SOUTH, blsp_spi1, blsp_uart1, blsp_uim1, tgu_ch0, NA, NA,
+ qdss_gpio4, atest_gpsadc1, NA),
+ PINGROUP(1, SOUTH, blsp_spi1, blsp_uart1, blsp_uim1, tgu_ch1, NA, NA,
+ qdss_gpio5, atest_gpsadc0, NA),
+ PINGROUP(2, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(3, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, ddr_bist, NA, NA,
+ atest_tsens2, atest_usb1, NA),
+ PINGROUP(4, NORTH, blsp_spi2, blsp_uim2, NA, phase_flag3, NA, NA, NA,
+ NA, NA),
+ PINGROUP(5, SOUTH, blsp_spi2, blsp_uim2, NA, phase_flag14, NA, NA, NA,
+ NA, NA),
+ PINGROUP(6, SOUTH, blsp_spi2, blsp_i2c2, NA, phase_flag31, NA, NA, NA,
+ NA, NA),
+ PINGROUP(7, SOUTH, blsp_spi2, blsp_i2c2, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(8, NORTH, blsp_spi3, ddr_bist, NA, NA, NA, wlan1_adc1,
+ atest_usb13, bimc_dte1, NA),
+ PINGROUP(9, NORTH, blsp_spi3, ddr_bist, NA, NA, NA, wlan1_adc0,
+ atest_usb12, bimc_dte0, NA),
+ PINGROUP(10, NORTH, blsp_spi3, blsp_i2c3, ddr_bist, NA, NA, wlan2_adc1,
+ atest_usb11, bimc_dte1, NA),
+ PINGROUP(11, NORTH, blsp_spi3, blsp_i2c3, NA, dbg_out, wlan2_adc0,
+ atest_usb10, bimc_dte0, NA, NA),
+ PINGROUP(12, NORTH, blsp_spi4, pri_mi2s, NA, phase_flag26, qdss_cti,
NA, NA, NA, NA),
- PINGROUP(13, SOUTH, blsp_spi4, pri_mi2s_ws, NA, phase_flag27,
- qdss_gpio5, NA, NA, NA, NA),
- PINGROUP(14, SOUTH, blsp_spi4, blsp_i2c4, pri_mi2s, phase_flag28, NA,
+ PINGROUP(13, NORTH, blsp_spi4, DP_HOT, pri_mi2s_ws, NA, NA,
+ phase_flag27, qdss_cti, NA, NA),
+ PINGROUP(14, NORTH, blsp_spi4, blsp_i2c4, pri_mi2s, NA, phase_flag28,
NA, NA, NA, NA),
- PINGROUP(15, SOUTH, blsp_spi4, blsp_i2c4, pri_mi2s, NA, NA, NA, NA, NA,
+ PINGROUP(15, NORTH, blsp_spi4, blsp_i2c4, pri_mi2s, NA, NA, NA, NA, NA,
NA),
- PINGROUP(16, WEST, blsp_uart5, blsp_spi5, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(17, WEST, blsp_uart5, blsp_spi5, phase_flag5, NA, NA, NA, NA,
+ PINGROUP(16, CENTER, blsp_uart5, blsp_spi5, blsp_uim5, NA, NA, NA, NA,
NA, NA),
- PINGROUP(18, WEST, blsp_uart5, blsp_spi5, blsp_i2c5, blsp_uim5, NA, NA,
- NA, NA, NA),
- PINGROUP(19, WEST, blsp_uart5, blsp_spi5, blsp_i2c5, blsp_uim5, NA, NA,
- NA, NA, NA),
- PINGROUP(20, WEST, blsp_spi6, blsp_uart2, NA, qdss_cti, NA, NA, NA, NA,
- NA),
- PINGROUP(21, WEST, blsp_spi6, blsp_uart2, phase_flag11, qdss_cti,
- vsense_data0, NA, NA, NA, NA),
- PINGROUP(22, WEST, blsp_spi6, blsp_uart2, blsp_i2c6, blsp_uim6,
+ PINGROUP(17, CENTER, blsp_uart5, blsp_spi5, blsp_uim5, NA, phase_flag5,
+ NA, NA, NA, NA),
+ PINGROUP(18, CENTER, blsp_uart5, blsp_spi5, blsp_i2c5, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(19, CENTER, blsp_uart5, blsp_spi5, blsp_i2c5, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(20, SOUTH, blsp_spi6, blsp_uart2, blsp_uim6, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(21, SOUTH, blsp_spi6, blsp_uart2, blsp_uim6, NA, phase_flag11,
+ qdss_cti, vsense_data0, NA, NA),
+ PINGROUP(22, CENTER, blsp_spi6, blsp_uart2, blsp_i2c6, NA,
phase_flag12, vsense_data1, NA, NA, NA),
- PINGROUP(23, WEST, blsp_spi6, blsp_uart2, blsp_i2c6, blsp_uim6,
+ PINGROUP(23, CENTER, blsp_spi6, blsp_uart2, blsp_i2c6, NA,
phase_flag13, vsense_mode, NA, NA, NA),
- PINGROUP(24, WEST, blsp_spi7, BLSP_UART, sec_mi2s, sndwire_clk, NA,
- phase_flag17, qdss_cti, vsense_clkout, NA),
- PINGROUP(25, WEST, blsp_spi7, BLSP_UART, sec_mi2s, sndwire_data, NA,
- phase_flag18, qdss_cti, NA, NA),
- PINGROUP(26, WEST, blsp_spi7, BLSP_UART, blsp_i2c7, sec_mi2s, wsa_en1,
- phase_flag19, qdss_cti, NA, NA),
- PINGROUP(27, WEST, blsp_spi7, BLSP_UART, blsp_i2c7, vfr_1, sec_mi2s,
- wsa_en2, phase_flag20, NA, NA),
- PINGROUP(28, SOUTH, blsp_spi, BLSP_UART, m_voc, phase_flag21, NA, NA,
+ PINGROUP(24, NORTH, blsp_spi7, BLSP_UART, sec_mi2s, sndwire_clk, NA,
+ NA, phase_flag17, vsense_clkout, NA),
+ PINGROUP(25, NORTH, blsp_spi7, BLSP_UART, sec_mi2s, sndwire_data, NA,
+ NA, phase_flag18, NA, NA),
+ PINGROUP(26, NORTH, blsp_spi7, BLSP_UART, blsp_i2c7, sec_mi2s, NA,
+ phase_flag19, NA, NA, NA),
+ PINGROUP(27, NORTH, blsp_spi7, BLSP_UART, blsp_i2c7, vfr_1, sec_mi2s,
+ NA, phase_flag20, NA, NA),
+ PINGROUP(28, CENTER, blsp_spi, BLSP_UART, m_voc, NA, phase_flag21, NA,
NA, NA, NA),
- PINGROUP(29, SOUTH, blsp_spi, BLSP_UART, NA, phase_flag22, NA, NA, NA,
+ PINGROUP(29, CENTER, blsp_spi, BLSP_UART, NA, NA, phase_flag22, NA, NA,
NA, NA),
- PINGROUP(30, SOUTH, blsp_spi, BLSP_UART, BLSP_I2C, blsp_spi3,
- phase_flag23, NA, NA, NA, NA),
- PINGROUP(31, SOUTH, blsp_spi, BLSP_UART, BLSP_I2C, pwr_modem,
- phase_flag24, qdss_gpio, NA, NA, NA),
- PINGROUP(32, SOUTH, cam_mclk, pwr_nav, NA, qdss_gpio0, NA, NA, NA, NA,
+ PINGROUP(30, CENTER, blsp_spi, BLSP_UART, BLSP_I2C, blsp_spi3, NA,
+ phase_flag23, NA, NA, NA),
+ PINGROUP(31, CENTER, blsp_spi, BLSP_UART, BLSP_I2C, pwr_modem, NA,
+ phase_flag24, qdss_gpio, NA, NA),
+ PINGROUP(32, SOUTH, cam_mclk, pwr_nav, NA, NA, qdss_gpio0, NA, NA, NA,
NA),
- PINGROUP(33, SOUTH, cam_mclk, qspi_data0, pwr_crypto, NA, qdss_gpio1,
- NA, NA, NA, NA),
- PINGROUP(34, SOUTH, cam_mclk, qspi_data1, agera_pll, NA, qdss_gpio2,
- NA, NA, NA, NA),
- PINGROUP(35, SOUTH, cam_mclk, qspi_data2, jitter_bist, NA, qdss_gpio3,
- NA, NA, NA, NA),
- PINGROUP(36, SOUTH, cci_i2c, pll_bypassnl, agera_pll, NA, qdss_gpio4,
- atest_tsens, NA, NA, NA),
- PINGROUP(37, SOUTH, cci_i2c, pll_reset, NA, qdss_gpio5, NA, NA, NA, NA,
- NA),
- PINGROUP(38, SOUTH, cci_i2c, NA, qdss_gpio6, NA, NA, NA, NA, NA, NA),
- PINGROUP(39, SOUTH, cci_i2c, NA, qdss_gpio7, NA, NA, NA, NA, NA, NA),
- PINGROUP(40, SOUTH, CCI_TIMER0, NA, blsp_spi, NA, qdss_gpio8, NA, NA,
- NA, NA),
- PINGROUP(41, SOUTH, CCI_TIMER1, NA, blsp_spi, NA, qdss_gpio, NA, NA,
- NA, NA),
+ PINGROUP(33, SOUTH, cam_mclk, qspi_data0, pwr_crypto, NA, NA,
+ qdss_gpio1, NA, NA, NA),
+ PINGROUP(34, SOUTH, cam_mclk, qspi_data1, agera_pll, NA, NA,
+ qdss_gpio2, NA, NA, NA),
+ PINGROUP(35, SOUTH, cam_mclk, qspi_data2, jitter_bist, NA, NA,
+ qdss_gpio3, NA, atest_usb2, NA),
+ PINGROUP(36, SOUTH, cci_i2c, pll_bypassnl, agera_pll, NA, NA,
+ qdss_gpio4, atest_tsens, atest_usb21, NA),
+ PINGROUP(37, SOUTH, cci_i2c, pll_reset, NA, NA, qdss_gpio5,
+ atest_usb23, NA, NA, NA),
+ PINGROUP(38, SOUTH, cci_i2c, NA, NA, qdss_gpio6, NA, NA, NA, NA, NA),
+ PINGROUP(39, SOUTH, cci_i2c, NA, NA, qdss_gpio7, NA, NA, NA, NA, NA),
+ PINGROUP(40, SOUTH, CCI_TIMER0, NA, blsp_spi, NA, NA, NA, NA, NA, NA),
+ PINGROUP(41, SOUTH, CCI_TIMER1, NA, blsp_spi, NA, NA, NA, NA, NA, NA),
PINGROUP(42, SOUTH, mdss_vsync0, mdss_vsync1, mdss_vsync2, mdss_vsync3,
- NA, qdss_gpio9, NA, NA, NA),
- PINGROUP(43, SOUTH, CCI_TIMER3, CCI_ASYNC, qspi_cs, NA, qdss_gpio10,
- NA, NA, NA, NA),
- PINGROUP(44, SOUTH, CCI_TIMER4, CCI_ASYNC, blsp_spi, BLSP_I2C, NA,
- qdss_gpio11, NA, NA, NA),
- PINGROUP(45, SOUTH, cci_async, NA, qdss_gpio12, NA, NA, NA, NA, NA, NA),
- PINGROUP(46, SOUTH, blsp_spi1, NA, qdss_gpio13, NA, NA, NA, NA, NA, NA),
- PINGROUP(47, SOUTH, qspi_clk, phase_flag30, qdss_gpio14, NA, NA, NA,
- NA, NA, NA),
- PINGROUP(48, SOUTH, qspi_resetn, phase_flag1, qdss_gpio15, NA, NA, NA,
+ NA, NA, qdss_gpio9, NA, NA),
+ PINGROUP(43, SOUTH, CCI_TIMER3, CCI_ASYNC, qspi_cs, NA, NA,
+ qdss_gpio10, NA, NA, NA),
+ PINGROUP(44, SOUTH, CCI_TIMER4, CCI_ASYNC, blsp_spi, BLSP_I2C, NA, NA,
+ qdss_gpio11, NA, NA),
+ PINGROUP(45, SOUTH, cci_async, NA, NA, qdss_gpio12, NA, NA, NA, NA, NA),
+ PINGROUP(46, SOUTH, blsp_spi1, NA, NA, qdss_gpio13, NA, NA, NA, NA, NA),
+ PINGROUP(47, SOUTH, qspi_clk, NA, phase_flag30, qdss_gpio14, NA, NA,
NA, NA, NA),
- PINGROUP(49, SOUTH, phase_flag2, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(50, SOUTH, qspi_cs, phase_flag9, qdss_cti, NA, NA, NA, NA, NA,
+ PINGROUP(48, SOUTH, NA, phase_flag1, qdss_gpio15, NA, NA, NA, NA, NA,
NA),
- PINGROUP(51, SOUTH, qspi_data3, phase_flag15, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(52, EAST, CCI_TIMER2, blsp_spi, BLSP_I2C, phase_flag16, NA,
- NA, NA, NA, NA),
- PINGROUP(53, EAST, phase_flag6, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(54, EAST, NA, phase_flag29, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(55, WEST, phase_flag25, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(56, WEST, phase_flag10, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(57, SOUTH, gcc_gp1, phase_flag4, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(58, SOUTH, USB_PHY, gcc_gp2, NA, atest_char, NA, NA, NA, NA,
+ PINGROUP(49, SOUTH, NA, phase_flag2, qdss_cti, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, SOUTH, qspi_cs, NA, phase_flag9, qdss_cti, NA, NA, NA, NA,
NA),
- PINGROUP(59, EAST, mdp_vsync, gcc_gp3, NA, atest_char3, NA, NA, NA, NA,
- NA),
- PINGROUP(60, EAST, EDP_HOT, cri_trng0, NA, atest_char2, NA, NA, NA, NA,
- NA),
- PINGROUP(61, EAST, pri_mi2s, cri_trng1, NA, qdss_cti, atest_char1, NA,
+ PINGROUP(51, SOUTH, qspi_data3, NA, phase_flag15, qdss_gpio8, NA, NA,
+ NA, NA, NA),
+ PINGROUP(52, SOUTH, CCI_TIMER2, blsp_spi, BLSP_I2C, NA, phase_flag16,
+ qdss_gpio, NA, NA, NA),
+ PINGROUP(53, NORTH, NA, phase_flag6, qdss_cti, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, NORTH, NA, NA, phase_flag29, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, SOUTH, NA, phase_flag25, qdss_cti, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, SOUTH, NA, phase_flag10, qdss_gpio3, NA, atest_usb20, NA,
NA, NA, NA),
- PINGROUP(62, SOUTH, sec_mi2s, audio_ref, MDP_VSYNC, cri_trng, NA,
- qdss_gpio0, atest_char0, NA, NA),
- PINGROUP(63, SOUTH, NA, NA, qdss_gpio1, NA, NA, NA, NA, NA, NA),
- PINGROUP(64, SOUTH, NAV_PPS, blsp_spi8, sp_cmu, NA, qdss_gpio2, NA, NA,
+ PINGROUP(57, SOUTH, gcc_gp1, NA, phase_flag4, atest_usb22, NA, NA, NA,
NA, NA),
- PINGROUP(65, SOUTH, NAV_PPS, blsp_spi3, NA, adsp_ext, NA, qdss_gpio3,
+ PINGROUP(58, SOUTH, USB_PHY, gcc_gp2, NA, NA, atest_char, NA, NA, NA,
+ NA),
+ PINGROUP(59, NORTH, mdp_vsync, gcc_gp3, NA, NA, atest_char3, NA, NA,
+ NA, NA),
+ PINGROUP(60, NORTH, cri_trng0, NA, NA, atest_char2, NA, NA, NA, NA, NA),
+ PINGROUP(61, NORTH, pri_mi2s, cri_trng1, NA, NA, atest_char1, NA, NA,
+ NA, NA),
+ PINGROUP(62, NORTH, sec_mi2s, audio_ref, MDP_VSYNC, cri_trng, NA, NA,
+ atest_char0, NA, NA),
+ PINGROUP(63, NORTH, NA, NA, NA, qdss_gpio1, NA, NA, NA, NA, NA),
+ PINGROUP(64, SOUTH, blsp_spi8, sp_cmu, NA, NA, qdss_gpio2, NA, NA, NA,
+ NA),
+ PINGROUP(65, SOUTH, NA, NAV_PPS, NAV_PPS, GPS_TX, blsp_spi3, adsp_ext,
NA, NA, NA),
- PINGROUP(66, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(67, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(68, SOUTH, isense_dbg, phase_flag0, qdss_gpio, NA, NA, NA, NA,
+ PINGROUP(66, NORTH, NA, NA, qdss_cti, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, NORTH, NA, NA, qdss_gpio0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, NORTH, isense_dbg, NA, phase_flag0, qdss_gpio, NA, NA, NA,
NA, NA),
- PINGROUP(69, SOUTH, phase_flag7, qdss_gpio, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(70, SOUTH, phase_flag8, qdss_gpio6, NA, NA, NA, NA, NA, NA,
+ PINGROUP(69, NORTH, NA, phase_flag7, qdss_gpio, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NORTH, NA, phase_flag8, qdss_gpio6, NA, NA, NA, NA, NA,
NA),
- PINGROUP(71, SOUTH, NA, qdss_gpio7, tsense_pwm1, tsense_pwm2, NA, NA,
+ PINGROUP(71, NORTH, NA, NA, qdss_gpio7, tsense_pwm1, tsense_pwm2, NA,
NA, NA, NA),
- PINGROUP(72, SOUTH, qdss_gpio14, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(73, SOUTH, NA, qdss_gpio15, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(74, SOUTH, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(75, WEST, NA, qdss_gpio8, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(76, WEST, blsp_spi8, NA, NA, qdss_gpio9, NA, NA, NA, NA, NA),
- PINGROUP(77, SOUTH, NA, qdss_gpio10, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(78, SOUTH, gcc_gp1, qdss_gpio13, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(79, SOUTH, NA, qdss_gpio11, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(80, SOUTH, NA, qdss_gpio12, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(81, SOUTH, mss_lte, gcc_gp2, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(82, SOUTH, mss_lte, NA, gcc_gp3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NORTH, NA, qdss_gpio14, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NORTH, NA, NA, qdss_gpio15, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NORTH, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NORTH, NA, NA, qdss_gpio8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NORTH, blsp_spi8, NA, NA, NA, qdss_gpio9, NA, NA, NA, NA),
+ PINGROUP(77, NORTH, NA, NA, qdss_gpio10, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NORTH, gcc_gp1, NA, qdss_gpio13, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, SOUTH, NA, NA, qdss_gpio11, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, SOUTH, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, qdss_gpio12, NA,
+ NA, NA),
+ PINGROUP(81, CENTER, mss_lte, gcc_gp2, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, CENTER, mss_lte, gcc_gp3, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(83, SOUTH, uim2_data, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(84, SOUTH, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(85, SOUTH, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
@@ -1494,12 +1569,12 @@ static const struct msm_pingroup msmfalcon_groups[] = {
PINGROUP(91, SOUTH, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(92, SOUTH, NA, NA, pa_indicator, NA, NA, NA, NA, NA, NA),
PINGROUP(93, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(94, SOUTH, NA, ssbi_gnss, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(95, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(96, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(97, SOUTH, NA, NA, ldo_en, NA, NA, NA, NA, NA, NA),
- PINGROUP(98, SOUTH, NA, NAV_PPS, NAV_PPS, ldo_update, NA, NA, NA, NA,
- NA),
+ PINGROUP(97, SOUTH, NA, ldo_en, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, SOUTH, NA, NAV_PPS, NAV_PPS, GPS_TX, ldo_update, NA, NA,
+ NA, NA),
PINGROUP(99, SOUTH, qlink_request, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(100, SOUTH, qlink_enable, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(101, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
@@ -1508,13 +1583,20 @@ static const struct msm_pingroup msmfalcon_groups[] = {
PINGROUP(104, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(105, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(106, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(107, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(108, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(109, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(110, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- SDC_QDSD_PINGROUP(sdc2_clk, 0x999000, 14, 6),
- SDC_QDSD_PINGROUP(sdc2_cmd, 0x999000, 11, 3),
- SDC_QDSD_PINGROUP(sdc2_data, 0x999000, 9, 0),
+ PINGROUP(107, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(113, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x99a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x99a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x99a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x99b000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x99b000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x99b000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc1_rclk, 0x99a000, 15, 0),
};
static const struct msm_pinctrl_soc_data msmfalcon_pinctrl = {
@@ -1524,7 +1606,7 @@ static const struct msm_pinctrl_soc_data msmfalcon_pinctrl = {
.nfunctions = ARRAY_SIZE(msmfalcon_functions),
.groups = msmfalcon_groups,
.ngroups = ARRAY_SIZE(msmfalcon_groups),
- .ngpios = 111,
+ .ngpios = 114,
};
static int msmfalcon_pinctrl_probe(struct platform_device *pdev)
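
The msmfalcon hunks above reshuffle pins between tiles by replacing the old SOUTH/WEST/EAST base macros with NORTH/CENTER/SOUTH and re-pointing each PINGROUP at the new tile. Qcom TLMM drivers of this generation typically give each GPIO one REG_SIZE window starting at the tile base, so the register address comes out as base + REG_SIZE * id. A minimal standalone sketch of that arithmetic, assuming that layout (the helper name and the sample pin are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Tile bases and stride as defined in the patched pinctrl-msmfalcon.c */
#define NORTH    0x00900000
#define CENTER   0x00500000
#define SOUTH    0x00100000
#define REG_SIZE 0x1000

/* Hypothetical helper: start of the per-pin register window. The real
 * PINGROUP macro adds further fixed offsets (ctl, io, intr) inside it. */
static uint32_t msm_pin_window(uint32_t tile_base, unsigned int pin)
{
	return tile_base + REG_SIZE * pin;
}

int main(void)
{
	/* gpio16 moved from the old WEST tile to CENTER in this patch */
	printf("gpio16 window: 0x%08x\n", msm_pin_window(CENTER, 16));
	return 0;
}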
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 181ea98a63b7..2b0d70217bbd 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -545,7 +545,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
return ret;
}
- pinctrl_provide_dummies();
+ /* Enable dummy states for those platforms without pinctrl support */
+ if (!of_have_populated_dt())
+ pinctrl_provide_dummies();
ret = sh_pfc_init_ranges(pfc);
if (ret < 0)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 00265f0435a7..8b381d69df86 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
.pins = sun8i_a33_pins,
.npins = ARRAY_SIZE(sun8i_a33_pins),
.irq_banks = 2,
+ .irq_bank_base = 1,
};
static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index dead97daca35..a4a5b504c532 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -578,7 +578,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_cfg_reg(d->hwirq);
+ u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
unsigned long flags;
u32 regval;
@@ -625,7 +625,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
static void sunxi_pinctrl_irq_ack(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 status_reg = sunxi_irq_status_reg(d->hwirq);
+ u32 status_reg = sunxi_irq_status_reg(d->hwirq,
+ pctl->desc->irq_bank_base);
u8 status_idx = sunxi_irq_status_offset(d->hwirq);
/* Clear the IRQ */
@@ -635,7 +636,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
static void sunxi_pinctrl_irq_mask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+ u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
@@ -652,7 +653,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+ u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
@@ -744,7 +745,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
if (bank == pctl->desc->irq_banks)
return;
- reg = sunxi_irq_status_reg_from_bank(bank);
+ reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
val = readl(pctl->membase + reg);
if (val) {
@@ -1023,9 +1024,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
for (i = 0; i < pctl->desc->irq_banks; i++) {
/* Mask and clear all IRQs before registering a handler */
- writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
+ writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
+ pctl->desc->irq_bank_base));
writel(0xffffffff,
- pctl->membase + sunxi_irq_status_reg_from_bank(i));
+ pctl->membase + sunxi_irq_status_reg_from_bank(i,
+ pctl->desc->irq_bank_base));
irq_set_chained_handler_and_data(pctl->irq[i],
sunxi_pinctrl_irq_handler,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e248e81a0f9e..0afce1ab12d0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
int npins;
unsigned pin_base;
unsigned irq_banks;
+ unsigned irq_bank_base;
bool irq_read_needs_mux;
};
@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
return pin_num * PULL_PINS_BITS;
}
-static inline u32 sunxi_irq_cfg_reg(u16 irq)
+static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
- return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
+ return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
}
static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
return irq_num * IRQ_CFG_IRQ_BITS;
}
-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
{
- return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
+ return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
}
-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
+static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
- return sunxi_irq_ctrl_reg_from_bank(bank);
+ return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
}
static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
return irq_num * IRQ_CTRL_IRQ_BITS;
}
-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
{
- return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
+ return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
}
-static inline u32 sunxi_irq_status_reg(u16 irq)
+static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
- return sunxi_irq_status_reg_from_bank(bank);
+ return sunxi_irq_status_reg_from_bank(bank, bank_base);
}
static inline u32 sunxi_irq_status_offset(u16 irq)
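
The sunxi changes above thread a new irq_bank_base through every IRQ register helper so that parts whose first IRQ-capable port is not hardware bank 0 -- the A33, which sets .irq_bank_base = 1 -- address the correct register block. A standalone check of the offset arithmetic, using placeholder values for the register constants since their real definitions in pinctrl-sunxi.h are not part of this hunk:

#include <stdint.h>
#include <stdio.h>

/* Placeholder constants for illustration only; the driver's real
 * values are defined in pinctrl-sunxi.h. */
#define IRQ_CFG_REG		0x200
#define IRQ_MEM_SIZE		0x20
#define IRQ_PER_BANK		32
#define IRQ_CFG_IRQ_PER_REG	8

/* Same formula as the patched sunxi_irq_cfg_reg() */
static uint32_t irq_cfg_reg(uint16_t irq, unsigned int bank_base)
{
	uint8_t bank = irq / IRQ_PER_BANK;
	uint8_t reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;

	return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
}

int main(void)
{
	/* With bank_base = 1 (A33) the same local IRQ lands one
	 * IRQ_MEM_SIZE block further in than with bank_base = 0. */
	printf("bank_base 0: 0x%x, bank_base 1: 0x%x\n",
	       irq_cfg_reg(0, 0), irq_cfg_reg(0, 1));
	return 0;
}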
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 0069c07474d4..d2e31c3b0945 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -93,26 +93,6 @@ static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
}
-static void __gsi_config_inter_ee_ch_irq(int ee, uint32_t mask, uint32_t val)
-{
- uint32_t curr;
-
- curr = gsi_readl(gsi_ctx->base +
- GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
- gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
- GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
-}
-
-static void __gsi_config_inter_ee_evt_irq(int ee, uint32_t mask, uint32_t val)
-{
- uint32_t curr;
-
- curr = gsi_readl(gsi_ctx->base +
- GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(ee));
- gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
- GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(ee));
-}
-
static void gsi_handle_ch_ctrl(int ee)
{
uint32_t ch;
@@ -684,7 +664,10 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
/* only support 16 un-reserved + 7 reserved event virtual IDs */
gsi_ctx->evt_bmap = ~0x7E03FF;
- /* enable all interrupts but GSI_BREAK_POINT */
+ /*
+ * enable all interrupts but GSI_BREAK_POINT.
+ * Inter-EE commands/interrupts are not supported.
+ */
__gsi_config_type_irq(props->ee, ~0, ~0);
__gsi_config_ch_irq(props->ee, ~0, ~0);
__gsi_config_evt_irq(props->ee, ~0, ~0);
@@ -692,8 +675,6 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
__gsi_config_glob_irq(props->ee, ~0, ~0);
__gsi_config_gen_irq(props->ee, ~0,
~GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK);
- __gsi_config_inter_ee_ch_irq(props->ee, ~0, ~0);
- __gsi_config_inter_ee_evt_irq(props->ee, ~0, ~0);
gsi_writel(props->intr, gsi_ctx->base +
GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
@@ -791,8 +772,6 @@ int gsi_deregister_device(unsigned long dev_hdl, bool force)
__gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);
- __gsi_config_inter_ee_ch_irq(gsi_ctx->per.ee, ~0, 0);
- __gsi_config_inter_ee_evt_irq(gsi_ctx->per.ee, ~0, 0);
devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
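
The two inter-EE helpers removed above were the odd ones out; the __gsi_config_*_irq() helpers that remain all share one read-modify-write idiom, touching only the bits selected by mask. A generic sketch of that idiom, detached from the GSI register map (the accessor names are stand-ins for gsi_readl()/gsi_writel()):

#include <stdint.h>

/* Hypothetical MMIO accessors standing in for gsi_readl()/gsi_writel(). */
static inline uint32_t mmio_read(volatile uint32_t *reg)
{
	return *reg;
}

static inline void mmio_write(volatile uint32_t *reg, uint32_t v)
{
	*reg = v;
}

/* Update only the bits selected by mask, leaving the rest untouched:
 * the same (curr & ~mask) | (val & mask) expression used by the
 * __gsi_config_*_irq() helpers in gsi.c. */
static void masked_update(volatile uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t curr = mmio_read(reg);

	mmio_write(reg, (curr & ~mask) | (val & mask));
}

Passing mask = ~0 with val = ~0 enables every interrupt and with val = 0 masks them all, which is how gsi_register_device() and gsi_deregister_device() drive these helpers.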
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index bd8cdf3f9770..2ab8b79acc6d 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -653,11 +653,11 @@ static ssize_t gsi_rst_stats(struct file *file,
} else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
+ } else {
+ min = ch_id;
+ max = ch_id + 1;
}
- min = ch_id;
- max = ch_id + 1;
-
for (ch_id = min; ch_id < max; ch_id++)
memset(&gsi_ctx->chan[ch_id].stats, 0,
sizeof(gsi_ctx->chan[ch_id].stats));
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 208a4ce1e40e..09d1166e29a6 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/ipa_uc_offload.h>
#include "ipa_api.h"
#define DRV_NAME "ipa"
@@ -369,6 +370,24 @@ int ipa_reset_endpoint(u32 clnt_hdl)
}
EXPORT_SYMBOL(ipa_reset_endpoint);
+/**
+* ipa_disable_endpoint() - Disable an endpoint from the IPA perspective
+* @clnt_hdl: [in] IPA client handle
+*
+* Returns: 0 on success, negative on failure
+*
+* Note: Should not be called from atomic context
+*/
+int ipa_disable_endpoint(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_disable_endpoint, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_disable_endpoint);
+
/**
* ipa_cfg_ep - IPA end-point configuration
@@ -2803,6 +2822,35 @@ void ipa_recycle_wan_skb(struct sk_buff *skb)
}
EXPORT_SYMBOL(ipa_recycle_wan_skb);
+/**
+ * ipa_setup_uc_ntn_pipes() - set up uc offload pipes
+ */
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_setup_uc_ntn_pipes, inp,
+ notify, priv, hdr_len, outp);
+
+ return ret;
+}
+
+/**
+ * ipa_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_tear_down_uc_offload_pipes, ipa_ep_idx_ul,
+ ipa_ep_idx_dl);
+
+ return ret;
+}
+
static const struct dev_pm_ops ipa_pm_ops = {
.suspend_noirq = ipa_ap_suspend,
.resume_noirq = ipa_ap_resume,
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 862bdc475025..eab048323bd5 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -11,6 +11,7 @@
*/
#include <linux/ipa_mhi.h>
+#include <linux/ipa_uc_offload.h>
#include "ipa_common_i.h"
#ifndef _IPA_API_H_
@@ -26,6 +27,8 @@ struct ipa_api_controller {
int (*ipa_clear_endpoint_delay)(u32 clnt_hdl);
+ int (*ipa_disable_endpoint)(u32 clnt_hdl);
+
int (*ipa_cfg_ep)(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
int (*ipa_cfg_ep_nat)(u32 clnt_hdl,
@@ -360,6 +363,12 @@ struct ipa_api_controller {
void (*ipa_recycle_wan_skb)(struct sk_buff *skb);
+ int (*ipa_setup_uc_ntn_pipes)(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+
+ int (*ipa_tear_down_uc_offload_pipes)(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl);
};
#ifdef CONFIG_IPA
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
index aac473f62751..61cef2d71960 100644
--- a/drivers/platform/msm/ipa/ipa_clients/Makefile
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o
-obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o
\ No newline at end of file
+obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
+obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
new file mode 100644
index 000000000000..069f0a2e3fee
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -0,0 +1,597 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_uc_offload.h>
+#include <linux/msm_ipa.h>
+#include "../ipa_common_i.h"
+
+#define IPA_NTN_DMA_POOL_ALIGNMENT 8
+#define OFFLOAD_DRV_NAME "ipa_uc_offload"
+#define IPA_UC_OFFLOAD_DBG(fmt, args...) \
+ do { \
+ pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_LOW(fmt, args...) \
+ do { \
+ pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_ERR(fmt, args...) \
+ do { \
+ pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_INFO(fmt, args...) \
+ do { \
+ pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+enum ipa_uc_offload_state {
+ IPA_UC_OFFLOAD_STATE_INVALID,
+ IPA_UC_OFFLOAD_STATE_INITIALIZED,
+ IPA_UC_OFFLOAD_STATE_UP,
+ IPA_UC_OFFLOAD_STATE_DOWN,
+};
+
+struct ipa_uc_offload_ctx {
+ enum ipa_uc_offload_proto proto;
+ enum ipa_uc_offload_state state;
+ void *priv;
+ u8 hdr_len;
+ u32 partial_hdr_hdl[IPA_IP_MAX];
+ char netdev_name[IPA_RESOURCE_NAME_MAX];
+ ipa_notify_cb notify;
+ struct completion ntn_completion;
+};
+
+static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
+
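+/*
+ * ipa_commit_partial_hdr() - commit IPv4 and IPv6 partial headers for the
+ * given netdev to the IPA header table; the headers are marked partial with
+ * the Ethernet-II offset taken from the destination MAC address offset.
+ */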
+static int ipa_commit_partial_hdr(
+ struct ipa_ioc_add_hdr *hdr,
+ const char *netdev_name,
+ struct ipa_hdr_info *hdr_info)
+{
+ int i;
+
+ if (hdr == NULL || hdr_info == NULL) {
+ IPA_UC_OFFLOAD_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ hdr->commit = 1;
+ hdr->num_hdrs = 2;
+
+ snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+ "%s_ipv4", netdev_name);
+ snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+ "%s_ipv6", netdev_name);
+ for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+ hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
+ memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
+ hdr->hdr[i].type = hdr_info[i].hdr_type;
+ hdr->hdr[i].is_partial = 1;
+ hdr->hdr[i].is_eth2_ofst_valid = 1;
+ hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
+ }
+
+ if (ipa_add_hdr(hdr)) {
+ IPA_UC_OFFLOAD_ERR("fail to add partial headers\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
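+/*
+ * ipa_uc_offload_ntn_reg_intf() - register an NTN offload interface: commit
+ * partial headers for the netdev and register its TX/RX properties with IPA.
+ */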
+static int ipa_uc_offload_ntn_reg_intf(
+ struct ipa_uc_offload_intf_params *inp,
+ struct ipa_uc_offload_out_params *outp,
+ struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_tx_intf tx;
+ struct ipa_rx_intf rx;
+ struct ipa_ioc_tx_intf_prop tx_prop[2];
+ struct ipa_ioc_rx_intf_prop rx_prop[2];
+ u32 len;
+ int ret = 0;
+
+ IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
+ inp->netdev_name);
+
+ memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
+ ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
+ ntn_ctx->notify = inp->notify;
+ ntn_ctx->priv = inp->priv;
+
+ /* add partial header */
+ len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
+ hdr = kzalloc(len, GFP_KERNEL);
+ if (hdr == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
+ return -ENOMEM;
+ }
+
+ if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
+ IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ /* populate tx prop */
+ tx.num_props = 2;
+ tx.prop = tx_prop;
+
+ memset(tx_prop, 0, sizeof(tx_prop));
+ tx_prop[0].ip = IPA_IP_v4;
+ tx_prop[0].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+ tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+ memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
+ sizeof(tx_prop[0].hdr_name));
+
+ tx_prop[1].ip = IPA_IP_v6;
+ tx_prop[1].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+ tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+ memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
+ sizeof(tx_prop[1].hdr_name));
+
+ /* populate rx prop */
+ rx.num_props = 2;
+ rx.prop = rx_prop;
+
+ memset(rx_prop, 0, sizeof(rx_prop));
+ rx_prop[0].ip = IPA_IP_v4;
+ rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+ if (inp->is_meta_data_valid) {
+ rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_prop[0].attrib.meta_data = inp->meta_data;
+ rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask;
+ }
+
+ rx_prop[1].ip = IPA_IP_v6;
+ rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+ if (inp->is_meta_data_valid) {
+ rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_prop[1].attrib.meta_data = inp->meta_data;
+ rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask;
+ }
+
+ if (ipa_register_intf(inp->netdev_name, &tx, &rx)) {
+ IPA_UC_OFFLOAD_ERR("fail to add interface prop\n");
+ memset(ntn_ctx, 0, sizeof(*ntn_ctx));
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
+ ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
+ init_completion(&ntn_ctx->ntn_completion);
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
+
+fail:
+ kfree(hdr);
+ return ret;
+}
+
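+/*
+ * ipa_uc_offload_reg_intf() - allocate (or reuse) the per-protocol offload
+ * context and register the interface for the requested protocol (NTN only).
+ */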
+int ipa_uc_offload_reg_intf(
+ struct ipa_uc_offload_intf_params *inp,
+ struct ipa_uc_offload_out_params *outp)
+{
+ struct ipa_uc_offload_ctx *ctx;
+ int ret = 0;
+
+ if (inp == NULL || outp == NULL) {
+ IPA_UC_OFFLOAD_ERR("invalid params in=%p out=%p\n", inp, outp);
+ return -EINVAL;
+ }
+
+ if (inp->proto <= IPA_UC_INVALID ||
+ inp->proto >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto);
+ return -EINVAL;
+ }
+
+ if (!ipa_uc_offload_ctx[inp->proto]) {
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n");
+ return -EFAULT;
+ }
+ ipa_uc_offload_ctx[inp->proto] = ctx;
+ ctx->proto = inp->proto;
+ } else
+ ctx = ipa_uc_offload_ctx[inp->proto];
+
+ if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) {
+ IPA_UC_OFFLOAD_ERR("Already Initialized\n");
+ return -EINVAL;
+ }
+
+ if (ctx->proto == IPA_UC_NTN) {
+ ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx);
+ if (!ret)
+ outp->clnt_hndl = IPA_UC_NTN;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
+
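+/* IPA RM request/release callbacks for the ODU_ADAPT_CONS resource */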
+static int ipa_uc_ntn_cons_release(void)
+{
+ return 0;
+}
+
+static int ipa_uc_ntn_cons_request(void)
+{
+ int ret = 0;
+ struct ipa_uc_offload_ctx *ntn_ctx;
+
+ ntn_ctx = ipa_uc_offload_ctx[IPA_UC_NTN];
+ if (!ntn_ctx) {
+ IPA_UC_OFFLOAD_ERR("NTN is not initialized\n");
+ ret = -EFAULT;
+ } else if (ntn_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+ IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ntn_ctx->state);
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
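+/*
+ * IPA RM notification callback for the ODU_ADAPT_PROD resource: completes
+ * ntn_completion once the resource grant arrives.
+ */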
+static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event,
+ unsigned long data)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+
+ offload_ctx = (struct ipa_uc_offload_ctx *)user_data;
+ if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID &&
+ offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) {
+ IPA_UC_OFFLOAD_ERR("Invalid user data\n");
+ return;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED)
+ IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state);
+
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ complete_all(&offload_ctx->ntn_completion);
+ break;
+
+ case IPA_RM_RESOURCE_RELEASED:
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d", event);
+ break;
+ }
+}
+
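+/*
+ * ipa_uc_ntn_conn_pipes() - validate ring/buffer-pool alignment, create the
+ * ODU_ADAPT_PROD/CONS RM resources, set up the uC NTN pipes and request the
+ * producer resource (waiting up to 10s for the grant).
+ */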
+int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
+ struct ipa_ntn_conn_out_params *outp,
+ struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ struct ipa_rm_create_params param;
+ int result = 0;
+
+ if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+ inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+ IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
+ return -EINVAL;
+ }
+ if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+ inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+ IPA_UC_OFFLOAD_ERR("alignment failure on RX\n");
+ return -EINVAL;
+ }
+
+ memset(&param, 0, sizeof(param));
+ param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+ param.reg_params.user_data = ntn_ctx;
+ param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
+ param.floor_voltage = IPA_VOLTAGE_SVS;
+ result = ipa_rm_create_resource(&param);
+ if (result) {
+ IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
+ return -EFAULT;
+ }
+
+ memset(&param, 0, sizeof(param));
+ param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ param.request_resource = ipa_uc_ntn_cons_request;
+ param.release_resource = ipa_uc_ntn_cons_release;
+ result = ipa_rm_create_resource(&param);
+ if (result) {
+ IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
+ goto fail_create_rm_cons;
+ }
+
+ if (ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to add rm dependency\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
+ ntn_ctx->priv, ntn_ctx->hdr_len, outp)) {
+ IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
+ result = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+ if (result == -EINPROGRESS) {
+ if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
+ 10*HZ) == 0) {
+ IPA_UC_OFFLOAD_ERR("ODU PROD resource req time out\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ } else if (result != 0) {
+ IPA_UC_OFFLOAD_ERR("fail to request resource\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+fail_create_rm_cons:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+
+ return result;
+}
+
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
+ struct ipa_uc_offload_conn_out_params *outp)
+{
+ int ret = 0;
+ struct ipa_uc_offload_ctx *offload_ctx;
+
+ if (!(inp && outp)) {
+ IPA_UC_OFFLOAD_ERR("bad parm. in=%p out=%p\n", inp, outp);
+ return -EINVAL;
+ }
+
+ if (inp->clnt_hndl <= IPA_UC_INVALID ||
+ inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("invalid client handle %d\n",
+ inp->clnt_hndl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid Handle\n");
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
+ IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
+ return -EPERM;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn,
+ offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", offload_ctx->proto);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
+
+int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+ struct ipa_rm_perf_profile rm_profile;
+ enum ipa_rm_resource_name resource_name;
+
+ if (profile == NULL) {
+ IPA_UC_OFFLOAD_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ rm_profile.max_supported_bandwidth_mbps =
+ profile->max_supported_bw_mbps;
+
+ if (profile->client == IPA_CLIENT_ODU_PROD) {
+ resource_name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+ } else if (profile->client == IPA_CLIENT_ODU_TETH_CONS) {
+ resource_name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ } else {
+ IPA_UC_OFFLOAD_ERR("not supported\n");
+ return -EINVAL;
+ }
+
+ if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
+ IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_perf_profile);
+
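+/*
+ * ipa_uc_ntn_disconn_pipes() - delete the RM dependency and resources created
+ * at connect time, then tear down the uC NTN offload pipes.
+ */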
+static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ int ipa_ep_idx_ul, ipa_ep_idx_dl;
+
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
+ if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete rm dependency\n");
+ return -EFAULT;
+ }
+
+ if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
+ return -EFAULT;
+ }
+
+ if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+ return -EFAULT;
+ }
+
+ ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ODU_PROD);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ODU_TETH_CONS);
+ if (ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl)) {
+ IPA_UC_OFFLOAD_ERR("fail to tear down uc offload pipes\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+ int ret = 0;
+
+ if (clnt_hdl <= IPA_UC_INVALID ||
+ clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid client Handle\n");
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+ IPA_UC_OFFLOAD_ERR("Invalid state\n");
+ return -EINVAL;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_disconn_pipes(offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes);
+
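+/*
+ * ipa_uc_ntn_cleanup() - delete the partial headers added at registration and
+ * deregister the netdev interface from IPA.
+ */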
+static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ int len, result = 0;
+ struct ipa_ioc_del_hdr *hdr;
+
+ len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
+ hdr = kzalloc(len, GFP_KERNEL);
+ if (hdr == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
+ return -ENOMEM;
+ }
+
+ hdr->commit = 1;
+ hdr->num_hdls = 2;
+ hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0];
+ hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1];
+
+ if (ipa_del_hdr(hdr)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete partial header\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa_deregister_intf(ntn_ctx->netdev_name)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+fail:
+ kfree(hdr);
+ return result;
+}
+
+int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+ int ret = 0;
+
+ if (clnt_hdl <= IPA_UC_INVALID ||
+ clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+ IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
+ return -EINVAL;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_cleanup(offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ kfree(offload_ctx);
+ offload_ctx = NULL;
+ ipa_uc_offload_ctx[clnt_hdl] = NULL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_cleanup);
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 060613281e4c..115348251d17 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -17,6 +17,7 @@
#define _IPA_COMMON_I_H_
#include <linux/ipc_logging.h>
#include <linux/ipa.h>
+#include <linux/ipa_uc_offload.h>
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
@@ -342,6 +343,11 @@ int ipa_uc_state_check(void);
void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb);
void ipa_set_tag_process_before_gating(bool val);
bool ipa_has_open_aggr_frame(enum ipa_client_type client);
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
u8 *ipa_write_64(u64 w, u8 *dest);
u8 *ipa_write_32(u32 w, u8 *dest);
diff --git a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
new file mode 100644
index 000000000000..ae6cfc4fcd50
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#ifndef _IPA_UC_OFFLOAD_COMMON_I_H_
+#define _IPA_UC_OFFLOAD_COMMON_I_H_
+
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/Makefile b/drivers/platform/msm/ipa/ipa_v2/Makefile
index 435acbf1cab8..69b8a4c94461 100644
--- a/drivers/platform/msm/ipa/ipa_v2/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v2/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_IPA) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
- ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o
+ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index cb808bd2a8b7..fc3d9f355da6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -65,6 +65,10 @@
#define IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+#define MAX_POLLING_ITERATION 40
+#define MIN_POLLING_ITERATION 1
+#define ONE_MSEC 1
+
#define IPA_AGGR_STR_IN_BYTES(str) \
(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
@@ -3613,6 +3617,19 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
ipa_ctx->use_dma_zone = resource_p->use_dma_zone;
ipa_ctx->tethered_flow_control = resource_p->tethered_flow_control;
+ /* Setting up IPA RX polling sleep timeout (msec) */
+ ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec,
+ &ipa_ctx->ipa_rx_max_timeout_usec,
+ resource_p->ipa_rx_polling_sleep_msec);
+
+ /* Setting up ipa polling iteration */
+ if ((resource_p->ipa_polling_iteration >= MIN_POLLING_ITERATION)
+ && (resource_p->ipa_polling_iteration <= MAX_POLLING_ITERATION))
+ ipa_ctx->ipa_polling_iteration =
+ resource_p->ipa_polling_iteration;
+ else
+ ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION;
+
/* default aggregation parameters */
ipa_ctx->aggregation_type = IPA_MBIM_16;
ipa_ctx->aggregation_byte_limit = 1;
@@ -4035,6 +4052,12 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
else
IPADBG(":wdi init ok\n");
+ result = ipa_ntn_init();
+ if (result)
+ IPAERR(":ntn init failed (%d)\n", -result);
+ else
+ IPADBG(":ntn init ok\n");
+
ipa_ctx->q6_proxy_clk_vote_valid = true;
ipa_register_panic_hdlr();
@@ -4268,6 +4291,31 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
if (result)
ipa_drv_res->ee = 0;
+ /* Get IPA RX polling sleep timeout (msec) */
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,rx-polling-sleep-ms",
+ &ipa_drv_res->ipa_rx_polling_sleep_msec);
+
+ if (result) {
+ ipa_drv_res->ipa_rx_polling_sleep_msec = ONE_MSEC;
+ IPADBG("using default polling timeout of 1 msec\n");
+ } else {
+ IPADBG(": found ipa_drv_res->ipa_rx_polling_sleep_msec = %d",
+ ipa_drv_res->ipa_rx_polling_sleep_msec);
+ }
+
+ /* Get IPA Polling Iteration */
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,ipa-polling-iteration",
+ &ipa_drv_res->ipa_polling_iteration);
+ if (result) {
+ ipa_drv_res->ipa_polling_iteration = MAX_POLLING_ITERATION;
+ IPADBG("using default polling iteration\n");
+ } else {
+ IPADBG(": found ipa_drv_res->ipa_polling_iteration = %d",
+ ipa_drv_res->ipa_polling_iteration);
+ }
+
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
index 64246ac4eec0..66e329a03df7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
@@ -560,22 +560,30 @@ int ipa2_disconnect(u32 clnt_hdl)
if (!ep->keep_ipa_awake)
IPA_ACTIVE_CLIENTS_INC_EP(client_type);
- /* Set Disconnect in Progress flag. */
- spin_lock(&ipa_ctx->disconnect_lock);
- ep->disconnect_in_progress = true;
- spin_unlock(&ipa_ctx->disconnect_lock);
-
- /* Notify uc to stop monitoring holb on USB BAM Producer pipe. */
- if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
- ipa_uc_monitor_holb(ep->client, false);
- IPADBG("Disabling holb monitor for client: %d\n", ep->client);
- }
+ /* For USB 2.0 controller, first the ep will be disabled.
+ * so this sequence is not needed again when disconnecting the pipe.
+ */
+ if (!ep->ep_disabled) {
+ /* Set Disconnect in Progress flag. */
+ spin_lock(&ipa_ctx->disconnect_lock);
+ ep->disconnect_in_progress = true;
+ spin_unlock(&ipa_ctx->disconnect_lock);
+
+ /* Notify uc to stop monitoring holb on USB BAM
+ * Producer pipe.
+ */
+ if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
+ ipa_uc_monitor_holb(ep->client, false);
+ IPADBG("Disabling holb monitor for client: %d\n",
+ ep->client);
+ }
- result = ipa_disable_data_path(clnt_hdl);
- if (result) {
- IPAERR("disable data path failed res=%d clnt=%d.\n", result,
- clnt_hdl);
- return -EPERM;
+ result = ipa_disable_data_path(clnt_hdl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n",
+ result, clnt_hdl);
+ return -EPERM;
+ }
}
result = sps_disconnect(ep->ep_hdl);
@@ -784,6 +792,82 @@ int ipa2_clear_endpoint_delay(u32 clnt_hdl)
}
/**
+ * ipa2_disable_endpoint() - low-level IPA client disable endpoint
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to
+ * disable the pipe from IPA in BAM-BAM mode.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_disable_endpoint(u32 clnt_hdl)
+{
+ int result;
+ struct ipa_ep_context *ep;
+ enum ipa_client_type client_type;
+ unsigned long bam;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+ client_type = ipa2_get_client_mapping(clnt_hdl);
+ IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+ /* Set Disconnect in Progress flag. */
+ spin_lock(&ipa_ctx->disconnect_lock);
+ ep->disconnect_in_progress = true;
+ spin_unlock(&ipa_ctx->disconnect_lock);
+
+ /* Notify uc to stop monitoring holb on USB BAM Producer pipe. */
+ if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
+ ipa_uc_monitor_holb(ep->client, false);
+ IPADBG("Disabling holb monitor for client: %d\n", ep->client);
+ }
+
+ result = ipa_disable_data_path(clnt_hdl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ clnt_hdl);
+ goto fail;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ bam = ep->connect.source;
+ else
+ bam = ep->connect.destination;
+
+ result = sps_pipe_reset(bam, clnt_hdl);
+ if (result) {
+ IPAERR("SPS pipe reset failed.\n");
+ goto fail;
+ }
+
+ ep->ep_disabled = true;
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+ IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+ return 0;
+
+fail:
+ IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+ return -EPERM;
+}
+
+/**
* ipa_sps_connect_safe() - connect endpoint from BAM prespective
* @h: [in] sps pipe handle
* @connect: [in] sps connect parameters
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 566cb4d03c51..0eab77d27760 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -25,6 +25,12 @@
* IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) \
+ IPA_MAX_MSG_LEN)
+#define RX_MIN_POLL_CNT "Rx Min Poll Count"
+#define RX_MAX_POLL_CNT "Rx Max Poll Count"
+#define MAX_COUNT_LENGTH 6
+#define MAX_POLLING_ITERATION 40
+#define MIN_POLLING_ITERATION 1
+
#define IPA_DUMP_STATUS_FIELD(f) \
pr_err(#f "=0x%x\n", status->f)
@@ -104,12 +110,16 @@ static struct dentry *dfile_ip6_flt;
static struct dentry *dfile_stats;
static struct dentry *dfile_wstats;
static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
static struct dentry *dfile_dbg_cnt;
static struct dentry *dfile_msg;
static struct dentry *dfile_ip4_nat;
static struct dentry *dfile_rm_stats;
static struct dentry *dfile_status_stats;
static struct dentry *dfile_active_clients;
+static struct dentry *dfile_ipa_rx_poll_timeout;
+static struct dentry *dfile_ipa_poll_iteration;
+
static char dbg_buff[IPA_MAX_MSG_LEN];
static char *active_clients_buf;
static s8 ep_reg_idx;
@@ -1091,6 +1101,110 @@ nxt_clnt_cons:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
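+
+/* debugfs read handler: dump the uC NTN TX/RX channel statistics */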
+static ssize_t ipa_read_ntn(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ struct IpaHwStatsNTNInfoData_t stats;
+ int nbytes;
+ int cnt = 0;
+
+ if (!ipa2_get_ntn_stats(&stats)) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "TX num_pkts_processed=%u\n"
+ "TX tail_ptr_val=%u\n"
+ "TX num_db_fired=%u\n"
+ "TX ringFull=%u\n"
+ "TX ringEmpty=%u\n"
+ "TX ringUsageHigh=%u\n"
+ "TX ringUsageLow=%u\n"
+ "TX RingUtilCount=%u\n"
+ "TX bamFifoFull=%u\n"
+ "TX bamFifoEmpty=%u\n"
+ "TX bamFifoUsageHigh=%u\n"
+ "TX bamFifoUsageLow=%u\n"
+ "TX bamUtilCount=%u\n"
+ "TX num_db=%u\n"
+ "TX num_unexpected_db=%u\n"
+ "TX num_bam_int_handled=%u\n"
+ "TX num_bam_int_in_non_running_state=%u\n"
+ "TX num_qmb_int_handled=%u\n"
+ "TX num_bam_int_handled_while_wait_for_bam=%u\n"
+ "TX num_bam_int_handled_while_not_in_bam=%u\n",
+ TX_STATS(num_pkts_processed),
+ TX_STATS(tail_ptr_val),
+ TX_STATS(num_db_fired),
+ TX_STATS(tx_comp_ring_stats.ringFull),
+ TX_STATS(tx_comp_ring_stats.ringEmpty),
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+ TX_STATS(tx_comp_ring_stats.ringUsageLow),
+ TX_STATS(tx_comp_ring_stats.RingUtilCount),
+ TX_STATS(bam_stats.bamFifoFull),
+ TX_STATS(bam_stats.bamFifoEmpty),
+ TX_STATS(bam_stats.bamFifoUsageHigh),
+ TX_STATS(bam_stats.bamFifoUsageLow),
+ TX_STATS(bam_stats.bamUtilCount),
+ TX_STATS(num_db),
+ TX_STATS(num_unexpected_db),
+ TX_STATS(num_bam_int_handled),
+ TX_STATS(num_bam_int_in_non_running_state),
+ TX_STATS(num_qmb_int_handled),
+ TX_STATS(num_bam_int_handled_while_wait_for_bam),
+ TX_STATS(num_bam_int_handled_while_not_in_bam));
+ cnt += nbytes;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "RX max_outstanding_pkts=%u\n"
+ "RX num_pkts_processed=%u\n"
+ "RX rx_ring_rp_value=%u\n"
+ "RX ringFull=%u\n"
+ "RX ringEmpty=%u\n"
+ "RX ringUsageHigh=%u\n"
+ "RX ringUsageLow=%u\n"
+ "RX RingUtilCount=%u\n"
+ "RX bamFifoFull=%u\n"
+ "RX bamFifoEmpty=%u\n"
+ "RX bamFifoUsageHigh=%u\n"
+ "RX bamFifoUsageLow=%u\n"
+ "RX bamUtilCount=%u\n"
+ "RX num_bam_int_handled=%u\n"
+ "RX num_db=%u\n"
+ "RX num_unexpected_db=%u\n"
+ "RX num_pkts_in_dis_uninit_state=%u\n"
+ "num_ic_inj_vdev_change=%u\n"
+ "num_ic_inj_fw_desc_change=%u\n",
+ RX_STATS(max_outstanding_pkts),
+ RX_STATS(num_pkts_processed),
+ RX_STATS(rx_ring_rp_value),
+ RX_STATS(rx_ind_ring_stats.ringFull),
+ RX_STATS(rx_ind_ring_stats.ringEmpty),
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+ RX_STATS(rx_ind_ring_stats.ringUsageLow),
+ RX_STATS(rx_ind_ring_stats.RingUtilCount),
+ RX_STATS(bam_stats.bamFifoFull),
+ RX_STATS(bam_stats.bamFifoEmpty),
+ RX_STATS(bam_stats.bamFifoUsageHigh),
+ RX_STATS(bam_stats.bamFifoUsageLow),
+ RX_STATS(bam_stats.bamUtilCount),
+ RX_STATS(num_bam_int_handled),
+ RX_STATS(num_db),
+ RX_STATS(num_unexpected_db),
+ RX_STATS(num_pkts_in_dis_uninit_state),
+ RX_STATS(num_bam_int_handled_while_not_in_bam),
+ RX_STATS(num_bam_int_handled_while_in_bam_state));
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Fail to read NTN stats\n");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
static ssize_t ipa_read_wdi(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -1597,6 +1711,97 @@ static ssize_t ipa2_clear_active_clients_log(struct file *file,
return count;
}
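+
+/* debugfs handlers to read/adjust the RX polling sleep window (usec) */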
+static ssize_t ipa_read_rx_polling_timeout(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int min_cnt;
+ int max_cnt;
+
+ if (active_clients_buf == NULL) {
+ IPAERR("Active Clients buffer is not allocated");
+ return 0;
+ }
+ memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE);
+ min_cnt = scnprintf(active_clients_buf,
+ IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
+ "Rx Min Poll count = %u\n",
+ ipa_ctx->ipa_rx_min_timeout_usec);
+
+ max_cnt = scnprintf(active_clients_buf + min_cnt,
+ IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE - min_cnt,
+ "Rx Max Poll count = %u\n",
+ ipa_ctx->ipa_rx_max_timeout_usec);
+
+ return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf,
+ min_cnt + max_cnt);
+}
+
+static ssize_t ipa_write_rx_polling_timeout(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ s8 polltime = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ if (copy_from_user(dbg_buff, ubuf, count))
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+
+ if (kstrtos8(dbg_buff, 0, &polltime))
+ return -EFAULT;
+
+ ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec,
+ &ipa_ctx->ipa_rx_max_timeout_usec, polltime);
+ return count;
+}
+
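+/* debugfs handlers to read/adjust the RX polling inactivity iteration count */
+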
+static ssize_t ipa_read_polling_iteration(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int cnt;
+
+ if (active_clients_buf == NULL) {
+ IPAERR("Active Clients buffer is not allocated");
+ return 0;
+ }
+
+ memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE);
+
+ cnt = scnprintf(active_clients_buf, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
+ "Polling Iteration count = %u\n",
+ ipa_ctx->ipa_polling_iteration);
+
+ return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf,
+ cnt);
+}
+
+static ssize_t ipa_write_polling_iteration(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ s8 iteration_cnt = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ if (copy_from_user(dbg_buff, ubuf, count))
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+
+ if (kstrtos8(dbg_buff, 0, &iteration_cnt))
+ return -EFAULT;
+
+ if ((iteration_cnt >= MIN_POLLING_ITERATION) &&
+ (iteration_cnt <= MAX_POLLING_ITERATION))
+ ipa_ctx->ipa_polling_iteration = iteration_cnt;
+ else
+ ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION;
+
+ return count;
+}
+
const struct file_operations ipa_gen_reg_ops = {
.read = ipa_read_gen_reg,
};
@@ -1645,6 +1850,10 @@ const struct file_operations ipa_wdi_ops = {
.read = ipa_read_wdi,
};
+const struct file_operations ipa_ntn_ops = {
+ .read = ipa_read_ntn,
+};
+
const struct file_operations ipa_msg_ops = {
.read = ipa_read_msg,
};
@@ -1671,6 +1880,16 @@ const struct file_operations ipa2_active_clients = {
.write = ipa2_clear_active_clients_log,
};
+const struct file_operations ipa_rx_poll_time_ops = {
+ .read = ipa_read_rx_polling_timeout,
+ .write = ipa_write_rx_polling_timeout,
+};
+
+const struct file_operations ipa_poll_iteration_ops = {
+ .read = ipa_read_polling_iteration,
+ .write = ipa_write_polling_iteration,
+};
+
void ipa_debugfs_init(void)
{
const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
@@ -1797,6 +2016,13 @@ void ipa_debugfs_init(void)
goto fail;
}
+ dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+ &ipa_ntn_ops);
+ if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+ IPAERR("fail to create file for debug_fs ntn stats\n");
+ goto fail;
+ }
+
dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
&ipa_dbg_cnt_ops);
if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
@@ -1832,6 +2058,20 @@ void ipa_debugfs_init(void)
goto fail;
}
+ dfile_ipa_rx_poll_timeout = debugfs_create_file("ipa_rx_poll_time",
+ read_write_mode, dent, 0, &ipa_rx_poll_time_ops);
+ if (!dfile_ipa_rx_poll_timeout || IS_ERR(dfile_ipa_rx_poll_timeout)) {
+ IPAERR("fail to create file for debug_fs rx poll timeout\n");
+ goto fail;
+ }
+
+ dfile_ipa_poll_iteration = debugfs_create_file("ipa_poll_iteration",
+ read_write_mode, dent, 0, &ipa_poll_iteration_ops);
+ if (!dfile_ipa_poll_iteration || IS_ERR(dfile_ipa_poll_iteration)) {
+ IPAERR("fail to create file for debug_fs poll iteration\n");
+ goto fail;
+ }
+
file = debugfs_create_u32("enable_clock_scaling", read_write_mode,
dent, &ipa_ctx->enable_clock_scaling);
if (!file) {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 1c93ac16d419..005508fdcdc1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -20,8 +20,6 @@
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
-#define POLLING_MIN_SLEEP_RX 1010
-#define POLLING_MAX_SLEEP_RX 1050
#define POLLING_INACTIVITY_TX 40
#define POLLING_MIN_SLEEP_TX 400
#define POLLING_MAX_SLEEP_TX 500
@@ -42,6 +40,8 @@
IPA_GENERIC_RX_BUFF_BASE_SZ) -\
IPA_GENERIC_RX_BUFF_BASE_SZ)
+#define IPA_RX_BUFF_CLIENT_HEADROOM 256
+
/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
@@ -1045,8 +1045,8 @@ static void ipa_handle_rx(struct ipa_sys_context *sys)
if (cnt == 0) {
inactive_cycles++;
trace_idle_sleep_enter(sys->ep->client);
- usleep_range(POLLING_MIN_SLEEP_RX,
- POLLING_MAX_SLEEP_RX);
+ usleep_range(ipa_ctx->ipa_rx_min_timeout_usec,
+ ipa_ctx->ipa_rx_max_timeout_usec);
trace_idle_sleep_exit(sys->ep->client);
} else {
inactive_cycles = 0;
@@ -1059,7 +1059,7 @@ static void ipa_handle_rx(struct ipa_sys_context *sys)
if (sys->len == 0)
break;
- } while (inactive_cycles <= POLLING_INACTIVITY_RX);
+ } while (inactive_cycles <= ipa_ctx->ipa_polling_iteration);
trace_poll_to_intr(sys->ep->client);
ipa_rx_switch_to_intr_mode(sys);
@@ -2290,6 +2290,21 @@ static void ipa_cleanup_rx(struct ipa_sys_context *sys)
}
}
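+
+/*
+ * Copy the received payload into a new skb with IPA_RX_BUFF_CLIENT_HEADROOM
+ * of headroom instead of cloning the original RX buffer.
+ */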
+static struct sk_buff *ipa_skb_copy_for_client(struct sk_buff *skb, int len)
+{
+ struct sk_buff *skb2 = NULL;
+
+ skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
+ if (likely(skb2)) {
+ /* Set the data pointer */
+ skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
+ memcpy(skb2->data, skb->data, len);
+ skb2->len = len;
+ skb_set_tail_pointer(skb2, len);
+ }
+
+ return skb2;
+}
static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
struct ipa_sys_context *sys)
@@ -2486,7 +2501,8 @@ begin:
sys->drop_packet = true;
}
- skb2 = skb_clone(skb, GFP_KERNEL);
+ skb2 = ipa_skb_copy_for_client(skb,
+ status->pkt_len + IPA_PKT_STATUS_SIZE);
if (likely(skb2)) {
if (skb->len < len + IPA_PKT_STATUS_SIZE) {
IPADBG("SPL skb len %d len %d\n",
@@ -2529,7 +2545,7 @@ begin:
IPA_PKT_STATUS_SIZE);
}
} else {
- IPAERR("fail to clone\n");
+ IPAERR("fail to alloc skb\n");
if (skb->len < len) {
sys->prev_skb = NULL;
sys->len_rem = len - skb->len +
@@ -3171,7 +3187,7 @@ static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
IPA_GENERIC_RX_BUFF_SZ(
ipa_adjust_ra_buff_base_sz(
in->ipa_ep_cfg.aggr.
- aggr_byte_limit));
+ aggr_byte_limit - IPA_HEADROOM));
in->ipa_ep_cfg.aggr.
aggr_byte_limit =
sys->rx_buff_sz < in->
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 790a0b41147e..62e026262663 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -581,7 +581,8 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
bad_len:
- hdr_entry->ref_cnt--;
+ if (add_ref_hdr)
+ hdr_entry->ref_cnt--;
entry->cookie = 0;
kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, entry);
return -EPERM;
@@ -761,7 +762,7 @@ static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
}
if (release_hdr)
- __ipa_release_hdr(entry->hdr->id);
+ __ipa_del_hdr(entry->hdr->id);
/* move the offset entry to appropriate free list */
list_move(&entry->offset_entry->link,
@@ -1089,12 +1090,19 @@ int ipa2_reset_hdr(void)
&ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
/* do not remove the default header */
- if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME))
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ if (entry->is_hdr_proc_ctx) {
+ mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
+ IPAERR("default header is proc ctx\n");
+ return -EFAULT;
+ }
continue;
+ }
if (ipa_id_find(entry->id) == NULL) {
- WARN_ON(1);
mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
return -EFAULT;
}
if (entry->is_hdr_proc_ctx) {
@@ -1147,8 +1155,8 @@ int ipa2_reset_hdr(void)
link) {
if (ipa_id_find(ctx_entry->id) == NULL) {
- WARN_ON(1);
mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
return -EFAULT;
}
list_del(&ctx_entry->link);
@@ -1311,8 +1319,8 @@ int ipa2_put_hdr(u32 hdr_hdl)
goto bail;
}
- if (entry == NULL || entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("invalid header entry\n");
result = -EINVAL;
goto bail;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index f94418efc927..5ea7a08b3135 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -26,12 +26,14 @@
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>
+#include <linux/ipa_uc_offload.h>
#include "ipa_hw_defs.h"
#include "ipa_ram_mmap.h"
#include "ipa_reg.h"
#include "ipa_qmi_service.h"
#include "../ipa_api.h"
#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
@@ -242,7 +244,7 @@ struct ipa_rt_tbl {
* @is_partial: flag indicating if header table entry is partial
* @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
* true - hdr entry resides in DDR and pointed to by proc ctx
- * @phys_base: physical address of entry in SRAM when is_hdr_proc_ctx is true,
+ * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
* else 0
* @proc_ctx: processing context header
* @offset_entry: entry's offset
@@ -544,7 +546,7 @@ struct ipa_ep_context {
bool skip_ep_cfg;
bool keep_ipa_awake;
struct ipa_wlan_stats wstats;
- u32 wdi_state;
+ u32 uc_offload_state;
u32 rx_replenish_threshold;
bool disconnect_in_progress;
u32 qmi_request_sent;
@@ -553,6 +555,7 @@ struct ipa_ep_context {
bool switch_to_intr;
int inactive_cycles;
u32 eot_in_poll_err;
+ bool ep_disabled;
/* sys MUST be the last element of this struct */
struct ipa_sys_context *sys;
@@ -816,134 +819,6 @@ struct ipa_tag_completion {
struct ipa_controller;
/**
- * @brief Enum value determined based on the feature it
- * corresponds to
- * +----------------+----------------+
- * | 3 bits | 5 bits |
- * +----------------+----------------+
- * | HW_FEATURE | OPCODE |
- * +----------------+----------------+
- *
- */
-#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
-#define EXTRACT_UC_FEATURE(value) (value >> 5)
-
-#define IPA_HW_NUM_FEATURES 0x8
-
-/**
- * enum ipa_hw_features - Values that represent the features supported in IPA HW
- * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
- * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
- * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
-*/
-enum ipa_hw_features {
- IPA_HW_FEATURE_COMMON = 0x0,
- IPA_HW_FEATURE_MHI = 0x1,
- IPA_HW_FEATURE_WDI = 0x3,
- IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
-};
-
-/**
- * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
- * section in 128B shared memory located in offset zero of SW Partition in IPA
- * SRAM.
- * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
- * @cmdParams : CPU->HW command parameter. The parameter filed can hold 32 bits
- * of parameters (immediate parameters) and point on structure in system memory
- * (in such case the address must be accessible for HW)
- * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
- * @responseParams : HW->CPU response parameter. The parameter filed can hold 32
- * bits of parameters (immediate parameters) and point on structure in system
- * memory
- * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
- * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 bits of
- * parameters (immediate parameters) and point on structure in system memory
- * @firstErrorAddress : Contains the address of first error-source on SNOC
- * @hwState : State of HW. The state carries information regarding the error type.
- * @warningCounter : The warnings counter. The counter carries information regarding
- * non fatal errors in HW
- * @interfaceVersionCommon : The Common interface version as reported by HW
- *
- * The shared memory is used for communication between IPA HW and CPU.
- */
-struct IpaHwSharedMemCommonMapping_t {
- u8 cmdOp;
- u8 reserved_01;
- u16 reserved_03_02;
- u32 cmdParams;
- u8 responseOp;
- u8 reserved_09;
- u16 reserved_0B_0A;
- u32 responseParams;
- u8 eventOp;
- u8 reserved_11;
- u16 reserved_13_12;
- u32 eventParams;
- u32 reserved_1B_18;
- u32 firstErrorAddress;
- u8 hwState;
- u8 warningCounter;
- u16 reserved_23_22;
- u16 interfaceVersionCommon;
- u16 reserved_27_26;
-} __packed;
-
-/**
- * union IpaHwFeatureInfoData_t - parameters for stats/config blob
- *
- * @offset : Location of a feature within the EventInfoData
- * @size : Size of the feature
- */
-union IpaHwFeatureInfoData_t {
- struct IpaHwFeatureInfoParams_t {
- u32 offset:16;
- u32 size:16;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
- * struct IpaHwEventInfoData_t - Structure holding the parameters for
- * statistics and config info
- *
- * @baseAddrOffset : Base Address Offset of the statistics or config
- * structure from IPA_WRAPPER_BASE
- * @IpaHwFeatureInfoData_t : Location and size of each feature within
- * the statistics or config structure
- *
- * @note Information about each feature in the featureInfo[]
- * array is populated at predefined indices per the IPA_HW_FEATURES
- * enum definition
- */
-struct IpaHwEventInfoData_t {
- u32 baseAddrOffset;
- union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
-} __packed;
-
-/**
- * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
- * IPA_HW_2_CPU_EVENT_LOG_INFO Event
- *
- * @featureMask : Mask indicating the features enabled in HW.
- * Refer IPA_HW_FEATURE_MASK
- * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
- * Log Buffer structure
- * @statsInfo : Statistics related information
- * @configInfo : Configuration related information
- *
- * @note The offset location of this structure from IPA_WRAPPER_BASE
- * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
- * Event
- */
-struct IpaHwEventLogInfoData_t {
- u32 featureMask;
- u32 circBuffBaseAddrOffset;
- struct IpaHwEventInfoData_t statsInfo;
- struct IpaHwEventInfoData_t configInfo;
-
-} __packed;
-
-/**
* struct ipa_uc_hdlrs - IPA uC callback functions
* @ipa_uc_loaded_hdlr: Function handler when uC is loaded
* @ipa_uc_event_hdlr: Event handler function
@@ -1225,6 +1100,7 @@ struct ipa_context {
struct ipa_uc_ctx uc_ctx;
struct ipa_uc_wdi_ctx uc_wdi_ctx;
+ struct ipa_uc_ntn_ctx uc_ntn_ctx;
u32 wan_rx_ring_size;
bool skip_uc_pipe_reset;
bool smmu_present;
@@ -1244,6 +1120,9 @@ struct ipa_context {
/* M-release support to know client pipes */
struct ipacm_client_info ipacm_client[IPA_MAX_NUM_PIPES];
bool tethered_flow_control;
+ u32 ipa_rx_min_timeout_usec;
+ u32 ipa_rx_max_timeout_usec;
+ u32 ipa_polling_iteration;
};
/**
@@ -1295,6 +1174,8 @@ struct ipa_plat_drv_res {
bool skip_uc_pipe_reset;
bool use_dma_zone;
bool tethered_flow_control;
+ u32 ipa_rx_polling_sleep_msec;
+ u32 ipa_polling_iteration;
};
struct ipa_mem_partition {
@@ -1426,6 +1307,11 @@ int ipa2_reset_endpoint(u32 clnt_hdl);
int ipa2_clear_endpoint_delay(u32 clnt_hdl);
/*
+ * Disable ep
+ */
+int ipa2_disable_endpoint(u32 clnt_hdl);
+
+/*
* Configuration
*/
int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
@@ -1593,6 +1479,11 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl);
int ipa2_suspend_wdi_pipe(u32 clnt_hdl);
int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa2_get_smem_restr_bytes(void);
+int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
/*
* To retrieve doorbell physical address of
* wlan pipes
@@ -1730,6 +1621,9 @@ void ipa_debugfs_init(void);
void ipa_debugfs_remove(void);
void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+
+void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time);
+
#ifdef IPA_DEBUG
#define IPA_DUMP_BUFF(base, phy_base, size) \
ipa_dump_buff_internal(base, phy_base, size)
@@ -1935,4 +1829,8 @@ int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
int ipa2_rx_poll(u32 clnt_hdl, int budget);
void ipa2_recycle_wan_skb(struct sk_buff *skb);
+int ipa_ntn_init(void);
+int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats);
+int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *),
+ void *user_data);
#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
index 7c10c4cee150..e8f25c9c23d3 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
@@ -276,8 +276,6 @@ fail_ep_exists:
*/
int ipa2_disconnect_mhi_pipe(u32 clnt_hdl)
{
- struct ipa_ep_context *ep;
-
IPA_MHI_FUNC_ENTRY();
if (clnt_hdl >= ipa_ctx->ipa_num_pipes) {
@@ -290,7 +288,8 @@ int ipa2_disconnect_mhi_pipe(u32 clnt_hdl)
return -EINVAL;
}
- ep->valid = 0;
+ ipa_ctx->ep[clnt_hdl].valid = 0;
+
ipa_delete_dflt_flt_rules(clnt_hdl);
IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -302,14 +301,13 @@ int ipa2_mhi_resume_channels_internal(enum ipa_client_type client,
bool LPTransitionRejected, bool brstmode_enabled,
union __packed gsi_channel_scratch ch_scratch, u8 index)
{
- int i;
int res;
IPA_MHI_FUNC_ENTRY();
res = ipa_uc_mhi_resume_channel(index, LPTransitionRejected);
if (res) {
- IPA_MHI_ERR("failed to suspend channel %d error %d\n",
- i, res);
+ IPA_MHI_ERR("failed to resume channel %u error %d\n",
+ index, res);
return res;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 9d4704ded0c3..15476f38cf44 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1008,6 +1008,10 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
return 0;
ipa_insert_failed:
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+ else if (entry->proc_ctx)
+ entry->proc_ctx->ref_cnt--;
list_del(&entry->link);
kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
error:
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
new file mode 100644
index 000000000000..08ed47f3cacf
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -0,0 +1,438 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
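+/* Handle uC->CPU NTN error events reported via the shared memory mapping */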
+static void ipa_uc_ntn_event_handler(
+ struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio)
+{
+ union IpaHwNTNErrorEventData_t ntn_evt;
+
+ if (uc_sram_mmio->eventOp == IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+ ntn_evt.raw32b = uc_sram_mmio->eventParams;
+ IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+ ntn_evt.params.ntn_error_type,
+ ntn_evt.params.ipa_pipe_number,
+ ntn_evt.params.ntn_ch_err_type);
+ }
+}
+
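+/*
+ * Locate the NTN stats area from the uC event-log info, validate that it
+ * lies inside SRAM and ioremap it for later stats queries.
+ */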
+static void ipa_uc_ntn_event_log_info_handler(
+ struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+ if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+ IPAERR("NTN feature missing 0x%x\n",
+ uc_event_top_mmio->featureMask);
+ return;
+ }
+
+ if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+ params.size != sizeof(struct IpaHwStatsNTNInfoData_t)) {
+ IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+ sizeof(struct IpaHwStatsNTNInfoData_t),
+ uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.size);
+ return;
+ }
+
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+ statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+ IPAERR("NTN stats ofst=0x%x\n", ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ if (ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+ sizeof(struct IpaHwStatsNTNInfoData_t) >=
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+ ipa_ctx->smem_sz) {
+ IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ return;
+ }
+
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+ ioremap(ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+ sizeof(struct IpaHwStatsNTNInfoData_t));
+ if (!ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("fail to ioremap uc ntn stats\n");
+ return;
+ }
+}
+
+/**
+ * ipa2_get_ntn_stats() - Query NTN statistics from uc
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!stats || !ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("bad parms stats=%p ntn_stats=%p\n",
+ stats,
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ TX_STATS(num_pkts_processed);
+ TX_STATS(tail_ptr_val);
+ TX_STATS(num_db_fired);
+ TX_STATS(tx_comp_ring_stats.ringFull);
+ TX_STATS(tx_comp_ring_stats.ringEmpty);
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+ TX_STATS(tx_comp_ring_stats.ringUsageLow);
+ TX_STATS(tx_comp_ring_stats.RingUtilCount);
+ TX_STATS(bam_stats.bamFifoFull);
+ TX_STATS(bam_stats.bamFifoEmpty);
+ TX_STATS(bam_stats.bamFifoUsageHigh);
+ TX_STATS(bam_stats.bamFifoUsageLow);
+ TX_STATS(bam_stats.bamUtilCount);
+ TX_STATS(num_db);
+ TX_STATS(num_unexpected_db);
+ TX_STATS(num_bam_int_handled);
+ TX_STATS(num_bam_int_in_non_running_state);
+ TX_STATS(num_qmb_int_handled);
+ TX_STATS(num_bam_int_handled_while_wait_for_bam);
+ TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+ RX_STATS(max_outstanding_pkts);
+ RX_STATS(num_pkts_processed);
+ RX_STATS(rx_ring_rp_value);
+ RX_STATS(rx_ind_ring_stats.ringFull);
+ RX_STATS(rx_ind_ring_stats.ringEmpty);
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+ RX_STATS(rx_ind_ring_stats.ringUsageLow);
+ RX_STATS(rx_ind_ring_stats.RingUtilCount);
+ RX_STATS(bam_stats.bamFifoFull);
+ RX_STATS(bam_stats.bamFifoEmpty);
+ RX_STATS(bam_stats.bamFifoUsageHigh);
+ RX_STATS(bam_stats.bamFifoUsageLow);
+ RX_STATS(bam_stats.bamUtilCount);
+ RX_STATS(num_bam_int_handled);
+ RX_STATS(num_db);
+ RX_STATS(num_unexpected_db);
+ RX_STATS(num_pkts_in_dis_uninit_state);
+ RX_STATS(num_bam_int_handled_while_not_in_bam);
+ RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
+{
+ int ret;
+
+ ret = ipa2_uc_state_check();
+ if (ret) {
+ ipa_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
+ ipa_ctx->uc_ntn_ctx.priv = user_data;
+ return 0;
+ }
+
+ return -EEXIST;
+}
+
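+/* Invoke the registered ready callback (if any) once the uC is loaded */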
+static void ipa_uc_ntn_loaded_handler(void)
+{
+ if (!ipa_ctx) {
+ IPAERR("IPA ctx is null\n");
+ return;
+ }
+
+ if (ipa_ctx->uc_ntn_ctx.uc_ready_cb) {
+ ipa_ctx->uc_ntn_ctx.uc_ready_cb(
+ ipa_ctx->uc_ntn_ctx.priv);
+
+ ipa_ctx->uc_ntn_ctx.uc_ready_cb =
+ NULL;
+ ipa_ctx->uc_ntn_ctx.priv = NULL;
+ }
+}
+
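+/* Register the NTN event, event-log-info and loaded handlers with the uC */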
+int ipa_ntn_init(void)
+{
+ struct ipa_uc_hdlrs uc_ntn_cbs = { 0 };
+
+ uc_ntn_cbs.ipa_uc_event_hdlr = ipa_uc_ntn_event_handler;
+ uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+ ipa_uc_ntn_event_log_info_handler;
+ uc_ntn_cbs.ipa_uc_loaded_hdlr =
+ ipa_uc_ntn_loaded_handler;
+
+ ipa_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+ return 0;
+}
+
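+/*
+ * Build an IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP command describing the
+ * NTN ring and buffer pool in DMA-coherent memory and send it to the uC.
+ */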
+static int ipa2_uc_send_ntn_setup_pipe_cmd(
+ struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+ int ipa_ep_idx;
+ int result = 0;
+ struct ipa_mem_buffer cmd;
+ struct IpaHwNtnSetUpCmdData_t *Ntn_params;
+ struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+ if (ntn_info == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to get ep idx.\n");
+ return -EFAULT;
+ }
+
+ IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+ IPADBG("ring_base_pa = 0x%pa\n",
+ &ntn_info->ring_base_pa);
+ IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+ IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+ IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+ IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+ IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+ Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+ Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+ Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+ Ntn_params->num_buffers = ntn_info->num_buffers;
+ Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+ Ntn_params->data_buff_size = ntn_info->data_buff_size;
+ Ntn_params->ipa_pipe_number = ipa_ep_idx;
+ Ntn_params->dir = dir;
+
+ result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result)
+ result = -EFAULT;
+
+ dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ return result;
+}
+
+/**
+ * ipa2_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp)
+{
+ int ipa_ep_idx_ul, ipa_ep_idx_dl;
+ struct ipa_ep_context *ep_ul, *ep_dl;
+ int result = 0;
+
+ if (in == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+ if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+ IPAERR("fail to alloc EP.\n");
+ return -EFAULT;
+ }
+
+ ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->valid || ep_dl->valid) {
+ IPAERR("EP already allocated ul:%d dl:%d\n",
+ ep_ul->valid, ep_dl->valid);
+ return -EFAULT;
+ }
+
+ memset(ep_ul, 0, offsetof(struct ipa_ep_context, sys));
+ memset(ep_dl, 0, offsetof(struct ipa_ep_context, sys));
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ /* setup ul ep cfg */
+ ep_ul->valid = 1;
+ ep_ul->client = in->ul.client;
+ result = ipa_enable_data_path(ipa_ep_idx_ul);
+ if (result) {
+ IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_ul);
+ result = -EFAULT;
+ goto fail;
+ }
+ ep_ul->client_notify = notify;
+ ep_ul->priv = priv;
+
+ memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+ ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+ ep_ul->cfg.hdr.hdr_len = hdr_len;
+ ep_ul->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa2_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+ IPAERR("fail to setup ul pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+ IPAERR("fail to send cmd to uc for ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa_install_dflt_flt_rules(ipa_ep_idx_ul);
+ outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+ ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPADBG("client %d (ep: %d) connected\n", in->ul.client,
+ ipa_ep_idx_ul);
+
+ /* setup dl ep cfg */
+ ep_dl->valid = 1;
+ ep_dl->client = in->dl.client;
+ result = ipa_enable_data_path(ipa_ep_idx_dl);
+ if (result) {
+ IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_dl);
+ result = -EFAULT;
+ goto fail;
+ }
+
+ memset(&ep_dl->cfg, 0, sizeof(ep_dl->cfg));
+ ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+ ep_dl->cfg.hdr.hdr_len = hdr_len;
+ ep_dl->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa2_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+ IPAERR("fail to setup dl pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+ IPAERR("fail to send cmd to uc for dl pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+ ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPADBG("client %d (ep: %d) connected\n", in->dl.client,
+ ipa_ep_idx_dl);
+
+fail:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
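+
+/*
+ * Illustrative usage (not part of this change): a client fills struct
+ * ipa_ntn_conn_in_params with its UL and DL ring/buffer information and
+ * calls ipa2_setup_uc_ntn_pipes(); on success, outp->ul_uc_db_pa and
+ * outp->dl_uc_db_pa carry the uC doorbell physical addresses the peer
+ * should ring.
+ */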
+
+/**
+ * ipa2_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl)
+{
+ struct ipa_mem_buffer cmd;
+ struct ipa_ep_context *ep_ul, *ep_dl;
+ struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+ union IpaHwNtnCommonChCmdData_t *tear;
+ int result = 0;
+
+ IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+ IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+ ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+ ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+ IPAERR("channel bad state: ul %d dl %d\n",
+ ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+ return -EFAULT;
+ }
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ /* teardown the UL pipe */
+ cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+ tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+ result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa_disable_data_path(ipa_ep_idx_ul);
+ ipa_delete_dflt_flt_rules(ipa_ep_idx_ul);
+ memset(&ipa_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa_ep_context));
+ IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+ /* teardown the DL pipe */
+ tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+ result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down dl pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa_disable_data_path(ipa_ep_idx_dl);
+ memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context));
+ IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+ dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
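+
+/*
+ * Design note (descriptive): the same DMA-coherent command buffer is reused
+ * for the UL and DL tear-down commands and is freed on both the success and
+ * failure paths under the "fail" label.
+ */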
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
new file mode 100644
index 000000000000..3bec471b4656
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
@@ -0,0 +1,514 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ * @brief Enum value determined based on the feature it
+ * corresponds to
+ * +----------------+----------------+
+ * | 3 bits | 5 bits |
+ * +----------------+----------------+
+ * | HW_FEATURE | OPCODE |
+ * +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
+#define EXTRACT_UC_FEATURE(value) (value >> 5)
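+
+/*
+ * Illustrative example: FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0) places the
+ * feature value 0x4 in the upper 3 bits and the opcode 0 in the lower 5
+ * bits, giving 0x80; EXTRACT_UC_FEATURE(0x80) recovers 0x4.
+ */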
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa_hw_features - Values that represent the features supported in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to generic offload operation in IPA HW
+*/
+enum ipa_hw_features {
+ IPA_HW_FEATURE_COMMON = 0x0,
+ IPA_HW_FEATURE_MHI = 0x1,
+ IPA_HW_FEATURE_WDI = 0x3,
+ IPA_HW_FEATURE_NTN = 0x4,
+ IPA_HW_FEATURE_OFFLOAD = 0x5,
+ IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter. The parameter field can hold 32 bits
+ * of immediate parameters or point to a structure in
+ * system memory (in such case the address must be accessible
+ * for HW)
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter field can hold
+ * 32 bits of immediate parameters or point to a
+ * structure in system memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter field can hold 32 bits
+ * of immediate parameters or point to a
+ * structure in system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the error
+ * type.
+ * @warningCounter : The warnings counter. The counter carries information
+ * regarding non fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+ u8 cmdOp;
+ u8 reserved_01;
+ u16 reserved_03_02;
+ u32 cmdParams;
+ u8 responseOp;
+ u8 reserved_09;
+ u16 reserved_0B_0A;
+ u32 responseParams;
+ u8 eventOp;
+ u8 reserved_11;
+ u16 reserved_13_12;
+ u32 eventParams;
+ u32 reserved_1B_18;
+ u32 firstErrorAddress;
+ u8 hwState;
+ u8 warningCounter;
+ u16 reserved_23_22;
+ u16 interfaceVersionCommon;
+ u16 reserved_27_26;
+} __packed;
+
+/**
+ * union IpaHwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union IpaHwFeatureInfoData_t {
+ struct IpaHwFeatureInfoParams_t {
+ u32 offset:16;
+ u32 size:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * struct IpaHwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @IpaHwFeatureInfoData_t : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct IpaHwEventInfoData_t {
+ u32 baseAddrOffset;
+ union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+ u32 featureMask;
+ u32 circBuffBaseAddrOffset;
+ struct IpaHwEventInfoData_t statsInfo;
+ struct IpaHwEventInfoData_t configInfo;
+
+} __packed;
+
+/**
+ * struct ipa_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa_uc_ntn_ctx {
+ u32 ntn_uc_stats_ofst;
+ struct IpaHwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+ void *priv;
+ ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa_hw_2_cpu_ntn_events - Values that represent HW event
+ * to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ * detected an error in NTN
+ *
+ */
+enum ipa_hw_2_cpu_ntn_events {
+ IPA_HW_2_CPU_EVENT_NTN_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+
+/**
+ * enum ipa_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error persists
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_ntn_errors {
+ IPA_HW_NTN_ERROR_NONE = 0,
+ IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_ntn_channel_states {
+ IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_NTN_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error persists
+ * @IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Tx ring
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ * num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
+ * @IPA_HW_NTN_CH_ERR_RESERVED:
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa_hw_ntn_channel_errors {
+ IPA_HW_NTN_CH_ERR_NONE = 0,
+ IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+ IPA_HW_NTN_TX_FSM_ERROR = 2,
+ IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3,
+ IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+ IPA_HW_NTN_RX_FSM_ERROR = 5,
+ IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6,
+ IPA_HW_NTN_CH_ERR_RESERVED = 0xFF
+};
+
+
+/**
+ * struct IpaHwNtnSetUpCmdData_t - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ * ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ * Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ * Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of the each data buffer allocated in
+ * DDR
+ */
+struct IpaHwNtnSetUpCmdData_t {
+ u32 ring_base_pa;
+ u32 buff_pool_base_pa;
+ u16 ntn_ring_size;
+ u16 num_buffers;
+ u32 ntn_reg_base_ptr_pa;
+ u8 ipa_pipe_number;
+ u8 dir;
+ u16 data_buff_size;
+
+} __packed;
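+
+/*
+ * Note (descriptive): the explicit field widths and the __packed attribute
+ * keep this layout identical to the one expected by the uC firmware;
+ * ipa2_uc_send_ntn_setup_pipe_cmd() fills it from the client-supplied
+ * struct ipa_ntn_setup_info.
+ */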
+
+/**
+ * union IpaHwNtnCommonChCmdData_t - Structure holding the
+ * parameters for the NTN tear down command
+ *
+ *@ipa_pipe_number: IPA pipe number. This could be a Tx or an Rx pipe
+ */
+union IpaHwNtnCommonChCmdData_t {
+ struct IpaHwNtnCommonChCmdParams_t {
+ u32 ipa_pipe_number :8;
+ u32 reserved :24;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+
+/**
+ * union IpaHwNTNErrorEventData_t - Structure holding the
+ * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
+ * as immediate params in the shared memory
+ *
+ *@ntn_error_type: type of NTN error (IPA_HW_NTN_ERRORS)
+ *@ipa_pipe_number: IPA pipe number on which error has happened
+ * Applicable only if error type indicates channel error
+ *@ntn_ch_err_type: Information about the channel error (if
+ * available)
+ */
+union IpaHwNTNErrorEventData_t {
+ struct IpaHwNTNErrorEventParams_t {
+ u32 ntn_error_type :8;
+ u32 reserved :8;
+ u32 ipa_pipe_number :8;
+ u32 ntn_ch_err_type :8;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTNRxInfoData_t - NTN Structure holding the
+ * Rx pipe information
+ *
+ *@max_outstanding_pkts: Number of outstanding packets in Rx
+ * Ring
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@rx_ring_rp_value: Read pointer last advertised to the WLAN FW
+ *
+ *@ntn_ch_err_type: Information about the channel error (if
+ * available)
+ *@rx_ind_ring_stats: Rx indication ring statistics
+ *@bam_stats: BAM interface statistics
+ *@num_bam_int_handled: Number of BAM interrupts handled by FW
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_pkts_in_dis_uninit_state: Number of packets received while
+ * the channel was in a disabled or uninitialized state
+ *@num_bam_int_handled_while_not_in_bam: Number of BAM
+ * interrupts handled by FW while not in BAM state
+ *@num_bam_int_handled_while_in_bam_state: Number of BAM
+ * interrupts handled by FW while in BAM state
+ */
+struct NTNRxInfoData_t {
+ u32 max_outstanding_pkts;
+ u32 num_pkts_processed;
+ u32 rx_ring_rp_value;
+ struct IpaHwRingStats_t rx_ind_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_bam_int_handled;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_pkts_in_dis_uninit_state;
+ u32 num_bam_int_handled_while_not_in_bam;
+ u32 num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel
+ * information. Ensure that this is always word aligned
+ *
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@tail_ptr_val: Latest value of doorbell written to copy engine
+ *@num_db_fired: Number of DB from uC FW to Copy engine
+ *
+ *@tx_comp_ring_stats: Tx completion ring statistics
+ *@bam_stats: BAM interface statistics
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_bam_int_handled: Number of BAM interrupts handled by FW
+ *@num_bam_int_in_non_running_state: Number of BAM interrupts
+ * while not in Running state
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@num_bam_int_handled_while_wait_for_bam: Number of BAM
+ * interrupts handled while waiting for BAM
+ *@num_bam_int_handled_while_not_in_bam: Number of BAM
+ * interrupts handled while not in BAM state
+ */
+struct NTNTxInfoData_t {
+ u32 num_pkts_processed;
+ u32 tail_ptr_val;
+ u32 num_db_fired;
+ struct IpaHwRingStats_t tx_comp_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_bam_int_handled;
+ u32 num_bam_int_in_non_running_state;
+ u32 num_qmb_int_handled;
+ u32 num_bam_int_handled_while_wait_for_bam;
+ u32 num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct IpaHwStatsNTNInfoData_t - Structure holding the NTN Tx/Rx
+ * channel statistics. Ensure that this is always word aligned
+ *
+ */
+struct IpaHwStatsNTNInfoData_t {
+ struct NTNRxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+ struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
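+/*
+ * Note (descriptive): these flags are OR-ed into ep->uc_offload_state as a
+ * pipe is connected, enabled and resumed; the NTN tear-down path expects
+ * the state to be exactly IPA_UC_OFFLOAD_CONNECTED.
+ */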
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands - Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ * Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ * Offload protocol's Tx/Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
+
+
+/**
+ * enum ipa_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is initialized
+ * but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ * SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use
+ * in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa_hw_offload_channel_states {
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF
+};
+
+
+/**
+ * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_offload_cmd_resp_status {
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+ IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+ IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+ IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+ IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+ IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+ IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * union IpaHwSetUpCmd - Protocol-specific parameters for the
+ * IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP command
+ */
+union IpaHwSetUpCmd {
+ struct IpaHwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t - Structure holding the parameters
+ * for the IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP command
+ *
+ * @protocol: offload protocol, see enum ipa_hw_features
+ * @SetupCh_params: protocol-specific channel setup parameters
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+ u8 protocol;
+ union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * union IpaHwCommonChCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ *
+ *
+ */
+union IpaHwCommonChCmd {
+ union IpaHwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
+struct IpaHwOffloadCommonChCmdData_t {
+ u8 protocol;
+ union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index a45b51ad7b7b..a1072638b281 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -963,7 +963,7 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
- ep->wdi_state |= IPA_WDI_CONNECTED;
+ ep->uc_offload_state |= IPA_WDI_CONNECTED;
IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
return 0;
@@ -1001,7 +1001,7 @@ int ipa2_disconnect_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1013,8 +1013,8 @@ int ipa2_disconnect_wdi_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1067,7 +1067,7 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1079,8 +1079,8 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1105,7 +1105,7 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl)
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
- ep->wdi_state |= IPA_WDI_ENABLED;
+ ep->uc_offload_state |= IPA_WDI_ENABLED;
IPADBG("client (ep: %d) enabled\n", clnt_hdl);
uc_timeout:
@@ -1135,7 +1135,7 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1147,8 +1147,8 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1206,7 +1206,7 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_ENABLED;
+ ep->uc_offload_state &= ~IPA_WDI_ENABLED;
IPADBG("client (ep: %d) disabled\n", clnt_hdl);
uc_timeout:
@@ -1235,7 +1235,7 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1247,8 +1247,8 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1273,7 +1273,7 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl)
else
IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
- ep->wdi_state |= IPA_WDI_RESUMED;
+ ep->uc_offload_state |= IPA_WDI_RESUMED;
IPADBG("client (ep: %d) resumed\n", clnt_hdl);
uc_timeout:
@@ -1302,7 +1302,7 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1314,9 +1314,9 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
IPA_WDI_RESUMED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1369,7 +1369,7 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
ipa_ctx->tag_process_before_gating = true;
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_RESUMED;
+ ep->uc_offload_state &= ~IPA_WDI_RESUMED;
IPADBG("client (ep: %d) suspended\n", clnt_hdl);
uc_timeout:
@@ -1384,7 +1384,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1396,8 +1396,8 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
ep = &ipa_ctx->ep[clnt_hdl];
- if (!(ep->wdi_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 0dd10743a01e..b627cd1fc833 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -43,6 +43,11 @@
#define IPA_AGGR_GRAN_MAX (32)
#define IPA_EOT_COAL_GRAN_MIN (1)
#define IPA_EOT_COAL_GRAN_MAX (16)
+#define MSEC 1000
+#define MIN_RX_POLL_TIME 1
+#define MAX_RX_POLL_TIME 5
+#define UPPER_CUTOFF 50
+#define LOWER_CUTOFF 10
#define IPA_DEFAULT_SYS_YELLOW_WM 32
@@ -3623,6 +3628,30 @@ void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
}
/**
+ * ipa_rx_timeout_min_max_calc() - calc min and max timeout of rx polling
+ * @min: rx polling min timeout
+ * @max: rx polling max timeout
+ * @time: time from dtsi entry or from debugfs file system
+ * A maximum time of 10 msec is allowed.
+ */
+void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time)
+{
+ if ((time >= MIN_RX_POLL_TIME) &&
+ (time <= MAX_RX_POLL_TIME)) {
+ *min = (time * MSEC) + LOWER_CUTOFF;
+ *max = (time * MSEC) + UPPER_CUTOFF;
+ } else {
+ /* Setting up the default min max time */
+ IPADBG("Setting up default rx polling timeout\n");
+ *min = (MIN_RX_POLL_TIME * MSEC) +
+ LOWER_CUTOFF;
+ *max = (MIN_RX_POLL_TIME * MSEC) +
+ UPPER_CUTOFF;
+ }
+ IPADBG("Rx polling timeout Min = %u Max = %u\n", *min, *max);
+}
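+
+/*
+ * Illustrative example (using the defaults above): time = 3 gives
+ * *min = 3 * MSEC + LOWER_CUTOFF = 3010 and
+ * *max = 3 * MSEC + UPPER_CUTOFF = 3050; an out-of-range time falls back
+ * to 1010/1050.
+ */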
+
+/**
* ipa_pipe_mem_init() - initialize the pipe memory
* @start_ofst: start offset
* @size: size
@@ -4920,6 +4949,7 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_disconnect = ipa2_disconnect;
api_ctrl->ipa_reset_endpoint = ipa2_reset_endpoint;
api_ctrl->ipa_clear_endpoint_delay = ipa2_clear_endpoint_delay;
+ api_ctrl->ipa_disable_endpoint = ipa2_disable_endpoint;
api_ctrl->ipa_cfg_ep = ipa2_cfg_ep;
api_ctrl->ipa_cfg_ep_nat = ipa2_cfg_ep_nat;
api_ctrl->ipa_cfg_ep_hdr = ipa2_cfg_ep_hdr;
@@ -5054,6 +5084,7 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev;
api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info;
api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel;
+ api_ctrl->ipa_register_ipa_ready_cb = ipa2_register_ipa_ready_cb;
api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks;
api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks;
api_ctrl->ipa_inc_client_enable_clks_no_block =
@@ -5068,6 +5099,9 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low;
api_ctrl->ipa_rx_poll = ipa2_rx_poll;
api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb;
+ api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes;
+ api_ctrl->ipa_tear_down_uc_offload_pipes =
+ ipa2_tear_down_uc_offload_pipes;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 50e820992f29..2420dd78b4c0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1144,14 +1144,16 @@ static void apps_ipa_tx_complete_notify(void *priv,
struct net_device *dev = (struct net_device *)priv;
struct wwan_private *wwan_ptr;
- if (evt != IPA_WRITE_DONE) {
- IPAWANDBG("unsupported event on Tx callback\n");
+ if (dev != ipa_netdevs[0]) {
+ IPAWANDBG("Received pre-SSR packet completion\n");
+ dev_kfree_skb_any(skb);
return;
}
- if (dev != ipa_netdevs[0]) {
- IPAWANDBG("Received pre-SSR packet completion\n");
+ if (evt != IPA_WRITE_DONE) {
+ IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
return;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index 9653dd6d27f2..a4faaea715a8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_IPA3) += ipahal/
obj-$(CONFIG_IPA3) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
- ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o
+ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index c553be1ad717..4db07bad7d93 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -37,6 +37,7 @@
#include <linux/hashtable.h>
#include <linux/hash.h>
#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/smem.h>
#define IPA_SUBSYSTEM_NAME "ipa_fws"
#include "ipa_i.h"
#include "../ipa_rm_i.h"
@@ -75,6 +76,17 @@
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+#define IPA_SMEM_SIZE (8 * 1024)
+
+/* round addresses to the closest page per SMMU requirements */
+#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
+ do { \
+ (iova_p) = rounddown((iova), PAGE_SIZE); \
+ (pa_p) = rounddown((pa), PAGE_SIZE); \
+ (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
+ } while (0)
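+/*
+ * Illustrative example (PAGE_SIZE = 4096): iova = pa = 0x1234, size = 0x100
+ * gives iova_p = pa_p = 0x1000 and size_p = 0x1000, i.e. the mapping is
+ * widened to whole pages covering the requested range.
+ */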
+
+
/* The relative location in /lib/firmware where the FWs will reside */
#define IPA_FWS_PATH "ipa/ipa_fws.elf"
@@ -1887,44 +1899,43 @@ static int ipa3_q6_clean_q6_tables(void)
if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
/* Flush rules cache */
desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
if (!desc) {
IPAERR("failed to allocate memory\n");
- retval = -ENOMEM;
- goto bail_dma;
+ return -ENOMEM;
}
flush.v4_flt = true;
@@ -1941,6 +1952,7 @@ static int ipa3_q6_clean_q6_tables(void)
&reg_write_cmd, false);
if (!cmd_pyld) {
IPAERR("fail construct register_write imm cmd\n");
+ retval = -EFAULT;
goto bail_desc;
}
desc->opcode =
@@ -1957,9 +1969,9 @@ static int ipa3_q6_clean_q6_tables(void)
}
ipahal_destroy_imm_cmd(cmd_pyld);
+
bail_desc:
kfree(desc);
-bail_dma:
IPADBG("Done - retval = %d\n", retval);
return retval;
}
@@ -3702,6 +3714,12 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
else
IPADBG(":wdi init ok\n");
+ result = ipa3_ntn_init();
+ if (result)
+ IPAERR(":ntn init failed (%d)\n", -result);
+ else
+ IPADBG(":ntn init ok\n");
+
ipa3_register_panic_hdlr();
ipa3_ctx->q6_proxy_clk_vote_valid = true;
@@ -4813,6 +4831,10 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
int fast = 1;
int bypass = 1;
u32 iova_ap_mapping[2];
+ u32 add_map_size;
+ const u32 *add_map;
+ void *smem_addr;
+ int i;
IPADBG("AP CB probe: sub pdev=%p\n", dev);
@@ -4902,6 +4924,55 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
return result;
}
+ add_map = of_get_property(dev->of_node,
+ "qcom,additional-mapping", &add_map_size);
+ if (add_map) {
+ /* the additional-mapping property is an array of u32 3-tuples (iova, pa, size) */
+ if (add_map_size % (3 * sizeof(u32))) {
+ IPAERR("wrong additional mapping format\n");
+ cb->valid = false;
+ return -EFAULT;
+ }
+
+ /* iterate over each entry of the additional mapping array */
+ for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+ u32 iova = be32_to_cpu(add_map[i]);
+ u32 pa = be32_to_cpu(add_map[i + 1]);
+ u32 size = be32_to_cpu(add_map[i + 2]);
+ unsigned long iova_p;
+ phys_addr_t pa_p;
+ u32 size_p;
+
+ IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+ iova_p, pa_p, size_p);
+ IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+ iova_p, &pa_p, size_p);
+ ipa3_iommu_map(cb->mapping->domain,
+ iova_p, pa_p, size_p,
+ IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+ }
+ }
+
+ /* map SMEM memory for IPA table accesses */
+ smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
+ SMEM_MODEM, 0);
+ if (smem_addr) {
+ phys_addr_t iova = smem_virt_to_phys(smem_addr);
+ phys_addr_t pa = iova;
+ unsigned long iova_p;
+ phys_addr_t pa_p;
+ u32 size_p;
+
+ IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE,
+ iova_p, pa_p, size_p);
+ IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+ iova_p, &pa_p, size_p);
+ ipa3_iommu_map(cb->mapping->domain,
+ iova_p, pa_p, size_p,
+ IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+ }
+
+
smmu_info.present = true;
if (!ipa3_bus_scale_table)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 0319c5c78b0d..c3c5ae38ec14 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -95,6 +95,7 @@ static struct dentry *dfile_ip6_flt_hw;
static struct dentry *dfile_stats;
static struct dentry *dfile_wstats;
static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
static struct dentry *dfile_dbg_cnt;
static struct dentry *dfile_msg;
static struct dentry *dfile_ip4_nat;
@@ -1184,6 +1185,110 @@ nxt_clnt_cons:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
+static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ struct Ipa3HwStatsNTNInfoData_t stats;
+ int nbytes;
+ int cnt = 0;
+
+ if (!ipa3_get_ntn_stats(&stats)) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "TX num_pkts_processed=%u\n"
+ "TX tail_ptr_val=%u\n"
+ "TX num_db_fired=%u\n"
+ "TX ringFull=%u\n"
+ "TX ringEmpty=%u\n"
+ "TX ringUsageHigh=%u\n"
+ "TX ringUsageLow=%u\n"
+ "TX RingUtilCount=%u\n"
+ "TX bamFifoFull=%u\n"
+ "TX bamFifoEmpty=%u\n"
+ "TX bamFifoUsageHigh=%u\n"
+ "TX bamFifoUsageLow=%u\n"
+ "TX bamUtilCount=%u\n"
+ "TX num_db=%u\n"
+ "TX num_unexpected_db=%u\n"
+ "TX num_bam_int_handled=%u\n"
+ "TX num_bam_int_in_non_running_state=%u\n"
+ "TX num_qmb_int_handled=%u\n"
+ "TX num_bam_int_handled_while_wait_for_bam=%u\n"
+ "TX num_bam_int_handled_while_not_in_bam=%u\n",
+ TX_STATS(num_pkts_processed),
+ TX_STATS(tail_ptr_val),
+ TX_STATS(num_db_fired),
+ TX_STATS(tx_comp_ring_stats.ringFull),
+ TX_STATS(tx_comp_ring_stats.ringEmpty),
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+ TX_STATS(tx_comp_ring_stats.ringUsageLow),
+ TX_STATS(tx_comp_ring_stats.RingUtilCount),
+ TX_STATS(bam_stats.bamFifoFull),
+ TX_STATS(bam_stats.bamFifoEmpty),
+ TX_STATS(bam_stats.bamFifoUsageHigh),
+ TX_STATS(bam_stats.bamFifoUsageLow),
+ TX_STATS(bam_stats.bamUtilCount),
+ TX_STATS(num_db),
+ TX_STATS(num_unexpected_db),
+ TX_STATS(num_bam_int_handled),
+ TX_STATS(num_bam_int_in_non_running_state),
+ TX_STATS(num_qmb_int_handled),
+ TX_STATS(num_bam_int_handled_while_wait_for_bam),
+ TX_STATS(num_bam_int_handled_while_not_in_bam));
+ cnt += nbytes;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "RX max_outstanding_pkts=%u\n"
+ "RX num_pkts_processed=%u\n"
+ "RX rx_ring_rp_value=%u\n"
+ "RX ringFull=%u\n"
+ "RX ringEmpty=%u\n"
+ "RX ringUsageHigh=%u\n"
+ "RX ringUsageLow=%u\n"
+ "RX RingUtilCount=%u\n"
+ "RX bamFifoFull=%u\n"
+ "RX bamFifoEmpty=%u\n"
+ "RX bamFifoUsageHigh=%u\n"
+ "RX bamFifoUsageLow=%u\n"
+ "RX bamUtilCount=%u\n"
+ "RX num_bam_int_handled=%u\n"
+ "RX num_db=%u\n"
+ "RX num_unexpected_db=%u\n"
+ "RX num_pkts_in_dis_uninit_state=%u\n"
+ "RX num_bam_int_handled_while_not_in_bam=%u\n"
+ "RX num_bam_int_handled_while_in_bam_state=%u\n",
+ RX_STATS(max_outstanding_pkts),
+ RX_STATS(num_pkts_processed),
+ RX_STATS(rx_ring_rp_value),
+ RX_STATS(rx_ind_ring_stats.ringFull),
+ RX_STATS(rx_ind_ring_stats.ringEmpty),
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+ RX_STATS(rx_ind_ring_stats.ringUsageLow),
+ RX_STATS(rx_ind_ring_stats.RingUtilCount),
+ RX_STATS(bam_stats.bamFifoFull),
+ RX_STATS(bam_stats.bamFifoEmpty),
+ RX_STATS(bam_stats.bamFifoUsageHigh),
+ RX_STATS(bam_stats.bamFifoUsageLow),
+ RX_STATS(bam_stats.bamUtilCount),
+ RX_STATS(num_bam_int_handled),
+ RX_STATS(num_db),
+ RX_STATS(num_unexpected_db),
+ RX_STATS(num_pkts_in_dis_uninit_state),
+ RX_STATS(num_bam_int_handled_while_not_in_bam),
+ RX_STATS(num_bam_int_handled_while_in_bam_state));
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Fail to read NTN stats\n");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -1747,6 +1852,10 @@ const struct file_operations ipa3_wdi_ops = {
.read = ipa3_read_wdi,
};
+const struct file_operations ipa3_ntn_ops = {
+ .read = ipa3_read_ntn,
+};
+
const struct file_operations ipa3_msg_ops = {
.read = ipa3_read_msg,
};
@@ -1931,6 +2040,13 @@ void ipa3_debugfs_init(void)
goto fail;
}
+ dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+ &ipa3_ntn_ops);
+ if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+ IPAERR("fail to create file for debug_fs ntn stats\n");
+ goto fail;
+ }
+
dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
&ipa3_dbg_cnt_ops);
if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 029647213531..11da023c9d6a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -418,7 +418,8 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
bad_len:
- hdr_entry->ref_cnt--;
+ if (add_ref_hdr)
+ hdr_entry->ref_cnt--;
entry->cookie = 0;
kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
return -EPERM;
@@ -589,7 +590,7 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
}
if (release_hdr)
- __ipa3_release_hdr(entry->hdr->id);
+ __ipa3_del_hdr(entry->hdr->id);
/* move the offset entry to appropriate free list */
list_move(&entry->offset_entry->link,
@@ -893,12 +894,19 @@ int ipa3_reset_hdr(void)
&ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
/* do not remove the default header */
- if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME))
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ if (entry->is_hdr_proc_ctx) {
+ IPAERR("default header is proc ctx\n");
+ mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON(1);
+ return -EFAULT;
+ }
continue;
+ }
if (ipa3_id_find(entry->id) == NULL) {
- WARN_ON(1);
mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON(1);
return -EFAULT;
}
if (entry->is_hdr_proc_ctx) {
@@ -951,8 +959,8 @@ int ipa3_reset_hdr(void)
link) {
if (ipa3_id_find(ctx_entry->id) == NULL) {
- WARN_ON(1);
mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON(1);
return -EFAULT;
}
list_del(&ctx_entry->link);
@@ -1115,8 +1123,8 @@ int ipa3_put_hdr(u32 hdr_hdl)
goto bail;
}
- if (entry == NULL || entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("invalid header entry\n");
result = -EINVAL;
goto bail;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index cce05cf31b3c..806510ea8867 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -35,6 +35,7 @@
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"
#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
@@ -271,7 +272,7 @@ struct ipa3_rt_tbl {
* @is_partial: flag indicating if header table entry is partial
* @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
* true - hdr entry resides in DDR and pointed to by proc ctx
- * @phys_base: physical address of entry in SRAM when is_hdr_proc_ctx is true,
+ * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
* else 0
* @proc_ctx: processing context header
* @offset_entry: entry's offset
@@ -546,7 +547,7 @@ struct ipa3_ep_context {
bool skip_ep_cfg;
bool keep_ipa_awake;
struct ipa3_wlan_stats wstats;
- u32 wdi_state;
+ u32 uc_offload_state;
bool disconnect_in_progress;
u32 qmi_request_sent;
bool napi_enabled;
@@ -869,200 +870,6 @@ struct ipa3_tag_completion {
struct ipa3_controller;
/**
- * @brief Enum value determined based on the feature it
- * corresponds to
- * +----------------+----------------+
- * | 3 bits | 5 bits |
- * +----------------+----------------+
- * | HW_FEATURE | OPCODE |
- * +----------------+----------------+
- *
- */
-#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
-#define EXTRACT_UC_FEATURE(value) (value >> 5)
-
-#define IPA_HW_NUM_FEATURES 0x8
-
-/**
- * enum ipa3_hw_features - Values that represent the features supported in IPA HW
- * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
- * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
- * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
- * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
- * @IPA_HW_FEATURE_ZIP: Feature related to CMP/DCMP operation in IPA HW
-*/
-enum ipa3_hw_features {
- IPA_HW_FEATURE_COMMON = 0x0,
- IPA_HW_FEATURE_MHI = 0x1,
- IPA_HW_FEATURE_POWER_COLLAPSE = 0x2,
- IPA_HW_FEATURE_WDI = 0x3,
- IPA_HW_FEATURE_ZIP = 0x4,
- IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
-};
-
-/**
- * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
- * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
- * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
- * device
- * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
- */
-enum ipa3_hw_2_cpu_events {
- IPA_HW_2_CPU_EVENT_NO_OP =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
- IPA_HW_2_CPU_EVENT_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
- IPA_HW_2_CPU_EVENT_LOG_INFO =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
-};
-
-/**
- * enum ipa3_hw_errors - Common error types.
- * @IPA_HW_ERROR_NONE : No error persists
- * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
- * @IPA_HW_DMA_ERROR : Unexpected DMA error
- * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
- * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
- * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
- * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
- */
-enum ipa3_hw_errors {
- IPA_HW_ERROR_NONE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
- IPA_HW_INVALID_DOORBELL_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
- IPA_HW_DMA_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
- IPA_HW_FATAL_SYSTEM_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
- IPA_HW_INVALID_OPCODE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
- IPA_HW_INVALID_PARAMS =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
- IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
- IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
- IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8)
-};
-
-/**
- * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
- * section in 128B shared memory located in offset zero of SW Partition in IPA
- * SRAM.
- * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
- * @cmdParams : CPU->HW command parameter lower 32bit.
- * @cmdParams_hi : CPU->HW command parameter higher 32bit.
- * of parameters (immediate parameters) and point on structure in system memory
- * (in such case the address must be accessible for HW)
- * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
- * @responseParams : HW->CPU response parameter. The parameter filed can hold 32
- * bits of parameters (immediate parameters) and point on structure in system
- * memory
- * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
- * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 bits of
- * parameters (immediate parameters) and point on structure in system memory
- * @firstErrorAddress : Contains the address of first error-source on SNOC
- * @hwState : State of HW. The state carries information regarding the error type.
- * @warningCounter : The warnings counter. The counter carries information regarding
- * non fatal errors in HW
- * @interfaceVersionCommon : The Common interface version as reported by HW
- *
- * The shared memory is used for communication between IPA HW and CPU.
- */
-struct IpaHwSharedMemCommonMapping_t {
- u8 cmdOp;
- u8 reserved_01;
- u16 reserved_03_02;
- u32 cmdParams;
- u32 cmdParams_hi;
- u8 responseOp;
- u8 reserved_0D;
- u16 reserved_0F_0E;
- u32 responseParams;
- u8 eventOp;
- u8 reserved_15;
- u16 reserved_17_16;
- u32 eventParams;
- u32 firstErrorAddress;
- u8 hwState;
- u8 warningCounter;
- u16 reserved_23_22;
- u16 interfaceVersionCommon;
- u16 reserved_27_26;
-} __packed;
-
-/**
- * union IpaHwFeatureInfoData_t - parameters for stats/config blob
- *
- * @offset : Location of a feature within the EventInfoData
- * @size : Size of the feature
- */
-union IpaHwFeatureInfoData_t {
- struct IpaHwFeatureInfoParams_t {
- u32 offset:16;
- u32 size:16;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
- * union IpaHwErrorEventData_t - HW->CPU Common Events
- * @errorType : Entered when a system error is detected by the HW. Type of
- * error is specified by IPA_HW_ERRORS
- * @reserved : Reserved
- */
-union IpaHwErrorEventData_t {
- struct IpaHwErrorEventParams_t {
- u32 errorType:8;
- u32 reserved:24;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
- * struct IpaHwEventInfoData_t - Structure holding the parameters for
- * statistics and config info
- *
- * @baseAddrOffset : Base Address Offset of the statistics or config
- * structure from IPA_WRAPPER_BASE
- * @IpaHwFeatureInfoData_t : Location and size of each feature within
- * the statistics or config structure
- *
- * @note Information about each feature in the featureInfo[]
- * array is populated at predefined indices per the IPA_HW_FEATURES
- * enum definition
- */
-struct IpaHwEventInfoData_t {
- u32 baseAddrOffset;
- union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
-} __packed;
-
-/**
- * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
- * IPA_HW_2_CPU_EVENT_LOG_INFO Event
- *
- * @featureMask : Mask indicating the features enabled in HW.
- * Refer IPA_HW_FEATURE_MASK
- * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
- * Log Buffer structure
- * @statsInfo : Statistics related information
- * @configInfo : Configuration related information
- *
- * @note The offset location of this structure from IPA_WRAPPER_BASE
- * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
- * Event
- */
-struct IpaHwEventLogInfoData_t {
- u32 featureMask;
- u32 circBuffBaseAddrOffset;
- struct IpaHwEventInfoData_t statsInfo;
- struct IpaHwEventInfoData_t configInfo;
-
-} __packed;
-
-/**
* struct ipa3_uc_hdlrs - IPA uC callback functions
* @ipa_uc_loaded_hdlr: Function handler when uC is loaded
* @ipa_uc_event_hdlr: Event handler function
@@ -1393,6 +1200,7 @@ struct ipa3_context {
struct ipa3_uc_ctx uc_ctx;
struct ipa3_uc_wdi_ctx uc_wdi_ctx;
+ struct ipa3_uc_ntn_ctx uc_ntn_ctx;
u32 wan_rx_ring_size;
bool skip_uc_pipe_reset;
enum ipa_transport_type transport_prototype;
@@ -1854,6 +1662,11 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl);
int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
/*
* To retrieve doorbell physical address of
* wlan pipes
@@ -2197,4 +2010,6 @@ void ipa3_recycle_wan_skb(struct sk_buff *skb);
int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map);
int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr,
u32 size, bool map);
+int ipa3_ntn_init(void);
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 14e2f1f4c510..e83c249ad425 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -549,11 +549,6 @@ int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
return res;
}
}
- if (res) {
- IPA_MHI_ERR("failed to resume channel error %d\n",
- res);
- return res;
- }
res = gsi_start_channel(ep->gsi_chan_hdl);
if (res) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 138db3dbde84..b06e33a8258a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -957,6 +957,10 @@ static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
return 0;
ipa_insert_failed:
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+ else if (entry->proc_ctx)
+ entry->proc_ctx->ref_cnt--;
idr_remove(&tbl->rule_ids, entry->rule_id);
list_del(&entry->link);
kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
new file mode 100644
index 000000000000..7b891843028d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+static void ipa3_uc_ntn_event_handler(struct IpaHwSharedMemCommonMapping_t
+ *uc_sram_mmio)
+
+{
+ union Ipa3HwNTNErrorEventData_t ntn_evt;
+
+ if (uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+ ntn_evt.raw32b = uc_sram_mmio->eventParams;
+ IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+ ntn_evt.params.ntn_error_type,
+ ntn_evt.params.ipa_pipe_number,
+ ntn_evt.params.ntn_ch_err_type);
+ }
+}
+
+static void ipa3_uc_ntn_event_log_info_handler(
+struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+
+{
+ if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+ IPAERR("NTN feature missing 0x%x\n",
+ uc_event_top_mmio->featureMask);
+ return;
+ }
+
+ if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+ params.size != sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
+ IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+ sizeof(struct Ipa3HwStatsNTNInfoData_t),
+ uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.size);
+ return;
+ }
+
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+ statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+ IPAERR("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+ sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+ ipa3_ctx->smem_sz) {
+ IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ return;
+ }
+
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+ ioremap(ipa3_ctx->ipa_wrapper_base +
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+ sizeof(struct Ipa3HwStatsNTNInfoData_t));
+ if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("fail to ioremap uc ntn stats\n");
+ return;
+ }
+}
+
+/**
+ * ipa3_get_ntn_stats() - Query NTN statistics from uc
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ if (unlikely(!ipa3_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("bad parms stats=%p ntn_stats=%p\n",
+ stats,
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ TX_STATS(num_pkts_processed);
+ TX_STATS(tail_ptr_val);
+ TX_STATS(num_db_fired);
+ TX_STATS(tx_comp_ring_stats.ringFull);
+ TX_STATS(tx_comp_ring_stats.ringEmpty);
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+ TX_STATS(tx_comp_ring_stats.ringUsageLow);
+ TX_STATS(tx_comp_ring_stats.RingUtilCount);
+ TX_STATS(bam_stats.bamFifoFull);
+ TX_STATS(bam_stats.bamFifoEmpty);
+ TX_STATS(bam_stats.bamFifoUsageHigh);
+ TX_STATS(bam_stats.bamFifoUsageLow);
+ TX_STATS(bam_stats.bamUtilCount);
+ TX_STATS(num_db);
+ TX_STATS(num_unexpected_db);
+ TX_STATS(num_bam_int_handled);
+ TX_STATS(num_bam_int_in_non_running_state);
+ TX_STATS(num_qmb_int_handled);
+ TX_STATS(num_bam_int_handled_while_wait_for_bam);
+ TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+ RX_STATS(max_outstanding_pkts);
+ RX_STATS(num_pkts_processed);
+ RX_STATS(rx_ring_rp_value);
+ RX_STATS(rx_ind_ring_stats.ringFull);
+ RX_STATS(rx_ind_ring_stats.ringEmpty);
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+ RX_STATS(rx_ind_ring_stats.ringUsageLow);
+ RX_STATS(rx_ind_ring_stats.RingUtilCount);
+ RX_STATS(bam_stats.bamFifoFull);
+ RX_STATS(bam_stats.bamFifoEmpty);
+ RX_STATS(bam_stats.bamFifoUsageHigh);
+ RX_STATS(bam_stats.bamFifoUsageLow);
+ RX_STATS(bam_stats.bamUtilCount);
+ RX_STATS(num_bam_int_handled);
+ RX_STATS(num_db);
+ RX_STATS(num_unexpected_db);
+ RX_STATS(num_pkts_in_dis_uninit_state);
+ RX_STATS(num_bam_int_handled_while_not_in_bam);
+ RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+int ipa3_ntn_init(void)
+{
+ struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
+
+ uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler;
+ uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+ ipa3_uc_ntn_event_log_info_handler;
+
+ ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+ return 0;
+}
+
+static int ipa3_uc_send_ntn_setup_pipe_cmd(
+ struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+ int ipa_ep_idx;
+ int result = 0;
+ struct ipa_mem_buffer cmd;
+ struct Ipa3HwNtnSetUpCmdData_t *Ntn_params;
+ struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+ if (ntn_info == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to get ep idx.\n");
+ return -EFAULT;
+ }
+
+ IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+ IPADBG("ring_base_pa = 0x%pa\n",
+ &ntn_info->ring_base_pa);
+ IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+ IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+ IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+ IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+ IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+ Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+ Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+ Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+ Ntn_params->num_buffers = ntn_info->num_buffers;
+ Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+ Ntn_params->data_buff_size = ntn_info->data_buff_size;
+ Ntn_params->ipa_pipe_number = ipa_ep_idx;
+ Ntn_params->dir = dir;
+
+ result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result)
+ result = -EFAULT;
+
+ dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ return result;
+}
+
+/**
+ * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp)
+{
+ struct ipa3_ep_context *ep_ul;
+ struct ipa3_ep_context *ep_dl;
+ int ipa_ep_idx_ul;
+ int ipa_ep_idx_dl;
+ int result = 0;
+
+ if (in == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+ if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+ IPAERR("fail to alloc EP.\n");
+ return -EFAULT;
+ }
+
+ ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->valid || ep_dl->valid) {
+ IPAERR("EP already allocated.\n");
+ return -EFAULT;
+ }
+
+ memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys));
+ memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys));
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ /* setup ul ep cfg */
+ ep_ul->valid = 1;
+ ep_ul->client = in->ul.client;
+ result = ipa3_enable_data_path(ipa_ep_idx_ul);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_ul);
+ return -EFAULT;
+ }
+ ep_ul->client_notify = notify;
+ ep_ul->priv = priv;
+
+ memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+ ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+ ep_ul->cfg.hdr.hdr_len = hdr_len;
+ ep_ul->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+ IPAERR("fail to setup ul pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+ IPAERR("fail to send cmd to uc for ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa3_install_dflt_flt_rules(ipa_ep_idx_ul);
+ outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+ ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPADBG("client %d (ep: %d) connected\n", in->ul.client,
+ ipa_ep_idx_ul);
+
+ /* setup dl ep cfg */
+ ep_dl->valid = 1;
+ ep_dl->client = in->dl.client;
+ result = ipa3_enable_data_path(ipa_ep_idx_dl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_dl);
+ result = -EFAULT;
+ goto fail;
+ }
+
+ memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg));
+ ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+ ep_dl->cfg.hdr.hdr_len = hdr_len;
+ ep_dl->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+ IPAERR("fail to setup dl pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+ IPAERR("fail to send cmd to uc for dl pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+ ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPADBG("client %d (ep: %d) connected\n", in->dl.client,
+ ipa_ep_idx_dl);
+
+fail:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
+
+/**
+ * ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl)
+{
+ struct ipa_mem_buffer cmd;
+ struct ipa3_ep_context *ep_ul, *ep_dl;
+ struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+ union Ipa3HwNtnCommonChCmdData_t *tear;
+ int result = 0;
+
+ IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+ IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+ ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+ ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+ IPAERR("channel bad state: ul %d dl %d\n",
+ ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+ return -EFAULT;
+ }
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ /* teardown the UL pipe */
+ cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+ tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+ result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa3_disable_data_path(ipa_ep_idx_ul);
+ ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
+ memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
+ IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+ /* teardown the DL pipe */
+ tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+ result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa3_disable_data_path(ipa_ep_idx_dl);
+ memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+ IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+ dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
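
For context on how the two new NTN entry points fit together, here is a minimal, hypothetical caller sketch (not part of this patch). The struct fields and the ipa3_setup_uc_ntn_pipes()/ipa3_tear_down_uc_offload_pipes() signatures come from the code above; the surrounding client code, the use of ETH_HLEN as header length, and the teardown timing are illustrative assumptions only.

/* Illustrative sketch only -- not part of this patch. */
static int example_ntn_offload(struct ipa_ntn_conn_in_params *in,
		ipa_notify_cb cb, void *priv)
{
	struct ipa_ntn_conn_out_params out;
	int ret;

	/*
	 * in->ul / in->dl are assumed to be filled by the Ethernet client:
	 * ring_base_pa, ntn_ring_size, buff_pool_base_pa, num_buffers,
	 * data_buff_size, ntn_reg_base_ptr_pa and the IPA client type.
	 */
	ret = ipa3_setup_uc_ntn_pipes(in, cb, priv, ETH_HLEN, &out);
	if (ret)
		return ret;

	/*
	 * out.ul_uc_db_pa / out.dl_uc_db_pa now hold the uC doorbell
	 * physical addresses the peer rings to hand descriptors to IPA.
	 */

	/* Teardown takes the resolved endpoint indices. */
	return ipa3_tear_down_uc_offload_pipes(
			ipa_get_ep_mapping(in->ul.client),
			ipa_get_ep_mapping(in->dl.client));
}
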
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
new file mode 100644
index 000000000000..946fc7e31fb9
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -0,0 +1,580 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ * @brief Enum value determined based on the feature it
+ * corresponds to
+ * +----------------+----------------+
+ * | 3 bits | 5 bits |
+ * +----------------+----------------+
+ * | HW_FEATURE | OPCODE |
+ * +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
+#define EXTRACT_UC_FEATURE(value) (value >> 5)
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa3_hw_features - Values that represent the features supported
+ * in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_ZIP: Feature related to CMP/DCMP operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to offload operation in IPA HW
+ */
+enum ipa3_hw_features {
+ IPA_HW_FEATURE_COMMON = 0x0,
+ IPA_HW_FEATURE_MHI = 0x1,
+ IPA_HW_FEATURE_POWER_COLLAPSE = 0x2,
+ IPA_HW_FEATURE_WDI = 0x3,
+ IPA_HW_FEATURE_ZIP = 0x4,
+ IPA_HW_FEATURE_NTN = 0x5,
+ IPA_HW_FEATURE_OFFLOAD = 0x6,
+ IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
+};
+
+/**
+ * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
+ * device
+ * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ */
+enum ipa3_hw_2_cpu_events {
+ IPA_HW_2_CPU_EVENT_NO_OP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_2_CPU_EVENT_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_2_CPU_EVENT_LOG_INFO =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa3_hw_errors - Common error types.
+ * @IPA_HW_ERROR_NONE : No error persists
+ * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
+ * @IPA_HW_DMA_ERROR : Unexpected DMA error
+ * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
+ * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
+ * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
+ * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
+ */
+enum ipa3_hw_errors {
+ IPA_HW_ERROR_NONE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_INVALID_DOORBELL_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_DMA_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+ IPA_HW_FATAL_SYSTEM_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+ IPA_HW_INVALID_OPCODE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+ IPA_HW_INVALID_PARAMS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+ IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+ IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+ IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8)
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter, lower 32 bits. The parameter field
+ * can hold immediate parameters or point to a structure in system memory
+ * (in such case the address must be accessible to HW)
+ * @cmdParams_hi : CPU->HW command parameter, higher 32 bits.
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter field can hold
+ * 32 bits of immediate parameters or point to a structure in system
+ * memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter field can hold 32
+ * bits of immediate parameters or point to a
+ * structure in system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the
+ * error type.
+ * @warningCounter : The warnings counter. The counter carries information
+ * regarding non fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+ u8 cmdOp;
+ u8 reserved_01;
+ u16 reserved_03_02;
+ u32 cmdParams;
+ u32 cmdParams_hi;
+ u8 responseOp;
+ u8 reserved_0D;
+ u16 reserved_0F_0E;
+ u32 responseParams;
+ u8 eventOp;
+ u8 reserved_15;
+ u16 reserved_17_16;
+ u32 eventParams;
+ u32 firstErrorAddress;
+ u8 hwState;
+ u8 warningCounter;
+ u16 reserved_23_22;
+ u16 interfaceVersionCommon;
+ u16 reserved_27_26;
+} __packed;
+
+/**
+ * union Ipa3HwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union Ipa3HwFeatureInfoData_t {
+ struct IpaHwFeatureInfoParams_t {
+ u32 offset:16;
+ u32 size:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwErrorEventData_t - HW->CPU Common Events
+ * @errorType : Entered when a system error is detected by the HW. Type of
+ * error is specified by IPA_HW_ERRORS
+ * @reserved : Reserved
+ */
+union IpaHwErrorEventData_t {
+ struct IpaHwErrorEventParams_t {
+ u32 errorType:8;
+ u32 reserved:24;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * struct Ipa3HwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @featureInfo : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct Ipa3HwEventInfoData_t {
+ u32 baseAddrOffset;
+ union Ipa3HwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+ u32 featureMask;
+ u32 circBuffBaseAddrOffset;
+ struct Ipa3HwEventInfoData_t statsInfo;
+ struct Ipa3HwEventInfoData_t configInfo;
+
+} __packed;
+
+/**
+ * struct ipa3_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa3_uc_ntn_ctx {
+ u32 ntn_uc_stats_ofst;
+ struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+ void *priv;
+ ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa3_hw_2_cpu_ntn_events - Values that represent HW event
+ * to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ * detected an error in NTN
+ *
+ */
+enum ipa3_hw_2_cpu_ntn_events {
+ IPA_HW_2_CPU_EVENT_NTN_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+
+/**
+ * enum ipa3_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error persists
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa3_hw_ntn_errors {
+ IPA_HW_NTN_ERROR_NONE = 0,
+ IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_states {
+ IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_NTN_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error persists
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ * num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
+ * @IPA_HW_NTN_CH_ERR_RESERVED:
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_errors {
+ IPA_HW_NTN_CH_ERR_NONE = 0,
+ IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+ IPA_HW_NTN_TX_FSM_ERROR = 2,
+ IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3,
+ IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+ IPA_HW_NTN_RX_FSM_ERROR = 5,
+ IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6,
+ IPA_HW_NTN_CH_ERR_RESERVED = 0xFF
+};
+
+
+/**
+ * struct Ipa3HwNtnSetUpCmdData_t - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ * ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ * Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ * Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of the each data buffer allocated in
+ * DDR
+ */
+struct Ipa3HwNtnSetUpCmdData_t {
+ u32 ring_base_pa;
+ u32 buff_pool_base_pa;
+ u16 ntn_ring_size;
+ u16 num_buffers;
+ u32 ntn_reg_base_ptr_pa;
+ u8 ipa_pipe_number;
+ u8 dir;
+ u16 data_buff_size;
+
+} __packed;
+
+/**
+ * union Ipa3HwNtnCommonChCmdData_t - Structure holding the
+ * parameters for the NTN tear down channel command
+ *
+ *@ipa_pipe_number: IPA pipe number. This could be Tx or an Rx pipe
+ */
+union Ipa3HwNtnCommonChCmdData_t {
+ struct IpaHwNtnCommonChCmdParams_t {
+ u32 ipa_pipe_number :8;
+ u32 reserved :24;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+
+/**
+ * union Ipa3HwNTNErrorEventData_t - Structure holding the
+ * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
+ * as immediate params in the shared memory
+ *
+ *@ntn_error_type: type of NTN error (ipa3_hw_ntn_errors)
+ *@ipa_pipe_number: IPA pipe number on which error has happened
+ * Applicable only if error type indicates channel error
+ *@ntn_ch_err_type: Information about the channel error (if
+ * available)
+ */
+union Ipa3HwNTNErrorEventData_t {
+ struct IpaHwNTNErrorEventParams_t {
+ u32 ntn_error_type :8;
+ u32 reserved :8;
+ u32 ipa_pipe_number :8;
+ u32 ntn_ch_err_type :8;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTN3RxInfoData_t - NTN Structure holding the Rx pipe
+ * information
+ *
+ *@max_outstanding_pkts: Number of outstanding packets in Rx
+ * Ring
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@rx_ring_rp_value: Read pointer last advertised to the WLAN FW
+ *
+ *@ntn_ch_err_type: Information about the channel error (if
+ * available)
+ *@rx_ind_ring_stats:
+ *@bam_stats:
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_pkts_in_dis_uninit_state:
+ *@num_bam_int_handled_while_not_in_bam: Number of Bam
+ * Interrupts handled by FW
+ *@num_bam_int_handled_while_in_bam_state: Number of Bam
+ * Interrupts handled by FW
+ */
+struct NTN3RxInfoData_t {
+ u32 max_outstanding_pkts;
+ u32 num_pkts_processed;
+ u32 rx_ring_rp_value;
+ struct IpaHwRingStats_t rx_ind_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_bam_int_handled;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_pkts_in_dis_uninit_state;
+ u32 num_bam_int_handled_while_not_in_bam;
+ u32 num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel info.
+ * Ensure that this is always word aligned
+ *
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@tail_ptr_val: Latest value of doorbell written to copy engine
+ *@num_db_fired: Number of DB from uC FW to Copy engine
+ *
+ *@tx_comp_ring_stats:
+ *@bam_stats:
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_bam_int_in_non_running_state: Number of Bam interrupts
+ * while not in Running state
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@num_bam_int_handled_while_wait_for_bam: Number of times the
+ * Imm Cmd is injected due to fw_desc change
+ */
+struct NTNTxInfoData_t {
+ u32 num_pkts_processed;
+ u32 tail_ptr_val;
+ u32 num_db_fired;
+ struct IpaHwRingStats_t tx_comp_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_bam_int_handled;
+ u32 num_bam_int_in_non_running_state;
+ u32 num_qmb_int_handled;
+ u32 num_bam_int_handled_while_wait_for_bam;
+ u32 num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct Ipa3HwStatsNTNInfoData_t - Structure holding the NTN Rx/Tx
+ * channel stats. Ensure that this is always word aligned
+ *
+ */
+struct Ipa3HwStatsNTNInfoData_t {
+ struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+ struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands - Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ * Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ * Offload protocol's Tx/Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
+
+
+/**
+ * enum ipa3_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa3_hw_offload_channel_states {
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF
+};
+
+
+/**
+ * enum ipa3_hw_2_cpu_offload_cmd_resp_status - Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa3_hw_2_cpu_offload_cmd_resp_status {
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+ IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+ IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+ IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+ IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+ IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+ IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * union IpaHwSetUpCmd - Union of the per-protocol channel
+ * set up command parameters
+ *
+ */
+union IpaHwSetUpCmd {
+ struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP
+ *
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+ u8 protocol;
+ union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * union IpaHwCommonChCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ *
+ *
+ */
+union IpaHwCommonChCmd {
+ union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
+struct IpaHwOffloadCommonChCmdData_t {
+ u8 protocol;
+ union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
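
As a quick sanity check of the FEATURE_ENUM_VAL()/EXTRACT_UC_FEATURE() packing defined in this header (3-bit feature in the upper bits, 5-bit opcode in the lower five), the following stand-alone snippet reproduces the arithmetic; it is illustrative only and not part of the patch.

#include <stdio.h>

#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
#define EXTRACT_UC_FEATURE(value) (value >> 5)

int main(void)
{
	/* IPA_HW_FEATURE_NTN = 0x5, opcode 0 -> (5 << 5) | 0 = 0xa0 */
	unsigned int evt = FEATURE_ENUM_VAL(0x5, 0);

	/* prints: value=0xa0 feature=5 opcode=0 */
	printf("value=0x%x feature=%u opcode=%u\n",
	       evt, EXTRACT_UC_FEATURE(evt), evt & 0x1f);
	return 0;
}
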
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 1caccddf5834..e0f32bdcbb3d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1190,7 +1190,7 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
- ep->wdi_state |= IPA_WDI_CONNECTED;
+ ep->uc_offload_state |= IPA_WDI_CONNECTED;
IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
return 0;
@@ -1222,7 +1222,7 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1234,8 +1234,8 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1283,7 +1283,7 @@ int ipa3_enable_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1295,8 +1295,8 @@ int ipa3_enable_wdi_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
@@ -1319,7 +1319,7 @@ int ipa3_enable_wdi_pipe(u32 clnt_hdl)
result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- ep->wdi_state |= IPA_WDI_ENABLED;
+ ep->uc_offload_state |= IPA_WDI_ENABLED;
IPADBG("client (ep: %d) enabled\n", clnt_hdl);
uc_timeout:
@@ -1345,7 +1345,7 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1379,8 +1379,8 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
@@ -1436,7 +1436,7 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_ENABLED;
+ ep->uc_offload_state &= ~IPA_WDI_ENABLED;
IPADBG("client (ep: %d) disabled\n", clnt_hdl);
uc_timeout:
@@ -1460,7 +1460,7 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1472,8 +1472,8 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
@@ -1497,7 +1497,7 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl)
else
IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
- ep->wdi_state |= IPA_WDI_RESUMED;
+ ep->uc_offload_state |= IPA_WDI_RESUMED;
IPADBG("client (ep: %d) resumed\n", clnt_hdl);
uc_timeout:
@@ -1521,7 +1521,7 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1533,9 +1533,9 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
IPA_WDI_RESUMED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1588,7 +1588,7 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
ipa3_ctx->tag_process_before_gating = true;
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_RESUMED;
+ ep->uc_offload_state &= ~IPA_WDI_RESUMED;
IPADBG("client (ep: %d) suspended\n", clnt_hdl);
uc_timeout:
@@ -1603,7 +1603,7 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1615,8 +1615,8 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (!(ep->wdi_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 21b8cac11d2b..395cf62c9728 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -40,7 +40,8 @@
#define IPA_TAG_SLEEP_MIN_USEC (1000)
#define IPA_TAG_SLEEP_MAX_USEC (2000)
#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
-#define IPA_BCR_REG_VAL (0x00000001)
+#define IPA_BCR_REG_VAL_v3_0 (0x00000001)
+#define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
#define IPA_AGGR_GRAN_MIN (1)
#define IPA_AGGR_GRAN_MAX (32)
#define IPA_EOT_COAL_GRAN_MIN (1)
@@ -848,14 +849,28 @@ void ipa3_cfg_qsb(void)
int ipa3_init_hw(void)
{
u32 ipa_version = 0;
+ u32 val;
/* Read IPA version and make sure we have access to the registers */
ipa_version = ipahal_read_reg(IPA_VERSION);
if (ipa_version == 0)
return -EFAULT;
- /* using old BCR configuration(IPAv2.6)*/
- ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL);
+ switch (ipa3_ctx->ipa_hw_type) {
+ case IPA_HW_v3_0:
+ case IPA_HW_v3_1:
+ val = IPA_BCR_REG_VAL_v3_0;
+ break;
+ case IPA_HW_v3_5:
+ case IPA_HW_v3_5_1:
+ val = IPA_BCR_REG_VAL_v3_5;
+ break;
+ default:
+ IPAERR("unknown HW type in dts\n");
+ return -EFAULT;
+ }
+
+ ipahal_write_reg(IPA_BCR, val);
ipa3_cfg_qsb();
@@ -3029,6 +3044,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_disconnect = ipa3_disconnect;
api_ctrl->ipa_reset_endpoint = ipa3_reset_endpoint;
api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay;
+ api_ctrl->ipa_disable_endpoint = NULL;
api_ctrl->ipa_cfg_ep = ipa3_cfg_ep;
api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat;
api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr;
@@ -3178,6 +3194,9 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
api_ctrl->ipa_rx_poll = ipa3_rx_poll;
api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb;
+ api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes;
+ api_ctrl->ipa_tear_down_uc_offload_pipes =
+ ipa3_tear_down_uc_offload_pipes;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 4f6097c6da35..6c4d14b093c3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1215,7 +1215,10 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
(!phys_base && !hdr_base_addr) ||
!hdr_base_addr ||
((is_hdr_proc_ctx == false) && !offset_entry)) {
- IPAHAL_ERR("failed on validating params");
+ IPAHAL_ERR(
+ "invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
+ , hdr_len, &phys_base, hdr_base_addr
+ , is_hdr_proc_ctx, offset_entry);
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index a4eab02cb571..aebdaab3ac77 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1157,14 +1157,16 @@ static void apps_ipa_tx_complete_notify(void *priv,
struct net_device *dev = (struct net_device *)priv;
struct ipa3_wwan_private *wwan_ptr;
- if (evt != IPA_WRITE_DONE) {
- IPAWANDBG("unsupported event on Tx callback\n");
+ if (dev != IPA_NETDEV()) {
+ IPAWANDBG("Received pre-SSR packet completion\n");
+ dev_kfree_skb_any(skb);
return;
}
- if (dev != IPA_NETDEV()) {
- IPAWANDBG("Received pre-SSR packet completion\n");
+ if (evt != IPA_WRITE_DONE) {
+ IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
return;
}
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index d78ee151c9e4..be3bc2f4edd4 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -865,6 +865,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo ideapad Y700-15ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad Y700 Touch-15ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
+ },
+ },
+ {
.ident = "Lenovo ideapad Y700-17ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 61f611296ad6..a45a5d103040 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -265,6 +265,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(typec_power_role),
POWER_SUPPLY_ATTR(pd_allowed),
POWER_SUPPLY_ATTR(pd_active),
+ POWER_SUPPLY_ATTR(charger_temp),
+ POWER_SUPPLY_ATTR(charger_temp_max),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/qcom-charger/qpnp-qnovo.c b/drivers/power/qcom-charger/qpnp-qnovo.c
index d50188a5efbf..2418b112d670 100644
--- a/drivers/power/qcom-charger/qpnp-qnovo.c
+++ b/drivers/power/qcom-charger/qpnp-qnovo.c
@@ -153,7 +153,7 @@ struct qnovo {
struct work_struct status_change_work;
int fv_uV_request;
int fcc_uA_request;
- struct votable *fcc_votable;
+ struct votable *fcc_max_votable;
struct votable *fv_votable;
};
@@ -243,8 +243,9 @@ static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
vote(chip->fv_votable, QNOVO_VOTER, false, 0);
}
if (chip->fcc_uA_request != -EINVAL) {
- if (chip->fcc_votable)
- vote(chip->fcc_votable, QNOVO_VOTER, false, 0);
+ if (chip->fcc_max_votable)
+ vote(chip->fcc_max_votable, QNOVO_VOTER,
+ false, 0);
}
}
@@ -265,10 +266,10 @@ static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
true, chip->fv_uV_request);
}
if (chip->fcc_uA_request != -EINVAL) {
- if (!chip->fcc_votable)
- chip->fcc_votable = find_votable("FCC");
- if (chip->fcc_votable)
- vote(chip->fcc_votable, QNOVO_VOTER,
+ if (!chip->fcc_max_votable)
+ chip->fcc_max_votable = find_votable("FCC_MAX");
+ if (chip->fcc_max_votable)
+ vote(chip->fcc_max_votable, QNOVO_VOTER,
true, chip->fcc_uA_request);
}
}
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index 541e40aeb91a..83cb87f94665 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -25,9 +25,6 @@
#include "smb-lib.h"
#include "pmic-voter.h"
-#define SMB2_DEFAULT_FCC_UA 3000000
-#define SMB2_DEFAULT_FV_UV 4350000
-#define SMB2_DEFAULT_ICL_UA 3000000
#define SMB2_DEFAULT_WPWR_UW 8000000
static struct smb_params v1_params = {
@@ -146,7 +143,7 @@ static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
struct device_node *node = chg->dev->of_node;
- int rc;
+ int rc, byte_len;
if (!node) {
pr_err("device tree node missing\n");
@@ -159,28 +156,47 @@ static int smb2_parse_dt(struct smb2 *chip)
rc = of_property_read_u32(node,
"qcom,fcc-max-ua", &chip->dt.fcc_ua);
if (rc < 0)
- chip->dt.fcc_ua = SMB2_DEFAULT_FCC_UA;
+ chip->dt.fcc_ua = -EINVAL;
rc = of_property_read_u32(node,
"qcom,fv-max-uv", &chip->dt.fv_uv);
if (rc < 0)
- chip->dt.fv_uv = SMB2_DEFAULT_FV_UV;
+ chip->dt.fv_uv = -EINVAL;
rc = of_property_read_u32(node,
"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
if (rc < 0)
- chip->dt.usb_icl_ua = SMB2_DEFAULT_ICL_UA;
+ chip->dt.usb_icl_ua = -EINVAL;
rc = of_property_read_u32(node,
"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
if (rc < 0)
- chip->dt.dc_icl_ua = SMB2_DEFAULT_ICL_UA;
+ chip->dt.dc_icl_ua = -EINVAL;
rc = of_property_read_u32(node,
"qcom,wipower-max-uw", &chip->dt.wipower_max_uw);
if (rc < 0)
chip->dt.wipower_max_uw = SMB2_DEFAULT_WPWR_UW;
+ if (of_find_property(node, "qcom,thermal-mitigation", &byte_len)) {
+ chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len,
+ GFP_KERNEL);
+
+ if (chg->thermal_mitigation == NULL)
+ return -ENOMEM;
+
+ chg->thermal_levels = byte_len / sizeof(u32);
+ rc = of_property_read_u32_array(node,
+ "qcom,thermal-mitigation",
+ chg->thermal_mitigation,
+ chg->thermal_levels);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't read threm limits rc = %d\n", rc);
+ return rc;
+ }
+ }
+
return 0;
}
@@ -350,6 +366,105 @@ static int smb2_init_usb_psy(struct smb2 *chip)
}
/*************************
+ * DC PSY REGISTRATION *
+ *************************/
+
+static enum power_supply_property smb2_dc_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smb2_dc_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smb2 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ rc = smblib_get_prop_dc_present(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ rc = smblib_get_prop_dc_online(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_get_prop_dc_current_max(chg, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smb2_dc_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smb2 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_set_prop_dc_current_max(chg, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smb2_dc_prop_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int rc;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+static const struct power_supply_desc dc_psy_desc = {
+ .name = "dc",
+ .type = POWER_SUPPLY_TYPE_WIPOWER,
+ .properties = smb2_dc_props,
+ .num_properties = ARRAY_SIZE(smb2_dc_props),
+ .get_property = smb2_dc_get_prop,
+ .set_property = smb2_dc_set_prop,
+ .property_is_writeable = smb2_dc_prop_is_writeable,
+};
+
+static int smb2_init_dc_psy(struct smb2 *chip)
+{
+ struct power_supply_config dc_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+
+ dc_cfg.drv_data = chip;
+ dc_cfg.of_node = chg->dev->of_node;
+ chg->dc_psy = devm_power_supply_register(chg->dev,
+ &dc_psy_desc,
+ &dc_cfg);
+ if (IS_ERR(chg->dc_psy)) {
+ pr_err("Couldn't register USB power supply\n");
+ return PTR_ERR(chg->dc_psy);
+ }
+
+ return 0;
+}
+
+/*************************
* BATT PSY REGISTRATION *
*************************/
@@ -360,6 +475,7 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
};
static int smb2_batt_get_prop(struct power_supply *psy,
@@ -387,9 +503,11 @@ static int smb2_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
smblib_get_prop_batt_capacity(chg, val);
break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ smblib_get_prop_system_temp_level(chg, val);
+ break;
default:
- pr_err("batt power supply prop %d not supported\n",
- psp);
+ pr_err("batt power supply prop %d not supported\n", psp);
return -EINVAL;
}
@@ -400,17 +518,21 @@ static int smb2_batt_set_prop(struct power_supply *psy,
enum power_supply_property prop,
const union power_supply_propval *val)
{
+ int rc = 0;
struct smb_charger *chg = power_supply_get_drvdata(psy);
switch (prop) {
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
- smblib_set_prop_input_suspend(chg, val);
+ rc = smblib_set_prop_input_suspend(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ rc = smblib_set_prop_system_temp_level(chg, val);
break;
default:
- return -EINVAL;
+ rc = -EINVAL;
}
- return 0;
+ return rc;
}
static int smb2_batt_prop_is_writeable(struct power_supply *psy,
@@ -418,6 +540,7 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
{
switch (psp) {
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
return 1;
default:
break;
@@ -599,6 +722,20 @@ static int smb2_init_hw(struct smb2 *chip)
struct smb_charger *chg = &chip->chg;
int rc;
+ if (chip->dt.fcc_ua < 0)
+ smblib_get_charge_param(chg, &chg->param.fcc, &chip->dt.fcc_ua);
+
+ if (chip->dt.fv_uv < 0)
+ smblib_get_charge_param(chg, &chg->param.fv, &chip->dt.fv_uv);
+
+ if (chip->dt.usb_icl_ua < 0)
+ smblib_get_charge_param(chg, &chg->param.usb_icl,
+ &chip->dt.usb_icl_ua);
+
+ if (chip->dt.dc_icl_ua < 0)
+ smblib_get_charge_param(chg, &chg->param.dc_icl,
+ &chip->dt.dc_icl_ua);
+
/* votes must be cast before configuring software control */
vote(chg->pl_disable_votable,
USBIN_ICL_VOTER, true, 0);
@@ -608,7 +745,7 @@ static int smb2_init_hw(struct smb2 *chip)
DEFAULT_VOTER, chip->dt.suspend_input, 0);
vote(chg->dc_suspend_votable,
DEFAULT_VOTER, chip->dt.suspend_input, 0);
- vote(chg->fcc_votable,
+ vote(chg->fcc_max_votable,
DEFAULT_VOTER, true, chip->dt.fcc_ua);
vote(chg->fv_votable,
DEFAULT_VOTER, true, chip->dt.fv_uv);
@@ -617,17 +754,21 @@ static int smb2_init_hw(struct smb2 *chip)
vote(chg->dc_icl_votable,
DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
- /* configure charge enable for software control; active high */
+ /*
+ * Configure charge enable for software control; active high, and end
+ * the charge cycle while the battery is OV.
+ */
rc = smblib_masked_write(chg, CHGR_CFG2_REG,
- CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
+ CHG_EN_POLARITY_BIT |
+ CHG_EN_SRC_BIT |
+ BAT_OV_ECC_BIT, BAT_OV_ECC_BIT);
if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't configure charge enable source rc=%d\n", rc);
+ dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
return rc;
}
/* enable the charging path */
- rc = smblib_enable_charging(chg, true);
+ rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
if (rc < 0) {
dev_err(chg->dev, "Couldn't enable charging rc=%d\n", rc);
return rc;
@@ -635,11 +776,10 @@ static int smb2_init_hw(struct smb2 *chip)
/*
* trigger the usb-typec-change interrupt only when the CC state
- * changes, or there was a VBUS error
+ * changes
*/
rc = smblib_write(chg, TYPE_C_INTRPT_ENB_REG,
- TYPEC_CCSTATE_CHANGE_INT_EN_BIT
- | TYPEC_VBUS_ERROR_INT_EN_BIT);
+ TYPEC_CCSTATE_CHANGE_INT_EN_BIT);
if (rc < 0) {
dev_err(chg->dev,
"Couldn't configure Type-C interrupts rc=%d\n", rc);
@@ -904,6 +1044,12 @@ static int smb2_probe(struct platform_device *pdev)
goto cleanup;
}
+ rc = smb2_init_dc_psy(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize dc psy rc=%d\n", rc);
+ goto cleanup;
+ }
+
rc = smb2_init_usb_psy(chip);
if (rc < 0) {
pr_err("Couldn't initialize usb psy rc=%d\n", rc);
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index dd3ec1eb51e3..8fe882e078f0 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -210,22 +210,6 @@ static const struct apsd_result *smblib_get_apsd_result(struct smb_charger *chg)
* REGISTER SETTERS *
********************/
-int smblib_enable_charging(struct smb_charger *chg, bool enable)
-{
- int rc = 0;
-
- rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
- CHARGING_ENABLE_CMD_BIT,
- enable ? CHARGING_ENABLE_CMD_BIT : 0);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't %s charging rc=%d\n",
- enable ? "enable" : "disable", rc);
- return rc;
- }
-
- return rc;
-}
-
int smblib_set_charge_param(struct smb_charger *chg,
struct smb_chg_param *param, int val_u)
{
@@ -416,6 +400,14 @@ static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
return smblib_set_dc_suspend(chg, suspend);
}
+static int smblib_fcc_max_vote_callback(struct votable *votable, void *data,
+ int fcc_ua, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ return vote(chg->fcc_votable, FCC_MAX_RESULT, true, fcc_ua);
+}
+
static int smblib_fcc_vote_callback(struct votable *votable, void *data,
int fcc_ua, const char *client)
{
@@ -606,6 +598,23 @@ static int smblib_pl_disable_vote_callback(struct votable *votable, void *data,
return 0;
}
+static int smblib_chg_disable_vote_callback(struct votable *votable, void *data,
+ int chg_disable, const char *client)
+{
+ struct smb_charger *chg = data;
+ int rc;
+
+ rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
+ CHARGING_ENABLE_CMD_BIT,
+ chg_disable ? 0 : CHARGING_ENABLE_CMD_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't %s charging rc=%d\n",
+ chg_disable ? "disable" : "enable", rc);
+ return rc;
+ }
+
+ return 0;
+}
/*****************
* OTG REGULATOR *
*****************/
@@ -849,6 +858,13 @@ done:
return rc;
}
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = chg->system_temp_level;
+ return 0;
+}
+
/***********************
* BATTERY PSY SETTERS *
***********************/
@@ -876,6 +892,101 @@ int smblib_set_prop_input_suspend(struct smb_charger *chg,
return rc;
}
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ if (val->intval < 0)
+ return -EINVAL;
+
+ if (chg->thermal_levels <= 0)
+ return -EINVAL;
+
+ if (val->intval > chg->thermal_levels)
+ return -EINVAL;
+
+ chg->system_temp_level = val->intval;
+ if (chg->system_temp_level == chg->thermal_levels)
+ return vote(chg->chg_disable_votable, THERMAL_DAEMON, true, 0);
+
+ vote(chg->chg_disable_votable, THERMAL_DAEMON, false, 0);
+ if (chg->system_temp_level == 0)
+ return vote(chg->fcc_votable, THERMAL_DAEMON, false, 0);
+
+ vote(chg->fcc_votable, THERMAL_DAEMON, true,
+ chg->thermal_mitigation[chg->system_temp_level]);
+ return 0;
+}
+
+/*******************
+ * DC PSY GETTERS *
+ *******************/
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = 0;
+ u8 stat;
+
+ rc = smblib_read(chg, DC_INT_RT_STS_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read DC_INT_RT_STS_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ smblib_dbg(chg, PR_REGISTER, "DC_INT_RT_STS_REG = 0x%02x\n",
+ stat);
+
+ val->intval = (bool)(stat & DCIN_PLUGIN_RT_STS_BIT);
+
+ return rc;
+}
+
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = 0;
+ u8 stat;
+
+ if (get_client_vote(chg->dc_suspend_votable, USER_VOTER)) {
+ val->intval = false;
+ return rc;
+ }
+
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ rc);
+ return rc;
+ }
+ smblib_dbg(chg, PR_REGISTER, "POWER_PATH_STATUS = 0x%02x\n",
+ stat);
+
+ val->intval = (stat & USE_DCIN_BIT) &&
+ (stat & VALID_INPUT_POWER_SOURCE_BIT);
+
+ return rc;
+}
+
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = get_effective_result_locked(chg->dc_icl_votable);
+ return 0;
+}
+
+/*******************
+ * DC PSY SETTERS *
+ *******************/
+
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ rc = vote(chg->dc_icl_votable, USER_VOTER, true, val->intval);
+ return rc;
+}
+
/*******************
* USB PSY GETTERS *
*******************/
@@ -1694,7 +1805,15 @@ int smblib_create_votables(struct smb_charger *chg)
return rc;
}
- chg->fcc_votable = create_votable("FCC", VOTE_MAX,
+ chg->fcc_max_votable = create_votable("FCC_MAX", VOTE_MAX,
+ smblib_fcc_max_vote_callback,
+ chg);
+ if (IS_ERR(chg->fcc_max_votable)) {
+ rc = PTR_ERR(chg->fcc_max_votable);
+ return rc;
+ }
+
+ chg->fcc_votable = create_votable("FCC", VOTE_MIN,
smblib_fcc_vote_callback,
chg);
if (IS_ERR(chg->fcc_votable)) {
@@ -1749,6 +1868,14 @@ int smblib_create_votables(struct smb_charger *chg)
return rc;
}
+ chg->chg_disable_votable = create_votable("CHG_DISABLE", VOTE_SET_ANY,
+ smblib_chg_disable_vote_callback,
+ chg);
+ if (IS_ERR(chg->chg_disable_votable)) {
+ rc = PTR_ERR(chg->chg_disable_votable);
+ return rc;
+ }
+
return rc;
}
@@ -1796,6 +1923,7 @@ int smblib_deinit(struct smb_charger *chg)
{
destroy_votable(chg->usb_suspend_votable);
destroy_votable(chg->dc_suspend_votable);
+ destroy_votable(chg->fcc_max_votable);
destroy_votable(chg->fcc_votable);
destroy_votable(chg->fv_votable);
destroy_votable(chg->usb_icl_votable);
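
To summarize the new charge-current plumbing introduced above: FCC_MAX aggregates its clients with VOTE_MAX and its callback re-casts the winning value on FCC under the FCC_MAX_RESULT client, while FCC itself now aggregates with VOTE_MIN, so thermal mitigation can only lower the effective current. A hypothetical sequence using the voter API and client names from this patch (the current values are made up, and the helper is a sketch, not part of the patch):

/* Illustrative only -- assumes the smb-lib.h / pmic-voter context above. */
static void example_fcc_flow(struct smb_charger *chg)
{
	/* The device-tree cap of 3 A lands on FCC_MAX (VOTE_MAX)... */
	vote(chg->fcc_max_votable, DEFAULT_VOTER, true, 3000000);
	/*
	 * ...and smblib_fcc_max_vote_callback() forwards the FCC_MAX
	 * result to FCC as the FCC_MAX_RESULT client.
	 */

	/*
	 * Thermal mitigation votes directly on FCC; since FCC now uses
	 * VOTE_MIN, the effective charge current drops to 1500000 uA.
	 */
	vote(chg->fcc_votable, THERMAL_DAEMON, true, 1500000);
}
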
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index 2e35e1e3b174..1521fdb3fccf 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -31,6 +31,8 @@ enum print_reason {
#define CHG_STATE_VOTER "CHG_STATE_VOTER"
#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER"
#define TAPER_END_VOTER "TAPER_END_VOTER"
+#define FCC_MAX_RESULT "FCC_MAX_RESULT"
+#define THERMAL_DAEMON "THERMAL_DAEMON"
enum smb_mode {
PARALLEL_MASTER = 0,
@@ -93,6 +95,7 @@ struct smb_charger {
/* power supplies */
struct power_supply *batt_psy;
struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
struct power_supply_desc usb_psy_desc;
/* parallel charging */
@@ -106,6 +109,7 @@ struct smb_charger {
/* votables */
struct votable *usb_suspend_votable;
struct votable *dc_suspend_votable;
+ struct votable *fcc_max_votable;
struct votable *fcc_votable;
struct votable *fv_votable;
struct votable *usb_icl_votable;
@@ -113,6 +117,7 @@ struct smb_charger {
struct votable *pd_allowed_votable;
struct votable *awake_votable;
struct votable *pl_disable_votable;
+ struct votable *chg_disable_votable;
/* work */
struct work_struct pl_detect_work;
@@ -125,6 +130,10 @@ struct smb_charger {
int voltage_max_uv;
bool pd_active;
bool vbus_present;
+
+ int system_temp_level;
+ int thermal_levels;
+ int *thermal_mitigation;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -171,8 +180,22 @@ int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_batt_health(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+ union power_supply_propval *val);
+
int smblib_set_prop_input_suspend(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+ const union power_supply_propval *val);
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val);
int smblib_get_prop_usb_present(struct smb_charger *chg,
union power_supply_propval *val);
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index 5af01c229f01..b03e8a7e0403 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -641,6 +641,9 @@ enum {
#define WIPWR_RANGE_STATUS_REG (DCIN_BASE + 0x08)
#define WIPWR_RANGE_STATUS_MASK GENMASK(4, 0)
+#define DC_INT_RT_STS_REG (DCIN_BASE + 0x10)
+#define DCIN_PLUGIN_RT_STS_BIT BIT(4)
+
/* DCIN Interrupt Bits */
#define WIPWR_VOLTAGE_RANGE_RT_STS_BIT BIT(7)
#define DCIN_ICL_CHANGE_RT_STS_BIT BIT(6)
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index 9a6baff27dac..11d936762e3c 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -564,7 +564,7 @@ static int smb138x_init_hw(struct smb138x *chip)
}
/* enable the charging path */
- rc = smblib_enable_charging(chg, true);
+ rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
if (rc < 0) {
dev_err(chg->dev, "Couldn't enable charging rc=%d\n", rc);
return rc;
@@ -857,7 +857,9 @@ static int smb138x_slave_probe(struct smb138x *chip)
}
/* enable the charging path */
- rc = smblib_enable_charging(chg, true);
+ rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
+ CHARGING_ENABLE_CMD_BIT,
+ CHARGING_ENABLE_CMD_BIT);
if (rc < 0) {
dev_err(chg->dev, "Couldn't enable charging rc=%d\n", rc);
return rc;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index eb515721dfde..d49d8606da15 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -134,24 +134,24 @@ static bool have_full_constraints(void)
return has_full_constraints || of_have_populated_dt();
}
+static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev)
+{
+ if (rdev && rdev->supply)
+ return rdev->supply->rdev;
+
+ return NULL;
+}
+
/**
* regulator_lock_supply - lock a regulator and its supplies
* @rdev: regulator source
*/
static void regulator_lock_supply(struct regulator_dev *rdev)
{
- struct regulator *supply;
- int i = 0;
-
- while (1) {
- mutex_lock_nested(&rdev->mutex, i++);
- supply = rdev->supply;
-
- if (!rdev->supply)
- return;
+ int i;
- rdev = supply->rdev;
- }
+ for (i = 0; rdev->supply; rdev = rdev_get_supply(rdev), i++)
+ mutex_lock_nested(&rdev->mutex, i);
}
/**
diff --git a/drivers/regulator/cpr3-mmss-regulator.c b/drivers/regulator/cpr3-mmss-regulator.c
index e5055708a871..232bcf8fcf31 100644
--- a/drivers/regulator/cpr3-mmss-regulator.c
+++ b/drivers/regulator/cpr3-mmss-regulator.c
@@ -50,6 +50,9 @@
* @limitation: CPR limitation select fuse parameter value
* @aging_init_quot_diff: Initial quotient difference between CPR aging
* min and max sensors measured at time of manufacturing
+ * @force_highest_corner: Flag indicating that all corners must operate
+ * at the voltage of the highest corner. This is
+ * applicable to MSMCOBALT only.
*
* This struct holds the values for all of the fuses read from memory.
*/
@@ -60,6 +63,7 @@ struct cpr3_msm8996_mmss_fuses {
u64 cpr_fusing_rev;
u64 limitation;
u64 aging_init_quot_diff;
+ u64 force_highest_corner;
};
/* Fuse combos 0 - 7 map to CPR fusing revision 0 - 7 */
@@ -158,6 +162,12 @@ msmcobalt_mmss_offset_voltage_param[MSM8996_MMSS_FUSE_CORNERS][2] = {
{{65, 44, 47}, {} },
};
+static const struct cpr3_fuse_param
+msmcobalt_cpr_force_highest_corner_param[] = {
+ {100, 45, 45},
+ {},
+};
+
#define MSM8996PRO_SOC_ID 4
#define MSMCOBALT_SOC_ID 5
@@ -243,6 +253,12 @@ enum msmcobalt_cpr_partial_binning {
MSMCOBALT_CPR_PARTIAL_BINNING_SAFE_CORNER = 0xE,
};
+/*
+ * The partial binning open-loop voltage fuse values only apply to the lowest
+ * two fuse corners (0 and 1, i.e. MinSVS and SVS).
+ */
+#define MSMCOBALT_CPR_PARTIAL_BINNING_MAX_FUSE_CORNER 1
+
/**
* cpr3_msm8996_mmss_read_fuse_data() - load MMSS specific fuse parameter values
* @vreg: Pointer to the CPR3 regulator
@@ -338,6 +354,19 @@ static int cpr3_msm8996_mmss_read_fuse_data(struct cpr3_regulator *vreg)
}
if (vreg->thread->ctrl->soc_revision == MSMCOBALT_SOC_ID) {
+ rc = cpr3_read_fuse_param(base,
+ msmcobalt_cpr_force_highest_corner_param,
+ &fuse->force_highest_corner);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read CPR force highest corner fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+ if (fuse->force_highest_corner)
+ cpr3_info(vreg, "Fusing requires all operation at the highest corner\n");
+ }
+
+ if (vreg->thread->ctrl->soc_revision == MSMCOBALT_SOC_ID) {
combo_max = CPR3_MSMCOBALT_MMSS_FUSE_COMBO_COUNT;
vreg->fuse_combo = fuse->cpr_fusing_rev;
} else if (vreg->thread->ctrl->soc_revision == MSM8996PRO_SOC_ID) {
@@ -738,7 +767,8 @@ static int cpr3_msm8996_mmss_calculate_open_loop_voltages(
*/
if (is_msmcobalt &&
(volt_init == MSMCOBALT_CPR_PARTIAL_BINNING_NEXT_CORNER ||
- volt_init == MSMCOBALT_CPR_PARTIAL_BINNING_SAFE_CORNER))
+ volt_init == MSMCOBALT_CPR_PARTIAL_BINNING_SAFE_CORNER) &&
+ i <= MSMCOBALT_CPR_PARTIAL_BINNING_MAX_FUSE_CORNER)
volt_init = MSM8996_MMSS_MIN_VOLTAGE_FUSE_VAL;
fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse(ref_volt[i],
@@ -849,19 +879,43 @@ static int cpr3_msmcobalt_partial_binning_override(struct cpr3_regulator *vreg)
u32 proc_freq;
struct cpr3_corner *corner;
struct cpr3_corner *safe_corner;
- int i, j, low, high, safe_fuse_corner;
+ int i, j, low, high, safe_fuse_corner, max_fuse_corner;
if (vreg->thread->ctrl->soc_revision != MSMCOBALT_SOC_ID)
return 0;
- /* Loop over all fuse corners except for the highest one. */
- for (i = 0; i < vreg->fuse_corner_count - 1; i++) {
+ /* Handle the force highest corner fuse. */
+ if (fuse->force_highest_corner) {
+ cpr3_info(vreg, "overriding CPR parameters for corners 0 to %d with quotients and voltages of corner %d\n",
+ vreg->corner_count - 2, vreg->corner_count - 1);
+ corner = &vreg->corner[vreg->corner_count - 1];
+ for (i = 0; i < vreg->corner_count - 1; i++) {
+ proc_freq = vreg->corner[i].proc_freq;
+ vreg->corner[i] = *corner;
+ vreg->corner[i].proc_freq = proc_freq;
+ }
+
+ /*
+ * Return since the potential partial binning fuse values are
+ * superseded by the force highest corner fuse value.
+ */
+ return 0;
+ }
+
+ /*
+ * Allow up to the max corner which can be fused with partial
+ * binning values.
+ */
+ max_fuse_corner = min(MSMCOBALT_CPR_PARTIAL_BINNING_MAX_FUSE_CORNER,
+ vreg->fuse_corner_count - 2);
+
+ for (i = 0; i <= max_fuse_corner; i++) {
/* Determine which higher corners to override with (if any). */
if (fuse->init_voltage[i] != next
&& fuse->init_voltage[i] != safe)
continue;
- for (j = i + 1; j < vreg->fuse_corner_count - 1; j++)
+ for (j = i + 1; j <= max_fuse_corner; j++)
if (fuse->init_voltage[j] != next
&& fuse->init_voltage[j] != safe)
break;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 074878b55a0b..d044f3f273be 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -944,6 +944,7 @@ struct fib {
*/
struct list_head fiblink;
void *data;
+ u32 vector_no;
struct hw_fib *hw_fib_va; /* Actual shared object */
dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
};
@@ -2113,6 +2114,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
const char *aac_driverinfo(struct Scsi_Host *);
+void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
int aac_fib_setup(struct aac_dev *dev);
void aac_fib_map_free(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1f90fe849c9..4cbf54928640 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
void aac_fib_map_free(struct aac_dev *dev)
{
- pci_free_consistent(dev->pdev,
- dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
- dev->hw_fib_va, dev->hw_fib_pa);
+ if (dev->hw_fib_va && dev->max_fib_size) {
+ pci_free_consistent(dev->pdev,
+ (dev->max_fib_size *
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
+ dev->hw_fib_va, dev->hw_fib_pa);
+ }
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
}
+void aac_fib_vector_assign(struct aac_dev *dev)
+{
+ u32 i = 0;
+ u32 vector = 1;
+ struct fib *fibptr = NULL;
+
+ for (i = 0, fibptr = &dev->fibs[i];
+ i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+ i++, fibptr++) {
+ if ((dev->max_msix == 1) ||
+ (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
+ - dev->vector_cap))) {
+ fibptr->vector_no = 0;
+ } else {
+ fibptr->vector_no = vector;
+ vector++;
+ if (vector == dev->max_msix)
+ vector = 1;
+ }
+ }
+}
+
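aac_fib_vector_assign() above spreads fibs round-robin over MSI-X vectors 1..max_msix-1, pinning the last vector_cap fibs (and the single-vector case) to vector 0. The following standalone sketch, with made-up values for can_queue, AAC_NUM_MGT_FIB, max_msix and vector_cap, simply prints the resulting distribution so the assignment rule is easy to see.

#include <stdio.h>

/* Standalone sketch of the assignment rule in aac_fib_vector_assign():
 * the last vector_cap fibs (and the single-vector case) get vector 0,
 * the rest rotate over vectors 1..max_msix-1. All values are made up. */
int main(void)
{
	const int can_queue = 8, num_mgt_fib = 2;	/* hypothetical */
	const int max_msix = 4, vector_cap = 3;		/* hypothetical */
	const int total = can_queue + num_mgt_fib;
	int i, vector = 1;

	for (i = 0; i < total; i++) {
		int vector_no;

		if (max_msix == 1 || i > (total - 1) - vector_cap) {
			vector_no = 0;
		} else {
			vector_no = vector++;
			if (vector == max_msix)
				vector = 1;
		}
		printf("fib %2d -> vector %d\n", i, vector_no);
	}
	return 0;
}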
/**
* aac_fib_setup - setup the fibs
* @dev: Adapter to set up
@@ -151,6 +176,12 @@ int aac_fib_setup(struct aac_dev * dev)
hw_fib_pa = hw_fib_pa +
dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
}
+
+ /*
+ * Assign vector numbers to fibs
+ */
+ aac_fib_vector_assign(dev);
+
/*
* Add the fib chain to the free list
*/
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 3b6e5c67e853..aa6eccb8940b 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1404,8 +1404,18 @@ static int aac_acquire_resources(struct aac_dev *dev)
aac_adapter_enable_int(dev);
- if (!dev->sync_mode)
+ /* max_msix may change after EEH,
+ * so re-assign vectors to fibs.
+ */
+ aac_fib_vector_assign(dev);
+
+ if (!dev->sync_mode) {
+ /* After EEH recovery or suspend/resume, the max_msix count
+ * may change, therefore update it in init as well.
+ */
aac_adapter_start(dev);
+ dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+ }
return 0;
error_iounmap:
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2aa34ea8ceb1..bc0203f3d243 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
break;
if (dev->msi_enabled && dev->max_msix > 1)
atomic_dec(&dev->rrq_outstanding[vector_no]);
- aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
dev->host_rrq[index++] = 0;
+ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
if (index == (vector_no + 1) * dev->vector_cap)
index = vector_no * dev->vector_cap;
dev->host_rrq_idx[vector_no] = index;
@@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
#endif
u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
+ u16 vector_no;
atomic_inc(&q->numpending);
if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
dev->max_msix > 1) {
- u_int16_t vector_no, first_choice = 0xffff;
-
- vector_no = dev->fibs_pushed_no % dev->max_msix;
- do {
- vector_no += 1;
- if (vector_no == dev->max_msix)
- vector_no = 1;
- if (atomic_read(&dev->rrq_outstanding[vector_no]) <
- dev->vector_cap)
- break;
- if (0xffff == first_choice)
- first_choice = vector_no;
- else if (vector_no == first_choice)
- break;
- } while (1);
- if (vector_no == first_choice)
- vector_no = 0;
- atomic_inc(&dev->rrq_outstanding[vector_no]);
- if (dev->fibs_pushed_no == 0xffffffff)
- dev->fibs_pushed_no = 0;
- else
- dev->fibs_pushed_no++;
+ vector_no = fib->vector_no;
fib->hw_fib_va->header.Handle += (vector_no << 16);
+ } else {
+ vector_no = 0;
}
+ atomic_inc(&dev->rrq_outstanding[vector_no]);
+
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
/* Calculate the amount to the fibsize bits */
fibsize = (hdr_size + 127) / 128 - 1;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index b846a4683562..fc6a83188c1e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
case AHC_DEV_Q_TAGGED:
scsi_change_queue_depth(sdev,
dev->openings + dev->active);
+ break;
default:
/*
* We allow the OS to queue 2 untagged transactions to
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index fe0c5143f8e6..758f76e88704 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4470,6 +4470,7 @@ put_shost:
scsi_host_put(phba->shost);
free_kset:
iscsi_boot_destroy_kset(phba->boot_kset);
+ phba->boot_kset = NULL;
return -ENOMEM;
}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 536cd5a80422..43ac62623bf2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4003,13 +4003,17 @@ static ssize_t ipr_store_update_fw(struct device *dev,
struct ipr_sglist *sglist;
char fname[100];
char *src;
- int len, result, dnld_size;
+ char *endline;
+ int result, dnld_size;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- len = snprintf(fname, 99, "%s", buf);
- fname[len-1] = '\0';
+ snprintf(fname, sizeof(fname), "%s", buf);
+
+ endline = strchr(fname, '\n');
+ if (endline)
+ *endline = '\0';
if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index c126966130ab..ce79de822e46 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
ucp[3] = 0;
put_unaligned_be64(info, &ucp[4]);
} else if ((buf[0] & 0x7f) == 0x70) {
- buf[0] |= 0x80;
- put_unaligned_be64(info, &buf[3]);
+ /*
+ * Only set the 'VALID' bit if we can represent the value
+ * correctly; otherwise just fill out the lower bytes and
+ * clear the 'VALID' flag.
+ */
+ if (info <= 0xffffffffUL)
+ buf[0] |= 0x80;
+ else
+ buf[0] &= 0x7f;
+ put_unaligned_be32((u32)info, &buf[3]);
}
return 0;
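The hunk above encodes the SPC rule that a fixed-format sense buffer (response code 0x70/0x71) carries only a 32-bit Information field, so the VALID bit may be set only when the value is representable. A minimal standalone sketch of that rule, using a plain byte buffer and open-coded big-endian stores instead of the kernel's put_unaligned_be32() helper, is shown below.

#include <stdint.h>
#include <stdio.h>

/* Fixed-format sense: Information field lives at bytes 3..6 (32 bits),
 * VALID is bit 0x80 of byte 0 and is set only if the value fits. */
static void set_fixed_sense_info(uint8_t *buf, uint64_t info)
{
	uint32_t lo = (uint32_t)info;

	if (info <= 0xffffffffULL)
		buf[0] |= 0x80;		/* VALID: value is representable */
	else
		buf[0] &= 0x7f;		/* truncated: leave VALID clear */

	buf[3] = lo >> 24;		/* big-endian, as put_unaligned_be32() does */
	buf[4] = lo >> 16;
	buf[5] = lo >> 8;
	buf[6] = lo;
}

int main(void)
{
	uint8_t sense[18] = { 0x70 };

	set_fixed_sense_info(sense, 0x1122334455ULL);	/* does not fit in 32 bits */
	printf("byte0=0x%02x info=%02x%02x%02x%02x\n",
	       (unsigned)sense[0], (unsigned)sense[3], (unsigned)sense[4],
	       (unsigned)sense[5], (unsigned)sense[6]);
	return 0;
}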
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6d6606e1568a..5d81bcc1dc75 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -673,7 +673,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
*/
if (sdkp->lbprz) {
q->limits.discard_alignment = 0;
- q->limits.discard_granularity = 1;
+ q->limits.discard_granularity = logical_block_size;
} else {
q->limits.discard_alignment = sdkp->unmap_alignment *
logical_block_size;
@@ -1300,18 +1300,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdp = sdkp->device;
struct Scsi_Host *host = sdp->host;
+ sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
int diskinfo[4];
/* default to most commonly used values */
- diskinfo[0] = 0x40; /* 1 << 6 */
- diskinfo[1] = 0x20; /* 1 << 5 */
- diskinfo[2] = sdkp->capacity >> 11;
-
+ diskinfo[0] = 0x40; /* 1 << 6 */
+ diskinfo[1] = 0x20; /* 1 << 5 */
+ diskinfo[2] = capacity >> 11;
+
/* override with calculated, extended default, or driver values */
if (host->hostt->bios_param)
- host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
+ host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
else
- scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
+ scsicam_bios_param(bdev, capacity, diskinfo);
geo->heads = diskinfo[0];
geo->sectors = diskinfo[1];
@@ -2281,14 +2282,6 @@ got_data:
if (sdkp->capacity > 0xffffffff)
sdp->use_16_for_rw = 1;
- /* Rescale capacity to 512-byte units */
- if (sector_size == 4096)
- sdkp->capacity <<= 3;
- else if (sector_size == 2048)
- sdkp->capacity <<= 2;
- else if (sector_size == 1024)
- sdkp->capacity <<= 1;
-
blk_queue_physical_block_size(sdp->request_queue,
sdkp->physical_block_size);
sdkp->device->sector_size = sector_size;
@@ -2739,11 +2732,6 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
return 0;
}
-static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
-{
- return blocks << (ilog2(sdev->sector_size) - 9);
-}
-
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -2827,7 +2815,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
- set_capacity(disk, sdkp->capacity);
+ set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
kfree(buffer);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5f2a84aff29f..654630bb7d0e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -65,7 +65,7 @@ struct scsi_disk {
struct device dev;
struct gendisk *disk;
atomic_t openers;
- sector_t capacity; /* size in 512-byte sectors */
+ sector_t capacity; /* size in logical blocks */
u32 max_xfer_blocks;
u32 opt_xfer_blocks;
u32 max_ws_blocks;
@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
return 0;
}
+static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
+{
+ return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
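With capacity now kept in logical blocks, logical_to_sectors() converts to 512-byte sectors by shifting left by ilog2(sector_size) - 9 (a shift of 0 for 512-byte and 3 for 4096-byte blocks). The short standalone sketch below works through that arithmetic with a hypothetical capacity and a local ilog2 stand-in rather than the kernel helper.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's ilog2(): floor(log2(v)). */
static unsigned int my_ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t capacity_blocks = 1953525168ULL;	/* hypothetical drive */
	unsigned int sector_sizes[] = { 512, 4096 };
	int i;

	for (i = 0; i < 2; i++)
		printf("%u-byte blocks -> %llu 512-byte sectors\n",
		       sector_sizes[i],
		       (unsigned long long)(capacity_blocks <<
				(my_ilog2(sector_sizes[i]) - 9)));
	return 0;
}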
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined:
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ab184cd4d773..77b2da269d6e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
- if (hp->dxfer_direction == SG_DXFER_TO_DEV)
+ if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
+ (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3fba42ad9fb8..0f636cc4c809 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -889,8 +889,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
- case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
- if ((asc == 0x2a) && (ascq == 0x9)) {
+ case SRB_STATUS_ABORTED:
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
+ (asc == 0x2a) && (ascq == 0x9)) {
do_work = true;
process_err_fn = storvsc_device_scan;
/*
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 3e167f4c0f42..4d406c51d884 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4092,8 +4092,9 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
__func__, ret);
+ ret = ufshcd_link_recovery(hba);
/* Unable to recover the link, so no point proceeding */
- if (ufshcd_link_recovery(hba))
+ if (ret)
BUG();
} else {
dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 98fce5e5c06a..2e6672326a77 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -299,6 +299,26 @@ static void slim_reinit_tx_msgq(struct msm_slim_ctrl *dev)
}
}
+static int ngd_check_hw_status(struct msm_slim_ctrl *dev)
+{
+ void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+ u32 laddr = readl_relaxed(ngd + NGD_STATUS);
+ int ret = 0;
+
+ /* Lost logical addr due to noise */
+ if (!(laddr & NGD_LADDR)) {
+ SLIM_WARN(dev, "NGD lost LADDR: status:0x%x\n", laddr);
+ ret = ngd_slim_power_up(dev, false);
+
+ if (ret) {
+ SLIM_WARN(dev, "slim resume ret:%d, state:%d\n",
+ ret, dev->state);
+ ret = -EREMOTEIO;
+ }
+ }
+ return ret;
+}
+
static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
DECLARE_COMPLETION_ONSTACK(done);
@@ -351,7 +371,6 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
/* If txn is tried when controller is down, wait for ADSP to boot */
if (!report_sat) {
-
if (dev->state == MSM_CTRL_DOWN) {
u8 mc = (u8)txn->mc;
int timeout;
@@ -418,6 +437,12 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
msm_slim_put_ctrl(dev);
return -EREMOTEIO;
}
+ ret = ngd_check_hw_status(dev);
+ if (ret) {
+ mutex_unlock(&dev->tx_lock);
+ msm_slim_put_ctrl(dev);
+ return ret;
+ }
}
if (txn->mt == SLIM_MSG_MT_CORE &&
@@ -737,6 +762,14 @@ static int ngd_bulk_wr(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
}
mutex_lock(&dev->tx_lock);
}
+
+ ret = ngd_check_hw_status(dev);
+ if (ret) {
+ mutex_unlock(&dev->tx_lock);
+ msm_slim_put_ctrl(dev);
+ return ret;
+ }
+
if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
SLIM_WARN(dev, "bulk wr not supported");
ret = -EPROTONOSUPPORT;
@@ -1080,6 +1113,7 @@ static void ngd_slim_setup(struct msm_slim_ctrl *dev)
} else {
if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
goto setup_tx_msg_path;
+
if ((dev->use_rx_msgqs == MSM_MSGQ_ENABLED) &&
(cfg & NGD_CFG_RX_MSGQ_EN))
goto setup_tx_msg_path;
@@ -1185,7 +1219,7 @@ static void ngd_slim_rx(struct msm_slim_ctrl *dev, u8 *buf)
static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
{
void __iomem *ngd;
- int timeout, ret = 0;
+ int timeout, retries = 0, ret = 0;
enum msm_ctrl_state cur_state = dev->state;
u32 laddr;
u32 rx_msgq;
@@ -1203,16 +1237,24 @@ static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
}
}
+hw_init_retry:
/* No need to vote if controller is not in low power mode */
if (!mdm_restart &&
(cur_state == MSM_CTRL_DOWN || cur_state == MSM_CTRL_ASLEEP)) {
ret = msm_slim_qmi_power_request(dev, true);
if (ret) {
- SLIM_ERR(dev, "SLIM QMI power request failed:%d\n",
- ret);
+ SLIM_WARN(dev, "SLIM power req failed:%d, retry:%d\n",
+ ret, retries);
+ msm_slim_qmi_power_request(dev, false);
+ if (retries < INIT_MX_RETRIES) {
+ retries++;
+ goto hw_init_retry;
+ }
return ret;
}
}
+ retries = 0;
+
if (!dev->ver) {
dev->ver = readl_relaxed(dev->base);
/* Version info in 16 MSbits */
@@ -1276,6 +1318,7 @@ static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
dev->state = MSM_CTRL_DOWN;
}
+capability_retry:
/*
* ADSP power collapse case (OR SSR), where HW was reset
* BAM programming will happen when capability message is received
@@ -1296,7 +1339,16 @@ static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
timeout = wait_for_completion_timeout(&dev->reconf, HZ);
if (!timeout) {
- SLIM_WARN(dev, "capability exchange timed-out\n");
+ u32 cfg = readl_relaxed(dev->base +
+ NGD_BASE(dev->ctrl.nr, dev->ver));
+ laddr = readl_relaxed(ngd + NGD_STATUS);
+ SLIM_WARN(dev,
+ "slim capability time-out:%d, stat:0x%x,cfg:0x%x\n",
+ retries, laddr, cfg);
+ if (retries < INIT_MX_RETRIES) {
+ retries++;
+ goto capability_retry;
+ }
return -ETIMEDOUT;
}
/* multiple transactions waiting on slimbus to power up? */
@@ -1388,12 +1440,11 @@ capability_retry:
SLIM_INFO(dev,
"SLIM SAT: capability exchange successful\n");
- if (prev_state >= MSM_CTRL_ASLEEP)
- complete(&dev->reconf);
- else
+ if (prev_state < MSM_CTRL_ASLEEP)
SLIM_WARN(dev,
- "SLIM: unexpected capability, state:%d\n",
+ "capability due to noise, state:%d\n",
prev_state);
+ complete(&dev->reconf);
/* ADSP SSR, send device_up notifications */
if (prev_state == MSM_CTRL_DOWN)
complete(&dev->qmi.slave_notify);
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 7859d1e79e39..fc0a8d23f573 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -67,7 +67,7 @@
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
-#define INIT_MX_RETRIES 10
+#define INIT_MX_RETRIES 3
#define DEF_RETRY_MS 10
#define MSM_CONCUR_MSG 8
#define SAT_CONCUR_MSG 8
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 45dc329a776e..c45cbfa8a786 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -104,6 +104,16 @@ config MSM_GLINK_SMEM_NATIVE_XPRT
transport to only connecting with entities internal to the
System-on-Chip.
+config MSM_GLINK_SPI_XPRT
+ depends on MSM_GLINK
+ tristate "Generic Link (G-Link) SPI Transport"
+ help
+ G-Link SPI Transport is a transport plug-in developed over the
+ SPI bus. This transport plug-in marshals G-Link commands and
+ data into the appropriate SPI bus wire format and allows for
+ G-Link communication with remote subsystems that are external
+ to the System-on-Chip.
+
config MSM_SPCOM
depends on MSM_GLINK
bool "Secure Processor Communication over GLINK"
@@ -374,6 +384,15 @@ config QCOM_WATCHDOG_V2
deadlocks. It does not run during the bootup process, so it will
not catch any early lockups.
+config QCOM_IRQ_HELPER
+ bool "QCOM Irq Helper"
+ help
+ This enables the IRQ helper module. It exposes two APIs,
+ int irq_blacklist_on(void) and int irq_blacklist_off(void),
+ to other kernel modules.
+ These APIs are used to control the blacklist used
+ by the IRQ balancer.
+
config QCOM_MEMORY_DUMP
bool "Qualcomm Memory Dump Support"
help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index adbf2dc7a166..269b72c68b68 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MSM_GLINK) += glink.o glink_debugfs.o glink_ssr.o
obj-$(CONFIG_MSM_GLINK_LOOPBACK_SERVER) += glink_loopback_server.o
obj-$(CONFIG_MSM_GLINK_SMD_XPRT) += glink_smd_xprt.o
obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT)+= glink_smem_native_xprt.o
+obj-$(CONFIG_MSM_GLINK_SPI_XPRT) += glink_spi_xprt.o
obj-$(CONFIG_MSM_SMEM_LOGGING) += smem_log.o
obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
obj-$(CONFIG_ARCH_MSM8996) += kryo-l2-accessors.o
@@ -67,6 +68,7 @@ obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o
obj-$(CONFIG_QCOM_DCC) += dcc.o
obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
obj-$(CONFIG_QCOM_COMMON_LOG) += common_log.o
+obj-$(CONFIG_QCOM_IRQ_HELPER) += irq-helper.o
obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o
obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o
obj-$(CONFIG_SOC_BUS) += socinfo.o
diff --git a/drivers/soc/qcom/core_ctl_helper.c b/drivers/soc/qcom/core_ctl_helper.c
index 3dde30d29a1c..88201412128e 100644
--- a/drivers/soc/qcom/core_ctl_helper.c
+++ b/drivers/soc/qcom/core_ctl_helper.c
@@ -72,22 +72,28 @@ EXPORT_SYMBOL(core_ctl_find_cpu_device);
int __ref core_ctl_online_core(unsigned int cpu)
{
- int ret;
+ int ret = -EINVAL;
+ struct device *dev = get_cpu_device(cpu);
- lock_device_hotplug();
- ret = device_online(get_cpu_device(cpu));
- unlock_device_hotplug();
+ if (dev) {
+ lock_device_hotplug();
+ ret = device_online(dev);
+ unlock_device_hotplug();
+ }
return ret;
}
EXPORT_SYMBOL(core_ctl_online_core);
int __ref core_ctl_offline_core(unsigned int cpu)
{
- int ret;
+ int ret = -EINVAL;
+ struct device *dev = get_cpu_device(cpu);
- lock_device_hotplug();
- ret = device_offline(get_cpu_device(cpu));
- unlock_device_hotplug();
+ if (dev) {
+ lock_device_hotplug();
+ ret = device_offline(dev);
+ unlock_device_hotplug();
+ }
return ret;
}
EXPORT_SYMBOL(core_ctl_offline_core);
diff --git a/drivers/soc/qcom/gladiator_erp_v2.c b/drivers/soc/qcom/gladiator_erp_v2.c
index 20bb97f1fb16..25c7bd77ae96 100644
--- a/drivers/soc/qcom/gladiator_erp_v2.c
+++ b/drivers/soc/qcom/gladiator_erp_v2.c
@@ -758,7 +758,7 @@ static int gladiator_erp_pm_callback(struct notifier_block *nb,
static int gladiator_erp_v2_probe(struct platform_device *pdev)
{
- int ret;
+ int ret = -1;
struct msm_gladiator_data *msm_gld_data;
msm_gld_data = devm_kzalloc(&pdev->dev,
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 464fe17158cf..57e58a57fab7 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -372,10 +372,10 @@ static struct channel_ctx *ch_name_to_ch_ctx_create(
const char *name);
static void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
- uint32_t riid);
+ uint32_t riid, void *cookie);
static int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
- uint32_t *riid_ptr, size_t *intent_size);
+ uint32_t *riid_ptr, size_t *intent_size, void **cookie);
static struct glink_core_rx_intent *ch_push_local_rx_intent(
struct channel_ctx *ctx, const void *pkt_priv, size_t size);
@@ -1139,11 +1139,12 @@ bool ch_check_duplicate_riid(struct channel_ctx *ctx, int riid)
* @ctx: Local channel context
* @size: Size of Intent
* @riid_ptr: Pointer to return value of remote intent ID
+ * @cookie: Transport-specific cookie to return
*
* This function searches for an RX intent that is >= the requested size.
*/
int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
- uint32_t *riid_ptr, size_t *intent_size)
+ uint32_t *riid_ptr, size_t *intent_size, void **cookie)
{
struct glink_core_rx_intent *intent;
struct glink_core_rx_intent *intent_tmp;
@@ -1177,6 +1178,7 @@ int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
intent->intent_size);
*riid_ptr = intent->id;
*intent_size = intent->intent_size;
+ *cookie = intent->cookie;
kfree(intent);
spin_unlock_irqrestore(
&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
@@ -1192,11 +1194,12 @@ int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
* @ctx: Local channel context
* @size: Size of Intent
* @riid: Remote intent ID
+ * @cookie: Transport-specific cookie to cache
*
* This function adds a remote RX intent to the remote RX intent list.
*/
void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
- uint32_t riid)
+ uint32_t riid, void *cookie)
{
struct glink_core_rx_intent *intent;
unsigned long flags;
@@ -1225,6 +1228,7 @@ void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
}
intent->id = riid;
intent->intent_size = size;
+ intent->cookie = cookie;
spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
list_add_tail(&intent->list, &ctx->rmt_rx_intent_list);
@@ -2794,6 +2798,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
bool is_atomic =
tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
unsigned long flags;
+ void *cookie = NULL;
if (!size)
return -EINVAL;
@@ -2826,7 +2831,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
}
/* find matching rx intent (first-fit algorithm for now) */
- if (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size)) {
+ if (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size, &cookie)) {
if (!(tx_flags & GLINK_TX_REQ_INTENT)) {
/* no rx intent available */
GLINK_ERR_CH(ctx,
@@ -2856,7 +2861,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
}
while (ch_pop_remote_rx_intent(ctx, size, &riid,
- &intent_size)) {
+ &intent_size, &cookie)) {
rwref_get(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
if (is_atomic) {
@@ -2928,7 +2933,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
is_atomic ? GFP_ATOMIC : GFP_KERNEL);
if (!tx_info) {
GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
- ch_push_remote_rx_intent(ctx, intent_size, riid);
+ ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
rwref_read_put(&ctx->ch_state_lhb2);
return -ENOMEM;
}
@@ -2946,6 +2951,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
tx_info->vprovider = vbuf_provider;
tx_info->pprovider = pbuf_provider;
tx_info->intent_size = intent_size;
+ tx_info->cookie = cookie;
/* schedule packet for transmit */
if ((tx_flags & GLINK_TX_SINGLE_THREADED) &&
@@ -3577,6 +3583,10 @@ int glink_xprt_name_to_id(const char *name, uint16_t *id)
*id = SMEM_XPRT_ID;
return 0;
}
+ if (!strcmp(name, "spi")) {
+ *id = SPIV2_XPRT_ID;
+ return 0;
+ }
if (!strcmp(name, "smd_trans")) {
*id = SMD_TRANS_XPRT_ID;
return 0;
@@ -4844,7 +4854,35 @@ static void glink_core_remote_rx_intent_put(struct glink_transport_if *if_ptr,
return;
}
- ch_push_remote_rx_intent(ctx, size, riid);
+ ch_push_remote_rx_intent(ctx, size, riid, NULL);
+ rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_remote_rx_intent_put_cookie() - Receive remote intent with cookie
+ *
+ * @if_ptr: Pointer to transport instance
+ * @rcid: Remote Channel ID
+ * @riid: Remote Intent ID
+ * @size: Size of the remote intent
+ * @cookie: Transport-specific cookie to cache
+ */
+static void glink_core_remote_rx_intent_put_cookie(
+ struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t riid, size_t size, void *cookie)
+{
+ struct channel_ctx *ctx;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ /* unknown rcid received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+
+ ch_push_remote_rx_intent(ctx, size, riid, cookie);
rwref_put(&ctx->ch_state_lhb2);
}
@@ -5050,6 +5088,7 @@ void glink_core_rx_cmd_tx_done(struct glink_transport_if *if_ptr,
struct glink_core_tx_pkt *tx_pkt;
unsigned long flags;
size_t intent_size;
+ void *cookie;
ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
if (!ctx) {
@@ -5082,11 +5121,12 @@ void glink_core_rx_cmd_tx_done(struct glink_transport_if *if_ptr,
ctx->notify_tx_done(ctx, ctx->user_priv, tx_pkt->pkt_priv,
tx_pkt->data ? tx_pkt->data : tx_pkt->iovec);
intent_size = tx_pkt->intent_size;
+ cookie = tx_pkt->cookie;
ch_remove_tx_pending_remote_done(ctx, tx_pkt);
spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
if (reuse)
- ch_push_remote_rx_intent(ctx, intent_size, riid);
+ ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
rwref_put(&ctx->ch_state_lhb2);
}
@@ -5525,6 +5565,8 @@ static struct glink_core_if core_impl = {
.rx_get_pkt_ctx = glink_core_rx_get_pkt_ctx,
.rx_put_pkt_ctx = glink_core_rx_put_pkt_ctx,
.rx_cmd_remote_rx_intent_put = glink_core_remote_rx_intent_put,
+ .rx_cmd_remote_rx_intent_put_cookie =
+ glink_core_remote_rx_intent_put_cookie,
.rx_cmd_remote_rx_intent_req = glink_core_rx_cmd_remote_rx_intent_req,
.rx_cmd_rx_intent_req_ack = glink_core_rx_cmd_rx_intent_req_ack,
.rx_cmd_tx_done = glink_core_rx_cmd_tx_done,
diff --git a/drivers/soc/qcom/glink_core_if.h b/drivers/soc/qcom/glink_core_if.h
index 93c59d9c4aa1..14113305a50e 100644
--- a/drivers/soc/qcom/glink_core_if.h
+++ b/drivers/soc/qcom/glink_core_if.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -64,6 +64,7 @@ struct glink_core_version {
* iovec: Pointer to vector buffer if the transport passes a vector buffer
* vprovider: Virtual address-space buffer provider for a vector buffer
* pprovider: Physical address-space buffer provider for a vector buffer
+ * cookie: Private transport specific cookie
* pkt_priv: G-Link core owned packet-private data
* list: G-Link core owned list node
* bounce_buf: Pointer to the temporary/internal bounce buffer
@@ -78,6 +79,7 @@ struct glink_core_rx_intent {
void *iovec;
void * (*vprovider)(void *iovec, size_t offset, size_t *size);
void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+ void *cookie;
/* G-Link-Core-owned elements - please ignore */
struct list_head list;
@@ -151,6 +153,9 @@ struct glink_core_if {
struct glink_core_rx_intent *intent_ptr, bool complete);
void (*rx_cmd_remote_rx_intent_put)(struct glink_transport_if *if_ptr,
uint32_t rcid, uint32_t riid, size_t size);
+ void (*rx_cmd_remote_rx_intent_put_cookie)(
+ struct glink_transport_if *if_ptr, uint32_t rcid,
+ uint32_t riid, size_t size, void *cookie);
void (*rx_cmd_tx_done)(struct glink_transport_if *if_ptr, uint32_t rcid,
uint32_t riid, bool reuse);
void (*rx_cmd_remote_rx_intent_req)(struct glink_transport_if *if_ptr,
diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c
new file mode 100644
index 000000000000..6c91ac54821d
--- /dev/null
+++ b/drivers/soc/qcom/glink_spi_xprt.c
@@ -0,0 +1,2192 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <linux/component.h>
+#include <soc/qcom/tracer_pkt.h>
+#include <sound/wcd-dsp-mgr.h>
+#include <sound/wcd-spi.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+#define XPRT_NAME "spi"
+#define FIFO_ALIGNMENT 16
+#define FIFO_FULL_RESERVE 8
+#define TX_BLOCKED_CMD_RESERVE 16
+#define TRACER_PKT_FEATURE BIT(2)
+#define DEFAULT_FIFO_SIZE 1024
+#define SHORT_PKT_SIZE 16
+#define XPRT_ALIGNMENT 4
+
+#define MAX_INACTIVE_CYCLES 50
+#define POLL_INTERVAL_US 500
+
+#define ACTIVE_TX BIT(0)
+#define ACTIVE_RX BIT(1)
+
+#define ID_MASK 0xFFFFFF
+/**
+ * enum command_types - definition of the types of commands sent/received
+ * @VERSION_CMD: Version and feature set supported
+ * @VERSION_ACK_CMD: Response for @VERSION_CMD
+ * @OPEN_CMD: Open a channel
+ * @CLOSE_CMD: Close a channel
+ * @OPEN_ACK_CMD: Response to @OPEN_CMD
+ * @CLOSE_ACK_CMD: Response for @CLOSE_CMD
+ * @RX_INTENT_CMD: RX intent for a channel is queued
+ * @RX_DONE_CMD: Use of RX intent for a channel is complete
+ * @RX_DONE_W_REUSE_CMD: Same as @RX_DONE_CMD but also reuses the used intent
+ * @RX_INTENT_REQ_CMD: Request to have RX intent queued
+ * @RX_INTENT_REQ_ACK_CMD: Response for @RX_INTENT_REQ_CMD
+ * @TX_DATA_CMD: Start of a data transfer
+ * @TX_DATA_CONT_CMD: Continuation or end of a data transfer
+ * @READ_NOTIF_CMD: Request for a notification when this cmd is read
+ * @SIGNALS_CMD: Sideband signals
+ * @TRACER_PKT_CMD: Start of a Tracer Packet Command
+ * @TRACER_PKT_CONT_CMD: Continuation or end of a Tracer Packet Command
+ * @TX_SHORT_DATA_CMD: Transmit short packets
+ */
+enum command_types {
+ VERSION_CMD,
+ VERSION_ACK_CMD,
+ OPEN_CMD,
+ CLOSE_CMD,
+ OPEN_ACK_CMD,
+ CLOSE_ACK_CMD,
+ RX_INTENT_CMD,
+ RX_DONE_CMD,
+ RX_DONE_W_REUSE_CMD,
+ RX_INTENT_REQ_CMD,
+ RX_INTENT_REQ_ACK_CMD,
+ TX_DATA_CMD,
+ TX_DATA_CONT_CMD,
+ READ_NOTIF_CMD,
+ SIGNALS_CMD,
+ TRACER_PKT_CMD,
+ TRACER_PKT_CONT_CMD,
+ TX_SHORT_DATA_CMD,
+};
+
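Each of these commands is carried over the FIFO as a fixed 16-byte header (matching FIFO_ALIGNMENT), optionally followed by payload, and process_rx_cmd() later in this file walks the RX buffer in those increments. The standalone sketch below fabricates such a buffer and parses it; the wire_cmd layout mirrors the local 'struct command' used in process_rx_cmd() and assumes no structure padding.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative 16-byte wire header; mirrors the local 'struct command'
 * defined in process_rx_cmd() and assumes no padding is inserted. */
struct wire_cmd {
	uint16_t id;
	uint16_t param1;
	uint32_t param2;
	uint32_t param3;
	uint32_t param4;
};

int main(void)
{
	uint8_t rx[2 * sizeof(struct wire_cmd)];
	struct wire_cmd a = { 0, 1, 2, 1024, 0 };	/* VERSION_CMD */
	struct wire_cmd b = { 4, 7, 1, 0, 0 };		/* OPEN_ACK_CMD */
	struct wire_cmd cmd;
	size_t offset = 0;

	memcpy(rx, &a, sizeof(a));
	memcpy(rx + sizeof(a), &b, sizeof(b));

	while (offset < sizeof(rx)) {
		memcpy(&cmd, rx + offset, sizeof(cmd));
		offset += sizeof(cmd);
		printf("cmd %u: param1=%u param2=%u\n",
		       (unsigned)cmd.id, (unsigned)cmd.param1,
		       (unsigned)cmd.param2);
	}
	return 0;
}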
+/**
+ * struct glink_cmpnt - Component to cache WDSP component and its operations
+ * @master_dev: Device structure corresponding to WDSP device.
+ * @master_ops: Operations supported by the WDSP device.
+ */
+struct glink_cmpnt {
+ struct device *master_dev;
+ struct wdsp_mgr_ops *master_ops;
+};
+
+/**
+ * struct edge_info - local information for managing a single complete edge
+ * @xprt_if: The transport interface registered with the
+ * glink core associated with this edge.
+ * @xprt_cfg: The transport configuration for the glink core
+ * associated with this edge.
+ * @subsys_name: Name of the remote subsystem in the edge.
+ * @spi_dev: Pointer to the connecting SPI device.
+ * @fifo_size: Size of the FIFO at the remote end.
+ * @tx_fifo_start: Base Address of the TX FIFO.
+ * @tx_fifo_end: End Address of the TX FIFO.
+ * @rx_fifo_start: Base Address of the RX FIFO.
+ * @rx_fifo_end: End Address of the RX FIFO.
+ * @tx_fifo_read_reg_addr: Address of the TX FIFO Read Index Register.
+ * @tx_fifo_write_reg_addr: Address of the TX FIFO Write Index Register.
+ * @rx_fifo_read_reg_addr: Address of the RX FIFO Read Index Register.
+ * @rx_fifo_write_reg_addr: Address of the RX FIFO Write Index Register.
+ * @kwork: Work to be executed when receiving data.
+ * @kworker: Handle to the entity processing @kwork.
+ * @task: Handle to the task context that runs @kworker.
+ * @use_ref: Active users of this transport grab a
+ * reference. Used for SSR synchronization.
+ * @in_ssr: Signals if this transport is in ssr.
+ * @write_lock: Lock to serialize write/tx operation.
+ * @tx_blocked_queue: Queue of entities waiting for the remote side to
+ * signal the resumption of TX.
+ * @tx_resume_needed: A tx resume signal needs to be sent to the glink
+ * core.
+ * @tx_blocked_signal_sent: Flag to indicate the flush signal has already
+ * been sent, and a response is pending from the
+ * remote side. Protected by @write_lock.
+ * @num_pw_states: Size of @ramp_time_us.
+ * @ramp_time_us: Array of ramp times in microseconds where array
+ * index position represents a power state.
+ * @activity_flag: Flag indicating active TX and RX.
+ * @activity_lock: Lock to synchronize access to activity flag.
+ * @cmpnt: Component to interface with the remote device.
+ */
+struct edge_info {
+ struct list_head list;
+ struct glink_transport_if xprt_if;
+ struct glink_core_transport_cfg xprt_cfg;
+ char subsys_name[GLINK_NAME_SIZE];
+ struct spi_device *spi_dev;
+
+ uint32_t fifo_size;
+ uint32_t tx_fifo_start;
+ uint32_t tx_fifo_end;
+ uint32_t rx_fifo_start;
+ uint32_t rx_fifo_end;
+ unsigned int tx_fifo_read_reg_addr;
+ unsigned int tx_fifo_write_reg_addr;
+ unsigned int rx_fifo_read_reg_addr;
+ unsigned int rx_fifo_write_reg_addr;
+
+ struct kthread_work kwork;
+ struct kthread_worker kworker;
+ struct task_struct *task;
+ struct srcu_struct use_ref;
+ bool in_ssr;
+ struct mutex write_lock;
+ wait_queue_head_t tx_blocked_queue;
+ bool tx_resume_needed;
+ bool tx_blocked_signal_sent;
+
+ uint32_t num_pw_states;
+ unsigned long *ramp_time_us;
+
+ uint32_t activity_flag;
+ spinlock_t activity_lock;
+
+ struct glink_cmpnt cmpnt;
+};
+
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+ const struct glink_core_version *version,
+ uint32_t features);
+static DEFINE_SPINLOCK(edge_infos_lock);
+static LIST_HEAD(edge_infos);
+static struct glink_core_version versions[] = {
+ {1, TRACER_PKT_FEATURE, negotiate_features_v1},
+};
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr: The transport for which features are negotiated.
+ * @version: The version negotiated.
+ * @features: The set of requested features.
+ *
+ * Return: The subset of the requested features that can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+ const struct glink_core_version *version,
+ uint32_t features)
+{
+ return features & version->features;
+}
+
+/**
+ * wdsp_suspend() - Vote for the WDSP device suspend
+ * @cmpnt: Component to identify the WDSP device.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int wdsp_suspend(struct glink_cmpnt *cmpnt)
+{
+ if (cmpnt && cmpnt->master_dev &&
+ cmpnt->master_ops && cmpnt->master_ops->suspend)
+ return cmpnt->master_ops->suspend(cmpnt->master_dev);
+ else
+ return -EINVAL;
+}
+
+/**
+ * wdsp_resume() - Vote for the WDSP device resume
+ * @cmpnt: Component to identify the WDSP device.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int wdsp_resume(struct glink_cmpnt *cmpnt)
+{
+ if (cmpnt && cmpnt->master_dev &&
+ cmpnt->master_ops && cmpnt->master_ops->resume)
+ return cmpnt->master_ops->resume(cmpnt->master_dev);
+ else
+ return -EINVAL;
+}
+
+/**
+ * glink_spi_xprt_set_poll_mode() - Set the transport to polling mode
+ * @einfo: Edge information corresponding to the transport.
+ *
+ * This helper function marks the start of RX polling. It prevents the
+ * system from suspending and keeps polling for RX data for a
+ * pre-defined duration.
+ */
+static void glink_spi_xprt_set_poll_mode(struct edge_info *einfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&einfo->activity_lock, flags);
+ einfo->activity_flag |= ACTIVE_RX;
+ spin_unlock_irqrestore(&einfo->activity_lock, flags);
+ if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+ wdsp_resume(&einfo->cmpnt);
+}
+
+/**
+ * glink_spi_xprt_set_irq_mode() - Set the transport to IRQ mode
+ * @einfo: Edge information corresponding to the transport.
+ *
+ * This helper marks the end of RX polling. The system is then allowed
+ * to suspend, and new RX data is handled only through an IRQ.
+ */
+static void glink_spi_xprt_set_irq_mode(struct edge_info *einfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&einfo->activity_lock, flags);
+ einfo->activity_flag &= ~ACTIVE_RX;
+ spin_unlock_irqrestore(&einfo->activity_lock, flags);
+}
+
+/**
+ * glink_spi_xprt_rx_data() - Receive data over SPI bus
+ * @einfo: Edge from which the data has to be received.
+ * @src: Source Address of the RX data.
+ * @dst: Address of the destination RX buffer.
+ * @size: Size of the RX data.
+ *
+ * This function is used to receive data or command as a byte stream from
+ * the remote subsystem over the SPI bus.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_rx_data(struct edge_info *einfo, void *src,
+ void *dst, uint32_t size)
+{
+ struct wcd_spi_msg spi_msg;
+
+ memset(&spi_msg, 0, sizeof(spi_msg));
+ spi_msg.data = dst;
+ spi_msg.remote_addr = (uint32_t)(size_t)src;
+ spi_msg.len = (size_t)size;
+ return wcd_spi_data_read(einfo->spi_dev, &spi_msg);
+}
+
+/**
+ * glink_spi_xprt_tx_data() - Transmit data over SPI bus
+ * @einfo: Edge to which the data has to be transmitted.
+ * @src: Address of the TX buffer.
+ * @dst: Destination address of the TX data.
+ * @size: Size of the TX data.
+ *
+ * This function is used to transmit data or command as a byte stream to
+ * the remote subsystem over the SPI bus.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_tx_data(struct edge_info *einfo, void *src,
+ void *dst, uint32_t size)
+{
+ struct wcd_spi_msg spi_msg;
+
+ memset(&spi_msg, 0, sizeof(spi_msg));
+ spi_msg.data = src;
+ spi_msg.remote_addr = (uint32_t)(size_t)dst;
+ spi_msg.len = (size_t)size;
+ return wcd_spi_data_write(einfo->spi_dev, &spi_msg);
+}
+
+/**
+ * glink_spi_xprt_reg_read() - Read the TX/RX FIFO Read/Write Index registers
+ * @einfo: Edge from which the registers have to be read.
+ * @reg_addr: Address of the register to be read.
+ * @data: Buffer into which the register data has to be read.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_reg_read(struct edge_info *einfo, u32 reg_addr,
+ uint32_t *data)
+{
+ int rc;
+
+ rc = glink_spi_xprt_rx_data(einfo, (void *)(unsigned long)reg_addr,
+ data, sizeof(*data));
+ if (!rc)
+ *data = *data & ID_MASK;
+ return rc;
+}
+
+/**
+ * glink_spi_xprt_reg_write() - Write the TX/RX FIFO Read/Write Index registers
+ * @einfo: Edge to which the registers have to be written.
+ * @reg_addr: Address of the registers to be written.
+ * @data: Data to be written to the registers.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_reg_write(struct edge_info *einfo, u32 reg_addr,
+ uint32_t data)
+{
+ return glink_spi_xprt_tx_data(einfo, &data,
+ (void *)(unsigned long)reg_addr, sizeof(data));
+}
+
+/**
+ * glink_spi_xprt_write_avail() - Available Write Space in the remote side
+ * @einfo: Edge information corresponding to the remote side.
+ *
+ * This function reads the TX FIFO Read & Write Index registers from the
+ * remote subsystem and calculates the available write space.
+ *
+ * Return: 0 on error, available write space on success.
+ */
+static int glink_spi_xprt_write_avail(struct edge_info *einfo)
+{
+ uint32_t read_id;
+ uint32_t write_id;
+ int write_avail;
+ int ret;
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->tx_fifo_read_reg_addr,
+ &read_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s tx_fifo_read_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->tx_fifo_read_reg_addr);
+ return 0;
+ }
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->tx_fifo_write_reg_addr,
+ &write_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s tx_fifo_write_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->tx_fifo_write_reg_addr);
+ return 0;
+ }
+
+ if (!read_id || !write_id)
+ return 0;
+
+ if (unlikely(!einfo->tx_fifo_start))
+ einfo->tx_fifo_start = write_id;
+
+ if (read_id > write_id)
+ write_avail = read_id - write_id;
+ else
+ write_avail = einfo->fifo_size - (write_id - read_id);
+
+ if (write_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
+ write_avail = 0;
+ else
+ write_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;
+
+ return write_avail;
+}
+
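The write-available computation above is standard circular-FIFO arithmetic with a reserve (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE) held back for flow-control commands. The standalone sketch below, with made-up FIFO size and reserve values, shows the same wrap-around calculation in isolation.

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the circular-FIFO arithmetic above; read_id and
 * write_id are absolute addresses inside the remote FIFO window, and the
 * reserve models FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE. */
static int fifo_write_avail(uint32_t read_id, uint32_t write_id,
			    uint32_t fifo_size, uint32_t reserve)
{
	uint32_t avail;

	if (read_id > write_id)
		avail = read_id - write_id;
	else
		avail = fifo_size - (write_id - read_id);

	return avail < reserve ? 0 : (int)(avail - reserve);
}

int main(void)
{
	uint32_t fifo_start = 0x1000, fifo_size = 1024;	/* hypothetical */

	/* writer 100 bytes ahead of reader: 1024 - 100 = 924, minus reserve */
	printf("avail = %d\n",
	       fifo_write_avail(fifo_start + 16, fifo_start + 116,
				fifo_size, 8 + 16));
	return 0;
}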
+/**
+ * glink_spi_xprt_read_avail() - Available Read Data from the remote side
+ * @einfo: Edge information corresponding to the remote side.
+ *
+ * This function reads the RX FIFO Read & Write Index registers from the
+ * remote subsystem and calculates the available read data size.
+ *
+ * Return: 0 on error, available read data on success.
+ */
+static int glink_spi_xprt_read_avail(struct edge_info *einfo)
+{
+ uint32_t read_id;
+ uint32_t write_id;
+ int read_avail;
+ int ret;
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->rx_fifo_read_reg_addr,
+ &read_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s rx_fifo_read_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->rx_fifo_read_reg_addr);
+ return 0;
+ }
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->rx_fifo_write_reg_addr,
+ &write_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s rx_fifo_write_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->rx_fifo_write_reg_addr);
+ return 0;
+ }
+
+ if (!read_id || !write_id)
+ return 0;
+
+ if (unlikely(!einfo->rx_fifo_start))
+ einfo->rx_fifo_start = read_id;
+
+ if (read_id <= write_id)
+ read_avail = write_id - read_id;
+ else
+ read_avail = einfo->fifo_size - (read_id - write_id);
+ return read_avail;
+}
+
+/**
+ * glink_spi_xprt_rx_cmd() - Receive G-Link commands
+ * @einfo: Edge information corresponding to the remote side.
+ * @dst: Destination buffer where the commands have to be read into.
+ * @size: Size of the data to be read.
+ *
+ * This function is used to receive the commands from the RX FIFO. This
+ * function updates the RX FIFO Read Index after reading the data.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_rx_cmd(struct edge_info *einfo, void *dst,
+ uint32_t size)
+{
+ uint32_t read_id;
+ uint32_t size_to_read = size;
+ uint32_t offset = 0;
+ int ret;
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->rx_fifo_read_reg_addr,
+ &read_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s rx_fifo_read_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->rx_fifo_read_reg_addr);
+ return ret;
+ }
+
+ do {
+ if ((read_id + size_to_read) >=
+ (einfo->rx_fifo_start + einfo->fifo_size))
+ size_to_read = einfo->rx_fifo_start + einfo->fifo_size
+ - read_id;
+ ret = glink_spi_xprt_rx_data(einfo, (void *)(size_t)read_id,
+ dst + offset, size_to_read);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading data\n", __func__, ret);
+ return ret;
+ }
+ read_id += size_to_read;
+ offset += size_to_read;
+ if (read_id >= (einfo->rx_fifo_start + einfo->fifo_size))
+ read_id = einfo->rx_fifo_start;
+ size_to_read = size - offset;
+ } while (size_to_read);
+
+ ret = glink_spi_xprt_reg_write(einfo, einfo->rx_fifo_read_reg_addr,
+ read_id);
+ if (ret < 0)
+ pr_err("%s: Error %d writing %s rx_fifo_read_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->rx_fifo_read_reg_addr);
+ return ret;
+}
+
+/**
+ * glink_spi_xprt_tx_cmd_safe() - Transmit G-Link commands
+ * @einfo: Edge information corresponding to the remote subsystem.
+ * @src: Source buffer containing the G-Link command.
+ * @size: Size of the command to transmit.
+ *
+ * This function is used to transmit the G-Link commands. This function
+ * must be called with einfo->write_lock locked.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_tx_cmd_safe(struct edge_info *einfo, void *src,
+ uint32_t size)
+{
+ uint32_t write_id;
+ uint32_t size_to_write = size;
+ uint32_t offset = 0;
+ int ret;
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->tx_fifo_write_reg_addr,
+ &write_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s tx_fifo_write_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->tx_fifo_write_reg_addr);
+ return ret;
+ }
+
+ do {
+ if ((write_id + size_to_write) >=
+ (einfo->tx_fifo_start + einfo->fifo_size))
+ size_to_write = einfo->tx_fifo_start + einfo->fifo_size
+ - write_id;
+ ret = glink_spi_xprt_tx_data(einfo, src + offset,
+ (void *)(size_t)write_id, size_to_write);
+ if (ret < 0) {
+ pr_err("%s: Error %d writing data\n", __func__, ret);
+ return ret;
+ }
+ write_id += size_to_write;
+ offset += size_to_write;
+ if (write_id >= (einfo->tx_fifo_start + einfo->fifo_size))
+ write_id = einfo->tx_fifo_start;
+ size_to_write = size - offset;
+ } while (size_to_write);
+
+ ret = glink_spi_xprt_reg_write(einfo, einfo->tx_fifo_write_reg_addr,
+ write_id);
+ if (ret < 0)
+ pr_err("%s: Error %d writing %s tx_fifo_write_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->tx_fifo_write_reg_addr);
+ return ret;
+}
+
+/**
+ * send_tx_blocked_signal() - Send flow control request message
+ * @einfo: Edge information corresponding to the remote subsystem.
+ *
+ * This function is used to send a message to the remote subsystem indicating
+ * that the local subsystem is waiting for write space. The remote
+ * subsystem, on receiving this message, will send a resume-TX message.
+ */
+static void send_tx_blocked_signal(struct edge_info *einfo)
+{
+ struct read_notif_request {
+ uint16_t cmd;
+ uint16_t reserved;
+ uint32_t reserved2;
+ uint64_t reserved3;
+ };
+ struct read_notif_request read_notif_req = {0};
+
+ read_notif_req.cmd = READ_NOTIF_CMD;
+
+ if (!einfo->tx_blocked_signal_sent) {
+ einfo->tx_blocked_signal_sent = true;
+ glink_spi_xprt_tx_cmd_safe(einfo, &read_notif_req,
+ sizeof(read_notif_req));
+ }
+}
+
+/**
+ * glink_spi_xprt_tx_cmd() - Transmit G-Link commands
+ * @einfo: Edge information corresponding to the remote subsystem.
+ * @src: Source buffer containing the G-Link command.
+ * @size: Size of the command to transmit.
+ *
+ * This function is used to transmit the G-Link commands. This function
+ * might sleep if space is not available to transmit the command.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_tx_cmd(struct edge_info *einfo, void *src,
+ uint32_t size)
+{
+ int ret;
+ DEFINE_WAIT(wait);
+
+ mutex_lock(&einfo->write_lock);
+ while (glink_spi_xprt_write_avail(einfo) < size) {
+ send_tx_blocked_signal(einfo);
+ prepare_to_wait(&einfo->tx_blocked_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (glink_spi_xprt_write_avail(einfo) < size &&
+ !einfo->in_ssr) {
+ mutex_unlock(&einfo->write_lock);
+ schedule();
+ mutex_lock(&einfo->write_lock);
+ }
+ finish_wait(&einfo->tx_blocked_queue, &wait);
+ if (einfo->in_ssr) {
+ mutex_unlock(&einfo->write_lock);
+ return -EFAULT;
+ }
+ }
+ ret = glink_spi_xprt_tx_cmd_safe(einfo, src, size);
+ mutex_unlock(&einfo->write_lock);
+ return ret;
+}
+
+/**
+ * process_rx_data() - process received data from an edge
+ * @einfo: The edge the data is received on.
+ * @cmd_id: ID to specify the type of data.
+ * @rcid: The remote channel id associated with the data.
+ * @intent_id: The intent the data should be put in.
+ * @src: Address of the source buffer from which the data
+ * is read.
+ * @frag_size: Size of the data fragment to read.
+ * @size_remaining: Size of data left to be read in this packet.
+ */
+static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
+ uint32_t rcid, uint32_t intent_id, void *src,
+ uint32_t frag_size, uint32_t size_remaining)
+{
+ struct glink_core_rx_intent *intent;
+ int rc = 0;
+
+ intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
+ &einfo->xprt_if, rcid, intent_id);
+ if (intent == NULL) {
+ GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
+ intent_id);
+ return;
+ } else if (intent->data == NULL) {
+ GLINK_ERR("%s: intent for ch %d liid %d has no data buff\n",
+ __func__, rcid, intent_id);
+ return;
+ } else if (intent->intent_size - intent->write_offset < frag_size ||
+ intent->write_offset + size_remaining > intent->intent_size) {
+ GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
+ __func__, frag_size, size_remaining,
+ "will overflow ch", rcid, "intent", intent_id);
+ return;
+ }
+
+ if (cmd_id == TX_SHORT_DATA_CMD)
+ memcpy(intent->data + intent->write_offset, src, frag_size);
+ else
+ rc = glink_spi_xprt_rx_data(einfo, src,
+ intent->data + intent->write_offset, frag_size);
+ if (rc < 0) {
+ GLINK_ERR("%s: Error %d receiving data %d:%d:%d:%d\n",
+ __func__, rc, rcid, intent_id, frag_size,
+ size_remaining);
+ size_remaining += frag_size;
+ } else {
+ intent->write_offset += frag_size;
+ intent->pkt_size += frag_size;
+
+ if (unlikely((cmd_id == TRACER_PKT_CMD ||
+ cmd_id == TRACER_PKT_CONT_CMD) && !size_remaining)) {
+ tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
+ intent->tracer_pkt = true;
+ }
+ }
+ einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
+ rcid, intent, size_remaining ? false : true);
+}
+
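+/*
+ * Wire format sketch, as implied by the struct command, intent_desc and
+ * rx_desc definitions in process_rx_cmd() below: every command starts with
+ * a 16-byte header (16-bit id, 16-bit param1 and three 32-bit params),
+ * optionally followed by a command specific payload such as the aligned
+ * channel name for OPEN_CMD, intent descriptors for RX_INTENT_CMD or an
+ * rx_desc for the data commands.
+ */
+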
+/**
+ * process_rx_cmd() - Process incoming G-Link commands
+ * @einfo: Edge information corresponding to the remote subsystem.
+ * @rx_data: Buffer which contains the G-Link commands to be processed.
+ * @rx_size: Size of the buffer containing the series of G-Link commands.
+ *
+ * This function is used to parse and process a series of G-Link commands
+ * received in a buffer.
+ */
+static void process_rx_cmd(struct edge_info *einfo,
+ void *rx_data, int rx_size)
+{
+ struct command {
+ uint16_t id;
+ uint16_t param1;
+ uint32_t param2;
+ uint32_t param3;
+ uint32_t param4;
+ };
+ struct intent_desc {
+ uint32_t size;
+ uint32_t id;
+ uint64_t addr;
+ };
+ struct rx_desc {
+ uint32_t size;
+ uint32_t size_left;
+ uint64_t addr;
+ };
+ struct rx_short_data_desc {
+ unsigned char data[SHORT_PKT_SIZE];
+ };
+ struct command *cmd;
+ struct intent_desc *intents;
+ struct rx_desc *rx_descp;
+ struct rx_short_data_desc *rx_sd_descp;
+ int offset = 0;
+ int rcu_id;
+ uint16_t rcid;
+ uint16_t name_len;
+ uint16_t prio;
+ char *name;
+ bool granted;
+ int i;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ while (offset < rx_size) {
+ cmd = (struct command *)(rx_data + offset);
+ offset += sizeof(*cmd);
+ switch (cmd->id) {
+ case VERSION_CMD:
+ if (cmd->param3)
+ einfo->fifo_size = cmd->param3;
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
+ &einfo->xprt_if, cmd->param1, cmd->param2);
+ break;
+
+ case VERSION_ACK_CMD:
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
+ &einfo->xprt_if, cmd->param1, cmd->param2);
+ break;
+
+ case OPEN_CMD:
+ rcid = cmd->param1;
+ name_len = (uint16_t)(cmd->param2 & 0xFFFF);
+ prio = (uint16_t)((cmd->param2 & 0xFFFF0000) >> 16);
+ name = (char *)(rx_data + offset);
+ offset += ALIGN(name_len, FIFO_ALIGNMENT);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+ &einfo->xprt_if, rcid, name, prio);
+ break;
+
+ case CLOSE_CMD:
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_ch_remote_close(
+ &einfo->xprt_if, cmd->param1);
+ break;
+
+ case OPEN_ACK_CMD:
+ prio = (uint16_t)(cmd->param2 & 0xFFFF);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+ &einfo->xprt_if, cmd->param1, prio);
+ break;
+
+ case CLOSE_ACK_CMD:
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+ &einfo->xprt_if, cmd->param1);
+ break;
+
+ case RX_INTENT_CMD:
+ for (i = 0; i < cmd->param2; i++) {
+ intents = (struct intent_desc *)
+ (rx_data + offset);
+ offset += sizeof(*intents);
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_remote_rx_intent_put_cookie(
+ &einfo->xprt_if, cmd->param1,
+ intents->id, intents->size,
+ (void *)(intents->addr));
+ }
+ break;
+
+ case RX_DONE_CMD:
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+ &einfo->xprt_if, cmd->param1, cmd->param2,
+ false);
+ break;
+
+ case RX_INTENT_REQ_CMD:
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_remote_rx_intent_req(
+ &einfo->xprt_if, cmd->param1,
+ cmd->param2);
+ break;
+
+ case RX_INTENT_REQ_ACK_CMD:
+ granted = cmd->param2 == 1 ? true : false;
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_rx_intent_req_ack(&einfo->xprt_if,
+ cmd->param1, granted);
+ break;
+
+ case TX_DATA_CMD:
+ case TX_DATA_CONT_CMD:
+ case TRACER_PKT_CMD:
+ case TRACER_PKT_CONT_CMD:
+ rx_descp = (struct rx_desc *)(rx_data + offset);
+ offset += sizeof(*rx_descp);
+ process_rx_data(einfo, cmd->id, cmd->param1,
+ cmd->param2, (void *)rx_descp->addr,
+ rx_descp->size, rx_descp->size_left);
+ break;
+
+ case TX_SHORT_DATA_CMD:
+ rx_sd_descp = (struct rx_short_data_desc *)
+ (rx_data + offset);
+ offset += sizeof(*rx_sd_descp);
+ process_rx_data(einfo, cmd->id, cmd->param1,
+ cmd->param2, (void *)rx_sd_descp->data,
+ cmd->param3, cmd->param4);
+ break;
+
+ case READ_NOTIF_CMD:
+ break;
+
+ case SIGNALS_CMD:
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
+ &einfo->xprt_if, cmd->param1, cmd->param2);
+ break;
+
+ case RX_DONE_W_REUSE_CMD:
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+ &einfo->xprt_if, cmd->param1,
+ cmd->param2, true);
+ break;
+
+ default:
+ pr_err("Unrecognized command: %d\n", cmd->id);
+ break;
+ }
+ }
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * __rx_worker() - Receive commands on a specific edge
+ * @einfo: Edge to process commands on.
+ *
+ * This function checks the size of data to be received, allocates the
+ * buffer for that data and reads the data from the remote subsystem
+ * into that buffer. It then calls process_rx_cmd() to
+ * parse the received G-Link command sequence. This function will also
+ * poll for the data for a predefined duration for performance reasons.
+ */
+static void __rx_worker(struct edge_info *einfo)
+{
+ uint32_t inactive_cycles = 0;
+ int rx_avail, rc;
+ void *rx_data;
+ int rcu_id;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (unlikely(!einfo->rx_fifo_start)) {
+ rx_avail = glink_spi_xprt_read_avail(einfo);
+ if (!rx_avail) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+ einfo->in_ssr = false;
+ einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+ }
+
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ glink_spi_xprt_set_poll_mode(einfo);
+ while (inactive_cycles < MAX_INACTIVE_CYCLES) {
+ if (einfo->tx_resume_needed &&
+ glink_spi_xprt_write_avail(einfo)) {
+ einfo->tx_resume_needed = false;
+ einfo->xprt_if.glink_core_if_ptr->tx_resume(
+ &einfo->xprt_if);
+ }
+ mutex_lock(&einfo->write_lock);
+ if (einfo->tx_blocked_signal_sent) {
+ wake_up_all(&einfo->tx_blocked_queue);
+ einfo->tx_blocked_signal_sent = false;
+ }
+ mutex_unlock(&einfo->write_lock);
+
+ rx_avail = glink_spi_xprt_read_avail(einfo);
+ if (!rx_avail) {
+ usleep_range(POLL_INTERVAL_US, POLL_INTERVAL_US + 50);
+ inactive_cycles++;
+ continue;
+ }
+ inactive_cycles = 0;
+
+ rx_data = kzalloc(rx_avail, GFP_KERNEL);
+ if (!rx_data)
+ break;
+
+ rc = glink_spi_xprt_rx_cmd(einfo, rx_data, rx_avail);
+ if (rc < 0) {
+ GLINK_ERR("%s: Error %d receiving data\n",
+ __func__, rc);
+ kfree(rx_data);
+ break;
+ }
+ process_rx_cmd(einfo, rx_data, rx_avail);
+ kfree(rx_data);
+ }
+ glink_spi_xprt_set_irq_mode(einfo);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
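+
+/*
+ * Polling note, restating the loop above: the worker stays in poll mode,
+ * sleeping POLL_INTERVAL_US between empty reads, and re-arms interrupt mode
+ * only after MAX_INACTIVE_CYCLES consecutive empty polls, i.e. roughly
+ * MAX_INACTIVE_CYCLES * POLL_INTERVAL_US microseconds of inactivity.
+ */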
+
+/**
+ * rx_worker() - Worker function to process received commands
+ * @work: kwork associated with the edge to process commands on.
+ */
+static void rx_worker(struct kthread_work *work)
+{
+ struct edge_info *einfo;
+
+ einfo = container_of(work, struct edge_info, kwork);
+ __rx_worker(einfo);
+}
+
+/**
+ * tx_cmd_version() - Convert a version cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @version: The version number to encode.
+ * @features: The features information to encode.
+ */
+static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
+ uint32_t features)
+{
+ struct command {
+ uint16_t id;
+ uint16_t version;
+ uint32_t features;
+ uint32_t fifo_size;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = VERSION_CMD;
+ cmd.version = version;
+ cmd.features = features;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_version_ack() - Convert a version ack cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @version: The version number to encode.
+ * @features: The features information to encode.
+ */
+static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features)
+{
+ struct command {
+ uint16_t id;
+ uint16_t version;
+ uint32_t features;
+ uint32_t fifo_size;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = VERSION_ACK_CMD;
+ cmd.version = version;
+ cmd.features = features;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * set_version() - Activate a negotiated version and feature set
+ * @if_ptr: The transport to configure.
+ * @version: The version to use.
+ * @features: The features to use.
+ *
+ * Return: The supported capabilities of the transport.
+ */
+static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
+ uint32_t features)
+{
+ struct edge_info *einfo;
+ uint32_t ret;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+ }
+
+ ret = GCAP_SIGNALS;
+ if (features & TRACER_PKT_FEATURE)
+ ret |= GCAP_TRACER_PKT;
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return ret;
+}
+
+/**
+ * tx_cmd_ch_open() - Convert a channel open cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @name: The channel name to encode.
+ * @req_xprt: The transport the core would like to migrate this channel to.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
+ const char *name, uint16_t req_xprt)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint16_t length;
+ uint16_t req_xprt;
+ uint64_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ uint32_t buf_size;
+ void *buf;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = OPEN_CMD;
+ cmd.lcid = lcid;
+ cmd.length = (uint16_t)(strlen(name) + 1);
+ cmd.req_xprt = req_xprt;
+
+ buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -ENOMEM;
+ }
+
+ memcpy(buf, &cmd, sizeof(cmd));
+ memcpy(buf + sizeof(cmd), name, cmd.length);
+
+ glink_spi_xprt_tx_cmd(einfo, buf, buf_size);
+
+ kfree(buf);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_ch_close() - Convert a channel close cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = CLOSE_CMD;
+ cmd.lcid = lcid;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_ch_remote_open_ack() - Convert a channel open ack cmd to wire format
+ * and transmit
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ * @xprt_resp: The response to a transport migration request.
+ */
+static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint16_t xprt_resp)
+{
+ struct command {
+ uint16_t id;
+ uint16_t rcid;
+ uint16_t reserved1;
+ uint16_t xprt_resp;
+ uint64_t reserved2;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = OPEN_ACK_CMD;
+ cmd.rcid = rcid;
+ cmd.xprt_resp = xprt_resp;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_ch_remote_close_ack() - Convert a channel close ack cmd to wire format
+ * and transmit
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ */
+static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t rcid;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = CLOSE_ACK_CMD;
+ cmd.rcid = rcid;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * ssr() - Process a subsystem restart notification of a transport
+ * @if_ptr: The transport to restart
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int ssr(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ einfo->in_ssr = true;
+ wake_up_all(&einfo->tx_blocked_queue);
+
+ synchronize_srcu(&einfo->use_ref);
+ einfo->tx_resume_needed = false;
+ einfo->tx_blocked_signal_sent = false;
+ einfo->tx_fifo_start = 0;
+ einfo->rx_fifo_start = 0;
+ einfo->fifo_size = DEFAULT_FIFO_SIZE;
+ einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);
+
+ return 0;
+}
+
+/**
+ * allocate_rx_intent() - Allocate/reserve space for RX Intent
+ * @if_ptr: The transport the intent is associated with.
+ * @size: size of intent.
+ * @intent: Pointer to the intent structure.
+ *
+ * Assign "data" to the buffer created, since the transport creates a linear
+ * buffer, and assign "iovec" to the "intent" itself, so that the data can
+ * also be passed to a client that accepts only vector buffers.
+ * Note that returning NULL for the pointer is valid (it means that space has
+ * been reserved, but the actual pointer will be provided later).
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
+ struct glink_core_rx_intent *intent)
+{
+ void *t;
+
+ t = kzalloc(size, GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ intent->data = t;
+ intent->iovec = (void *)intent;
+ intent->vprovider = rx_linear_vbuf_provider;
+ intent->pprovider = NULL;
+ return 0;
+}
+
+/**
+ * deallocate_rx_intent() - Deallocate space created for RX Intent
+ * @if_ptr: The transport the intent is associated with.
+ * @intent: Pointer to the intent structure.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent)
+{
+ if (!intent || !intent->data)
+ return -EINVAL;
+
+ kfree(intent->data);
+ intent->data = NULL;
+ intent->iovec = NULL;
+ intent->vprovider = NULL;
+ return 0;
+}
+
+/**
+ * tx_cmd_local_rx_intent() - Convert an rx intent cmd to wire format and
+ * transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The intent size to encode.
+ * @liid: The local intent id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size, uint32_t liid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t count;
+ uint64_t reserved;
+ uint32_t size;
+ uint32_t liid;
+ uint64_t addr;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ if (size > UINT_MAX) {
+ pr_err("%s: size %zu is too large to encode\n", __func__, size);
+ return -EMSGSIZE;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = RX_INTENT_CMD;
+ cmd.lcid = lcid;
+ cmd.count = 1;
+ cmd.size = size;
+ cmd.liid = liid;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_local_rx_done() - Convert an rx done cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @liid: The local intent id to encode.
+ * @reuse: Reuse the consumed intent.
+ */
+static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t liid, bool reuse)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t liid;
+ uint64_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
+ cmd.lcid = lcid;
+ cmd.liid = liid;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_rx_intent_req() - Convert an rx intent request cmd to wire format and
+ * transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The requested intent size to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t size;
+ uint64_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ if (size > UINT_MAX) {
+ pr_err("%s: size %zu is too large to encode\n", __func__, size);
+ return -EMSGSIZE;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = RX_INTENT_REQ_CMD;
+ cmd.lcid = lcid;
+ cmd.size = size;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_remote_rx_intent_req_ack() - Convert an rx intent request ack cmd
+ * to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @granted: The request response to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
+ uint32_t lcid, bool granted)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t response;
+ uint64_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = RX_INTENT_REQ_ACK_CMD;
+ cmd.lcid = lcid;
+ if (granted)
+ cmd.response = 1;
+ else
+ cmd.response = 0;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_set_sigs() - Convert a signals cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @sigs: The signals to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
+ uint32_t sigs)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t sigs;
+ uint64_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = SIGNALS_CMD;
+ cmd.lcid = lcid;
+ cmd.sigs = sigs;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_data() - convert a data/tracer_pkt to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @cmd_id: The command ID to transmit.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
+ uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t riid;
+ uint64_t reserved;
+ uint32_t size;
+ uint32_t size_left;
+ uint64_t addr;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ uint32_t size;
+ void *data_start, *dst = NULL;
+ size_t tx_size = 0;
+ int rcu_id;
+
+ if (pctx->size < pctx->size_remaining) {
+ GLINK_ERR("%s: size remaining exceeds size. Resetting.\n",
+ __func__);
+ pctx->size_remaining = pctx->size;
+ }
+ if (!pctx->size_remaining)
+ return 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ if (cmd_id == TX_DATA_CMD) {
+ if (pctx->size_remaining == pctx->size)
+ cmd.id = TX_DATA_CMD;
+ else
+ cmd.id = TX_DATA_CONT_CMD;
+ } else {
+ if (pctx->size_remaining == pctx->size)
+ cmd.id = TRACER_PKT_CMD;
+ else
+ cmd.id = TRACER_PKT_CONT_CMD;
+ }
+ cmd.lcid = lcid;
+ cmd.riid = pctx->riid;
+ data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+ &tx_size);
+ if (unlikely(!data_start)) {
+ GLINK_ERR("%s: invalid data_start\n", __func__);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EINVAL;
+ }
+ if (tx_size & (XPRT_ALIGNMENT - 1))
+ tx_size = ALIGN(tx_size - SHORT_PKT_SIZE, XPRT_ALIGNMENT);
+ if (likely(pctx->cookie))
+ dst = pctx->cookie + (pctx->size - pctx->size_remaining);
+
+ mutex_lock(&einfo->write_lock);
+ size = glink_spi_xprt_write_avail(einfo);
+ /* Need enough space to write the command */
+ if (size <= sizeof(cmd)) {
+ einfo->tx_resume_needed = true;
+ mutex_unlock(&einfo->write_lock);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EAGAIN;
+ }
+ cmd.addr = 0;
+ cmd.size = tx_size;
+ pctx->size_remaining -= tx_size;
+ cmd.size_left = pctx->size_remaining;
+ if (cmd.id == TRACER_PKT_CMD)
+ tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
+
+ if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+ wdsp_resume(&einfo->cmpnt);
+ glink_spi_xprt_tx_data(einfo, data_start, dst, tx_size);
+ glink_spi_xprt_tx_cmd_safe(einfo, &cmd, sizeof(cmd));
+ GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+ "<SPI>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+ cmd.size_left);
+ mutex_unlock(&einfo->write_lock);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return cmd.size;
+}
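+
+/*
+ * Fragmentation sketch for tx_data() above: the first fragment of a packet
+ * goes out as TX_DATA_CMD (or TRACER_PKT_CMD) and every later fragment as
+ * the corresponding *_CONT_CMD, with cmd.size_left telling the receiver how
+ * much of the packet is still outstanding; process_rx_data() keeps
+ * appending fragments into the matching intent until size_left reaches 0.
+ */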
+
+/**
+ * tx_short_data() - Transmit a short packet in-band along with the command
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_short_data(struct glink_transport_if *if_ptr,
+ uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t riid;
+ uint32_t size;
+ uint32_t size_left;
+ unsigned char data[SHORT_PKT_SIZE];
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ uint32_t size;
+ void *data_start;
+ size_t tx_size = 0;
+ int rcu_id;
+
+ if (pctx->size < pctx->size_remaining) {
+ GLINK_ERR("%s: size remaining exceeds size. Resetting.\n",
+ __func__);
+ pctx->size_remaining = pctx->size;
+ }
+ if (!pctx->size_remaining)
+ return 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = TX_SHORT_DATA_CMD;
+ cmd.lcid = lcid;
+ cmd.riid = pctx->riid;
+ data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+ &tx_size);
+ if (unlikely(!data_start || tx_size > SHORT_PKT_SIZE)) {
+ GLINK_ERR("%s: invalid data_start %p or tx_size %zu\n",
+ __func__, data_start, tx_size);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&einfo->write_lock);
+ size = glink_spi_xprt_write_avail(einfo);
+ /* Need enough space to write the command */
+ if (size <= sizeof(cmd)) {
+ einfo->tx_resume_needed = true;
+ mutex_unlock(&einfo->write_lock);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EAGAIN;
+ }
+ cmd.size = tx_size;
+ pctx->size_remaining -= tx_size;
+ cmd.size_left = pctx->size_remaining;
+ memcpy(cmd.data, data_start, tx_size);
+ if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+ wdsp_resume(&einfo->cmpnt);
+ glink_spi_xprt_tx_cmd_safe(einfo, &cmd, sizeof(cmd));
+ GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+ "<SPI>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+ cmd.size_left);
+ mutex_unlock(&einfo->write_lock);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return cmd.size;
+}
+
+/**
+ * tx() - convert a data transmit cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
+ if (pctx->size_remaining <= SHORT_PKT_SIZE)
+ return tx_short_data(if_ptr, lcid, pctx);
+ return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
+}
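+
+/*
+ * Split done by tx() above, stated as an example: a payload of at most
+ * SHORT_PKT_SIZE bytes is copied in-band into a TX_SHORT_DATA_CMD, while a
+ * larger payload is first written to the remote buffer with
+ * glink_spi_xprt_tx_data() and then announced with a TX_DATA_CMD carrying
+ * only its size and size_left.
+ */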
+
+/**
+ * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
+ return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
+}
+
+/**
+ * wait_link_down() - Check status of read/write indices
+ * @if_ptr: The transport to check
+ *
+ * Return: 1 if indices are all zero, 0 otherwise
+ */
+static int wait_link_down(struct glink_transport_if *if_ptr)
+{
+ return 0;
+}
+
+/**
+ * get_power_vote_ramp_time() - Get the ramp time required for the power
+ * votes to be applied
+ * @if_ptr: The transport interface on which power voting is requested.
+ * @state: The power state for which ramp time is required.
+ *
+ * Return: The ramp time specific to the power state, standard error otherwise.
+ */
+static unsigned long get_power_vote_ramp_time(
+ struct glink_transport_if *if_ptr, uint32_t state)
+{
+ return 0;
+}
+
+/**
+ * power_vote() - Update the power votes to meet the QoS requirement
+ * @if_ptr: The transport interface on which power voting is requested.
+ * @state: The power state for which the voting should be done.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
+{
+ unsigned long flags;
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->activity_lock, flags);
+ einfo->activity_flag |= ACTIVE_TX;
+ spin_unlock_irqrestore(&einfo->activity_lock, flags);
+ return 0;
+}
+
+/**
+ * power_unvote() - Remove all the power votes
+ * @if_ptr: The transport interface on which power voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_unvote(struct glink_transport_if *if_ptr)
+{
+ unsigned long flags;
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->activity_lock, flags);
+ einfo->activity_flag &= ~ACTIVE_TX;
+ spin_unlock_irqrestore(&einfo->activity_lock, flags);
+ return 0;
+}
+
+static int glink_wdsp_cmpnt_init(struct device *dev, void *priv_data)
+{
+ return 0;
+}
+
+static int glink_wdsp_cmpnt_deinit(struct device *dev, void *priv_data)
+{
+ return 0;
+}
+
+static int glink_wdsp_cmpnt_event_handler(struct device *dev,
+ void *priv_data, enum wdsp_event_type event, void *data)
+{
+ struct edge_info *einfo = dev_get_drvdata(dev);
+ struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+ struct device *sdev;
+ struct spi_device *spi_dev;
+
+ switch (event) {
+ case WDSP_EVENT_PRE_BOOTUP:
+ if (cmpnt && cmpnt->master_dev &&
+ cmpnt->master_ops &&
+ cmpnt->master_ops->get_dev_for_cmpnt)
+ sdev = cmpnt->master_ops->get_dev_for_cmpnt(
+ cmpnt->master_dev, WDSP_CMPNT_TRANSPORT);
+ else
+ sdev = NULL;
+
+ if (!sdev) {
+ dev_err(dev, "%s: Failed to get transport device\n",
+ __func__);
+ break;
+ }
+
+ spi_dev = to_spi_device(sdev);
+ einfo->spi_dev = spi_dev;
+ break;
+ case WDSP_EVENT_IPC1_INTR:
+ queue_kthread_work(&einfo->kworker, &einfo->kwork);
+ break;
+ default:
+ pr_debug("%s: unhandled event %d", __func__, event);
+ break;
+ }
+
+ return 0;
+}
+
+/* glink_wdsp_cmpnt_ops - Callback operations registered with the WDSP framework */
+static struct wdsp_cmpnt_ops glink_wdsp_cmpnt_ops = {
+ .init = glink_wdsp_cmpnt_init,
+ .deinit = glink_wdsp_cmpnt_deinit,
+ .event_handler = glink_wdsp_cmpnt_event_handler,
+};
+
+static int glink_component_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct edge_info *einfo = dev_get_drvdata(dev);
+ struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+ int ret = 0;
+
+ cmpnt->master_dev = master;
+ cmpnt->master_ops = data;
+
+ if (cmpnt->master_ops && cmpnt->master_ops->register_cmpnt_ops)
+ ret = cmpnt->master_ops->register_cmpnt_ops(master, dev, einfo,
+ &glink_wdsp_cmpnt_ops);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static void glink_component_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct edge_info *einfo = dev_get_drvdata(dev);
+ struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+
+ cmpnt->master_dev = NULL;
+ cmpnt->master_ops = NULL;
+}
+
+static const struct component_ops glink_component_ops = {
+ .bind = glink_component_bind,
+ .unbind = glink_component_unbind,
+};
+
+/**
+ * init_xprt_if() - Initialize the xprt_if for an edge
+ * @einfo: The edge to initialize.
+ */
+static void init_xprt_if(struct edge_info *einfo)
+{
+ einfo->xprt_if.tx_cmd_version = tx_cmd_version;
+ einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
+ einfo->xprt_if.set_version = set_version;
+ einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
+ einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
+ einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
+ einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
+ einfo->xprt_if.ssr = ssr;
+ einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
+ einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
+ einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
+ einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
+ einfo->xprt_if.tx = tx;
+ einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
+ einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
+ tx_cmd_remote_rx_intent_req_ack;
+ einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
+ einfo->xprt_if.wait_link_down = wait_link_down;
+ einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
+ einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
+ einfo->xprt_if.power_vote = power_vote;
+ einfo->xprt_if.power_unvote = power_unvote;
+}
+
+/**
+ * init_xprt_cfg() - Initialize the xprt_cfg for an edge
+ * @einfo: The edge to initialize.
+ * @name: The name of the remote side this edge communicates to.
+ */
+static void init_xprt_cfg(struct edge_info *einfo, const char *name)
+{
+ einfo->xprt_cfg.name = XPRT_NAME;
+ einfo->xprt_cfg.edge = name;
+ einfo->xprt_cfg.versions = versions;
+ einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
+ einfo->xprt_cfg.max_cid = SZ_64K;
+ einfo->xprt_cfg.max_iid = SZ_2G;
+}
+
+/**
+ * parse_qos_dt_params() - Parse the power states from DT
+ * @node: Reference to the device tree node for a specific edge.
+ * @einfo: Edge information for the edge whose probe function is called.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_qos_dt_params(struct device_node *node,
+ struct edge_info *einfo)
+{
+ int rc;
+ int i;
+ char *key;
+ uint32_t *arr32;
+ uint32_t num_states;
+
+ key = "qcom,ramp-time";
+ if (!of_find_property(node, key, &num_states))
+ return -ENODEV;
+
+ num_states /= sizeof(uint32_t);
+
+ einfo->num_pw_states = num_states;
+
+ arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
+ if (!arr32)
+ return -ENOMEM;
+
+ einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!einfo->ramp_time_us) {
+ rc = -ENOMEM;
+ goto mem_alloc_fail;
+ }
+
+ rc = of_property_read_u32_array(node, key, arr32, num_states);
+ if (rc) {
+ rc = -ENODEV;
+ goto invalid_key;
+ }
+ for (i = 0; i < num_states; i++)
+ einfo->ramp_time_us[i] = arr32[i];
+
+ kfree(arr32);
+ return 0;
+
+invalid_key:
+ kfree(einfo->ramp_time_us);
+mem_alloc_fail:
+ kfree(arr32);
+ return rc;
+}
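+
+/*
+ * Hypothetical device tree fragment for the "qcom,ramp-time" parsing above,
+ * with one entry per power state (values treated as microseconds, going by
+ * the ramp_time_us field they are copied into):
+ *
+ *	qos-node {
+ *		qcom,ramp-time = <10000 5000 1000>;
+ *	};
+ */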
+
+/**
+ * parse_remote_fifo_cfg() - Parse any remote FIFO configuration
+ * @node: Reference to the device tree node for a specific edge.
+ * @einfo: Edge information for the edge whose probe function is called.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_remote_fifo_cfg(struct device_node *node,
+ struct edge_info *einfo)
+{
+ int rc;
+ char *key;
+
+ key = "qcom,out-read-idx-reg";
+ rc = of_property_read_u32(node, key, &einfo->tx_fifo_read_reg_addr);
+ if (rc)
+ goto key_error;
+
+ key = "qcom,out-write-idx-reg";
+ rc = of_property_read_u32(node, key, &einfo->tx_fifo_write_reg_addr);
+ if (rc)
+ goto key_error;
+
+ key = "qcom,in-read-idx-reg";
+ rc = of_property_read_u32(node, key, &einfo->rx_fifo_read_reg_addr);
+ if (rc)
+ goto key_error;
+
+ key = "qcom,in-write-idx-reg";
+ rc = of_property_read_u32(node, key, &einfo->rx_fifo_write_reg_addr);
+ if (rc)
+ goto key_error;
+ return 0;
+
+key_error:
+ pr_err("%s: Error %d parsing key %s\n", __func__, rc, key);
+ return rc;
+}
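+
+/*
+ * Hypothetical "qcom,remote-fifo-config" node matching the keys parsed
+ * above (register offsets are placeholders, not real addresses):
+ *
+ *	fifo-cfg {
+ *		qcom,out-read-idx-reg = <0x1000>;
+ *		qcom,out-write-idx-reg = <0x1004>;
+ *		qcom,in-read-idx-reg = <0x1008>;
+ *		qcom,in-write-idx-reg = <0x100c>;
+ *	};
+ */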
+
+static int glink_spi_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ struct device_node *phandle_node;
+ struct edge_info *einfo;
+ int rc;
+ char *key;
+ const char *subsys_name;
+ unsigned long flags;
+
+ node = pdev->dev.of_node;
+
+ einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+ if (!einfo) {
+ rc = -ENOMEM;
+ goto edge_info_alloc_fail;
+ }
+
+ key = "label";
+ subsys_name = of_get_property(node, key, NULL);
+ if (!subsys_name) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+ strlcpy(einfo->subsys_name, subsys_name, sizeof(einfo->subsys_name));
+
+ init_xprt_cfg(einfo, subsys_name);
+ init_xprt_if(einfo);
+
+ einfo->in_ssr = true;
+ einfo->fifo_size = DEFAULT_FIFO_SIZE;
+ init_kthread_work(&einfo->kwork, rx_worker);
+ init_kthread_worker(&einfo->kworker);
+ init_srcu_struct(&einfo->use_ref);
+ mutex_init(&einfo->write_lock);
+ init_waitqueue_head(&einfo->tx_blocked_queue);
+ spin_lock_init(&einfo->activity_lock);
+
+ spin_lock_irqsave(&edge_infos_lock, flags);
+ list_add_tail(&einfo->list, &edge_infos);
+ spin_unlock_irqrestore(&edge_infos_lock, flags);
+
+ einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+ "spi_%s", subsys_name);
+ if (IS_ERR(einfo->task)) {
+ rc = PTR_ERR(einfo->task);
+ pr_err("%s: kthread run failed %d\n", __func__, rc);
+ goto kthread_fail;
+ }
+
+ key = "qcom,remote-fifo-config";
+ phandle_node = of_parse_phandle(node, key, 0);
+ if (phandle_node)
+ parse_remote_fifo_cfg(phandle_node, einfo);
+
+ key = "qcom,qos-config";
+ phandle_node = of_parse_phandle(node, key, 0);
+ if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
+ &einfo->xprt_cfg)))
+ parse_qos_dt_params(node, einfo);
+
+ rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+ if (rc == -EPROBE_DEFER)
+ goto reg_xprt_fail;
+ if (rc) {
+ pr_err("%s: glink core register transport failed: %d\n",
+ __func__, rc);
+ goto reg_xprt_fail;
+ }
+
+ dev_set_drvdata(&pdev->dev, einfo);
+ if (!strcmp(einfo->xprt_cfg.edge, "wdsp")) {
+ rc = component_add(&pdev->dev, &glink_component_ops);
+ if (rc) {
+ pr_err("%s: component_add failed, err = %d\n",
+ __func__, rc);
+ rc = -ENODEV;
+ goto reg_cmpnt_fail;
+ }
+ }
+ return 0;
+
+reg_cmpnt_fail:
+ dev_set_drvdata(&pdev->dev, NULL);
+ glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+ flush_kthread_worker(&einfo->kworker);
+ kthread_stop(einfo->task);
+ einfo->task = NULL;
+kthread_fail:
+ spin_lock_irqsave(&edge_infos_lock, flags);
+ list_del(&einfo->list);
+ spin_unlock_irqrestore(&edge_infos_lock, flags);
+missing_key:
+ kfree(einfo);
+edge_info_alloc_fail:
+ return rc;
+}
+
+static int glink_spi_remove(struct platform_device *pdev)
+{
+ struct edge_info *einfo;
+ unsigned long flags;
+
+ einfo = (struct edge_info *)dev_get_drvdata(&pdev->dev);
+ glink_core_unregister_transport(&einfo->xprt_if);
+ flush_kthread_worker(&einfo->kworker);
+ kthread_stop(einfo->task);
+ einfo->task = NULL;
+ spin_lock_irqsave(&edge_infos_lock, flags);
+ list_del(&einfo->list);
+ spin_unlock_irqrestore(&edge_infos_lock, flags);
+ kfree(einfo);
+ return 0;
+}
+
+static int glink_spi_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int glink_spi_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ unsigned long flags;
+ struct edge_info *einfo;
+ bool suspend;
+ int rc = -EBUSY;
+
+ einfo = (struct edge_info *)dev_get_drvdata(&pdev->dev);
+ if (strcmp(einfo->xprt_cfg.edge, "wdsp"))
+ return 0;
+
+ spin_lock_irqsave(&einfo->activity_lock, flags);
+ suspend = !(einfo->activity_flag);
+ spin_unlock_irqrestore(&einfo->activity_lock, flags);
+ if (suspend)
+ rc = wdsp_suspend(&einfo->cmpnt);
+ if (rc < 0)
+ pr_err("%s: Could not suspend activity_flag %d, rc %d\n",
+ __func__, einfo->activity_flag, rc);
+ return rc;
+}
+
+static const struct of_device_id spi_match_table[] = {
+ { .compatible = "qcom,glink-spi-xprt" },
+ {},
+};
+
+static struct platform_driver glink_spi_driver = {
+ .probe = glink_spi_probe,
+ .remove = glink_spi_remove,
+ .resume = glink_spi_resume,
+ .suspend = glink_spi_suspend,
+ .driver = {
+ .name = "msm_glink_spi_xprt",
+ .owner = THIS_MODULE,
+ .of_match_table = spi_match_table,
+ },
+};
+
+static int __init glink_spi_xprt_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&glink_spi_driver);
+ if (rc)
+ pr_err("%s: glink_spi register failed %d\n", __func__, rc);
+
+ return rc;
+}
+module_init(glink_spi_xprt_init);
+
+static void __exit glink_spi_xprt_exit(void)
+{
+ platform_driver_unregister(&glink_spi_driver);
+}
+module_exit(glink_spi_xprt_exit);
+
+MODULE_DESCRIPTION("MSM G-Link SPI Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_xprt_if.h b/drivers/soc/qcom/glink_xprt_if.h
index 6242e867fe72..f4d5a3b303db 100644
--- a/drivers/soc/qcom/glink_xprt_if.h
+++ b/drivers/soc/qcom/glink_xprt_if.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@ enum buf_type {
enum xprt_ids {
SMEM_XPRT_ID = 100,
+ SPIV2_XPRT_ID = SMEM_XPRT_ID,
SMD_TRANS_XPRT_ID = 200,
LLOOP_XPRT_ID = 300,
MOCK_XPRT_HIGH_ID = 390,
@@ -56,6 +57,7 @@ enum xprt_ids {
* @iovec: Pointer to the vector buffer packet.
* @vprovider: Packet-specific virtual buffer provider function.
* @pprovider: Packet-specific physical buffer provider function.
+ * @cookie: Transport-specific cookie
* @pkt_ref: Active references to the packet.
*/
struct glink_core_tx_pkt {
@@ -73,6 +75,7 @@ struct glink_core_tx_pkt {
void *iovec;
void * (*vprovider)(void *iovec, size_t offset, size_t *size);
void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+ void *cookie;
struct rwref_lock pkt_ref;
};
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index ea25ed5d0611..e8a9751fa266 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1580,6 +1580,14 @@ int icnss_get_soc_info(struct icnss_soc_info *info)
info->v_addr = penv->mem_base_va;
info->p_addr = penv->mem_base_pa;
+ info->chip_id = penv->chip_info.chip_id;
+ info->chip_family = penv->chip_info.chip_family;
+ info->board_id = penv->board_info.board_id;
+ info->soc_id = penv->soc_info.soc_id;
+ info->fw_version = penv->fw_version_info.fw_version;
+ strlcpy(info->fw_build_timestamp,
+ penv->fw_version_info.fw_build_timestamp,
+ QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1);
return 0;
}
diff --git a/drivers/soc/qcom/irq-helper.c b/drivers/soc/qcom/irq-helper.c
new file mode 100644
index 000000000000..2bb71464d165
--- /dev/null
+++ b/drivers/soc/qcom/irq-helper.c
@@ -0,0 +1,180 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <soc/qcom/irq-helper.h>
+
+struct irq_helper {
+ bool enable;
+ bool deploy;
+ uint32_t count;
+ struct kobject kobj;
+ /* spinlock to protect reference count variable 'count' */
+ spinlock_t lock;
+};
+
+struct irq_helper_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count);
+};
+
+#define IRQ_HELPER_ATTR(_name, _mode, _show, _store) \
+ struct irq_helper_attr irq_helper_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+#define to_irq_helper(kobj) \
+ container_of(kobj, struct irq_helper, kobj)
+
+#define to_irq_helper_attr(_attr) \
+ container_of(_attr, struct irq_helper_attr, attr)
+
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct irq_helper_attr *irq_attr = to_irq_helper_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (irq_attr->show)
+ ret = irq_attr->show(kobj, attr, buf);
+
+ return ret;
+}
+
+static const struct sysfs_ops irq_helper_sysfs_ops = {
+ .show = attr_show,
+};
+
+static struct kobj_type irq_helper_ktype = {
+ .sysfs_ops = &irq_helper_sysfs_ops,
+};
+
+static ssize_t show_deploy(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct irq_helper *irq = to_irq_helper(kobj);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", irq->deploy);
+}
+IRQ_HELPER_ATTR(irq_blacklist_on, 0444, show_deploy, NULL);
+
+static struct irq_helper *irq_h;
+
+int irq_blacklist_on(void)
+{
+ bool flag = false;
+
+ if (!irq_h) {
+ pr_err("%s: init function is not called", __func__);
+ return -EPERM;
+ }
+ if (!irq_h->enable) {
+ pr_err("%s: enable bit is not set up", __func__);
+ return -EPERM;
+ }
+ spin_lock(&irq_h->lock);
+ irq_h->count++;
+ if (!irq_h->deploy) {
+ irq_h->deploy = true;
+ flag = true;
+ }
+ spin_unlock(&irq_h->lock);
+ if (flag)
+ sysfs_notify(&irq_h->kobj, NULL, "irq_blacklist_on");
+ return 0;
+}
+EXPORT_SYMBOL(irq_blacklist_on);
+
+int irq_blacklist_off(void)
+{
+ bool flag = false;
+
+ if (!irq_h) {
+ pr_err("%s: init function is not called", __func__);
+ return -EPERM;
+ }
+ if (!irq_h->enable) {
+ pr_err("%s: enable bit is not set up", __func__);
+ return -EPERM;
+ }
+ spin_lock(&irq_h->lock);
+ if (irq_h->count == 0) {
+ pr_err("%s: ref-count is 0, cannot call irq blacklist off.",
+ __func__);
+ spin_unlock(&irq_h->lock);
+ return -EPERM;
+ }
+ irq_h->count--;
+ if (irq_h->count == 0) {
+ irq_h->deploy = false;
+ flag = true;
+ }
+ spin_unlock(&irq_h->lock);
+
+ if (flag)
+ sysfs_notify(&irq_h->kobj, NULL, "irq_blacklist_on");
+ return 0;
+}
+EXPORT_SYMBOL(irq_blacklist_off);
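+
+/*
+ * Usage sketch (hypothetical caller): a driver brackets a latency critical
+ * window with the two exported helpers; userspace pollers of the
+ * "irq_blacklist_on" sysfs file are notified on the first on() and the
+ * last off() because of the reference count above.
+ *
+ *	if (!irq_blacklist_on()) {
+ *		do_latency_critical_work();	(hypothetical work function)
+ *		irq_blacklist_off();
+ *	}
+ */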
+
+static int __init irq_helper_init(void)
+{
+ int ret;
+
+ irq_h = kzalloc(sizeof(struct irq_helper), GFP_KERNEL);
+ if (!irq_h)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(&irq_h->kobj, &irq_helper_ktype,
+ kernel_kobj, "%s", "irq_helper");
+ if (ret) {
+ pr_err("%s: Error in kobject_init_and_add\n", __func__);
+ goto out_free_irq;
+ }
+
+ ret = sysfs_create_file(&irq_h->kobj,
+ &irq_helper_irq_blacklist_on.attr);
+ if (ret) {
+ pr_err("%s: Error in sysfs_create_file\n", __func__);
+ goto out_put_kobj;
+ }
+
+ spin_lock_init(&irq_h->lock);
+ irq_h->count = 0;
+ irq_h->enable = true;
+ return 0;
+out_put_kobj:
+ kobject_put(&irq_h->kobj);
+out_free_irq:
+ kfree(irq_h);
+ return ret;
+}
+module_init(irq_helper_init);
+
+static void __exit irq_helper_exit(void)
+{
+ sysfs_remove_file(&irq_h->kobj, &irq_helper_irq_blacklist_on.attr);
+ kobject_del(&irq_h->kobj);
+ kobject_put(&irq_h->kobj);
+ kfree(irq_h);
+}
+module_exit(irq_helper_exit);
+MODULE_DESCRIPTION("IRQ Helper APIs");
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index e163dd79b8b9..b055234326b6 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -189,8 +189,8 @@ int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
if (ret)
- pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d\n",
- __func__, &addr, size, desc->subsys_vmid);
+ pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+ __func__, &addr, size, desc->subsys_vmid, ret);
return ret;
}
EXPORT_SYMBOL(pil_assign_mem_to_subsys);
@@ -205,8 +205,8 @@ int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
if (ret)
- panic("%s: failed for %pa address of size %zx - subsys VMid %d. Fatal error.\n",
- __func__, &addr, size, desc->subsys_vmid);
+ panic("%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+ __func__, &addr, size, desc->subsys_vmid, ret);
return ret;
}
@@ -222,8 +222,8 @@ int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 2);
if (ret)
- pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d\n",
- __func__, &addr, size, desc->subsys_vmid);
+ pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+ __func__, &addr, size, desc->subsys_vmid, ret);
return ret;
}
@@ -642,8 +642,8 @@ static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
seg->filesz, desc->map_fw_mem,
desc->unmap_fw_mem, map_data);
if (ret < 0) {
- pil_err(desc, "Failed to locate blob %s or blob is too big.\n",
- fw_name);
+ pil_err(desc, "Failed to locate blob %s or blob is too big(rc:%d)\n",
+ fw_name, ret);
return ret;
}
@@ -679,7 +679,8 @@ static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
if (desc->ops->verify_blob) {
ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
if (ret)
- pil_err(desc, "Blob%u failed verification\n", num);
+ pil_err(desc, "Blob%u failed verification(rc:%d)\n",
+ num, ret);
}
return ret;
@@ -754,7 +755,7 @@ int pil_boot(struct pil_desc *desc)
snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->fw_name);
ret = request_firmware(&fw, fw_name, desc->dev);
if (ret) {
- pil_err(desc, "Failed to locate %s\n", fw_name);
+ pil_err(desc, "Failed to locate %s(rc:%d)\n", fw_name, ret);
goto out;
}
@@ -792,14 +793,14 @@ int pil_boot(struct pil_desc *desc)
desc->priv->unvoted_flag = 0;
ret = pil_proxy_vote(desc);
if (ret) {
- pil_err(desc, "Failed to proxy vote\n");
+ pil_err(desc, "Failed to proxy vote(rc:%d)\n", ret);
goto release_fw;
}
if (desc->ops->init_image)
ret = desc->ops->init_image(desc, fw->data, fw->size);
if (ret) {
- pil_err(desc, "Invalid firmware metadata\n");
+ pil_err(desc, "Initializing image failed(rc:%d)\n", ret);
goto err_boot;
}
@@ -807,7 +808,7 @@ int pil_boot(struct pil_desc *desc)
ret = desc->ops->mem_setup(desc, priv->region_start,
priv->region_end - priv->region_start);
if (ret) {
- pil_err(desc, "Memory setup error\n");
+ pil_err(desc, "Memory setup error(rc:%d)\n", ret);
goto err_deinit_image;
}
@@ -852,7 +853,7 @@ int pil_boot(struct pil_desc *desc)
ret = desc->ops->auth_and_reset(desc);
if (ret) {
- pil_err(desc, "Failed to bring out of reset\n");
+ pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
goto err_auth_and_reset;
}
pil_info(desc, "Brought out of reset\n");
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 16c62240ec0a..3873a34c60fb 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -123,7 +123,8 @@ static int pil_mss_power_up(struct q6v5_data *drv)
if (drv->vreg) {
ret = regulator_enable(drv->vreg);
if (ret)
- dev_err(drv->desc.dev, "Failed to enable modem regulator.\n");
+ dev_err(drv->desc.dev, "Failed to enable modem regulator(rc:%d)\n",
+ ret);
}
if (drv->cxrail_bhs) {
@@ -245,7 +246,7 @@ static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
status != 0, POLL_INTERVAL_US, val);
if (ret) {
- dev_err(dev, "PBL boot timed out\n");
+ dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
return ret;
}
if (status != STATUS_PBL_SUCCESS) {
@@ -257,7 +258,7 @@ static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
status != 0, POLL_INTERVAL_US, val);
if (ret) {
- dev_err(dev, "MBA boot timed out\n");
+ dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
return ret;
}
if (status != STATUS_XPU_UNLOCKED &&
@@ -299,7 +300,8 @@ int pil_mss_shutdown(struct pil_desc *pil)
if (!ret)
assert_clamps(pil);
else
- dev_err(pil->dev, "error turning ON AHB clock\n");
+ dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
+ ret);
}
ret = pil_mss_restart_reg(drv, 1);
@@ -328,7 +330,8 @@ int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
status == STATUS_MBA_UNLOCKED || status < 0,
POLL_INTERVAL_US, val);
if (ret)
- dev_err(pil->dev, "MBA region unlock timed out\n");
+ dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
+ ret);
else if (status < 0)
dev_err(pil->dev, "MBA unlock returned err status: %d\n",
status);
@@ -367,19 +370,20 @@ int pil_mss_make_proxy_votes(struct pil_desc *pil)
ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
if (ret) {
- dev_err(pil->dev, "missing vdd_mx-uV property\n");
+ dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
return ret;
}
ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
if (ret) {
- dev_err(pil->dev, "Failed to request vreg_mx voltage\n");
+ dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
+ ret);
return ret;
}
ret = regulator_enable(drv->vreg_mx);
if (ret) {
- dev_err(pil->dev, "Failed to enable vreg_mx\n");
+ dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
return ret;
}
@@ -540,8 +544,8 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
ret = request_firmware(&fw, fw_name_p, pil->dev);
if (ret) {
- dev_err(pil->dev, "Failed to locate %s\n",
- fw_name_p);
+ dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
+ fw_name_p, ret);
return ret;
}
@@ -611,14 +615,15 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
drv->mba_dp_size);
if (ret) {
- pr_err("scm_call to unprotect MBA and DP mem failed\n");
+ pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
+ ret);
goto err_mba_data;
}
}
ret = pil_mss_reset(pil);
if (ret) {
- dev_err(pil->dev, "MBA boot failed.\n");
+ dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
goto err_mss_reset;
}
@@ -673,7 +678,8 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
ret = pil_assign_mem_to_subsys(pil, mdata_phys,
ALIGN(size, SZ_4K));
if (ret) {
- pr_err("scm_call to unprotect modem metadata mem failed\n");
+ pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
+ ret);
dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt,
mdata_phys, &attrs);
goto fail;
@@ -690,7 +696,8 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
POLL_INTERVAL_US, val);
if (ret) {
- dev_err(pil->dev, "MBA authentication of headers timed out\n");
+ dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
+ ret);
} else if (status < 0) {
dev_err(pil->dev, "MBA returned error %d for headers\n",
status);
@@ -771,7 +778,8 @@ static int pil_msa_mba_auth(struct pil_desc *pil)
ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
status == STATUS_AUTH_COMPLETE || status < 0, 50, val);
if (ret) {
- dev_err(pil->dev, "MBA authentication of image timed out\n");
+ dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
+ ret);
} else if (status < 0) {
dev_err(pil->dev, "MBA returned error %d for image\n", status);
ret = -EINVAL;
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 5c0c1ffa8951..af9cd189cf6d 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -291,11 +291,13 @@ static int pil_mss_loadable_init(struct modem_data *drv,
ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV,
MAX_VDD_MSS_UV);
if (ret)
- dev_err(&pdev->dev, "Failed to set vreg voltage.\n");
+ dev_err(&pdev->dev, "Failed to set vreg voltage(rc:%d)\n",
+ ret);
ret = regulator_set_load(q6->vreg, 100000);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to set vreg mode.\n");
+ dev_err(&pdev->dev, "Failed to set vreg mode(rc:%d)\n",
+ ret);
return ret;
}
}
@@ -330,7 +332,7 @@ static int pil_mss_loadable_init(struct modem_data *drv,
ret = of_property_read_u32(pdev->dev.of_node,
"qcom,pas-id", &drv->pas_id);
if (ret)
- dev_warn(&pdev->dev, "Failed to find the pas_id.\n");
+ dev_info(&pdev->dev, "No pas_id found.\n");
drv->subsys_desc.pil_mss_memsetup =
of_property_read_bool(pdev->dev.of_node, "qcom,pil-mss-memsetup");
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index 3dcfb5abdb23..f8895e8a7b3d 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -91,50 +91,53 @@ int pil_q6v5_make_proxy_votes(struct pil_desc *pil)
ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
if (ret) {
- dev_err(pil->dev, "missing vdd_cx-voltage property\n");
+ dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(drv->xo);
if (ret) {
- dev_err(pil->dev, "Failed to vote for XO\n");
+ dev_err(pil->dev, "Failed to vote for XO(rc:%d)\n", ret);
goto out;
}
ret = clk_prepare_enable(drv->pnoc_clk);
if (ret) {
- dev_err(pil->dev, "Failed to vote for pnoc\n");
+ dev_err(pil->dev, "Failed to vote for pnoc(rc:%d)\n", ret);
goto err_pnoc_vote;
}
ret = clk_prepare_enable(drv->qdss_clk);
if (ret) {
- dev_err(pil->dev, "Failed to vote for qdss\n");
+ dev_err(pil->dev, "Failed to vote for qdss(rc:%d)\n", ret);
goto err_qdss_vote;
}
ret = regulator_set_voltage(drv->vreg_cx, uv, INT_MAX);
if (ret) {
- dev_err(pil->dev, "Failed to request vdd_cx voltage.\n");
+ dev_err(pil->dev, "Failed to request vdd_cx voltage(rc:%d)\n",
+ ret);
goto err_cx_voltage;
}
ret = regulator_set_load(drv->vreg_cx, 100000);
if (ret < 0) {
- dev_err(pil->dev, "Failed to set vdd_cx mode.\n");
+ dev_err(pil->dev, "Failed to set vdd_cx mode(rc:%d)\n", ret);
goto err_cx_mode;
}
ret = regulator_enable(drv->vreg_cx);
if (ret) {
- dev_err(pil->dev, "Failed to vote for vdd_cx\n");
+ dev_err(pil->dev, "Failed to vote for vdd_cx(rc:%d)\n", ret);
goto err_cx_enable;
}
if (drv->vreg_pll) {
ret = regulator_enable(drv->vreg_pll);
if (ret) {
- dev_err(pil->dev, "Failed to vote for vdd_pll\n");
+ dev_err(pil->dev, "Failed to vote for vdd_pll(rc:%d)\n",
+ ret);
goto err_vreg_pll;
}
}
@@ -165,7 +168,8 @@ void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
if (ret) {
- dev_err(pil->dev, "missing vdd_cx-voltage property\n");
+ dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+ ret);
return;
}
@@ -708,13 +712,15 @@ struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
ret = regulator_set_voltage(drv->vreg_pll, vdd_pll,
vdd_pll);
if (ret) {
- dev_err(&pdev->dev, "Failed to set vdd_pll voltage.\n");
+ dev_err(&pdev->dev, "Failed to set vdd_pll voltage(rc:%d)\n",
+ ret);
return ERR_PTR(ret);
}
ret = regulator_set_load(drv->vreg_pll, 10000);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to set vdd_pll mode.\n");
+ dev_err(&pdev->dev, "Failed to set vdd_pll mode(rc:%d)\n",
+ ret);
return ERR_PTR(ret);
}
} else
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 4f29923e054c..23e32214756a 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -528,10 +528,11 @@ static struct msm_soc_info cpu_of_id[] = {
[270] = {MSM_CPU_8929, "MSM8229"},
[271] = {MSM_CPU_8929, "APQ8029"},
- /* Cobalt ID */
+ /* Cobalt IDs */
[292] = {MSM_CPU_COBALT, "MSMCOBALT"},
+ [319] = {MSM_CPU_COBALT, "APQCOBALT"},
- /* Cobalt ID */
+ /* Hamster ID */
[306] = {MSM_CPU_HAMSTER, "MSMHAMSTER"},
/* falcon ID */
@@ -655,13 +656,14 @@ static uint32_t socinfo_get_foundry_id(void)
: 0;
}
-static uint32_t socinfo_get_serial_number(void)
+uint32_t socinfo_get_serial_number(void)
{
return socinfo ?
(socinfo_format >= SOCINFO_VERSION(0, 10) ?
socinfo->v0_10.serial_number : 0)
: 0;
}
+EXPORT_SYMBOL(socinfo_get_serial_number);
static uint32_t socinfo_get_chip_family(void)
{
@@ -1205,6 +1207,10 @@ static void * __init setup_dummy_socinfo(void)
dummy_socinfo.id = 317;
strlcpy(dummy_socinfo.build_id, "msmfalcon - ",
sizeof(dummy_socinfo.build_id));
+ } else if (early_machine_is_apqcobalt()) {
+ dummy_socinfo.id = 319;
+ strlcpy(dummy_socinfo.build_id, "apqcobalt - ",
+ sizeof(dummy_socinfo.build_id));
}
strlcat(dummy_socinfo.build_id, "Dummy socinfo",
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index e3e43eee3608..56ca6835fc12 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -339,7 +339,8 @@ static int of_read_regs(struct device *dev, struct reg_info **regs_ref,
rc = of_property_read_u32_array(dev->of_node, reg_uV_uA_name,
vdd_uV_uA, len);
if (rc) {
- dev_err(dev, "Failed to read uV/uA values\n");
+ dev_err(dev, "Failed to read uV/uA values(rc:%d)\n",
+ rc);
return rc;
}
@@ -423,7 +424,8 @@ static int enable_regulators(struct pil_tz_data *d, struct device *dev,
rc = regulator_set_voltage(regs[i].reg,
regs[i].uV, INT_MAX);
if (rc) {
- dev_err(dev, "Failed to request voltage.\n");
+ dev_err(dev, "Failed to request voltage(rc:%d)\n",
+ rc);
goto err_voltage;
}
}
@@ -432,7 +434,8 @@ static int enable_regulators(struct pil_tz_data *d, struct device *dev,
rc = regulator_set_load(regs[i].reg,
regs[i].uA);
if (rc < 0) {
- dev_err(dev, "Failed to set regulator mode\n");
+ dev_err(dev, "Failed to set regulator mode(rc:%d)\n",
+ rc);
goto err_mode;
}
}
@@ -442,7 +445,7 @@ static int enable_regulators(struct pil_tz_data *d, struct device *dev,
rc = regulator_enable(regs[i].reg);
if (rc) {
- dev_err(dev, "Regulator enable failed\n");
+ dev_err(dev, "Regulator enable failed(rc:%d)\n", rc);
goto err_enable;
}
}
@@ -499,7 +502,7 @@ static int prepare_enable_clocks(struct device *dev, struct clk **clks,
for (i = 0; i < clk_count; i++) {
rc = clk_prepare_enable(clks[i]);
if (rc) {
- dev_err(dev, "Clock enable failed\n");
+ dev_err(dev, "Clock enable failed(rc:%d)\n", rc);
goto err;
}
}
@@ -541,7 +544,8 @@ static int pil_make_proxy_vote(struct pil_desc *pil)
if (d->bus_client) {
rc = msm_bus_scale_client_update_request(d->bus_client, 1);
if (rc) {
- dev_err(pil->dev, "bandwidth request failed\n");
+ dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
+ rc);
goto err_bw;
}
} else
@@ -995,7 +999,8 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
rc = of_property_read_u32(pdev->dev.of_node, "qcom,smem-id",
&d->smem_id);
if (rc) {
- dev_err(&pdev->dev, "Failed to get the smem_id.\n");
+ dev_err(&pdev->dev, "Failed to get the smem_id(rc:%d)\n",
+ rc);
return rc;
}
}
@@ -1019,7 +1024,8 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
rc = of_property_read_u32(pdev->dev.of_node, "qcom,pas-id",
&d->pas_id);
if (rc) {
- dev_err(&pdev->dev, "Failed to find the pas_id.\n");
+ dev_err(&pdev->dev, "Failed to find the pas_id(rc:%d)\n",
+ rc);
return rc;
}
scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE0);
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 32041c17d88f..0ed8a6533e00 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -268,7 +268,8 @@ static ssize_t firmware_name_store(struct device *dev,
pr_info("Changing subsys fw_name to %s\n", buf);
mutex_lock(&track->lock);
- strlcpy(subsys->desc->fw_name, buf, count + 1);
+ strlcpy(subsys->desc->fw_name, buf,
+ min(count + 1, sizeof(subsys->desc->fw_name)));
mutex_unlock(&track->lock);
return orig_count;
}
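
The firmware_name_store() change above bounds the copy to the destination field instead of trusting the sysfs write length. As a standalone illustration (userspace C, not part of this patch; the 32-byte fw_name size and the helper name are assumptions for the demo), a bounded copy that always NUL-terminates looks like this:

/* Sketch of what strlcpy(dst, buf, min(count + 1, sizeof(dst))) achieves:
 * copy at most dst_size - 1 bytes and always terminate the destination. */
#include <stdio.h>
#include <string.h>

static size_t bounded_copy(char *dst, const char *src, size_t dst_size)
{
	size_t n = strlen(src);

	if (dst_size == 0)
		return n;
	if (n >= dst_size)
		n = dst_size - 1;	/* leave room for the terminator */
	memcpy(dst, src, n);
	dst[n] = '\0';
	return n;
}

int main(void)
{
	char fw_name[32];	/* assumed size, for the demo only */

	bounded_copy(fw_name, "a-very-long-firmware-image-name-from-sysfs",
		     sizeof(fw_name));
	printf("fw_name = \"%s\" (len %zu)\n", fw_name, strlen(fw_name));
	return 0;
}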
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 5e1fd988b22c..b02e48185355 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -97,6 +97,17 @@ enum pmic_arb_cmd_op_code {
/* interrupt enable bit */
#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
+#define HWIRQ(slave_id, periph_id, irq_id, apid) \
+ ((((slave_id) & 0xF) << 28) | \
+ (((periph_id) & 0xFF) << 20) | \
+ (((irq_id) & 0x7) << 16) | \
+ (((apid) & 0x1FF) << 0))
+
+#define HWIRQ_SID(hwirq) (((hwirq) >> 28) & 0xF)
+#define HWIRQ_PER(hwirq) (((hwirq) >> 20) & 0xFF)
+#define HWIRQ_IRQ(hwirq) (((hwirq) >> 16) & 0x7)
+#define HWIRQ_APID(hwirq) (((hwirq) >> 0) & 0x1FF)
+
struct pmic_arb_ver_ops;
struct apid_data {
@@ -172,7 +183,7 @@ struct spmi_pmic_arb {
struct pmic_arb_ver_ops {
const char *ver_str;
int (*ppid_to_apid)(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
- u8 *apid);
+ u16 *apid);
int (*mode)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
mode_t *mode);
/* spmi commands (read_cmd, write_cmd, cmd) functionality */
@@ -181,10 +192,10 @@ struct pmic_arb_ver_ops {
u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
/* Interrupts controller functionality (offset of PIC registers) */
- u32 (*owner_acc_status)(u8 m, u8 n);
- u32 (*acc_enable)(u8 n);
- u32 (*irq_status)(u8 n);
- u32 (*irq_clear)(u8 n);
+ u32 (*owner_acc_status)(u8 m, u16 n);
+ u32 (*acc_enable)(u16 n);
+ u32 (*irq_status)(u16 n);
+ u32 (*irq_clear)(u16 n);
};
static inline void pmic_arb_base_write(struct spmi_pmic_arb *pa,
@@ -466,8 +477,8 @@ static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
size_t len)
{
struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 sid = d->hwirq >> 24;
- u8 per = d->hwirq >> 16;
+ u8 sid = HWIRQ_SID(d->hwirq);
+ u8 per = HWIRQ_PER(d->hwirq);
if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
(per << 8) + reg, buf, len))
@@ -479,8 +490,8 @@ static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
{
struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 sid = d->hwirq >> 24;
- u8 per = d->hwirq >> 16;
+ u8 sid = HWIRQ_SID(d->hwirq);
+ u8 per = HWIRQ_PER(d->hwirq);
if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
(per << 8) + reg, buf, len))
@@ -489,7 +500,7 @@ static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
d->irq);
}
-static void cleanup_irq(struct spmi_pmic_arb *pa, u8 apid, int id)
+static void cleanup_irq(struct spmi_pmic_arb *pa, u16 apid, int id)
{
u16 ppid = pa->apid_data[apid].ppid;
u8 sid = ppid >> 8;
@@ -514,20 +525,19 @@ static void cleanup_irq(struct spmi_pmic_arb *pa, u8 apid, int id)
irq_mask, ppid);
}
-static void periph_interrupt(struct spmi_pmic_arb *pa, u8 apid)
+static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid)
{
unsigned int irq;
u32 status;
int id;
+ u8 sid = (pa->apid_data[apid].ppid >> 8) & 0xF;
+ u8 per = pa->apid_data[apid].ppid & 0xFF;
status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
while (status) {
id = ffs(status) - 1;
status &= ~BIT(id);
- irq = irq_find_mapping(pa->domain,
- pa->apid_data[apid].ppid << 16
- | id << 8
- | apid);
+ irq = irq_find_mapping(pa->domain, HWIRQ(sid, per, id, apid));
if (irq == 0) {
cleanup_irq(pa, apid, id);
continue;
@@ -568,8 +578,8 @@ static void pmic_arb_chained_irq(struct irq_desc *desc)
static void qpnpint_irq_ack(struct irq_data *d)
{
struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 irq = d->hwirq >> 8;
- u8 apid = d->hwirq;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u16 apid = HWIRQ_APID(d->hwirq);
u8 data;
writel_relaxed(BIT(irq), pa->intr + pa->ver_ops->irq_clear(apid));
@@ -580,7 +590,7 @@ static void qpnpint_irq_ack(struct irq_data *d)
static void qpnpint_irq_mask(struct irq_data *d)
{
- u8 irq = d->hwirq >> 8;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
u8 data = BIT(irq);
qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
@@ -589,8 +599,8 @@ static void qpnpint_irq_mask(struct irq_data *d)
static void qpnpint_irq_unmask(struct irq_data *d)
{
struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 irq = d->hwirq >> 8;
- u8 apid = d->hwirq;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u16 apid = HWIRQ_APID(d->hwirq);
u8 buf[2];
writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
@@ -612,7 +622,7 @@ static void qpnpint_irq_unmask(struct irq_data *d)
static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct spmi_pmic_arb_qpnpint_type type;
- u8 irq = d->hwirq >> 8;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
u8 bit_mask_irq = BIT(irq);
qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
@@ -649,7 +659,7 @@ static int qpnpint_get_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which,
bool *state)
{
- u8 irq = d->hwirq >> 8;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
u8 status = 0;
if (which != IRQCHIP_STATE_LINE_LEVEL)
@@ -681,7 +691,7 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
{
struct spmi_pmic_arb *pa = d->host_data;
int rc;
- u8 apid;
+ u16 apid;
dev_dbg(&pa->spmic->dev,
"intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
@@ -709,10 +719,7 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
if (apid < pa->min_apid)
pa->min_apid = apid;
- *out_hwirq = (intspec[0] & 0xF) << 24
- | (intspec[1] & 0xFF) << 16
- | (intspec[2] & 0x7) << 8
- | apid;
+ *out_hwirq = HWIRQ(intspec[0], intspec[1], intspec[2], apid);
*out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
@@ -735,7 +742,7 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
}
static int
-pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u8 *apid)
+pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
{
u16 ppid = sid << 8 | ((addr >> 8) & 0xFF);
u32 *mapping_table = pa->mapping_table;
@@ -834,7 +841,7 @@ static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pa, u16 ppid)
}
static int
-pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u8 *apid)
+pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
{
u16 ppid = (sid << 8) | (addr >> 8);
u16 apid_valid;
@@ -852,7 +859,7 @@ pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u8 *apid)
static int
pmic_arb_mode_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
{
- u8 apid;
+ u16 apid;
int rc;
rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
@@ -871,7 +878,7 @@ pmic_arb_mode_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
static int
pmic_arb_offset_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u32 *offset)
{
- u8 apid;
+ u16 apid;
int rc;
rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
@@ -892,47 +899,47 @@ static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
}
-static u32 pmic_arb_owner_acc_status_v1(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v1(u8 m, u16 n)
{
return 0x20 * m + 0x4 * n;
}
-static u32 pmic_arb_owner_acc_status_v2(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v2(u8 m, u16 n)
{
return 0x100000 + 0x1000 * m + 0x4 * n;
}
-static u32 pmic_arb_owner_acc_status_v3(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v3(u8 m, u16 n)
{
return 0x200000 + 0x1000 * m + 0x4 * n;
}
-static u32 pmic_arb_acc_enable_v1(u8 n)
+static u32 pmic_arb_acc_enable_v1(u16 n)
{
return 0x200 + 0x4 * n;
}
-static u32 pmic_arb_acc_enable_v2(u8 n)
+static u32 pmic_arb_acc_enable_v2(u16 n)
{
return 0x1000 * n;
}
-static u32 pmic_arb_irq_status_v1(u8 n)
+static u32 pmic_arb_irq_status_v1(u16 n)
{
return 0x600 + 0x4 * n;
}
-static u32 pmic_arb_irq_status_v2(u8 n)
+static u32 pmic_arb_irq_status_v2(u16 n)
{
return 0x4 + 0x1000 * n;
}
-static u32 pmic_arb_irq_clear_v1(u8 n)
+static u32 pmic_arb_irq_clear_v1(u16 n)
{
return 0xA00 + 0x4 * n;
}
-static u32 pmic_arb_irq_clear_v2(u8 n)
+static u32 pmic_arb_irq_clear_v2(u16 n)
{
return 0x8 + 0x1000 * n;
}
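
The HWIRQ()/HWIRQ_*() macros introduced above pack the slave id (4 bits), peripheral id (8 bits), per-peripheral irq (3 bits) and apid (9 bits) into a single hwirq value, so apid values above 255 no longer overlap the irq field. A minimal userspace round-trip check of that encoding, using the same shifts and masks as the patch (the test values are made up):

#include <assert.h>
#include <stdio.h>

#define HWIRQ(sid, per, irq, apid) \
	((((sid) & 0xFUL) << 28) | (((per) & 0xFFUL) << 20) | \
	 (((irq) & 0x7UL) << 16) | (((apid) & 0x1FFUL) << 0))
#define HWIRQ_SID(h)  (((h) >> 28) & 0xF)
#define HWIRQ_PER(h)  (((h) >> 20) & 0xFF)
#define HWIRQ_IRQ(h)  (((h) >> 16) & 0x7)
#define HWIRQ_APID(h) (((h) >> 0) & 0x1FF)

int main(void)
{
	unsigned long h = HWIRQ(0x3, 0xA5, 0x6, 0x1F0);

	assert(HWIRQ_SID(h) == 0x3);
	assert(HWIRQ_PER(h) == 0xA5);
	assert(HWIRQ_IRQ(h) == 0x6);
	assert(HWIRQ_APID(h) == 0x1F0);	/* a 9-bit apid survives the round trip */
	printf("hwirq = 0x%08lx\n", h);
	return 0;
}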
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index e9237202e79f..a8c8e120c348 100755
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -257,9 +257,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
* memory coming from the heaps is ready for dma, ie if it has a
* cached mapping that mapping has been invalidated
*/
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
sg_dma_address(sg) = sg_phys(sg);
-
+ sg_dma_len(sg) = sg->length;
+ }
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index b8dcf5a26cc4..58d46893e5ff 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -285,8 +285,8 @@ static int __init ion_test_init(void)
{
ion_test_pdev = platform_device_register_simple("ion-test",
-1, NULL, 0);
- if (!ion_test_pdev)
- return -ENODEV;
+ if (IS_ERR(ion_test_pdev))
+ return PTR_ERR(ion_test_pdev);
return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
}
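
The ion_test_init() fix above reflects that platform_device_register_simple() reports failure with an error code encoded into the returned pointer rather than with NULL, so the old !ptr check could never fire. A hedged userspace sketch of that ERR_PTR/IS_ERR convention (the real helpers live in include/linux/err.h; these are simplified copies for illustration):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* error pointers live in the top MAX_ERRNO addresses */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *pdev = ERR_PTR(-ENODEV);	/* what a failed registration returns */

	if (IS_ERR(pdev))
		printf("registration failed: %ld\n", PTR_ERR(pdev));
	/* A plain NULL check would have missed this: pdev is non-NULL here. */
	return 0;
}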
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 6cc304a4c59b..27fbf1a81097 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -246,24 +246,24 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
{
if (dev->mmio)
writel(data, dev->mmio + reg);
-
- outl(data, dev->iobase + reg);
+ else
+ outl(data, dev->iobase + reg);
}
static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
{
if (dev->mmio)
writew(data, dev->mmio + reg);
-
- outw(data, dev->iobase + reg);
+ else
+ outw(data, dev->iobase + reg);
}
static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
{
if (dev->mmio)
writeb(data, dev->mmio + reg);
-
- outb(data, dev->iobase + reg);
+ else
+ outb(data, dev->iobase + reg);
}
static uint32_t ni_readl(struct comedi_device *dev, int reg)
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 437f723bb34d..823e47910004 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
unsigned long flags;
int ret = 0;
- if (trig_num != cmd->start_src)
+ if (trig_num != cmd->start_arg)
return -EINVAL;
spin_lock_irqsave(&counter->lock, flags);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 94f4ffac723f..d151bc3d6971 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2618,8 +2618,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
list_for_each_entry_safe(se_cmd, tmp_cmd,
&se_sess->sess_wait_list, se_cmd_list) {
- list_del_init(&se_cmd->se_cmd_list);
-
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
" %d\n", se_cmd, se_cmd->t_state,
se_cmd->se_tfo->get_cmd_state(se_cmd));
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 73d7435d2eb8..97ab02dfc753 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -831,6 +831,7 @@ struct tsens_tm_device {
bool prev_reading_avail;
bool calibration_less_mode;
bool tsens_local_init;
+ bool gain_offset_programmed;
int tsens_factor;
uint32_t tsens_num_sensor;
int tsens_irq;
@@ -5341,17 +5342,25 @@ static int get_device_tree_data(struct platform_device *pdev,
return -ENODEV;
}
- tsens_slope_data = devm_kzalloc(&pdev->dev,
+ /* TSENS calibration region */
+ tmdev->res_calib_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "tsens_eeprom_physical");
+ if (!tmdev->res_calib_mem) {
+ pr_debug("Using controller programmed gain and offset\n");
+ tmdev->gain_offset_programmed = true;
+ } else {
+ tsens_slope_data = devm_kzalloc(&pdev->dev,
tsens_num_sensors * sizeof(u32), GFP_KERNEL);
- if (!tsens_slope_data)
- return -ENOMEM;
+ if (!tsens_slope_data)
+ return -ENOMEM;
- rc = of_property_read_u32_array(of_node,
- "qcom,slope", tsens_slope_data, tsens_num_sensors);
- if (rc) {
- dev_err(&pdev->dev, "invalid or missing property: tsens-slope\n");
- return rc;
- };
+ rc = of_property_read_u32_array(of_node,
+ "qcom,slope", tsens_slope_data, tsens_num_sensors);
+ if (rc) {
+ dev_err(&pdev->dev, "missing property: tsens-slope\n");
+ return rc;
+ };
+ }
if (!of_match_node(tsens_match, of_node)) {
pr_err("Need to read SoC specific fuse map\n");
@@ -5364,9 +5373,13 @@ static int get_device_tree_data(struct platform_device *pdev,
return -ENODEV;
}
- for (i = 0; i < tsens_num_sensors; i++)
- tmdev->sensor[i].slope_mul_tsens_factor = tsens_slope_data[i];
- tmdev->tsens_factor = TSENS_SLOPE_FACTOR;
+ if (!tmdev->gain_offset_programmed) {
+ for (i = 0; i < tsens_num_sensors; i++)
+ tmdev->sensor[i].slope_mul_tsens_factor =
+ tsens_slope_data[i];
+ tmdev->tsens_factor = TSENS_SLOPE_FACTOR;
+ }
+
tmdev->tsens_num_sensor = tsens_num_sensors;
tmdev->calibration_less_mode = of_property_read_bool(of_node,
"qcom,calibration-less-mode");
@@ -5536,24 +5549,17 @@ static int get_device_tree_data(struct platform_device *pdev,
goto fail_unmap_tsens_region;
}
- /* TSENS calibration region */
- tmdev->res_calib_mem = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, "tsens_eeprom_physical");
- if (!tmdev->res_calib_mem) {
- pr_err("Could not get qfprom physical address resource\n");
- rc = -EINVAL;
- goto fail_unmap_tsens;
- }
-
- tmdev->calib_len = tmdev->res_calib_mem->end -
+ if (!tmdev->gain_offset_programmed) {
+ tmdev->calib_len = tmdev->res_calib_mem->end -
tmdev->res_calib_mem->start + 1;
- tmdev->tsens_calib_addr = ioremap(tmdev->res_calib_mem->start,
+ tmdev->tsens_calib_addr = ioremap(tmdev->res_calib_mem->start,
tmdev->calib_len);
- if (!tmdev->tsens_calib_addr) {
- pr_err("Failed to IO map EEPROM registers.\n");
- rc = -EINVAL;
- goto fail_unmap_tsens;
+ if (!tmdev->tsens_calib_addr) {
+ pr_err("Failed to IO map EEPROM registers.\n");
+ rc = -EINVAL;
+ goto fail_unmap_tsens;
+ }
}
return 0;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index df8c82aa2dd9..e03d3b41c25b 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -39,7 +39,6 @@
#include <linux/kthread.h>
#include <net/netlink.h>
#include <net/genetlink.h>
-#include <linux/suspend.h>
#define CREATE_TRACE_POINTS
#include <trace/events/thermal.h>
@@ -64,8 +63,6 @@ static LIST_HEAD(thermal_governor_list);
static DEFINE_MUTEX(thermal_list_lock);
static DEFINE_MUTEX(thermal_governor_lock);
-static atomic_t in_suspend;
-
static struct thermal_governor *def_governor;
static struct thermal_governor *__find_governor(const char *name)
@@ -851,6 +848,10 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
{
enum thermal_trip_type type;
+ /* Ignore disabled trip points */
+ if (test_bit(trip, &tz->trips_disabled))
+ return;
+
tz->ops->get_trip_type(tz, trip, &type);
if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT ||
@@ -957,9 +958,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
{
int count;
- if (atomic_read(&in_suspend))
- return;
-
if (!tz->ops->get_temp)
return;
@@ -2247,6 +2245,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
{
struct thermal_zone_device *tz;
enum thermal_trip_type trip_type;
+ int trip_temp;
int result;
int count;
int passive = 0;
@@ -2318,9 +2317,15 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
goto unregister;
for (count = 0; count < trips; count++) {
- tz->ops->get_trip_type(tz, count, &trip_type);
+ if (tz->ops->get_trip_type(tz, count, &trip_type))
+ set_bit(count, &tz->trips_disabled);
if (trip_type == THERMAL_TRIP_PASSIVE)
passive = 1;
+ if (tz->ops->get_trip_temp(tz, count, &trip_temp))
+ set_bit(count, &tz->trips_disabled);
+ /* Check for bogus trip points */
+ if (trip_temp == 0)
+ set_bit(count, &tz->trips_disabled);
}
if (!passive) {
@@ -2630,36 +2635,6 @@ static void thermal_unregister_governors(void)
thermal_gov_power_allocator_unregister();
}
-static int thermal_pm_notify(struct notifier_block *nb,
- unsigned long mode, void *_unused)
-{
- struct thermal_zone_device *tz;
-
- switch (mode) {
- case PM_HIBERNATION_PREPARE:
- case PM_RESTORE_PREPARE:
- case PM_SUSPEND_PREPARE:
- atomic_set(&in_suspend, 1);
- break;
- case PM_POST_HIBERNATION:
- case PM_POST_RESTORE:
- case PM_POST_SUSPEND:
- atomic_set(&in_suspend, 0);
- list_for_each_entry(tz, &thermal_tz_list, node) {
- thermal_zone_device_reset(tz);
- thermal_zone_device_update(tz);
- }
- break;
- default:
- break;
- }
- return 0;
-}
-
-static struct notifier_block thermal_pm_nb = {
- .notifier_call = thermal_pm_notify,
-};
-
static int __init thermal_init(void)
{
int result;
@@ -2680,11 +2655,6 @@ static int __init thermal_init(void)
if (result)
goto exit_netlink;
- result = register_pm_notifier(&thermal_pm_nb);
- if (result)
- pr_warn("Thermal: Can not register suspend notifier, return %d\n",
- result);
-
return 0;
exit_netlink:
@@ -2704,7 +2674,6 @@ error:
static void __exit thermal_exit(void)
{
- unregister_pm_notifier(&thermal_pm_nb);
of_thermal_destroy_zones();
genetlink_exit();
class_unregister(&thermal_class);
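
The trips_disabled change above records invalid trip points in a bitmap at registration time and skips them in handle_thermal_trip(). A small standalone model of that set_bit/test_bit pattern on an unsigned long bitmap (the kernel helpers are atomic; plain bit operations and the sample temperatures below are used only for illustration):

#include <stdio.h>

static void set_trip_disabled(unsigned long *map, unsigned int trip)
{
	*map |= 1UL << trip;
}

static int trip_disabled(unsigned long map, unsigned int trip)
{
	return (map >> trip) & 1UL;
}

int main(void)
{
	unsigned long trips_disabled = 0;
	int trip_temp[4] = { 95000, 0, 105000, 0 };	/* 0 == bogus trip */
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (trip_temp[i] == 0)
			set_trip_disabled(&trips_disabled, i);

	for (i = 0; i < 4; i++) {
		if (trip_disabled(trips_disabled, i))
			continue;	/* mirrors the early return in handle_thermal_trip() */
		printf("handling trip %u at %d mC\n", i, trip_temp[i]);
	}
	return 0;
}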
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 52d82d2ac726..56ccbcefdd85 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -713,22 +713,16 @@ static int size_fifo(struct uart_8250_port *up)
*/
static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
{
- unsigned char old_dll, old_dlm, old_lcr;
- unsigned int id;
+ unsigned char old_lcr;
+ unsigned int id, old_dl;
old_lcr = serial_in(p, UART_LCR);
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
+ old_dl = serial_dl_read(p);
+ serial_dl_write(p, 0);
+ id = serial_dl_read(p);
+ serial_dl_write(p, old_dl);
- old_dll = serial_in(p, UART_DLL);
- old_dlm = serial_in(p, UART_DLM);
-
- serial_out(p, UART_DLL, 0);
- serial_out(p, UART_DLM, 0);
-
- id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
-
- serial_out(p, UART_DLL, old_dll);
- serial_out(p, UART_DLM, old_dlm);
serial_out(p, UART_LCR, old_lcr);
return id;
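
autoconfig_read_divisor_id() now goes through serial_dl_read()/serial_dl_write() instead of poking UART_DLL/UART_DLM directly, so ports whose divisor latch access differs from the plain two-register layout still work. The removed open-coded path was just this 16-bit merge, shown standalone with made-up register values:

#include <stdio.h>

/* How the old code assembled the 16-bit divisor ID from the two 8-bit
 * latch registers; serial_dl_read() hides this detail per port type. */
static unsigned int dl_combine(unsigned char dll, unsigned char dlm)
{
	return dll | (unsigned int)dlm << 8;
}

int main(void)
{
	printf("id = 0x%04x\n", dl_combine(0x34, 0x12));	/* -> 0x1234 */
	return 0;
}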
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 6843711774b2..d4ece0e56954 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -70,9 +70,11 @@
#define UART_SPS_CONS_PERIPHERAL 0
#define UART_SPS_PROD_PERIPHERAL 1
-#define IPC_MSM_HS_LOG_PAGES 5
+#define IPC_MSM_HS_LOG_STATE_PAGES 2
+#define IPC_MSM_HS_LOG_USER_PAGES 2
+#define IPC_MSM_HS_LOG_DATA_PAGES 3
#define UART_DMA_DESC_NR 8
-#define BUF_DUMP_SIZE 20
+#define BUF_DUMP_SIZE 32
/* If the debug_mask gets set to FATAL_LEV,
* a fatal error has happened and further IPC logging
@@ -121,6 +123,11 @@ enum {
} \
} while (0)
+#define LOG_USR_MSG(ctx, x...) do { \
+ if (ctx) \
+ ipc_log_string(ctx, x); \
+} while (0)
+
/*
* There are 3 different kind of UART Core available on MSM.
* High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
@@ -164,6 +171,7 @@ struct msm_hs_tx {
struct task_struct *task;
struct msm_hs_sps_ep_conn_data cons;
struct timer_list tx_timeout_timer;
+ void *ipc_tx_ctxt;
};
struct msm_hs_rx {
@@ -181,6 +189,7 @@ struct msm_hs_rx {
unsigned long pending_flag;
int rx_inx;
struct sps_iovec iovec[UART_DMA_DESC_NR]; /* track descriptors */
+ void *ipc_rx_ctxt;
};
enum buffer_states {
NONE_PENDING = 0x0,
@@ -214,7 +223,7 @@ struct msm_hs_port {
struct clk *pclk;
struct msm_hs_tx tx;
struct msm_hs_rx rx;
- atomic_t clk_count;
+ atomic_t resource_count;
struct msm_hs_wakeup wakeup;
struct dentry *loopback_dir;
@@ -248,6 +257,7 @@ struct msm_hs_port {
bool obs; /* out of band sleep flag */
atomic_t client_req_state;
void *ipc_msm_hs_log_ctxt;
+ void *ipc_msm_hs_pwr_ctxt;
int ipc_debug_mask;
};
@@ -315,7 +325,7 @@ static int msm_hs_ioctl(struct uart_port *uport, unsigned int cmd,
break;
}
default: {
- MSM_HS_DBG("%s():Unknown cmd specified: cmd=%d\n", __func__,
+ MSM_HS_INFO("%s():Unknown cmd specified: cmd=%d\n", __func__,
cmd);
ret = -ENOIOCTLCMD;
break;
@@ -380,7 +390,7 @@ static void msm_hs_clk_bus_unvote(struct msm_hs_port *msm_uport)
static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
{
struct uart_port *uport = &(msm_uport->uport);
- int rc = atomic_read(&msm_uport->clk_count);
+ int rc = atomic_read(&msm_uport->resource_count);
MSM_HS_DBG("%s(): power usage count %d", __func__, rc);
if (rc <= 0) {
@@ -388,7 +398,7 @@ static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
WARN_ON(1);
return;
}
- atomic_dec(&msm_uport->clk_count);
+ atomic_dec(&msm_uport->resource_count);
pm_runtime_mark_last_busy(uport->dev);
pm_runtime_put_autosuspend(uport->dev);
}
@@ -400,12 +410,12 @@ static void msm_hs_resource_vote(struct msm_hs_port *msm_uport)
struct uart_port *uport = &(msm_uport->uport);
ret = pm_runtime_get_sync(uport->dev);
if (ret < 0 || msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
- MSM_HS_WARN("%s(): %p runtime PM callback not invoked(%d)",
- __func__, uport->dev, ret);
+ MSM_HS_WARN("%s:%s runtime callback not invoked ret:%d st:%d",
+ __func__, dev_name(uport->dev), ret,
+ msm_uport->pm_state);
msm_hs_pm_resume(uport->dev);
}
-
- atomic_inc(&msm_uport->clk_count);
+ atomic_inc(&msm_uport->resource_count);
}
/* Check if the uport line number matches with user id stored in pdata.
@@ -567,23 +577,21 @@ static int sps_rx_disconnect(struct sps_pipe *sps_pipe_handler)
return sps_disconnect(sps_pipe_handler);
}
-static void hex_dump_ipc(struct msm_hs_port *msm_uport,
- char *prefix, char *string, int size)
+static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx,
+ char *prefix, char *string, u64 addr, int size)
+
{
- unsigned char linebuf[512];
- unsigned char firstbuf[40], lastbuf[40];
+ char buf[(BUF_DUMP_SIZE * 3) + 2];
+ int len = 0;
- if ((msm_uport->ipc_debug_mask != DBG_LEV) && (size > BUF_DUMP_SIZE)) {
- hex_dump_to_buffer(string, 10, 16, 1,
- firstbuf, sizeof(firstbuf), 1);
- hex_dump_to_buffer(string + (size - 10), 10, 16, 1,
- lastbuf, sizeof(lastbuf), 1);
- MSM_HS_INFO("%s : %s...%s", prefix, firstbuf, lastbuf);
- } else {
- hex_dump_to_buffer(string, size, 16, 1,
- linebuf, sizeof(linebuf), 1);
- MSM_HS_INFO("%s : %s", prefix, linebuf);
- }
+ len = min(size, BUF_DUMP_SIZE);
+ /*
+ * Print up to 32 data bytes, 32 bytes per line, 1 byte at a time and
+ * don't include the ASCII text at the end of the buffer.
+ */
+ hex_dump_to_buffer(string, len, 32, 1, buf, sizeof(buf), false);
+ ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
+ (unsigned int)addr, size, buf);
}
/*
@@ -594,8 +602,8 @@ static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
struct uart_port *uport = &(msm_uport->uport);
if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
- MSM_HS_INFO("%s:Failed clocks are off, clk_count %d",
- __func__, atomic_read(&msm_uport->clk_count));
+ MSM_HS_INFO("%s:Failed clocks are off, resource_count %d",
+ __func__, atomic_read(&msm_uport->resource_count));
return;
}
@@ -757,8 +765,10 @@ static int msm_hs_spsconnect_tx(struct msm_hs_port *msm_uport)
unsigned long flags;
unsigned int data;
- if (tx->flush != FLUSH_SHUTDOWN)
+ if (tx->flush != FLUSH_SHUTDOWN) {
+ MSM_HS_ERR("%s:Invalid flush state:%d\n", __func__, tx->flush);
return 0;
+ }
/* Establish connection between peripheral and memory endpoint */
ret = sps_connect(sps_pipe_handle, sps_config);
@@ -1100,7 +1110,6 @@ static void msm_hs_set_termios(struct uart_port *uport,
mutex_lock(&msm_uport->mtx);
msm_hs_write(uport, UART_DM_IMR, 0);
- MSM_HS_DBG("Entering %s\n", __func__);
msm_hs_disable_flow_control(uport, true);
/*
@@ -1214,10 +1223,10 @@ static void msm_hs_set_termios(struct uart_port *uport,
msm_uport->flow_control = true;
}
msm_hs_write(uport, UART_DM_MR1, data);
+ MSM_HS_INFO("%s: Cflags 0x%x Baud %u\n", __func__, c_cflag, bps);
mutex_unlock(&msm_uport->mtx);
- MSM_HS_DBG("Exit %s\n", __func__);
msm_hs_resource_unvote(msm_uport);
}
@@ -1400,9 +1409,6 @@ static void msm_hs_submit_tx_locked(struct uart_port *uport)
if (tx_count > left)
tx_count = left;
- MSM_HS_INFO("%s(): [UART_TX]<%d>\n", __func__, tx_count);
- hex_dump_ipc(msm_uport, "HSUART write: ",
- &tx_buf->buf[tx_buf->tail], tx_count);
src_addr = tx->dma_base + tx_buf->tail;
/* Mask the src_addr to align on a cache
@@ -1415,6 +1421,8 @@ static void msm_hs_submit_tx_locked(struct uart_port *uport)
tx->tx_count = tx_count;
+ hex_dump_ipc(msm_uport, tx->ipc_tx_ctxt, "Tx",
+ &tx_buf->buf[tx_buf->tail], (u64)src_addr, tx_count);
sps_pipe_handle = tx->cons.pipe_handle;
/* Queue transfer request to SPS */
ret = sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
@@ -1717,12 +1725,11 @@ static void msm_serial_hs_rx_work(struct kthread_work *work)
goto out;
rx_count = msm_uport->rx.iovec[msm_uport->rx.rx_inx].size;
-
- MSM_HS_INFO("%s():[UART_RX]<%d>\n", __func__, rx_count);
- hex_dump_ipc(msm_uport, "HSUART Read: ",
- (msm_uport->rx.buffer +
- (msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
- rx_count);
+ hex_dump_ipc(msm_uport, rx->ipc_rx_ctxt, "Rx",
+ (msm_uport->rx.buffer +
+ (msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
+ msm_uport->rx.iovec[msm_uport->rx.rx_inx].addr,
+ rx_count);
/*
* We are in a spin locked context, spin lock taken at
@@ -1733,7 +1740,7 @@ static void msm_serial_hs_rx_work(struct kthread_work *work)
&msm_uport->rx.pending_flag) &&
!test_bit(msm_uport->rx.rx_inx,
&msm_uport->rx.queued_flag))
- MSM_HS_ERR("RX INX not set");
+ MSM_HS_ERR("%s: RX INX not set", __func__);
else if (test_bit(msm_uport->rx.rx_inx,
&msm_uport->rx.pending_flag) &&
!test_bit(msm_uport->rx.rx_inx,
@@ -1748,14 +1755,14 @@ static void msm_serial_hs_rx_work(struct kthread_work *work)
rx_count);
if (retval != rx_count) {
- MSM_HS_DBG("%s(): ret %d rx_count %d",
+ MSM_HS_INFO("%s(): ret %d rx_count %d",
__func__, retval, rx_count);
msm_uport->rx.buffer_pending |=
CHARS_NORMAL | retval << 5 |
(rx_count - retval) << 16;
}
} else
- MSM_HS_ERR("Error in inx %d",
+ MSM_HS_ERR("%s: Error in inx %d", __func__,
msm_uport->rx.rx_inx);
}
@@ -1778,7 +1785,7 @@ static void msm_serial_hs_rx_work(struct kthread_work *work)
}
out:
if (msm_uport->rx.buffer_pending) {
- MSM_HS_WARN("tty buffer exhausted. Stalling\n");
+ MSM_HS_WARN("%s: tty buffer exhausted. Stalling\n", __func__);
schedule_delayed_work(&msm_uport->rx.flip_insert_work
, msecs_to_jiffies(RETRY_TIMEOUT));
}
@@ -1796,7 +1803,7 @@ static void msm_hs_start_tx_locked(struct uart_port *uport)
/* Bail if transfer in progress */
if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
- MSM_HS_DBG("%s(): retry, flush %d, dma_in_flight %d\n",
+ MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
__func__, tx->flush, tx->dma_in_flight);
return;
}
@@ -1826,11 +1833,9 @@ static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
notify->data.transfer.iovec.addr);
msm_uport->notify = *notify;
- MSM_HS_DBG("%s: ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x, line=%d\n",
- __func__, notify->event_id, &addr,
- notify->data.transfer.iovec.size,
- notify->data.transfer.iovec.flags,
- msm_uport->uport.line);
+ MSM_HS_INFO("tx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+ &addr, notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags);
del_timer(&msm_uport->tx.tx_timeout_timer);
MSM_HS_DBG("%s(): Queue kthread work", __func__);
@@ -1931,9 +1936,8 @@ static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
uport = &(msm_uport->uport);
msm_uport->notify = *notify;
- MSM_HS_DBG("\n%s: sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x\n",
- __func__, notify->event_id, &addr,
- notify->data.transfer.iovec.size,
+ MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+ &addr, notify->data.transfer.iovec.size,
notify->data.transfer.iovec.flags);
spin_lock_irqsave(&uport->lock, flags);
@@ -1985,13 +1989,13 @@ void msm_hs_set_mctrl_locked(struct uart_port *uport,
unsigned int set_rts;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- MSM_HS_DBG("%s()", __func__);
if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
MSM_HS_WARN("%s(): Clocks are off\n", __func__);
return;
}
/* RTS is active low */
set_rts = TIOCM_RTS & mctrl ? 0 : 1;
+ MSM_HS_INFO("%s: set_rts %d\n", __func__, set_rts);
if (set_rts)
msm_hs_disable_flow_control(uport, false);
@@ -2186,7 +2190,7 @@ static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
return NULL;
}
-void toggle_wakeup_interrupt(struct msm_hs_port *msm_uport)
+void enable_wakeup_interrupt(struct msm_hs_port *msm_uport)
{
unsigned long flags;
struct uart_port *uport = &(msm_uport->uport);
@@ -2197,7 +2201,6 @@ void toggle_wakeup_interrupt(struct msm_hs_port *msm_uport)
return;
if (!(msm_uport->wakeup.enabled)) {
- MSM_HS_DBG("%s(): Enable Wakeup IRQ", __func__);
enable_irq(msm_uport->wakeup.irq);
disable_irq(uport->irq);
spin_lock_irqsave(&uport->lock, flags);
@@ -2205,12 +2208,28 @@ void toggle_wakeup_interrupt(struct msm_hs_port *msm_uport)
msm_uport->wakeup.enabled = true;
spin_unlock_irqrestore(&uport->lock, flags);
} else {
+ MSM_HS_WARN("%s:Wake up IRQ already enabled", __func__);
+ }
+}
+
+void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
+{
+ unsigned long flags;
+ struct uart_port *uport = &(msm_uport->uport);
+
+ if (!is_use_low_power_wakeup(msm_uport))
+ return;
+ if (msm_uport->wakeup.freed)
+ return;
+
+ if (msm_uport->wakeup.enabled) {
disable_irq_nosync(msm_uport->wakeup.irq);
enable_irq(uport->irq);
spin_lock_irqsave(&uport->lock, flags);
msm_uport->wakeup.enabled = false;
spin_unlock_irqrestore(&uport->lock, flags);
- MSM_HS_DBG("%s(): Disable Wakeup IRQ", __func__);
+ } else {
+ MSM_HS_WARN("%s:Wake up IRQ already disabled", __func__);
}
}
@@ -2267,6 +2286,7 @@ int msm_hs_request_clock_off(struct uart_port *uport)
{
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
int ret = 0;
+ int client_count = 0;
mutex_lock(&msm_uport->mtx);
/*
@@ -2293,8 +2313,10 @@ int msm_hs_request_clock_off(struct uart_port *uport)
atomic_set(&msm_uport->client_req_state, 1);
msm_hs_resource_unvote(msm_uport);
atomic_dec(&msm_uport->client_count);
- MSM_HS_INFO("%s():DISABLE UART CLOCK: ioc %d\n",
- __func__, atomic_read(&msm_uport->client_count));
+ client_count = atomic_read(&msm_uport->client_count);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s: Client_Count %d\n", __func__,
+ client_count);
exit_request_clock_off:
return ret;
}
@@ -2323,8 +2345,9 @@ int msm_hs_request_clock_on(struct uart_port *uport)
msm_hs_resource_vote(UARTDM_TO_MSM(uport));
atomic_inc(&msm_uport->client_count);
client_count = atomic_read(&msm_uport->client_count);
- MSM_HS_INFO("%s():ENABLE UART CLOCK: ioc %d\n",
- __func__, client_count);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s: Client_Count %d\n", __func__,
+ client_count);
/* Clear the flag */
if (msm_uport->obs)
@@ -2342,11 +2365,8 @@ static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
struct uart_port *uport = &msm_uport->uport;
struct tty_struct *tty = NULL;
- msm_hs_resource_vote(msm_uport);
spin_lock_irqsave(&uport->lock, flags);
- MSM_HS_DBG("%s(): ignore %d\n", __func__,
- msm_uport->wakeup.ignore);
if (msm_uport->wakeup.ignore)
msm_uport->wakeup.ignore = 0;
else
@@ -2362,13 +2382,15 @@ static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
tty_insert_flip_char(tty->port,
msm_uport->wakeup.rx_to_inject,
TTY_NORMAL);
- MSM_HS_DBG("%s(): Inject 0x%x", __func__,
- msm_uport->wakeup.rx_to_inject);
+ hex_dump_ipc(msm_uport, msm_uport->rx.ipc_rx_ctxt,
+ "Rx Inject",
+ &msm_uport->wakeup.rx_to_inject, 0, 1);
+ MSM_HS_INFO("Wakeup ISR.Ignore%d\n",
+ msm_uport->wakeup.ignore);
}
}
spin_unlock_irqrestore(&uport->lock, flags);
- msm_hs_resource_unvote(msm_uport);
if (wakeup && msm_uport->wakeup.inject_rx)
tty_flip_buffer_push(tty->port);
@@ -2396,7 +2418,7 @@ static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
ret = pinctrl_select_state(msm_uport->pinctrl,
msm_uport->gpio_state_suspend);
if (ret)
- MSM_HS_ERR("%s(): Failed to pinctrl set_state",
+ MSM_HS_ERR("%s():Failed to pinctrl set_state",
__func__);
} else if (pdata) {
if (gpio_is_valid(pdata->uart_tx_gpio))
@@ -2674,6 +2696,8 @@ static int msm_hs_startup(struct uart_port *uport)
spin_lock_irqsave(&uport->lock, flags);
atomic_set(&msm_uport->client_count, 0);
atomic_set(&msm_uport->client_req_state, 0);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s: Client_Count 0\n", __func__);
msm_hs_start_rx_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
@@ -3092,17 +3116,19 @@ static void msm_hs_pm_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
int ret;
+ int client_count = 0;
if (!msm_uport)
goto err_suspend;
mutex_lock(&msm_uport->mtx);
+ client_count = atomic_read(&msm_uport->client_count);
/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
if (msm_uport->obs) {
ret = pinctrl_select_state(msm_uport->pinctrl,
msm_uport->gpio_state_suspend);
if (ret)
- MSM_HS_ERR("%s(): Error selecting suspend state",
+ MSM_HS_ERR("%s():Error selecting pinctrl suspend state",
__func__);
}
@@ -3111,8 +3137,10 @@ static void msm_hs_pm_suspend(struct device *dev)
obs_manage_irq(msm_uport, false);
msm_hs_clk_bus_unvote(msm_uport);
if (!atomic_read(&msm_uport->client_req_state))
- toggle_wakeup_interrupt(msm_uport);
- MSM_HS_DBG("%s(): return suspend\n", __func__);
+ enable_wakeup_interrupt(msm_uport);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s: PM State Suspended client_count %d\n", __func__,
+ client_count);
mutex_unlock(&msm_uport->mtx);
return;
err_suspend:
@@ -3124,17 +3152,26 @@ static int msm_hs_pm_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
- int ret;
+ int ret = 0;
+ int client_count = 0;
- if (!msm_uport)
- goto err_resume;
+ if (!msm_uport) {
+ dev_err(dev, "%s:Invalid uport\n", __func__);
+ return -ENODEV;
+ }
mutex_lock(&msm_uport->mtx);
+ client_count = atomic_read(&msm_uport->client_count);
if (msm_uport->pm_state == MSM_HS_PM_ACTIVE)
goto exit_pm_resume;
if (!atomic_read(&msm_uport->client_req_state))
- toggle_wakeup_interrupt(msm_uport);
- msm_hs_clk_bus_vote(msm_uport);
+ disable_wakeup_interrupt(msm_uport);
+ ret = msm_hs_clk_bus_vote(msm_uport);
+ if (ret) {
+ MSM_HS_ERR("%s:Failed clock vote %d\n", __func__, ret);
+ dev_err(dev, "%s:Failed clock vote %d\n", __func__, ret);
+ goto exit_pm_resume;
+ }
obs_manage_irq(msm_uport, true);
msm_uport->pm_state = MSM_HS_PM_ACTIVE;
msm_hs_resource_on(msm_uport);
@@ -3144,17 +3181,15 @@ static int msm_hs_pm_resume(struct device *dev)
ret = pinctrl_select_state(msm_uport->pinctrl,
msm_uport->gpio_state_active);
if (ret)
- MSM_HS_ERR("%s(): Error selecting active state",
+ MSM_HS_ERR("%s():Error selecting active state",
__func__);
}
- MSM_HS_DBG("%s(): return resume\n", __func__);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s:PM State:Active client_count %d\n", __func__, client_count);
exit_pm_resume:
mutex_unlock(&msm_uport->mtx);
- return 0;
-err_resume:
- pr_err("%s(): invalid uport", __func__);
- return 0;
+ return ret;
}
#ifdef CONFIG_PM
@@ -3174,20 +3209,20 @@ static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
* If there is an active clk request or an impending userspace request
* fail the suspend callback.
*/
- clk_cnt = atomic_read(&msm_uport->clk_count);
+ clk_cnt = atomic_read(&msm_uport->resource_count);
client_count = atomic_read(&msm_uport->client_count);
- if (clk_cnt || (pm_runtime_enabled(dev) &&
- !pm_runtime_suspended(dev))) {
- MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d,RPM:%d\n",
- __func__, clk_cnt, client_count,
- dev->power.runtime_status);
+ if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
+ MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
+ __func__, clk_cnt, client_count);
ret = -EBUSY;
goto exit_suspend_noirq;
}
prev_pwr_state = msm_uport->pm_state;
msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
- MSM_HS_DBG("%s(): suspending", __func__);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s:PM State:Sys-Suspended client_count %d\n", __func__,
+ client_count);
exit_suspend_noirq:
mutex_unlock(&msm_uport->mtx);
return ret;
@@ -3207,9 +3242,10 @@ static int msm_hs_pm_sys_resume_noirq(struct device *dev)
*/
mutex_lock(&msm_uport->mtx);
- MSM_HS_DBG("%s(): system resume", __func__);
if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED)
msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s:PM State: Suspended\n", __func__);
mutex_unlock(&msm_uport->mtx);
return 0;
}
@@ -3257,6 +3293,7 @@ static int msm_hs_probe(struct platform_device *pdev)
int core_irqres, bam_irqres, wakeup_irqres;
struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
unsigned long data;
+ char name[30];
if (pdev->dev.of_node) {
dev_dbg(&pdev->dev, "device tree enabled\n");
@@ -3350,11 +3387,13 @@ static int msm_hs_probe(struct platform_device *pdev)
iounmap(uport->membase);
return -ENOMEM;
}
+
+ memset(name, 0, sizeof(name));
+ scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+ "_state");
msm_uport->ipc_msm_hs_log_ctxt =
- ipc_log_context_create(IPC_MSM_HS_LOG_PAGES,
- dev_name(msm_uport->uport.dev), 0);
- pr_debug("%s: Device name is %s\n", __func__,
- dev_name(msm_uport->uport.dev));
+ ipc_log_context_create(IPC_MSM_HS_LOG_STATE_PAGES,
+ name, 0);
if (!msm_uport->ipc_msm_hs_log_ctxt) {
dev_err(&pdev->dev, "%s: error creating logging context",
__func__);
@@ -3439,6 +3478,36 @@ static int msm_hs_probe(struct platform_device *pdev)
msm_uport->tx.flush = FLUSH_SHUTDOWN;
msm_uport->rx.flush = FLUSH_SHUTDOWN;
+ memset(name, 0, sizeof(name));
+ scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+ "_tx");
+ msm_uport->tx.ipc_tx_ctxt =
+ ipc_log_context_create(IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+ if (!msm_uport->tx.ipc_tx_ctxt)
+ dev_err(&pdev->dev, "%s: error creating tx logging context",
+ __func__);
+
+ memset(name, 0, sizeof(name));
+ scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+ "_rx");
+ msm_uport->rx.ipc_rx_ctxt = ipc_log_context_create(
+ IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+ if (!msm_uport->rx.ipc_rx_ctxt)
+ dev_err(&pdev->dev, "%s: error creating rx logging context",
+ __func__);
+
+ memset(name, 0, sizeof(name));
+ scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+ "_pwr");
+ msm_uport->ipc_msm_hs_pwr_ctxt = ipc_log_context_create(
+ IPC_MSM_HS_LOG_USER_PAGES, name, 0);
+ if (!msm_uport->ipc_msm_hs_pwr_ctxt)
+ dev_err(&pdev->dev, "%s: error creating usr logging context",
+ __func__);
+
+ uport->irq = core_irqres;
+ msm_uport->bam_irq = bam_irqres;
+
clk_set_rate(msm_uport->clk, msm_uport->uport.uartclk);
msm_hs_clk_bus_vote(msm_uport);
ret = uartdm_init_port(uport);
@@ -3622,9 +3691,9 @@ static void msm_hs_shutdown(struct uart_port *uport)
UART_XMIT_SIZE, DMA_TO_DEVICE);
msm_hs_resource_unvote(msm_uport);
- rc = atomic_read(&msm_uport->clk_count);
+ rc = atomic_read(&msm_uport->resource_count);
if (rc) {
- atomic_set(&msm_uport->clk_count, 1);
+ atomic_set(&msm_uport->resource_count, 1);
MSM_HS_WARN("%s(): removing extra vote\n", __func__);
msm_hs_resource_unvote(msm_uport);
}
@@ -3635,6 +3704,8 @@ static void msm_hs_shutdown(struct uart_port *uport)
if (atomic_read(&msm_uport->client_count)) {
MSM_HS_WARN("%s: Client vote on, forcing to 0\n", __func__);
atomic_set(&msm_uport->client_count, 0);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s: Client_Count 0\n", __func__);
}
msm_hs_unconfig_uart_gpios(uport);
MSM_HS_INFO("%s:UART port closed successfully\n", __func__);
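
The reworked hex_dump_ipc() above prints at most BUF_DUMP_SIZE (32) bytes into a buffer sized (BUF_DUMP_SIZE * 3) + 2, i.e. three characters per byte ("xx ") plus the terminator. A quick userspace check of that sizing arithmetic, with snprintf standing in for the kernel's hex_dump_to_buffer() and the data pattern invented for the demo:

#include <stdio.h>

#define BUF_DUMP_SIZE 32

int main(void)
{
	unsigned char data[BUF_DUMP_SIZE];
	char buf[(BUF_DUMP_SIZE * 3) + 2];	/* "xx " per byte + '\0' */
	int i, len = 0;

	for (i = 0; i < BUF_DUMP_SIZE; i++)
		data[i] = i;
	for (i = 0; i < BUF_DUMP_SIZE; i++)
		len += snprintf(buf + len, sizeof(buf) - len, "%02x ", data[i]);

	printf("%d chars used of %zu available: %s\n", len, sizeof(buf), buf);
	return 0;
}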
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index fa4e23930614..d37fdcc3143c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1114,6 +1114,9 @@ static int acm_probe(struct usb_interface *intf,
if (quirks == NO_UNION_NORMAL) {
data_interface = usb_ifnum_to_if(usb_dev, 1);
control_interface = usb_ifnum_to_if(usb_dev, 0);
+ /* we would crash */
+ if (!data_interface || !control_interface)
+ return -ENODEV;
goto skip_normal_probe;
}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 56593a9a8726..2057d91d8336 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -502,11 +502,15 @@ static int usb_unbind_interface(struct device *dev)
int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *priv)
{
- struct device *dev = &iface->dev;
+ struct device *dev;
struct usb_device *udev;
int retval = 0;
int lpm_disable_error;
+ if (!iface)
+ return -ENODEV;
+
+ dev = &iface->dev;
if (dev->driver)
return -EBUSY;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1560f3f3e756..84df093639ac 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4277,7 +4277,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
- int i, j, retval;
+ int retries, operations, retval, i;
unsigned delay = HUB_SHORT_RESET_TIME;
enum usb_device_speed oldspeed = udev->speed;
const char *speed;
@@ -4379,7 +4379,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* first 8 bytes of the device descriptor to get the ep0 maxpacket
* value.
*/
- for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
+ for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
bool did_new_scheme = false;
if (use_new_scheme(udev, retry_counter)) {
@@ -4406,7 +4406,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* 255 is for WUSB devices, we actually need to use
* 512 (WUSB1.0[4.8.1]).
*/
- for (j = 0; j < 3; ++j) {
+ for (operations = 0; operations < 3; ++operations) {
buf->bMaxPacketSize0 = 0;
r = usb_control_msg(udev, usb_rcvaddr0pipe(),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
@@ -4426,7 +4426,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
r = -EPROTO;
break;
}
- if (r == 0)
+ /*
+ * Some devices time out if they are powered on
+ * when already connected. They need a second
+ * reset. But only on the first attempt,
+ * lest we get into a time out/reset loop
+ */
+ if (r == 0 || (r == -ETIMEDOUT && retries == 0))
break;
}
udev->descriptor.bMaxPacketSize0 =
@@ -4458,7 +4464,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* authorization will assign the final address.
*/
if (udev->wusb == 0) {
- for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
+ for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
retval = hub_set_address(udev, devnum);
if (retval >= 0)
break;
@@ -5386,6 +5392,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
}
bos = udev->bos;
+ udev->bos = NULL;
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
@@ -5478,11 +5485,8 @@ done:
usb_set_usb2_hardware_lpm(udev, 1);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
- /* release the new BOS descriptor allocated by hub_port_init() */
- if (udev->bos != bos) {
- usb_release_bos_descriptor(udev);
- udev->bos = bos;
- }
+ usb_release_bos_descriptor(udev);
+ udev->bos = bos;
return 0;
re_enumerate:
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 4635edf0189b..4d35de1c14c5 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -92,6 +92,13 @@ MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
#define PIPE3_PHYSTATUS_SW BIT(3)
#define PIPE_UTMI_CLK_DIS BIT(8)
+#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
+#define UTMI_OTG_VBUS_VALID BIT(20)
+#define SW_SESSVLD_SEL BIT(28)
+
+#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
+#define LANE0_PWR_PRESENT BIT(24)
+
/* GSI related registers */
#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
@@ -3090,6 +3097,25 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
return 0;
}
+static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
+{
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ /* Update OTG VBUS Valid from HSPHY to controller */
+ dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
+ vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
+ UTMI_OTG_VBUS_VALID,
+ vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
+
+ /* Update only if Super Speed is supported */
+ if (dwc->maximum_speed == USB_SPEED_SUPER) {
+ /* Update VBUS Valid from SSPHY to controller */
+ dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
+ LANE0_PWR_PRESENT,
+ vbus_present ? LANE0_PWR_PRESENT : 0);
+ }
+}
+
/**
* dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
*
@@ -3110,6 +3136,7 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
__func__, dwc->gadget.name);
+ dwc3_override_vbus_status(mdwc, true);
usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
@@ -3125,6 +3152,7 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
usb_gadget_vbus_disconnect(&dwc->gadget);
usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
+ dwc3_override_vbus_status(mdwc, false);
dwc3_usb3_phy_suspend(dwc, false);
}
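
dwc3_override_vbus_status() above sets or clears specific VBUS-related bits depending on vbus_present. Assuming dwc3_msm_write_readback() performs a masked read-modify-write (which its mask/value arguments suggest, though that helper is not shown in this hunk), the bit manipulation can be modelled standalone like this:

#include <stdio.h>
#include <stdint.h>

#define UTMI_OTG_VBUS_VALID (1U << 20)
#define SW_SESSVLD_SEL      (1U << 28)

/* Toy model: clear the bits in 'mask', then OR in 'val' (val subset of mask). */
static void write_readback(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t hs_phy_ctrl = 0;	/* stand-in for the HS_PHY_CTRL_REG image */

	/* vbus present: assert both override bits */
	write_readback(&hs_phy_ctrl, UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL,
		       UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL);
	printf("vbus on:  0x%08x\n", hs_phy_ctrl);

	/* vbus removed: clear them again */
	write_readback(&hs_phy_ctrl, UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL, 0);
	printf("vbus off: 0x%08x\n", hs_phy_ctrl);
	return 0;
}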
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 3639890c0dc7..5db4fe9e3cdf 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1738,7 +1738,10 @@ void unregister_gadget_item(struct config_item *item)
{
struct gadget_info *gi = to_gadget_info(item);
+ /* to protect race with gadget_dev_desc_UDC_store*/
+ mutex_lock(&gi->lock);
unregister_gadget(gi);
+ mutex_unlock(&gi->lock);
}
EXPORT_SYMBOL_GPL(unregister_gadget_item);
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 0a9a3afd72dd..b22ea656367e 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -529,6 +529,14 @@ static int usb_cser_notify(struct f_cdev *port, u8 type, u16 value,
const unsigned len = sizeof(*notify) + length;
void *buf;
int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->is_connected) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("%s: port disconnected\n", __func__);
+ return -ENODEV;
+ }
req = port->port_usb.notify_req;
port->port_usb.notify_req = NULL;
@@ -544,7 +552,9 @@ static int usb_cser_notify(struct f_cdev *port, u8 type, u16 value,
notify->wValue = cpu_to_le16(value);
notify->wIndex = cpu_to_le16(port->port_usb.data_id);
notify->wLength = cpu_to_le16(length);
+ /* 2 byte data copy */
memcpy(buf, data, length);
+ spin_unlock_irqrestore(&port->port_lock, flags);
status = usb_ep_queue(ep, req, GFP_ATOMIC);
if (status < 0) {
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 48f987d77e91..c5fd3ce3ed9a 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -24,6 +24,7 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/uio.h>
+#include <linux/ipc_logging.h>
#include <asm/unaligned.h>
#include <linux/usb/composite.h>
@@ -41,6 +42,15 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
+#define NUM_PAGES 10 /* # of pages for ipc logging */
+
+static void *ffs_ipc_log;
+#define ffs_log(fmt, ...) do { \
+ ipc_log_string(ffs_ipc_log, "%s: " fmt, __func__, \
+ ##__VA_ARGS__); \
+ pr_debug(fmt, ##__VA_ARGS__); \
+} while (0)
+
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
@@ -214,6 +224,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
spin_unlock_irq(&ffs->ev.waitq.lock);
+ ffs_log("enter: state %d setup_state %d flags %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
req->buf = data;
req->length = len;
@@ -238,11 +251,18 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
}
ffs->setup_state = FFS_NO_SETUP;
+
+ ffs_log("exit: state %d setup_state %d flags %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
return req->status ? req->status : req->actual;
}
static int __ffs_ep0_stall(struct ffs_data *ffs)
{
+ ffs_log("state %d setup_state %d flags %lu can_stall %d", ffs->state,
+ ffs->setup_state, ffs->flags, ffs->ev.can_stall);
+
if (ffs->ev.can_stall) {
pr_vdebug("ep0 stall\n");
usb_ep_set_halt(ffs->gadget->ep0);
@@ -263,6 +283,9 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
ENTER();
+ ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+ ffs->state, ffs->setup_state, ffs->flags);
+
/* Fast check if setup was canceled */
if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
@@ -391,6 +414,9 @@ done_spin:
break;
}
+ ffs_log("exit:ret %zu state %d setup_state %d flags %lu", ret,
+ ffs->state, ffs->setup_state, ffs->flags);
+
mutex_unlock(&ffs->mutex);
return ret;
}
@@ -424,6 +450,10 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
ffs->ev.count * sizeof *ffs->ev.types);
spin_unlock_irq(&ffs->ev.waitq.lock);
+
+ ffs_log("state %d setup_state %d flags %lu #evt %zu", ffs->state,
+ ffs->setup_state, ffs->flags, n);
+
mutex_unlock(&ffs->mutex);
return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
@@ -439,6 +469,9 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
ENTER();
+ ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+ ffs->state, ffs->setup_state, ffs->flags);
+
/* Fast check if setup was canceled */
if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
@@ -527,8 +560,12 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
spin_unlock_irq(&ffs->ev.waitq.lock);
done_mutex:
+ ffs_log("exit:ret %d state %d setup_state %d flags %lu", ret,
+ ffs->state, ffs->setup_state, ffs->flags);
+
mutex_unlock(&ffs->mutex);
kfree(data);
+
return ret;
}
@@ -538,6 +575,9 @@ static int ffs_ep0_open(struct inode *inode, struct file *file)
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
if (unlikely(ffs->state == FFS_CLOSING))
return -EBUSY;
@@ -557,6 +597,9 @@ static int ffs_ep0_release(struct inode *inode, struct file *file)
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
ffs_data_closed(ffs);
return 0;
@@ -570,6 +613,9 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
if (code == FUNCTIONFS_INTERFACE_REVMAP) {
struct ffs_function *func = ffs->func;
ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
@@ -588,6 +634,9 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
unsigned int mask = POLLWRNORM;
int ret;
+ ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
poll_wait(file, &ffs->ev.waitq, wait);
ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
@@ -618,6 +667,8 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
break;
}
+ ffs_log("exit: mask %u", mask);
+
mutex_unlock(&ffs->mutex);
return mask;
@@ -648,6 +699,7 @@ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
ep->status = req->status ? req->status : req->actual;
/* Set is_busy false to indicate completion of last request */
ep->is_busy = false;
+ ffs_log("ep status %d for req %p", ep->status, req);
complete(req->context);
}
}
@@ -659,6 +711,8 @@ static void ffs_user_copy_worker(struct work_struct *work)
int ret = io_data->req->status ? io_data->req->status :
io_data->req->actual;
+ ffs_log("enter: ret %d", ret);
+
if (io_data->read && ret > 0) {
use_mm(io_data->mm);
ret = copy_to_iter(io_data->buf, ret, &io_data->data);
@@ -680,6 +734,8 @@ static void ffs_user_copy_worker(struct work_struct *work)
kfree(io_data->to_free);
kfree(io_data->buf);
kfree(io_data);
+
+ ffs_log("exit");
}
static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
@@ -689,8 +745,12 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
ENTER();
+ ffs_log("enter");
+
INIT_WORK(&io_data->work, ffs_user_copy_worker);
schedule_work(&io_data->work);
+
+ ffs_log("exit");
}
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
@@ -701,6 +761,9 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
ssize_t ret, data_len = -EINVAL;
int halt;
+ ffs_log("enter: epfile name %s epfile err %d", epfile->name,
+ atomic_read(&epfile->error));
+
smp_mb__before_atomic();
if (atomic_read(&epfile->error))
return -ENODEV;
@@ -927,6 +990,9 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
}
mutex_unlock(&epfile->mutex);
+
+ ffs_log("exit:ret %zu", ret);
+
return ret;
error_lock:
@@ -934,6 +1000,9 @@ error_lock:
mutex_unlock(&epfile->mutex);
error:
kfree(data);
+
+ ffs_log("exit: ret %zu", ret);
+
return ret;
}
@@ -944,6 +1013,9 @@ ffs_epfile_open(struct inode *inode, struct file *file)
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
@@ -962,6 +1034,9 @@ ffs_epfile_open(struct inode *inode, struct file *file)
smp_mb__before_atomic();
atomic_set(&epfile->error, 0);
+ ffs_log("exit:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
return 0;
}
@@ -973,6 +1048,9 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
spin_lock_irq(&epfile->ffs->eps_lock);
if (likely(io_data && io_data->ep && io_data->req))
@@ -982,6 +1060,8 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
spin_unlock_irq(&epfile->ffs->eps_lock);
+ ffs_log("exit: value %d", value);
+
return value;
}
@@ -992,6 +1072,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
ENTER();
+ ffs_log("enter");
+
if (!is_sync_kiocb(kiocb)) {
p = kmalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -1018,6 +1100,9 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
kfree(p);
else
*from = p->data;
+
+ ffs_log("exit");
+
return res;
}
@@ -1028,6 +1113,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
ENTER();
+ ffs_log("enter");
+
if (!is_sync_kiocb(kiocb)) {
p = kmalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -1066,6 +1153,9 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
} else {
*to = p->data;
}
+
+ ffs_log("enter");
+
return res;
}
@@ -1076,12 +1166,17 @@ ffs_epfile_release(struct inode *inode, struct file *file)
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
smp_mb__before_atomic();
atomic_set(&epfile->opened, 0);
atomic_set(&epfile->error, 1);
ffs_data_closed(epfile->ffs);
file->private_data = NULL;
+ ffs_log("exit");
+
return 0;
}
@@ -1093,6 +1188,9 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
@@ -1143,6 +1241,8 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
}
spin_unlock_irq(&epfile->ffs->eps_lock);
+ ffs_log("exit:ret %d", ret);
+
return ret;
}
@@ -1174,6 +1274,8 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
ENTER();
+ ffs_log("enter");
+
inode = new_inode(sb);
if (likely(inode)) {
@@ -1193,6 +1295,8 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
inode->i_op = iops;
}
+ ffs_log("exit");
+
return inode;
}
@@ -1207,6 +1311,8 @@ static struct dentry *ffs_sb_create_file(struct super_block *sb,
ENTER();
+ ffs_log("enter");
+
dentry = d_alloc_name(sb->s_root, name);
if (unlikely(!dentry))
return NULL;
@@ -1218,6 +1324,9 @@ static struct dentry *ffs_sb_create_file(struct super_block *sb,
}
d_add(dentry, inode);
+
+ ffs_log("exit");
+
return dentry;
}
@@ -1243,6 +1352,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
ENTER();
+ ffs_log("enter");
+
ffs->sb = sb;
data->ffs_data = NULL;
sb->s_fs_info = ffs;
@@ -1267,6 +1378,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
&ffs_ep0_operations)))
return -ENOMEM;
+ ffs_log("exit");
+
return 0;
}
@@ -1274,6 +1387,8 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
{
ENTER();
+ ffs_log("enter");
+
if (!opts || !*opts)
return 0;
@@ -1356,6 +1471,8 @@ invalid:
opts = comma + 1;
}
+ ffs_log("exit");
+
return 0;
}
@@ -1381,6 +1498,8 @@ ffs_fs_mount(struct file_system_type *t, int flags,
ENTER();
+ ffs_log("enter");
+
ret = ffs_fs_parse_opts(&data, opts);
if (unlikely(ret < 0))
return ERR_PTR(ret);
@@ -1410,6 +1529,9 @@ ffs_fs_mount(struct file_system_type *t, int flags,
ffs_release_dev(data.ffs_data);
ffs_data_put(data.ffs_data);
}
+
+ ffs_log("exit");
+
return rv;
}
@@ -1418,12 +1540,16 @@ ffs_fs_kill_sb(struct super_block *sb)
{
ENTER();
+ ffs_log("enter");
+
kill_litter_super(sb);
if (sb->s_fs_info) {
ffs_release_dev(sb->s_fs_info);
ffs_data_closed(sb->s_fs_info);
ffs_data_put(sb->s_fs_info);
}
+
+ ffs_log("exit");
}
static struct file_system_type ffs_fs_type = {
@@ -1449,6 +1575,8 @@ static int functionfs_init(void)
else
pr_err("failed registering file system (%d)\n", ret);
+ ffs_ipc_log = ipc_log_context_create(NUM_PAGES, "f_fs", 0);
+
return ret;
}
@@ -1470,14 +1598,21 @@ static void ffs_data_get(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter");
+
smp_mb__before_atomic();
atomic_inc(&ffs->ref);
+
+ ffs_log("exit");
}
static void ffs_data_opened(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
smp_mb__before_atomic();
atomic_inc(&ffs->ref);
if (atomic_add_return(1, &ffs->opened) == 1 &&
@@ -1485,12 +1620,17 @@ static void ffs_data_opened(struct ffs_data *ffs)
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static void ffs_data_put(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter");
+
smp_mb__before_atomic();
if (unlikely(atomic_dec_and_test(&ffs->ref))) {
pr_info("%s(): freeing\n", __func__);
@@ -1500,12 +1640,17 @@ static void ffs_data_put(struct ffs_data *ffs)
kfree(ffs->dev_name);
kfree(ffs);
}
+
+ ffs_log("exit");
}
static void ffs_data_closed(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
smp_mb__before_atomic();
if (atomic_dec_and_test(&ffs->opened)) {
if (ffs->no_disconnect) {
@@ -1529,6 +1674,9 @@ static void ffs_data_closed(struct ffs_data *ffs)
ffs_data_reset(ffs);
}
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
ffs_data_put(ffs);
}
@@ -1540,6 +1688,8 @@ static struct ffs_data *ffs_data_new(void)
ENTER();
+ ffs_log("enter");
+
atomic_set(&ffs->ref, 1);
atomic_set(&ffs->opened, 0);
ffs->state = FFS_READ_DESCRIPTORS;
@@ -1553,6 +1703,8 @@ static struct ffs_data *ffs_data_new(void)
/* XXX REVISIT need to update it in some places, or do we? */
ffs->ev.can_stall = 1;
+ ffs_log("exit");
+
return ffs;
}
@@ -1560,6 +1712,9 @@ static void ffs_data_clear(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
pr_debug("%s: ffs->gadget= %p, ffs->flags= %lu\n",
__func__, ffs->gadget, ffs->flags);
ffs_closed(ffs);
@@ -1578,12 +1733,18 @@ static void ffs_data_clear(struct ffs_data *ffs)
kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
kfree(ffs->stringtabs);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static void ffs_data_reset(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
ffs_data_clear(ffs);
ffs->epfiles = NULL;
@@ -1606,6 +1767,9 @@ static void ffs_data_reset(struct ffs_data *ffs)
ffs->state = FFS_READ_DESCRIPTORS;
ffs->setup_state = FFS_NO_SETUP;
ffs->flags = 0;
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
@@ -1616,6 +1780,9 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
if (WARN_ON(ffs->state != FFS_ACTIVE
|| test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
return -EBADFD;
@@ -1641,6 +1808,10 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
}
ffs->gadget = cdev->gadget;
+
+ ffs_log("exit: state %d setup_state %d flag %lu gadget %p\n",
+ ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
+
ffs_data_get(ffs);
return 0;
}
@@ -1654,6 +1825,8 @@ static void functionfs_unbind(struct ffs_data *ffs)
ffs->ep0req = NULL;
ffs->gadget = NULL;
clear_bit(FFS_FL_BOUND, &ffs->flags);
+ ffs_log("state %d setup_state %d flag %lu gadget %p\n",
+ ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
ffs_data_put(ffs);
}
}
@@ -1665,6 +1838,9 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
count = ffs->eps_count;
epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
if (!epfiles)
@@ -1690,6 +1866,10 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
}
ffs->epfiles = epfiles;
+
+ ffs_log("exit: epfile name %s state %d setup_state %d flag %lu",
+ epfile->name, ffs->state, ffs->setup_state, ffs->flags);
+
return 0;
}
@@ -1699,6 +1879,8 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
ENTER();
+ ffs_log("enter: epfilename %s", epfile->name);
+
for (; count; --count, ++epfile) {
BUG_ON(mutex_is_locked(&epfile->mutex) ||
waitqueue_active(&epfile->wait));
@@ -1710,6 +1892,8 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
}
kfree(epfiles);
+
+ ffs_log("exit");
}
static void ffs_func_eps_disable(struct ffs_function *func)
@@ -1719,6 +1903,9 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned count = func->ffs->eps_count;
unsigned long flags;
+ ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+ func->ffs->setup_state, func->ffs->flags);
+
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
@@ -1738,6 +1925,8 @@ static void ffs_func_eps_disable(struct ffs_function *func)
}
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+
+ ffs_log("exit");
}
static int ffs_func_eps_enable(struct ffs_function *func)
@@ -1749,6 +1938,9 @@ static int ffs_func_eps_enable(struct ffs_function *func)
unsigned long flags;
int ret = 0;
+ ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+ func->ffs->setup_state, func->ffs->flags);
+
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
struct usb_endpoint_descriptor *ds;
@@ -1786,6 +1978,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
epfile->ep = ep;
epfile->in = usb_endpoint_dir_in(ds);
epfile->isoc = usb_endpoint_xfer_isoc(ds);
+ ffs_log("usb_ep_enable %s", ep->ep->name);
} else {
break;
}
@@ -1797,6 +1990,8 @@ static int ffs_func_eps_enable(struct ffs_function *func)
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+ ffs_log("exit: ret %d", ret);
+
return ret;
}
@@ -1837,6 +2032,8 @@ static int __must_check ffs_do_single_desc(char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u", len);
+
/* At least two bytes are required: length and type */
if (len < 2) {
pr_vdebug("descriptor too short\n");
@@ -1953,6 +2150,8 @@ inv_length:
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT
+ ffs_log("exit: desc type %d length %d", _ds->bDescriptorType, length);
+
return length;
}
@@ -1964,6 +2163,8 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u", len);
+
for (;;) {
int ret;
@@ -1991,6 +2192,8 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
data += ret;
++num;
}
+
+ ffs_log("exit: len %u", len);
}
static int __ffs_data_do_entity(enum ffs_entity_type type,
@@ -2002,6 +2205,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
ENTER();
+ ffs_log("enter: type %u", type);
+
switch (type) {
case FFS_DESCRIPTOR:
break;
@@ -2040,6 +2245,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
break;
}
+ ffs_log("exit");
+
return 0;
}
@@ -2049,6 +2256,8 @@ static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
u16 bcd_version = le16_to_cpu(desc->bcdVersion);
u16 w_index = le16_to_cpu(desc->wIndex);
+ ffs_log("enter");
+
if (bcd_version != 1) {
pr_vdebug("unsupported os descriptors version: %d",
bcd_version);
@@ -2066,6 +2275,8 @@ static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
return -EINVAL;
}
+ ffs_log("exit: size of desc %lu", sizeof(*desc));
+
return sizeof(*desc);
}
@@ -2085,6 +2296,8 @@ static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u os desc type %d", len, type);
+
/* loop over all ext compat/ext prop descriptors */
while (feature_count--) {
ret = entity(type, h, data, len, priv);
@@ -2095,6 +2308,9 @@ static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
data += ret;
len -= ret;
}
+
+ ffs_log("exit");
+
return _len - len;
}
@@ -2108,6 +2324,8 @@ static int __must_check ffs_do_os_descs(unsigned count,
ENTER();
+ ffs_log("enter: len %u", len);
+
for (num = 0; num < count; ++num) {
int ret;
enum ffs_os_desc_type type;
@@ -2157,6 +2375,9 @@ static int __must_check ffs_do_os_descs(unsigned count,
len -= ret;
data += ret;
}
+
+ ffs_log("exit");
+
return _len - len;
}
@@ -2172,6 +2393,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
ENTER();
+ ffs_log("enter: len %u", len);
+
switch (type) {
case FFS_OS_DESC_EXT_COMPAT: {
struct usb_ext_compat_desc *d = data;
@@ -2226,6 +2449,9 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
pr_vdebug("unknown descriptor: %d\n", type);
return -EINVAL;
}
+
+ ffs_log("exit");
+
return length;
}
@@ -2239,6 +2465,8 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
ENTER();
+ ffs_log("enter: len %zu", len);
+
if (get_unaligned_le32(data + 4) != len)
goto error;
@@ -2349,10 +2577,13 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
ffs->ss_descs_count = counts[2];
ffs->ms_os_descs_count = os_descs_count;
+ ffs_log("exit");
+
return 0;
error:
kfree(_data);
+ ffs_log("exit: ret %d", ret);
return ret;
}
@@ -2366,6 +2597,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
ENTER();
+ ffs_log("enter: len %zu", len);
+
if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
get_unaligned_le32(data + 4) != len))
goto error;
@@ -2480,12 +2713,14 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
ffs->stringtabs = stringtabs;
ffs->raw_strings = _data;
+ ffs_log("exit");
return 0;
error_free:
kfree(stringtabs);
error:
kfree(_data);
+ ffs_log("exit: -EINVAL");
return -EINVAL;
}
@@ -2498,6 +2733,9 @@ static void __ffs_event_add(struct ffs_data *ffs,
enum usb_functionfs_event_type rem_type1, rem_type2 = type;
int neg = 0;
+ ffs_log("enter: type %d state %d setup_state %d flag %lu", type,
+ ffs->state, ffs->setup_state, ffs->flags);
+
/*
* Abort any unhandled setup
*
@@ -2557,6 +2795,9 @@ static void __ffs_event_add(struct ffs_data *ffs,
wake_up_locked(&ffs->ev.waitq);
if (ffs->ffs_eventfd)
eventfd_signal(ffs->ffs_eventfd, 1);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static void ffs_event_add(struct ffs_data *ffs,
@@ -2591,6 +2832,8 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
int idx;
static const char *speed_names[] = { "full", "high", "super" };
+ ffs_log("enter");
+
if (type != FFS_DESCRIPTOR)
return 0;
@@ -2666,6 +2909,8 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
}
ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
+ ffs_log("exit");
+
return 0;
}
@@ -2677,6 +2922,8 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
unsigned idx;
u8 newValue;
+ ffs_log("enter: type %d", type);
+
switch (type) {
default:
case FFS_DESCRIPTOR:
@@ -2721,6 +2968,9 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
pr_vdebug("%02x -> %02x\n", *valuep, newValue);
*valuep = newValue;
+
+ ffs_log("exit: newValue %d", newValue);
+
return 0;
}
@@ -2731,6 +2981,8 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
struct ffs_function *func = priv;
u8 length = 0;
+ ffs_log("enter: type %d", type);
+
switch (type) {
case FFS_OS_DESC_EXT_COMPAT: {
struct usb_ext_compat_desc *desc = data;
@@ -2800,6 +3052,8 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
pr_vdebug("unknown descriptor: %d\n", type);
}
+ ffs_log("exit");
+
return length;
}
@@ -2813,6 +3067,8 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
ENTER();
+ ffs_log("enter");
+
/*
* Legacy gadget triggers binding in functionfs_ready_callback,
* which already uses locking; taking the same lock here would
@@ -2847,6 +3103,8 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
ffs_opts->refcnt++;
func->function.strings = func->ffs->stringtabs;
+ ffs_log("exit");
+
return ffs_opts;
}
@@ -2889,6 +3147,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
/* Has descriptors only for speeds gadget does not support */
if (unlikely(!(full | high | super)))
return -ENOTSUPP;
@@ -3006,10 +3267,15 @@ static int _ffs_func_bind(struct usb_configuration *c,
/* And we're done */
ffs_event_add(ffs, FUNCTIONFS_BIND);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
return 0;
error:
/* XXX Do we need to release all claimed endpoints here? */
+ ffs_log("exit: ret %d", ret);
return ret;
}
@@ -3020,6 +3286,8 @@ static int ffs_func_bind(struct usb_configuration *c,
struct ffs_function *func = ffs_func_from_usb(f);
int ret;
+ ffs_log("enter");
+
if (IS_ERR(ffs_opts))
return PTR_ERR(ffs_opts);
@@ -3027,6 +3295,8 @@ static int ffs_func_bind(struct usb_configuration *c,
if (ret && !--ffs_opts->refcnt)
functionfs_unbind(func->ffs);
+ ffs_log("exit: ret %d", ret);
+
return ret;
}
@@ -3037,7 +3307,12 @@ static void ffs_reset_work(struct work_struct *work)
{
struct ffs_data *ffs = container_of(work,
struct ffs_data, reset_work);
+
+ ffs_log("enter");
+
ffs_data_reset(ffs);
+
+ ffs_log("exit");
}
static int ffs_func_set_alt(struct usb_function *f,
@@ -3047,6 +3322,8 @@ static int ffs_func_set_alt(struct usb_function *f,
struct ffs_data *ffs = func->ffs;
int ret = 0, intf;
+ ffs_log("enter");
+
if (alt != (unsigned)-1) {
intf = ffs_func_revmap_intf(func, interface);
if (unlikely(intf < 0))
@@ -3082,6 +3359,8 @@ static int ffs_func_set_alt(struct usb_function *f,
usb_gadget_autopm_get_async(ffs->gadget);
}
+ ffs_log("exit: ret %d", ret);
+
return ret;
}
@@ -3090,9 +3369,13 @@ static void ffs_func_disable(struct usb_function *f)
struct ffs_function *func = ffs_func_from_usb(f);
struct ffs_data *ffs = func->ffs;
+ ffs_log("enter");
+
ffs_func_set_alt(f, 0, (unsigned)-1);
/* matching put to allow LPM on disconnect */
usb_gadget_autopm_put_async(ffs->gadget);
+
+ ffs_log("exit");
}
static int ffs_func_setup(struct usb_function *f,
@@ -3105,6 +3388,8 @@ static int ffs_func_setup(struct usb_function *f,
ENTER();
+ ffs_log("enter");
+
pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
@@ -3148,19 +3433,31 @@ static int ffs_func_setup(struct usb_function *f,
__ffs_event_add(ffs, FUNCTIONFS_SETUP);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+ ffs_log("exit");
+
return 0;
}
static void ffs_func_suspend(struct usb_function *f)
{
ENTER();
+
+ ffs_log("enter");
+
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
+
+ ffs_log("exit");
}
static void ffs_func_resume(struct usb_function *f)
{
ENTER();
+
+ ffs_log("enter");
+
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
+
+ ffs_log("exit");
}
@@ -3177,11 +3474,15 @@ static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
short *nums = func->interfaces_nums;
unsigned count = func->ffs->interfaces_count;
+ ffs_log("enter");
+
for (; count; --count, ++nums) {
if (*nums >= 0 && *nums == intf)
return nums - func->interfaces_nums;
}
+ ffs_log("exit");
+
return -EDOM;
}
@@ -3194,6 +3495,8 @@ static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
struct ffs_dev *dev;
+ ffs_log("enter");
+
list_for_each_entry(dev, &ffs_devices, entry) {
if (!dev->name || !name)
continue;
@@ -3201,6 +3504,8 @@ static struct ffs_dev *_ffs_do_find_dev(const char *name)
return dev;
}
+ ffs_log("exit");
+
return NULL;
}
@@ -3211,12 +3516,16 @@ static struct ffs_dev *_ffs_get_single_dev(void)
{
struct ffs_dev *dev;
+ ffs_log("enter");
+
if (list_is_singular(&ffs_devices)) {
dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
if (dev->single)
return dev;
}
+ ffs_log("exit");
+
return NULL;
}
@@ -3227,11 +3536,17 @@ static struct ffs_dev *_ffs_find_dev(const char *name)
{
struct ffs_dev *dev;
+ ffs_log("enter");
+
dev = _ffs_get_single_dev();
if (dev)
return dev;
- return _ffs_do_find_dev(name);
+ dev = _ffs_do_find_dev(name);
+
+ ffs_log("exit");
+
+ return dev;
}
/* Configfs support *********************************************************/
@@ -3353,6 +3668,10 @@ static void ffs_func_unbind(struct usb_configuration *c,
unsigned long flags;
ENTER();
+
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
if (ffs->func == func) {
ffs_func_eps_disable(func);
ffs->func = NULL;
@@ -3383,6 +3702,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
func->interfaces_nums = NULL;
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
@@ -3445,12 +3767,16 @@ static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
{
struct ffs_dev *existing;
+ ffs_log("enter");
+
existing = _ffs_do_find_dev(name);
if (existing)
return -EBUSY;
dev->name = name;
+ ffs_log("exit");
+
return 0;
}
@@ -3461,10 +3787,14 @@ int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
int ret;
+ ffs_log("enter");
+
ffs_dev_lock();
ret = _ffs_name_dev(dev, name);
ffs_dev_unlock();
+ ffs_log("exit");
+
return ret;
}
EXPORT_SYMBOL_GPL(ffs_name_dev);
@@ -3473,6 +3803,8 @@ int ffs_single_dev(struct ffs_dev *dev)
{
int ret;
+ ffs_log("enter");
+
ret = 0;
ffs_dev_lock();
@@ -3482,6 +3814,9 @@ int ffs_single_dev(struct ffs_dev *dev)
dev->single = true;
ffs_dev_unlock();
+
+ ffs_log("exit");
+
return ret;
}
EXPORT_SYMBOL_GPL(ffs_single_dev);
@@ -3491,12 +3826,17 @@ EXPORT_SYMBOL_GPL(ffs_single_dev);
*/
static void _ffs_free_dev(struct ffs_dev *dev)
{
+
+ ffs_log("enter");
+
list_del(&dev->entry);
if (dev->name_allocated)
kfree(dev->name);
kfree(dev);
if (list_empty(&ffs_devices))
functionfs_cleanup();
+
+ ffs_log("exit");
}
static void *ffs_acquire_dev(const char *dev_name)
@@ -3504,6 +3844,9 @@ static void *ffs_acquire_dev(const char *dev_name)
struct ffs_dev *ffs_dev;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_dev = _ffs_find_dev(dev_name);
@@ -3518,6 +3861,9 @@ static void *ffs_acquire_dev(const char *dev_name)
ffs_dev->mounted = true;
ffs_dev_unlock();
+
+ ffs_log("exit");
+
return ffs_dev;
}
@@ -3526,6 +3872,9 @@ static void ffs_release_dev(struct ffs_data *ffs_data)
struct ffs_dev *ffs_dev;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_dev = ffs_data->private_data;
@@ -3537,6 +3886,8 @@ static void ffs_release_dev(struct ffs_data *ffs_data)
}
ffs_dev_unlock();
+
+ ffs_log("exit");
}
static int ffs_ready(struct ffs_data *ffs)
@@ -3545,6 +3896,9 @@ static int ffs_ready(struct ffs_data *ffs)
int ret = 0;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_obj = ffs->private_data;
@@ -3569,6 +3923,9 @@ static int ffs_ready(struct ffs_data *ffs)
set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
ffs_dev_unlock();
+
+ ffs_log("exit");
+
return ret;
}
@@ -3578,11 +3935,16 @@ static void ffs_closed(struct ffs_data *ffs)
struct f_fs_opts *opts;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_obj = ffs->private_data;
- if (!ffs_obj)
+ if (!ffs_obj) {
+ ffs_dev_unlock();
goto done;
+ }
ffs_obj->desc_ready = false;
@@ -3590,20 +3952,29 @@ static void ffs_closed(struct ffs_data *ffs)
ffs_obj->ffs_closed_callback)
ffs_obj->ffs_closed_callback(ffs);
- if (ffs_obj->opts)
+ if (ffs_obj->opts) {
opts = ffs_obj->opts;
- else
+ } else {
+ ffs_dev_unlock();
goto done;
+ }
smp_mb__before_atomic();
if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
- || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
+ || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) {
+ ffs_dev_unlock();
goto done;
+ }
+
+ ffs_dev_unlock();
- unregister_gadget_item(ffs_obj->opts->
+ if (test_bit(FFS_FL_BOUND, &ffs->flags)) {
+ unregister_gadget_item(opts->
func_inst.group.cg_item.ci_parent->ci_parent);
+ ffs_log("unreg gadget done");
+ }
done:
- ffs_dev_unlock();
+ ffs_log("exit");
}
/* Misc helper functions ****************************************************/
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 5612645d7237..a629723d19cb 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -39,6 +39,8 @@ static struct workqueue_struct *ipa_usb_wq;
static void ipa_disconnect_handler(struct gsi_data_port *d_port);
static int gsi_ctrl_send_notification(struct f_gsi *gsi,
enum gsi_ctrl_notify_state);
+static int gsi_alloc_trb_buffer(struct f_gsi *gsi);
+static void gsi_free_trb_buffer(struct f_gsi *gsi);
void post_event(struct gsi_data_port *port, u8 event)
{
@@ -474,6 +476,9 @@ static void ipa_disconnect_work_handler(struct gsi_data_port *d_port)
if (gsi->d_port.out_ep)
usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_FREE_TRBS);
+
+ /* free buffers allocated with each TRB */
+ gsi_free_trb_buffer(gsi);
}
static int ipa_suspend_work_handler(struct gsi_data_port *d_port)
@@ -547,6 +552,7 @@ static void ipa_work_handler(struct work_struct *w)
struct usb_gadget *gadget = d_port->gadget;
struct device *dev;
struct device *gad_dev;
+ struct f_gsi *gsi;
event = read_event(d_port);
@@ -566,31 +572,27 @@ static void ipa_work_handler(struct work_struct *w)
return;
}
+ gsi = d_port_to_gsi(d_port);
+
switch (d_port->sm_state) {
case STATE_UNINITIALIZED:
break;
case STATE_INITIALIZED:
if (event == EVT_CONNECT_IN_PROGRESS) {
+ usb_gadget_autopm_get(d_port->gadget);
+ log_event_dbg("%s: get = %d", __func__,
+ atomic_read(&gad_dev->power.usage_count));
+ /* allocate buffers used with each TRB */
+ ret = gsi_alloc_trb_buffer(gsi);
+ if (ret) {
+ log_event_err("%s: gsi_alloc_trb_failed\n",
+ __func__);
+ break;
+ }
ipa_connect_channels(d_port);
d_port->sm_state = STATE_CONNECT_IN_PROGRESS;
log_event_dbg("%s: ST_INIT_EVT_CONN_IN_PROG",
__func__);
- } else if (event == EVT_HOST_READY) {
- /*
- * When in a composition such as RNDIS + ADB,
- * RNDIS host sends a GEN_CURRENT_PACKET_FILTER msg
- * to enable/disable flow control eg. during RNDIS
- * adaptor disable/enable from device manager.
- * In the case of the msg to disable flow control,
- * connect IPA channels and enable data path.
- * EVT_HOST_READY is posted to the state machine
- * in the handler for this msg.
- */
- ipa_connect_channels(d_port);
- ipa_data_path_enable(d_port);
- d_port->sm_state = STATE_CONNECTED;
- log_event_dbg("%s: ST_INIT_EVT_HOST_READY",
- __func__);
}
break;
case STATE_CONNECT_IN_PROGRESS:
@@ -648,6 +650,7 @@ static void ipa_work_handler(struct work_struct *w)
&gad_dev->power.usage_count));
} else if (event == EVT_SUSPEND) {
if (peek_event(d_port) == EVT_DISCONNECTED) {
+ read_event(d_port);
ipa_disconnect_work_handler(d_port);
d_port->sm_state = STATE_INITIALIZED;
usb_gadget_autopm_put_async(d_port->gadget);
@@ -718,19 +721,12 @@ static void ipa_work_handler(struct work_struct *w)
case STATE_SUSPENDED:
if (event == EVT_RESUMED) {
+ usb_gadget_autopm_get(d_port->gadget);
+ log_event_dbg("%s: ST_SUS_EVT_RES", __func__);
+ log_event_dbg("%s: get = %d", __func__,
+ atomic_read(&gad_dev->power.usage_count));
ipa_resume_work_handler(d_port);
d_port->sm_state = STATE_CONNECTED;
- /*
- * Increment usage count here to disallow gadget
- * parent suspend. This counter will decrement
- * after IPA handshake is done in disconnect work
- * (due to cable disconnect) or in suspended state.
- */
- usb_gadget_autopm_get_noresume(d_port->gadget);
- log_event_dbg("%s: ST_SUS_EVT_RES", __func__);
- log_event_dbg("%s: get_nores2 = %d", __func__,
- atomic_read(
- &gad_dev->power.usage_count));
} else if (event == EVT_DISCONNECTED) {
ipa_disconnect_work_handler(d_port);
d_port->sm_state = STATE_INITIALIZED;
@@ -1714,6 +1710,92 @@ static int gsi_get_alt(struct usb_function *f, unsigned intf)
return -EINVAL;
}
+static int gsi_alloc_trb_buffer(struct f_gsi *gsi)
+{
+ u32 len_in = 0, len_out = 0;
+ int ret = 0;
+
+ log_event_dbg("allocate trb's buffer\n");
+
+ if (gsi->d_port.in_ep && !gsi->d_port.in_request.buf_base_addr) {
+ log_event_dbg("IN: num_bufs:=%zu, buf_len=%zu\n",
+ gsi->d_port.in_request.num_bufs,
+ gsi->d_port.in_request.buf_len);
+
+ len_in = gsi->d_port.in_request.buf_len *
+ gsi->d_port.in_request.num_bufs;
+ gsi->d_port.in_request.buf_base_addr =
+ dma_zalloc_coherent(gsi->d_port.gadget->dev.parent,
+ len_in, &gsi->d_port.in_request.dma, GFP_KERNEL);
+ if (!gsi->d_port.in_request.buf_base_addr) {
+ dev_err(&gsi->d_port.gadget->dev,
+ "IN buf_base_addr allocate failed %s\n",
+ gsi->function.name);
+ ret = -ENOMEM;
+ goto fail1;
+ }
+ }
+
+ if (gsi->d_port.out_ep && !gsi->d_port.out_request.buf_base_addr) {
+ log_event_dbg("OUT: num_bufs:=%zu, buf_len=%zu\n",
+ gsi->d_port.out_request.num_bufs,
+ gsi->d_port.out_request.buf_len);
+
+ len_out = gsi->d_port.out_request.buf_len *
+ gsi->d_port.out_request.num_bufs;
+ gsi->d_port.out_request.buf_base_addr =
+ dma_zalloc_coherent(gsi->d_port.gadget->dev.parent,
+ len_out, &gsi->d_port.out_request.dma, GFP_KERNEL);
+ if (!gsi->d_port.out_request.buf_base_addr) {
+ dev_err(&gsi->d_port.gadget->dev,
+ "OUT buf_base_addr allocate failed %s\n",
+ gsi->function.name);
+ ret = -ENOMEM;
+ goto fail;
+ }
+ }
+
+ log_event_dbg("finished allocating trb's buffer\n");
+ return ret;
+
+fail:
+ if (len_in && gsi->d_port.in_request.buf_base_addr) {
+ dma_free_coherent(gsi->d_port.gadget->dev.parent, len_in,
+ gsi->d_port.in_request.buf_base_addr,
+ gsi->d_port.in_request.dma);
+ gsi->d_port.in_request.buf_base_addr = NULL;
+ }
+fail1:
+ return ret;
+}
+
+static void gsi_free_trb_buffer(struct f_gsi *gsi)
+{
+ u32 len;
+
+ log_event_dbg("freeing trb's buffer\n");
+
+ if (gsi->d_port.out_ep &&
+ gsi->d_port.out_request.buf_base_addr) {
+ len = gsi->d_port.out_request.buf_len *
+ gsi->d_port.out_request.num_bufs;
+ dma_free_coherent(gsi->d_port.gadget->dev.parent, len,
+ gsi->d_port.out_request.buf_base_addr,
+ gsi->d_port.out_request.dma);
+ gsi->d_port.out_request.buf_base_addr = NULL;
+ }
+
+ if (gsi->d_port.in_ep &&
+ gsi->d_port.in_request.buf_base_addr) {
+ len = gsi->d_port.in_request.buf_len *
+ gsi->d_port.in_request.num_bufs;
+ dma_free_coherent(gsi->d_port.gadget->dev.parent, len,
+ gsi->d_port.in_request.buf_base_addr,
+ gsi->d_port.in_request.dma);
+ gsi->d_port.in_request.buf_base_addr = NULL;
+ }
+}
+
static int gsi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_gsi *gsi = func_to_gsi(f);
@@ -1827,14 +1909,14 @@ static int gsi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (gsi->prot_id == IPA_USB_ECM)
gsi->d_port.cdc_filter = DEFAULT_FILTER;
+ post_event(&gsi->d_port, EVT_CONNECT_IN_PROGRESS);
/*
- * Increment usage count upon cable connect. Decrement
- * after IPA disconnect is done in disconnect work
- * (due to cable disconnect) or in suspend work.
+ * For RNDIS the event is posted from the flow control
+ * handler which is invoked when the host sends the
+ * GEN_CURRENT_PACKET_FILTER message.
*/
- usb_gadget_autopm_get_noresume(gsi->d_port.gadget);
-
- post_event(&gsi->d_port, EVT_CONNECT_IN_PROGRESS);
+ if (gsi->prot_id != IPA_USB_RNDIS)
+ post_event(&gsi->d_port, EVT_HOST_READY);
queue_work(gsi->d_port.ipa_usb_wq,
&gsi->d_port.usb_ipa_w);
}
@@ -2042,7 +2124,6 @@ static int gsi_update_function_bind_params(struct f_gsi *gsi,
struct usb_ep *ep;
struct usb_cdc_notification *event;
struct usb_function *f = &gsi->function;
- u32 len = 0;
int status;
/* maybe allocate device-global string IDs */
@@ -2162,37 +2243,9 @@ skip_string_id_alloc:
gsi->d_port.in_request.buf_len = info->in_req_buf_len;
gsi->d_port.in_request.num_bufs = info->in_req_num_buf;
- len = gsi->d_port.in_request.buf_len * gsi->d_port.in_request.num_bufs;
- dev_dbg(&cdev->gadget->dev, "%zu %zu\n", gsi->d_port.in_request.buf_len,
- gsi->d_port.in_request.num_bufs);
- gsi->d_port.in_request.buf_base_addr =
- dma_zalloc_coherent(cdev->gadget->dev.parent, len,
- &gsi->d_port.in_request.dma, GFP_KERNEL);
- if (!gsi->d_port.in_request.buf_base_addr) {
- dev_err(&cdev->gadget->dev,
- "IN buf_base_addr allocate failed %s\n",
- gsi->function.name);
- goto fail;
- }
-
if (gsi->d_port.out_ep) {
gsi->d_port.out_request.buf_len = info->out_req_buf_len;
gsi->d_port.out_request.num_bufs = info->out_req_num_buf;
- len =
- gsi->d_port.out_request.buf_len *
- gsi->d_port.out_request.num_bufs;
- dev_dbg(&cdev->gadget->dev, "%zu %zu\n",
- gsi->d_port.out_request.buf_len,
- gsi->d_port.out_request.num_bufs);
- gsi->d_port.out_request.buf_base_addr =
- dma_zalloc_coherent(cdev->gadget->dev.parent, len,
- &gsi->d_port.out_request.dma, GFP_KERNEL);
- if (!gsi->d_port.out_request.buf_base_addr) {
- dev_err(&cdev->gadget->dev,
- "OUT buf_base_addr allocate failed %s\n",
- gsi->function.name);
- goto fail;
- }
}
/* Initialize event queue */
@@ -2263,14 +2316,6 @@ fail:
gsi->d_port.out_ep->driver_data = NULL;
if (gsi->d_port.in_ep && gsi->d_port.in_ep->desc)
gsi->d_port.in_ep->driver_data = NULL;
- if (len && gsi->d_port.in_request.buf_base_addr)
- dma_free_coherent(cdev->gadget->dev.parent, len,
- gsi->d_port.in_request.buf_base_addr,
- gsi->d_port.in_request.dma);
- if (len && gsi->d_port.out_request.buf_base_addr)
- dma_free_coherent(cdev->gadget->dev.parent, len,
- gsi->d_port.out_request.buf_base_addr,
- gsi->d_port.out_request.dma);
log_event_err("%s: bind failed for %s", __func__, f->name);
return -ENOMEM;
}
@@ -2564,8 +2609,6 @@ fail:
static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_gsi *gsi = func_to_gsi(f);
- struct usb_composite_dev *cdev = c->cdev;
- u32 len;
/*
* Use drain_workqueue to accomplish below conditions:
@@ -2600,19 +2643,7 @@ static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
if (gsi->c_port.notify) {
kfree(gsi->c_port.notify_req->buf);
usb_ep_free_request(gsi->c_port.notify, gsi->c_port.notify_req);
-
- len =
- gsi->d_port.out_request.buf_len *
- gsi->d_port.out_request.num_bufs;
- dma_free_coherent(&cdev->gadget->dev, len,
- gsi->d_port.out_request.buf_base_addr,
- gsi->d_port.out_request.dma);
}
-
- len = gsi->d_port.in_request.buf_len * gsi->d_port.in_request.num_bufs;
- dma_free_coherent(&cdev->gadget->dev, len,
- gsi->d_port.in_request.buf_base_addr,
- gsi->d_port.in_request.dma);
}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c6bfd13f6c92..1950e87b4219 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -787,6 +787,12 @@ static int iowarrior_probe(struct usb_interface *interface,
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ if (iface_desc->desc.bNumEndpoints < 1) {
+ dev_err(&interface->dev, "Invalid number of endpoints\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 5a29fc96f940..f8121eb4f63a 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -54,10 +54,6 @@
#define QUSB2PHY_PORT_TUNE2 0x240
-#define HS_PHY_CTRL_REG 0x10
-#define UTMI_OTG_VBUS_VALID BIT(20)
-#define SW_SESSVLD_SEL BIT(28)
-
#define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */
#define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */
#define QUSB2PHY_1P8_HPM_LOAD 30000 /* uA */
@@ -76,7 +72,6 @@ MODULE_PARM_DESC(phy_tune2, "QUSB PHY v2 TUNE2");
struct qusb_phy {
struct usb_phy phy;
void __iomem *base;
- void __iomem *qscratch_base;
void __iomem *tune2_efuse_reg;
struct clk *ref_clk_src;
@@ -625,25 +620,6 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
return 0;
}
-static void qusb_write_readback(void *base, u32 offset,
- const u32 mask, u32 val)
-{
- u32 write_val, tmp = readl_relaxed(base + offset);
-
- tmp &= ~mask; /* retain other bits */
- write_val = tmp | val;
-
- writel_relaxed(write_val, base + offset);
-
- /* Read back to see if val was written */
- tmp = readl_relaxed(base + offset);
- tmp &= mask; /* clear other bits */
-
- if (tmp != val)
- pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
- __func__, val, offset);
-}
-
static int qusb_phy_notify_connect(struct usb_phy *phy,
enum usb_device_speed speed)
{
@@ -651,21 +627,11 @@ static int qusb_phy_notify_connect(struct usb_phy *phy,
qphy->cable_connected = true;
- dev_dbg(phy->dev, " cable_connected=%d\n", qphy->cable_connected);
-
if (qphy->qusb_phy_host_init_seq && qphy->phy.flags & PHY_HOST_MODE)
qusb_phy_host_init(phy);
- /* Set OTG VBUS Valid from HSPHY to controller */
- qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG,
- UTMI_OTG_VBUS_VALID,
- UTMI_OTG_VBUS_VALID);
-
- /* Indicate value is driven by UTMI_OTG_VBUS_VALID bit */
- qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG,
- SW_SESSVLD_SEL, SW_SESSVLD_SEL);
-
- dev_dbg(phy->dev, "QUSB2 phy connect notification\n");
+ dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+ qphy->cable_connected);
return 0;
}
@@ -676,17 +642,8 @@ static int qusb_phy_notify_disconnect(struct usb_phy *phy,
qphy->cable_connected = false;
- dev_dbg(phy->dev, " cable_connected=%d\n", qphy->cable_connected);
-
- /* Set OTG VBUS Valid from HSPHY to controller */
- qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG,
- UTMI_OTG_VBUS_VALID, 0);
-
- /* Indicate value is driven by UTMI_OTG_VBUS_VALID bit */
- qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG,
- SW_SESSVLD_SEL, 0);
-
- dev_dbg(phy->dev, "QUSB2 phy disconnect notification\n");
+ dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+ qphy->cable_connected);
return 0;
}
@@ -768,16 +725,6 @@ static int qusb_phy_probe(struct platform_device *pdev)
return PTR_ERR(qphy->base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "qscratch_base");
- if (res) {
- qphy->qscratch_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qphy->qscratch_base)) {
- dev_dbg(dev, "couldn't ioremap qscratch_base\n");
- qphy->qscratch_base = NULL;
- }
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"emu_phy_base");
if (res) {
qphy->emu_phy_base = devm_ioremap_resource(dev, res);
@@ -852,7 +799,7 @@ static int qusb_phy_probe(struct platform_device *pdev)
}
of_property_read_u32_array(dev->of_node,
- "qcom,qemu-init-seq",
+ "qcom,emu-init-seq",
qphy->emu_init_seq,
qphy->emu_init_seq_len);
} else {
@@ -977,11 +924,8 @@ static int qusb_phy_probe(struct platform_device *pdev)
qphy->phy.set_suspend = qusb_phy_set_suspend;
qphy->phy.shutdown = qusb_phy_shutdown;
qphy->phy.type = USB_PHY_TYPE_USB2;
-
- if (qphy->qscratch_base) {
- qphy->phy.notify_connect = qusb_phy_notify_connect;
- qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
- }
+ qphy->phy.notify_connect = qusb_phy_notify_connect;
+ qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
ret = usb_add_phy_dev(&qphy->phy);
if (ret)
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 1eb0c8d6b62f..5ec08098d197 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -88,9 +88,6 @@
#define LINESTATE_DP BIT(0)
#define LINESTATE_DM BIT(1)
-#define HS_PHY_CTRL_REG 0x10
-#define UTMI_OTG_VBUS_VALID BIT(20)
-#define SW_SESSVLD_SEL BIT(28)
#define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */
#define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */
@@ -109,7 +106,6 @@ MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
struct qusb_phy {
struct usb_phy phy;
void __iomem *base;
- void __iomem *qscratch_base;
void __iomem *tune2_efuse_reg;
void __iomem *ref_clk_base;
@@ -686,24 +682,6 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
return 0;
}
-static void qusb_write_readback(void *base, u32 offset,
- const u32 mask, u32 val)
-{
- u32 write_val, tmp = readl_relaxed(base + offset);
- tmp &= ~mask; /* retain other bits */
- write_val = tmp | val;
-
- writel_relaxed(write_val, base + offset);
-
- /* Read back to see if val was written */
- tmp = readl_relaxed(base + offset);
- tmp &= mask; /* clear other bits */
-
- if (tmp != val)
- pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
- __func__, val, offset);
-}
-
static int qusb_phy_notify_connect(struct usb_phy *phy,
enum usb_device_speed speed)
{
@@ -711,18 +689,8 @@ static int qusb_phy_notify_connect(struct usb_phy *phy,
qphy->cable_connected = true;
- dev_dbg(phy->dev, " cable_connected=%d\n", qphy->cable_connected);
-
- /* Set OTG VBUS Valid from HSPHY to controller */
- qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG,
- UTMI_OTG_VBUS_VALID,
- UTMI_OTG_VBUS_VALID);
-
- /* Indicate value is driven by UTMI_OTG_VBUS_VALID bit */
- qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG,
- SW_SESSVLD_SEL, SW_SESSVLD_SEL);
-
- dev_dbg(phy->dev, "QUSB2 phy connect notification\n");
+ dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+ qphy->cable_connected);
return 0;
}
@@ -733,17 +701,8 @@ static int qusb_phy_notify_disconnect(struct usb_phy *phy,
qphy->cable_connected = false;
- dev_dbg(phy->dev, " cable_connected=%d\n", qphy->cable_connected);
-
- /* Set OTG VBUS Valid from HSPHY to controller */
- qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG,
- UTMI_OTG_VBUS_VALID, 0);
-
- /* Indicate value is driven by UTMI_OTG_VBUS_VALID bit */
- qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG,
- SW_SESSVLD_SEL, 0);
-
- dev_dbg(phy->dev, "QUSB2 phy disconnect notification\n");
+ dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+ qphy->cable_connected);
return 0;
}
@@ -827,16 +786,6 @@ static int qusb_phy_probe(struct platform_device *pdev)
return PTR_ERR(qphy->base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "qscratch_base");
- if (res) {
- qphy->qscratch_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qphy->qscratch_base)) {
- dev_dbg(dev, "couldn't ioremap qscratch_base\n");
- qphy->qscratch_base = NULL;
- }
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"emu_phy_base");
if (res) {
qphy->emu_phy_base = devm_ioremap_resource(dev, res);
@@ -929,7 +878,7 @@ static int qusb_phy_probe(struct platform_device *pdev)
}
of_property_read_u32_array(dev->of_node,
- "qcom,qemu-init-seq",
+ "qcom,emu-init-seq",
qphy->emu_init_seq,
qphy->emu_init_seq_len);
} else {
@@ -1051,11 +1000,8 @@ static int qusb_phy_probe(struct platform_device *pdev)
qphy->phy.set_suspend = qusb_phy_set_suspend;
qphy->phy.shutdown = qusb_phy_shutdown;
qphy->phy.type = USB_PHY_TYPE_USB2;
-
- if (qphy->qscratch_base) {
- qphy->phy.notify_connect = qusb_phy_notify_connect;
- qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
- }
+ qphy->phy.notify_connect = qusb_phy_notify_connect;
+ qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
/*
* On some platforms multiple QUSB PHYs are available. If QUSB PHY is
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index c0f5c652d272..f1893e08e51a 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
goto __usbhs_pkt_handler_end;
}
- ret = func(pkt, &is_done);
+ if (likely(func))
+ ret = func(pkt, &is_done);
if (is_done)
__usbhsf_pkt_del(pkt);
@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
pkt->trans = len;
+ usbhsf_tx_irq_ctrl(pipe, 0);
INIT_WORK(&pkt->work, xfer_work);
schedule_work(&pkt->work);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 8f7a78e70975..fa14198daf77 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
+ unsigned long flags;
ureq->req.actual = pkt->actual;
- usbhsg_queue_pop(uep, ureq, 0);
+ usbhs_lock(priv, flags);
+ if (uep)
+ __usbhsg_queue_pop(uep, ureq, 0);
+ usbhs_unlock(priv, flags);
}
static void usbhsg_queue_push(struct usbhsg_uep *uep,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7a76fe4c2f9e..bdc0f2f24f19 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 01bf53392819..244acb1299a9 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
struct usb_serial *serial = port->serial;
struct cypress_private *priv;
+ if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
+ dev_err(&port->dev, "required endpoint is missing\n");
+ return -ENODEV;
+ }
+
priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
cypress_set_termios(tty, port, &priv->tmp_termios);
/* setup the port and start reading from the device */
- if (!port->interrupt_in_urb) {
- dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
- __func__);
- return -1;
- }
-
usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
port->interrupt_in_urb->transfer_buffer,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 12b0e67473ba..3df7b7ec178e 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
static int digi_startup(struct usb_serial *serial)
{
+ struct device *dev = &serial->interface->dev;
struct digi_serial *serial_priv;
int ret;
+ int i;
+
+ /* check whether the device has the expected number of endpoints */
+ if (serial->num_port_pointers < serial->type->num_ports + 1) {
+ dev_err(dev, "OOB endpoints missing\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < serial->type->num_ports + 1 ; i++) {
+ if (!serial->port[i]->read_urb) {
+ dev_err(dev, "bulk-in endpoint missing\n");
+ return -ENODEV;
+ }
+ if (!serial->port[i]->write_urb) {
+ dev_err(dev, "bulk-out endpoint missing\n");
+ return -ENODEV;
+ }
+ }
serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
if (!serial_priv)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8c660ae401d8..b61f12160d37 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ /* ICP DAS I-756xU devices */
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a84df2513994..c5d6c1e73e8e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -872,6 +872,14 @@
#define NOVITUS_BONO_E_PID 0x6010
/*
+ * ICPDAS I-756*U devices
+ */
+#define ICPDAS_VID 0x1b5c
+#define ICPDAS_I7560U_PID 0x0103
+#define ICPDAS_I7561U_PID 0x0104
+#define ICPDAS_I7563U_PID 0x0105
+
+/*
* RT Systems programming cables for various ham radios
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index fd707d6a10e2..89726f702202 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
static int mct_u232_port_probe(struct usb_serial_port *port)
{
+ struct usb_serial *serial = port->serial;
struct mct_u232_private *priv;
+ /* check first to simplify error handling */
+ if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
+ dev_err(&port->dev, "expected endpoint missing\n");
+ return -ENODEV;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* Use second interrupt-in endpoint for reading. */
- priv->read_urb = port->serial->port[1]->interrupt_in_urb;
+ priv->read_urb = serial->port[1]->interrupt_in_urb;
priv->read_urb->context = port;
spin_lock_init(&priv->lock);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 348e19834b83..c6f497f16526 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 5c66d3f7a6d0..9baf081174ce 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,7 +2,7 @@
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
@@ -757,6 +757,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
return SUCCESS;
}
+static int uas_target_alloc(struct scsi_target *starget)
+{
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)
+ dev_to_shost(starget->dev.parent)->hostdata;
+
+ if (devinfo->flags & US_FL_NO_REPORT_LUNS)
+ starget->no_report_luns = 1;
+
+ return 0;
+}
+
static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
@@ -800,7 +811,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
if (devinfo->flags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
- scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
@@ -808,11 +818,12 @@ static struct scsi_host_template uas_host_template = {
.module = THIS_MODULE,
.name = "uas",
.queuecommand = uas_queuecommand,
+ .target_alloc = uas_target_alloc,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_bus_reset_handler = uas_eh_bus_reset_handler,
- .can_queue = 65536, /* Is there a limit on the _host_ ? */
+ .can_queue = MAX_CMNDS,
.this_id = -1,
.sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
@@ -932,6 +943,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result)
goto set_alt0;
+ /*
+ * 1 tag is reserved for untagged commands +
+ * 1 tag to avoid off by one errors in some bridge firmwares
+ */
+ shost->can_queue = devinfo->qdepth - 2;
+
usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev);
if (result)
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index ccc113e83d88..53341a77d89f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: David Webb <djw@noc.ac.uk> */
+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+ "Seagate",
+ "Expansion Desk",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_LUNS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
"Seagate",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 43576ed31ccd..9de988a0f856 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
- US_FL_MAX_SECTORS_240);
+ US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
p = quirks;
while (*p) {
@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'i':
f |= US_FL_IGNORE_DEVICE;
break;
+ case 'j':
+ f |= US_FL_NO_REPORT_LUNS;
+ break;
case 'l':
f |= US_FL_NOT_LOCKABLE;
break;
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 88950e9cb2aa..b601cafba6fd 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -176,6 +176,7 @@ enum mdss_hw_capabilities {
MDSS_CAPS_10_BIT_SUPPORTED,
MDSS_CAPS_CWB_SUPPORTED,
MDSS_CAPS_MDP_VOTE_CLK_NOT_SUPPORTED,
+ MDSS_CAPS_AVR_SUPPORTED,
MDSS_CAPS_MAX,
};
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c
index e391a5aaa45d..e883f045967d 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.c
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.c
@@ -299,7 +299,7 @@ static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
struct mdp_layer_commit commit;
struct mdp_layer_commit32 commit32;
u32 layer_count;
- struct mdp_input_layer *layer_list = NULL, *layer;
+ struct mdp_input_layer *layer_list = NULL;
struct mdp_input_layer32 *layer_list32 = NULL;
struct mdp_output_layer *output_layer = NULL;
@@ -370,8 +370,8 @@ static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
argp, layer_count);
for (i = 0; i < layer_count; i++) {
- kfree(layer[i].scale);
- mdss_mdp_free_layer_pp_info(&layer[i]);
+ kfree(layer_list[i].scale);
+ mdss_mdp_free_layer_pp_info(&layer_list[i]);
}
kfree(layer_list);
layer_list_err:
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 06e5502910b2..4285a14e7f35 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -1747,6 +1747,9 @@ static void __mdss_dsi_update_video_mode_total(struct mdss_panel_data *pdata,
return;
}
+ if (ctrl_pdata->timing_db_mode)
+ MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e8, 0x1);
+
vsync_period =
mdss_panel_get_vtotal(&pdata->panel_info);
hsync_period =
@@ -1756,23 +1759,13 @@ static void __mdss_dsi_update_video_mode_total(struct mdss_panel_data *pdata,
new_dsi_v_total =
((vsync_period - 1) << 16) | (hsync_period - 1);
- MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
- (current_dsi_v_total | 0x8000000));
- if (new_dsi_v_total & 0x8000000) {
- MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
- new_dsi_v_total);
- } else {
- MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
- (new_dsi_v_total | 0x8000000));
- MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
- (new_dsi_v_total & 0x7ffffff));
- }
+ MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C, new_dsi_v_total);
if (ctrl_pdata->timing_db_mode)
MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e4, 0x1);
- pr_debug("%s new_fps:%d vsync:%d hsync:%d frame_rate:%d\n",
- __func__, new_fps, vsync_period, hsync_period,
+ pr_debug("%s new_fps:%d new_vtotal:0x%X cur_vtotal:0x%X frame_rate:%d\n",
+ __func__, new_fps, new_dsi_v_total, current_dsi_v_total,
ctrl_pdata->panel_data.panel_info.mipi.frame_rate);
ctrl_pdata->panel_data.panel_info.current_fps = new_fps;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index 9548ea471385..4bd705bdc05f 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -389,8 +389,10 @@ int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
}
gpio_set_value((ctrl_pdata->rst_gpio), 0);
gpio_free(ctrl_pdata->rst_gpio);
- if (gpio_is_valid(ctrl_pdata->lcd_mode_sel_gpio))
+ if (gpio_is_valid(ctrl_pdata->lcd_mode_sel_gpio)) {
+ gpio_set_value(ctrl_pdata->lcd_mode_sel_gpio, 0);
gpio_free(ctrl_pdata->lcd_mode_sel_gpio);
+ }
}
exit:
@@ -1967,7 +1969,6 @@ static void mdss_dsi_parse_dfps_config(struct device_node *pan_node,
pinfo->dfps_update = DFPS_SUSPEND_RESUME_MODE;
pr_debug("default dfps mode: suspend/resume\n");
}
- mdss_dsi_set_refresh_rate_range(pan_node, pinfo);
} else {
pinfo->dynamic_fps = false;
pr_debug("dfps update mode not configured: disable\n");
@@ -2528,6 +2529,8 @@ static int mdss_panel_parse_dt(struct device_node *np,
mdss_dsi_parse_dfps_config(np, ctrl_pdata);
+ mdss_dsi_set_refresh_rate_range(np, pinfo);
+
pinfo->is_dba_panel = of_property_read_bool(np,
"qcom,dba-panel");
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 43e8373f18df..b2c0c78d3f2b 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -552,13 +552,19 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
struct msm_fb_data_type *mfd = fbi->par;
struct mdss_panel_info *pinfo = mfd->panel_info;
int ret;
+ bool dfps_porch_mode = false;
+
+ if (pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP ||
+ pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP)
+ dfps_porch_mode = true;
ret = scnprintf(buf, PAGE_SIZE,
"pu_en=%d\nxstart=%d\nwalign=%d\nystart=%d\nhalign=%d\n"
"min_w=%d\nmin_h=%d\nroi_merge=%d\ndyn_fps_en=%d\n"
"min_fps=%d\nmax_fps=%d\npanel_name=%s\n"
"primary_panel=%d\nis_pluggable=%d\ndisplay_id=%s\n"
- "is_cec_supported=%d\nis_pingpong_split=%d\n",
+ "is_cec_supported=%d\nis_pingpong_split=%d\n"
+ "dfps_porch_mode=%d\n",
pinfo->partial_update_enabled,
pinfo->roi_alignment.xstart_pix_align,
pinfo->roi_alignment.width_pix_align,
@@ -570,7 +576,8 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
pinfo->dynamic_fps, pinfo->min_fps, pinfo->max_fps,
pinfo->panel_name, pinfo->is_prim_panel,
pinfo->is_pluggable, pinfo->display_id,
- pinfo->is_cec_supported, is_pingpong_split(mfd));
+ pinfo->is_cec_supported, is_pingpong_split(mfd),
+ dfps_porch_mode);
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index 6a6cdc8b502c..4f1435d006b2 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -216,7 +216,7 @@ static int hdmi_edid_reset_parser(struct hdmi_edid_ctrl *edid_ctrl)
/* reset HDR related data */
edid_ctrl->hdr_supported = false;
edid_ctrl->hdr_data.eotf = 0;
- edid_ctrl->hdr_data.descriptor = 0;
+ edid_ctrl->hdr_data.metadata_type_one = false;
edid_ctrl->hdr_data.max_luminance = 0;
edid_ctrl->hdr_data.avg_luminance = 0;
edid_ctrl->hdr_data.min_luminance = 0;
@@ -794,7 +794,7 @@ static ssize_t hdmi_edid_sysfs_rda_hdr_data(struct device *dev,
ret = scnprintf(buf, PAGE_SIZE, "%d, %u, %u, %u, %u, %u\n",
edid_ctrl->hdr_supported,
edid_ctrl->hdr_data.eotf,
- edid_ctrl->hdr_data.descriptor,
+ edid_ctrl->hdr_data.metadata_type_one,
edid_ctrl->hdr_data.max_luminance,
edid_ctrl->hdr_data.avg_luminance,
edid_ctrl->hdr_data.min_luminance);
@@ -964,8 +964,8 @@ static void hdmi_edid_parse_hdrdb(struct hdmi_edid_ctrl *edid_ctrl,
/* Byte 3: Electro-Optical Transfer Functions */
edid_ctrl->hdr_data.eotf = data_block[2] & 0x3F;
- /* Byte 4: Static Metadata Descriptors */
- edid_ctrl->hdr_data.descriptor = data_block[3] & 0x1;
+ /* Byte 4: Static Metadata Descriptor Type 1 */
+ edid_ctrl->hdr_data.metadata_type_one = (data_block[3] & 0x1) & BIT(0);
/* Byte 5: Desired Content Maximum Luminance */
if (hdmi_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE))
@@ -2458,16 +2458,16 @@ u8 hdmi_edid_get_deep_color(void *input)
* Return: HDR data.
*/
void hdmi_edid_get_hdr_data(void *input,
- struct hdmi_edid_hdr_data *hdr_data)
+ struct hdmi_edid_hdr_data **hdr_data)
{
struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
- if (!edid_ctrl || !hdr_data) {
+ if (!edid_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return;
}
- hdr_data = &edid_ctrl->hdr_data;
+ *hdr_data = &edid_ctrl->hdr_data;
}
bool hdmi_edid_is_s3d_mode_supported(void *input, u32 video_mode, u32 s3d_mode)
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.h b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
index c818f3fc0d19..ce6cecbb2e03 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
@@ -31,14 +31,14 @@ struct hdmi_edid_init_data {
/*
* struct hdmi_edid_hdr_data - HDR Static Metadata
* @eotf: Electro-Optical Transfer Function
- * @descriptor: Static Metadata Descriptor
+ * @metadata_type_one: Static Metadata Type 1 support
* @max_luminance: Desired Content Maximum Luminance
* @avg_luminance: Desired Content Frame-average Luminance
* @min_luminance: Desired Content Minimum Luminance
*/
struct hdmi_edid_hdr_data {
u32 eotf;
- u32 descriptor;
+ bool metadata_type_one;
u32 max_luminance;
u32 avg_luminance;
u32 min_luminance;
@@ -61,6 +61,6 @@ bool hdmi_edid_is_s3d_mode_supported(void *input,
u32 video_mode, u32 s3d_mode);
u8 hdmi_edid_get_deep_color(void *edid_ctrl);
void hdmi_edid_get_hdr_data(void *edid_ctrl,
- struct hdmi_edid_hdr_data *hdr_data);
+ struct hdmi_edid_hdr_data **hdr_data);
#endif /* __HDMI_EDID_H__ */
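
A short standalone illustration of why hdmi_edid_get_hdr_data() now takes a double pointer: the removed line assigned to a plain pointer parameter, which only rebinds the callee's local copy, so the caller never received the driver-owned HDR data. The struct below mirrors the header only loosely; the program is an independent example, not driver code.

    #include <stdio.h>

    struct hdmi_edid_hdr_data {
        unsigned int eotf;
        unsigned int max_luminance;
    };

    static struct hdmi_edid_hdr_data driver_owned = { .eotf = 0x7, .max_luminance = 500 };

    /* Old pattern: the assignment is lost when the function returns. */
    static void get_hdr_data_broken(struct hdmi_edid_hdr_data *out)
    {
        out = &driver_owned;            /* only rebinds the local 'out' */
    }

    /* New pattern: write through the double pointer so the caller sees it. */
    static void get_hdr_data_fixed(struct hdmi_edid_hdr_data **out)
    {
        *out = &driver_owned;
    }

    int main(void)
    {
        struct hdmi_edid_hdr_data *p = NULL;

        get_hdr_data_broken(p);
        printf("broken: %p\n", (void *)p);   /* still NULL */

        get_hdr_data_fixed(&p);
        printf("fixed: eotf=0x%x max_lum=%u\n", p->eotf, p->max_luminance);
        return 0;
    }
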
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
index fb59d0b03afe..9ce47ccb5e09 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
@@ -1041,6 +1041,7 @@ void *hdmi_hdcp2p2_init(struct hdmi_hdcp_init_data *init_data)
register_data.hdcp_ctx = &ctrl->lib_ctx;
register_data.client_ops = &client_ops;
register_data.txmtr_ops = &txmtr_ops;
+ register_data.device_type = HDCP_TXMTR_HDMI;
register_data.client_ctx = ctrl;
register_data.tethered = ctrl->tethered;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 3d773371713d..10e7a2d1a940 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -79,6 +79,8 @@
#define HDMI_TX_MAX_FPS 120000
#define HDMI_TX_VERSION_403 0x40000003 /* msmcobalt */
+#define HDMI_GET_MSB(x) (x >> 8)
+#define HDMI_GET_LSB(x) (x & 0xff)
/* Enable HDCP by default */
static bool hdcp_feature_on = true;
@@ -111,6 +113,9 @@ static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl,
enum hdmi_tx_power_module_type module, int enable);
static int hdmi_tx_setup_tmds_clk_rate(struct hdmi_tx_ctrl *hdmi_ctrl);
static void hdmi_tx_fps_work(struct work_struct *work);
+static int hdmi_tx_pinctrl_set_state(struct hdmi_tx_ctrl *hdmi_ctrl,
+ enum hdmi_tx_power_module_type module, bool active);
+static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl);
static struct mdss_hw hdmi_tx_hw = {
.hw_ndx = MDSS_HW_HDMI,
@@ -285,6 +290,29 @@ static inline bool hdmi_tx_is_hdcp_enabled(struct hdmi_tx_ctrl *hdmi_ctrl)
hdmi_ctrl->hdcp_ops;
}
+/*
+ * The sink must support at least one electro-optical transfer function for
+ * the HDMI controller to send the dynamic range and mastering infoframe.
+ */
+static inline bool hdmi_tx_is_hdr_supported(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ struct hdmi_edid_hdr_data *hdr_data;
+
+ hdmi_edid_get_hdr_data(hdmi_tx_get_fd(HDMI_TX_FEAT_EDID), &hdr_data);
+
+ return (hdr_data->eotf & BIT(0)) || (hdr_data->eotf & BIT(1)) ||
+ (hdr_data->eotf & BIT(2));
+}
+
+static inline bool hdmi_tx_metadata_type_one(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ struct hdmi_edid_hdr_data *hdr_data;
+
+ hdmi_edid_get_hdr_data(hdmi_tx_get_fd(HDMI_TX_FEAT_EDID), &hdr_data);
+
+ return hdr_data->metadata_type_one;
+}
+
static inline bool hdmi_tx_dc_support(struct hdmi_tx_ctrl *hdmi_ctrl)
{
return hdmi_ctrl->dc_feature_on && hdmi_ctrl->dc_support &&
@@ -478,25 +506,30 @@ void *hdmi_get_featuredata_from_sysfs_dev(struct device *device,
} /* hdmi_tx_get_featuredata_from_sysfs_dev */
EXPORT_SYMBOL(hdmi_get_featuredata_from_sysfs_dev);
-static int hdmi_tx_config_5v(struct hdmi_tx_ctrl *hdmi_ctrl, bool enable)
+static int hdmi_tx_config_5v(struct hdmi_tx_ctrl *ctrl, bool enable)
{
- struct dss_module_power *pd = NULL;
int ret = 0;
+ struct dss_module_power *pd = NULL;
- if (!hdmi_ctrl) {
- DEV_ERR("%s: invalid input\n", __func__);
+ if (!ctrl) {
+ DEV_ERR("%s: Invalid HDMI ctrl\n", __func__);
ret = -EINVAL;
goto end;
}
- pd = &hdmi_ctrl->pdata.power_data[HDMI_TX_HPD_PM];
- if (!pd || !pd->gpio_config) {
- DEV_ERR("%s: Error: invalid power data\n", __func__);
- ret = -EINVAL;
- goto end;
+ if (ctrl->hdmi_tx_version >= HDMI_TX_VERSION_403)
+ ret = hdmi_tx_pinctrl_set_state(ctrl, HDMI_TX_HPD_PM, enable);
+ else {
+ pd = &ctrl->pdata.power_data[HDMI_TX_HPD_PM];
+ if (!pd || !pd->gpio_config) {
+ DEV_ERR("%s: Invalid power data\n", __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ gpio_set_value(pd->gpio_config->gpio, enable);
}
- gpio_set_value(pd->gpio_config->gpio, enable);
end:
return ret;
}
@@ -1220,12 +1253,6 @@ static ssize_t hdmi_tx_sysfs_wta_5v(struct device *dev,
}
mutex_lock(&hdmi_ctrl->tx_lock);
- pd = &hdmi_ctrl->pdata.power_data[HDMI_TX_HPD_PM];
- if (!pd || !pd->gpio_config) {
- DEV_ERR("%s: Error: invalid power data\n", __func__);
- ret = -EINVAL;
- goto end;
- }
ret = kstrtoint(buf, 10, &read);
if (ret) {
@@ -1245,6 +1272,72 @@ end:
return ret;
}
+static ssize_t hdmi_tx_sysfs_wta_hdr_stream(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ u32 const hdr_param_count = 13;
+ struct hdmi_tx_ctrl *ctrl = NULL;
+
+ ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+ if (!ctrl) {
+ pr_err("%s: invalid input\n", __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (!hdmi_tx_is_hdr_supported(ctrl)) {
+ pr_err("%s: Sink does not support HDR\n", __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u %u",
+ &ctrl->hdr_data.eotf,
+ &ctrl->hdr_data.display_primaries_x[0],
+ &ctrl->hdr_data.display_primaries_y[0],
+ &ctrl->hdr_data.display_primaries_x[1],
+ &ctrl->hdr_data.display_primaries_y[1],
+ &ctrl->hdr_data.display_primaries_x[2],
+ &ctrl->hdr_data.display_primaries_y[2],
+ &ctrl->hdr_data.white_point_x,
+ &ctrl->hdr_data.white_point_y,
+ &ctrl->hdr_data.max_luminance,
+ &ctrl->hdr_data.min_luminance,
+ &ctrl->hdr_data.max_content_light_level,
+ &ctrl->hdr_data.max_average_light_level)
+ != hdr_param_count) {
+ pr_err("%s: Invalid HDR stream data\n", __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ __func__,
+ ctrl->hdr_data.eotf,
+ ctrl->hdr_data.display_primaries_x[0],
+ ctrl->hdr_data.display_primaries_y[0],
+ ctrl->hdr_data.display_primaries_x[1],
+ ctrl->hdr_data.display_primaries_y[1],
+ ctrl->hdr_data.display_primaries_x[2],
+ ctrl->hdr_data.display_primaries_y[2]);
+
+ pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ __func__,
+ ctrl->hdr_data.white_point_x,
+ ctrl->hdr_data.white_point_y,
+ ctrl->hdr_data.max_luminance,
+ ctrl->hdr_data.min_luminance,
+ ctrl->hdr_data.max_content_light_level,
+ ctrl->hdr_data.max_average_light_level);
+
+ hdmi_panel_set_hdr_infoframe(ctrl);
+
+ ret = strnlen(buf, PAGE_SIZE);
+end:
+ return ret;
+}
+
static DEVICE_ATTR(connected, S_IRUGO, hdmi_tx_sysfs_rda_connected, NULL);
static DEVICE_ATTR(hdmi_audio_cb, S_IWUSR, NULL, hdmi_tx_sysfs_wta_audio_cb);
static DEVICE_ATTR(hot_plug, S_IWUSR, NULL, hdmi_tx_sysfs_wta_hot_plug);
@@ -1265,6 +1358,7 @@ static DEVICE_ATTR(avi_cn0_1, S_IWUSR, NULL, hdmi_tx_sysfs_wta_avi_cn_bits);
static DEVICE_ATTR(s3d_mode, S_IRUGO | S_IWUSR, hdmi_tx_sysfs_rda_s3d_mode,
hdmi_tx_sysfs_wta_s3d_mode);
static DEVICE_ATTR(5v, S_IWUSR, NULL, hdmi_tx_sysfs_wta_5v);
+static DEVICE_ATTR(hdr_stream, S_IWUSR, NULL, hdmi_tx_sysfs_wta_hdr_stream);
static struct attribute *hdmi_tx_fs_attrs[] = {
&dev_attr_connected.attr,
@@ -1280,6 +1374,7 @@ static struct attribute *hdmi_tx_fs_attrs[] = {
&dev_attr_avi_cn0_1.attr,
&dev_attr_s3d_mode.attr,
&dev_attr_5v.attr,
+ &dev_attr_hdr_stream.attr,
NULL,
};
static struct attribute_group hdmi_tx_fs_attrs_group = {
@@ -2209,7 +2304,7 @@ static int hdmi_tx_check_capability(struct hdmi_tx_ctrl *hdmi_ctrl)
DEV_DBG("%s: Features <HDMI:%s, HDCP:%s, Deep Color:%s>\n", __func__,
hdmi_disabled ? "OFF" : "ON", hdcp_disabled ? "OFF" : "ON",
- hdmi_ctrl->dc_feature_on ? "OFF" : "ON");
+ !hdmi_ctrl->dc_feature_on ? "OFF" : "ON");
if (hdmi_disabled) {
DEV_ERR("%s: HDMI disabled\n", __func__);
@@ -2635,6 +2730,102 @@ static void hdmi_tx_phy_reset(struct hdmi_tx_ctrl *hdmi_ctrl)
DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET_PLL);
} /* hdmi_tx_phy_reset */
+static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl)
+{
+ u32 packet_payload = 0;
+ u32 packet_header = 0;
+ u32 packet_control = 0;
+ u32 const type_code = 0x87;
+ u32 const version = 0x01;
+ u32 const length = 0x1a;
+ u32 const descriptor_id = 0x00;
+ struct dss_io_data *io = NULL;
+
+ if (!ctrl) {
+ pr_err("%s: invalid input\n", __func__);
+ return;
+ }
+
+ if (!hdmi_tx_is_hdr_supported(ctrl)) {
+ pr_err("%s: Sink does not support HDR\n", __func__);
+ return;
+ }
+
+ io = &ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ pr_err("%s: core io not inititalized\n", __func__);
+ return;
+ }
+
+ /* Setup Packet header and payload */
+ packet_header = type_code | (version << 8) | (length << 16);
+ DSS_REG_W(io, HDMI_GENERIC0_HDR, packet_header);
+
+ packet_payload = (ctrl->hdr_data.eotf << 8);
+ if (hdmi_tx_metadata_type_one(ctrl)) {
+ packet_payload |= (descriptor_id << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[0])
+ << 24);
+ DSS_REG_W(io, HDMI_GENERIC0_0, packet_payload);
+ } else {
+ pr_debug("%s: Metadata Type 1 not supported\n", __func__);
+ DSS_REG_W(io, HDMI_GENERIC0_0, packet_payload);
+ goto enable_packet_control;
+ }
+
+ packet_payload =
+ (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[0]))
+ | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[0]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[0]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[1]) << 24);
+ DSS_REG_W(io, HDMI_GENERIC0_1, packet_payload);
+
+ packet_payload =
+ (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[1]))
+ | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[1]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[1]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[2]) << 24);
+ DSS_REG_W(io, HDMI_GENERIC0_2, packet_payload);
+
+ packet_payload =
+ (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[2]))
+ | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[2]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[2]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_data.white_point_x) << 24);
+ DSS_REG_W(io, HDMI_GENERIC0_3, packet_payload);
+
+ packet_payload =
+ (HDMI_GET_MSB(ctrl->hdr_data.white_point_x))
+ | (HDMI_GET_LSB(ctrl->hdr_data.white_point_y) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_data.white_point_y) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_data.max_luminance) << 24);
+ DSS_REG_W(io, HDMI_GENERIC0_4, packet_payload);
+
+ packet_payload =
+ (HDMI_GET_MSB(ctrl->hdr_data.max_luminance))
+ | (HDMI_GET_LSB(ctrl->hdr_data.min_luminance) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_data.min_luminance) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_data.max_content_light_level) << 24);
+ DSS_REG_W(io, HDMI_GENERIC0_5, packet_payload);
+
+ packet_payload =
+ (HDMI_GET_MSB(ctrl->hdr_data.max_content_light_level))
+ | (HDMI_GET_LSB(ctrl->hdr_data.max_average_light_level) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_data.max_average_light_level) << 16);
+ DSS_REG_W(io, HDMI_GENERIC0_6, packet_payload);
+
+enable_packet_control:
+ /*
+ * GENERIC0_LINE | GENERIC0_CONT | GENERIC0_SEND
+ * Setup HDMI TX generic packet control
+ * Enable this packet to transmit every frame
+ * Enable HDMI TX engine to transmit Generic packet 1
+ */
+ packet_control = DSS_REG_R_ND(io, HDMI_GEN_PKT_CTRL);
+ packet_control |= BIT(0) | BIT(1) | BIT(2) | BIT(16);
+ DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
struct msm_ext_disp_audio_setup_params *params)
{
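
To clarify the payload packing in hdmi_panel_set_hdr_infoframe() above, here is a standalone sketch of how one 16-bit colorimetry value is split with HDMI_GET_LSB/HDMI_GET_MSB and packed little-endian into a 32-bit GENERIC0 payload word. Register writes are replaced by printf and the values are illustrative only.

    #include <stdio.h>

    #define HDMI_GET_MSB(x) ((x) >> 8)
    #define HDMI_GET_LSB(x) ((x) & 0xff)

    int main(void)
    {
        unsigned int eotf = 0x2;              /* illustrative EOTF bits */
        unsigned int descriptor_id = 0x00;
        unsigned int primary_x0 = 0x8a48;     /* 16-bit display primary */

        /* First payload word: low byte left clear, then EOTF, descriptor id,
         * and the LSB of the first x primary, as in the hunk above. */
        unsigned int payload0 = (eotf << 8)
                              | (descriptor_id << 16)
                              | (HDMI_GET_LSB(primary_x0) << 24);

        printf("GENERIC0_0 = 0x%08x (x0 LSB = 0x%02x, MSB = 0x%02x)\n",
               payload0, HDMI_GET_LSB(primary_x0), HDMI_GET_MSB(primary_x0));
        return 0;
    }
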
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.h b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
index 462edac31c09..1c306df70c7e 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
@@ -20,6 +20,7 @@
#include "mdss_hdmi_audio.h"
#define MAX_SWITCH_NAME_SIZE 5
+#define HDR_PRIMARIES_COUNT 3
enum hdmi_tx_io_type {
HDMI_TX_CORE_IO,
@@ -61,6 +62,30 @@ struct hdmi_tx_pinctrl {
struct hdmi_tx_ctrl;
typedef int (*hdmi_tx_evt_handler) (struct hdmi_tx_ctrl *);
+/*
+ * struct hdmi_tx_hdr_stream_data - HDR video stream characteristics
+ * @eotf: Electro-Optical Transfer Function
+ * @display_primaries_x: display primaries data for x-coordinate
+ * @display_primaries_y: display primaries data for y-coordinate
+ * @white_point_x: white point data for x-coordinate
+ * @white_point_y: white point data for y-coordinate
+ * @max_luminance: content maximum luminance
+ * @min_luminance: content minimum luminance
+ * @max_content_light_level: content maximum light level
+ * @max_average_light_level: content average light level
+ */
+struct hdmi_tx_hdr_stream_data {
+ u32 eotf;
+ u32 display_primaries_x[HDR_PRIMARIES_COUNT];
+ u32 display_primaries_y[HDR_PRIMARIES_COUNT];
+ u32 white_point_x;
+ u32 white_point_y;
+ u32 max_luminance;
+ u32 min_luminance;
+ u32 max_content_light_level;
+ u32 max_average_light_level;
+};
+
struct hdmi_tx_ctrl {
struct platform_device *pdev;
struct hdmi_tx_platform_data pdata;
@@ -88,6 +113,7 @@ struct hdmi_tx_ctrl {
struct hdmi_panel_ops panel_ops;
struct msm_ext_disp_audio_setup_params audio_params;
struct work_struct fps_work;
+ struct hdmi_tx_hdr_stream_data hdr_data;
spinlock_t hpd_state_lock;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 5a355f226179..518b84fbad51 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1950,6 +1950,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
mdss_set_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
mdata->has_wb_ubwc = true;
set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_AVR_SUPPORTED, mdata->mdss_caps_map);
break;
default:
mdata->max_target_zorder = 4; /* excluding base layer */
@@ -2489,6 +2490,8 @@ ssize_t mdss_mdp_show_capabilities(struct device *dev,
SPRINT(" separate_rotator");
if (test_bit(MDSS_CAPS_CWB_SUPPORTED, mdata->mdss_caps_map))
SPRINT(" concurrent_writeback");
+ if (test_bit(MDSS_CAPS_AVR_SUPPORTED, mdata->mdss_caps_map))
+ SPRINT(" avr");
SPRINT("\n");
#undef SPRINT
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index da5e7bb8a343..da60570c7085 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -303,6 +303,11 @@ enum mdp_wb_blk_caps {
MDSS_MDP_WB_UBWC = BIT(3),
};
+enum mdss_mdp_avr_mode {
+ MDSS_MDP_AVR_CONTINUOUS = 0,
+ MDSS_MDP_AVR_ONE_SHOT,
+};
+
/**
* enum perf_calc_vote_mode - enum to decide if mdss_mdp_get_bw_vote_mode
* function needs an extra efficiency factor.
@@ -391,6 +396,7 @@ struct mdss_mdp_ctl_intfs_ops {
/* to update lineptr, [1..yres] - enable, 0 - disable */
int (*update_lineptr)(struct mdss_mdp_ctl *ctl, bool enable);
+ int (*avr_ctrl_fnc)(struct mdss_mdp_ctl *);
};
struct mdss_mdp_cwb {
@@ -406,6 +412,11 @@ struct mdss_mdp_cwb {
struct work_struct cwb_work;
};
+struct mdss_mdp_avr_info {
+ bool avr_enabled;
+ int avr_mode;
+};
+
struct mdss_mdp_ctl {
u32 num;
char __iomem *base;
@@ -513,6 +524,7 @@ struct mdss_mdp_ctl {
/* dynamic resolution switch during cont-splash handoff */
bool switch_with_handoff;
+ struct mdss_mdp_avr_info avr_info;
};
struct mdss_mdp_mixer {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 3f2de6915c7e..2218e9c4ac81 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -5528,6 +5528,26 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
sctl = mdss_mdp_get_split_ctl(ctl);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ if (ctl->ops.avr_ctrl_fnc) {
+ ret = ctl->ops.avr_ctrl_fnc(ctl);
+ if (ret) {
+ pr_err("error configuring avr ctrl registers ctl=%d err=%d\n",
+ ctl->num, ret);
+ mutex_unlock(&ctl->lock);
+ return ret;
+ }
+ }
+
+ if (sctl && sctl->ops.avr_ctrl_fnc) {
+ ret = sctl->ops.avr_ctrl_fnc(sctl);
+ if (ret) {
+ pr_err("error configuring avr ctrl registers sctl=%d err=%d\n",
+ sctl->num, ret);
+ mutex_unlock(&ctl->lock);
+ return ret;
+ }
+ }
+
mutex_lock(&ctl->flush_lock);
/*
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index 74ab902f6e8e..de868bcd8f6f 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -688,6 +688,11 @@ enum mdss_mpd_intf_index {
#define MDSS_MDP_REG_INTF_PROG_LINE_INTR_CONF 0x250
#define MDSS_MDP_REG_INTF_VBLANK_END_CONF 0x264
+#define MDSS_MDP_REG_INTF_AVR_CONTROL 0x270
+#define MDSS_MDP_REG_INTF_AVR_MODE 0x274
+#define MDSS_MDP_REG_INTF_AVR_TRIGGER 0x278
+#define MDSS_MDP_REG_INTF_AVR_VTOTAL 0x27C
+
#define MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN 0x0A8
#define MDSS_MDP_REG_INTF_FRAME_COUNT 0x0AC
#define MDSS_MDP_REG_INTF_LINE_COUNT 0x0B0
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index d1ced303b059..72fc20d97f44 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -403,6 +403,76 @@ static void mdss_mdp_video_intf_recovery(void *data, int event)
}
}
+static void mdss_mdp_video_avr_vtotal_setup(struct mdss_mdp_ctl *ctl,
+ struct intf_timing_params *p,
+ struct mdss_mdp_video_ctx *ctx)
+{
+ struct mdss_data_type *mdata = ctl->mdata;
+
+ if (test_bit(MDSS_CAPS_AVR_SUPPORTED, mdata->mdss_caps_map)) {
+ struct mdss_panel_data *pdata = ctl->panel_data;
+ u32 hsync_period = p->hsync_pulse_width + p->h_back_porch +
+ p->width + p->h_front_porch;
+ u32 vsync_period = p->vsync_pulse_width + p->v_back_porch +
+ p->height + p->v_front_porch;
+ u32 min_fps = pdata->panel_info.min_fps;
+ u32 diff_fps = abs(pdata->panel_info.default_fps - min_fps);
+ u32 vtotal = mdss_panel_get_vtotal(&pdata->panel_info);
+
+ int add_porches = mult_frac(vtotal, diff_fps, min_fps);
+
+ u32 vsync_period_slow = vsync_period + add_porches;
+ u32 avr_vtotal = vsync_period_slow * hsync_period;
+
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_AVR_VTOTAL, avr_vtotal);
+
+ MDSS_XLOG(min_fps, vsync_period, vsync_period_slow, avr_vtotal);
+ }
+}
+
+static int mdss_mdp_video_avr_trigger_setup(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_mdp_video_ctx *ctx = NULL;
+ struct mdss_data_type *mdata = ctl->mdata;
+
+ ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx || !ctx->ref_cnt) {
+ pr_err("invalid master ctx\n");
+ return -EINVAL;
+ }
+
+ if (!ctl->is_master)
+ return 0;
+
+ if (ctl->avr_info.avr_enabled &&
+ test_bit(MDSS_CAPS_AVR_SUPPORTED, mdata->mdss_caps_map))
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_AVR_TRIGGER, 1);
+
+ return 0;
+}
+
+static void mdss_mdp_video_avr_ctrl_setup(struct mdss_mdp_video_ctx *ctx,
+ struct mdss_mdp_avr_info *avr_info, bool is_master)
+{
+ u32 avr_ctrl = 0;
+ u32 avr_mode = 0;
+
+ avr_ctrl = avr_info->avr_enabled;
+ avr_mode = avr_info->avr_mode;
+
+ /* Enable avr_vsync_clear_en bit to clear avr in next vsync */
+ if (avr_mode == MDSS_MDP_AVR_ONE_SHOT)
+ avr_mode |= (1 << 8);
+
+ if (is_master)
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_AVR_CONTROL, avr_ctrl);
+
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_AVR_MODE, avr_mode);
+
+ pr_debug("intf:%d avr_mode:%x avr_ctrl:%x\n",
+ ctx->intf_num, avr_mode, avr_ctrl);
+}
+
static int mdss_mdp_video_timegen_setup(struct mdss_mdp_ctl *ctl,
struct intf_timing_params *p,
struct mdss_mdp_video_ctx *ctx)
@@ -1185,16 +1255,7 @@ static int mdss_mdp_video_vfp_fps_update(struct mdss_mdp_video_ctx *ctx,
new_vsync_period_f0 = (vsync_period * hsync_period);
mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
- current_vsync_period_f0 | 0x800000);
- if (new_vsync_period_f0 & 0x800000) {
- mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
new_vsync_period_f0);
- } else {
- mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
- new_vsync_period_f0 | 0x800000);
- mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
- new_vsync_period_f0 & 0x7fffff);
- }
pr_debug("if:%d vtotal:%d htotal:%d f0:0x%x nw_f0:0x%x\n",
ctx->intf_num, vsync_period, hsync_period,
@@ -1404,6 +1465,11 @@ static int mdss_mdp_video_config_fps(struct mdss_mdp_ctl *ctl, int new_fps)
}
/*
+ * Make sure the controller settings are committed
+ */
+ wmb();
+
+ /*
* MDP INTF registers support DB on targets
* starting from MDP v1.5.
*/
@@ -1530,6 +1596,12 @@ static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
CTL_INTF_EVENT_FLAG_DEFAULT);
}
+ rc = mdss_mdp_video_avr_trigger_setup(ctl);
+ if (rc) {
+ pr_err("avr trigger setup failed\n");
+ return rc;
+ }
+
if (mdss_mdp_is_lineptr_supported(ctl))
mdss_mdp_video_lineptr_ctrl(ctl, true);
@@ -1673,7 +1745,9 @@ static void mdss_mdp_fetch_start_config(struct mdss_mdp_video_ctx *ctx,
h_total = mdss_panel_get_htotal(pinfo, true);
fetch_start = (v_total - pinfo->prg_fet) * h_total + 1;
- fetch_enable = BIT(31);
+
+ fetch_enable = mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG);
+ fetch_enable |= BIT(31);
if (pinfo->dynamic_fps && (pinfo->dfps_update ==
DFPS_IMMEDIATE_CLK_UPDATE_MODE))
@@ -1886,6 +1960,8 @@ static int mdss_mdp_video_ctx_setup(struct mdss_mdp_ctl *ctl,
mdss_mdp_handoff_programmable_fetch(ctl, ctx);
}
+ mdss_mdp_video_avr_vtotal_setup(ctl, itp, ctx);
+
mdss_mdp_disable_prefill(ctl);
mdp_video_write(ctx, MDSS_MDP_REG_INTF_PANEL_FORMAT, ctl->dst_format);
@@ -2124,6 +2200,29 @@ static int mdss_mdp_video_early_wake_up(struct mdss_mdp_ctl *ctl)
return 0;
}
+static int mdss_mdp_video_avr_ctrl(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_mdp_video_ctx *ctx = NULL, *sctx = NULL;
+
+ ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx || !ctx->ref_cnt) {
+ pr_err("invalid master ctx\n");
+ return -EINVAL;
+ }
+ mdss_mdp_video_avr_ctrl_setup(ctx, &ctl->avr_info, ctl->is_master);
+
+ if (is_pingpong_split(ctl->mfd)) {
+ sctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[SLAVE_CTX];
+ if (!sctx || !sctx->ref_cnt) {
+ pr_err("invalid slave ctx\n");
+ return -EINVAL;
+ }
+ mdss_mdp_video_avr_ctrl_setup(sctx, &ctl->avr_info, false);
+ }
+
+ return 0;
+}
+
int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl)
{
int intfs_num, ret = 0;
@@ -2144,6 +2243,7 @@ int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl)
ctl->ops.config_fps_fnc = mdss_mdp_video_config_fps;
ctl->ops.early_wake_up_fnc = mdss_mdp_video_early_wake_up;
ctl->ops.update_lineptr = mdss_mdp_video_lineptr_ctrl;
+ ctl->ops.avr_ctrl_fnc = mdss_mdp_video_avr_ctrl;
return 0;
}
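
A worked-arithmetic sketch of the AVR vtotal programmed in mdss_mdp_video_avr_vtotal_setup() above: the vertical period is stretched by the extra porch lines needed to slow the panel from its default rate down to min_fps, then expressed in hsync cycles. The timing numbers are made up, and for simplicity the panel vtotal is assumed equal to the programmed vsync period.

    #include <stdio.h>

    /* same semantics as the kernel's mult_frac(): x * numer / denom computed
     * without overflowing the intermediate product for these small values */
    static unsigned int mult_frac(unsigned int x, unsigned int numer,
                                  unsigned int denom)
    {
        return (x / denom) * numer + ((x % denom) * numer) / denom;
    }

    int main(void)
    {
        unsigned int hsync_period = 2200;   /* h total in pixels */
        unsigned int vsync_period = 1125;   /* v total in lines  */
        unsigned int default_fps = 60, min_fps = 48;

        unsigned int add_porches = mult_frac(vsync_period,
                                             default_fps - min_fps, min_fps);
        unsigned int vsync_period_slow = vsync_period + add_porches;
        unsigned int avr_vtotal = vsync_period_slow * hsync_period;

        printf("add_porches=%u slow_vtotal=%u avr_vtotal=%u\n",
               add_porches, vsync_period_slow, avr_vtotal);
        return 0;
    }
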
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 0ae420724d61..35bd0932f321 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -288,6 +288,57 @@ static int mdss_mdp_validate_destination_scaler(struct msm_fb_data_type *mfd,
return ret;
}
+static int mdss_mdp_avr_validate(struct msm_fb_data_type *mfd,
+ struct mdp_layer_commit_v1 *commit)
+{
+ struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+ struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+ int req = 0;
+ struct mdss_panel_info *pinfo = NULL;
+
+ if (!ctl || !mdata || !commit) {
+ pr_err("Invalid input parameters\n");
+ return -EINVAL;
+ }
+
+ if (!(commit->flags & MDP_COMMIT_AVR_EN))
+ return 0;
+
+ pinfo = &ctl->panel_data->panel_info;
+
+ if (!test_bit(MDSS_CAPS_AVR_SUPPORTED, mdata->mdss_caps_map) ||
+ (pinfo->max_fps == pinfo->min_fps)) {
+ pr_err("AVR not supported\n");
+ return -ENODEV;
+ }
+
+ if (pinfo->dynamic_fps &&
+ !(pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP ||
+ pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP)) {
+ pr_err("Dynamic fps and AVR cannot coexists\n");
+ return -EINVAL;
+ }
+
+ if (!ctl->is_video_mode) {
+ pr_err("AVR not supported in command mode\n");
+ return -EINVAL;
+ }
+
+ return req;
+}
+
+static void __update_avr_info(struct mdss_mdp_ctl *ctl,
+ struct mdp_layer_commit_v1 *commit)
+{
+ if (commit->flags & MDP_COMMIT_AVR_EN)
+ ctl->avr_info.avr_enabled = true;
+
+ ctl->avr_info.avr_mode = MDSS_MDP_AVR_CONTINUOUS;
+
+ if (commit->flags & MDP_COMMIT_AVR_ONE_SHOT_MODE)
+ ctl->avr_info.avr_mode = MDSS_MDP_AVR_ONE_SHOT;
+}
+
/*
* __layer_needs_src_split() - check needs source split configuration
* @layer: input layer
@@ -2247,13 +2298,13 @@ int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
struct mdss_overlay_private *mdp5_data;
struct mdss_mdp_data *src_data[MDSS_MDP_MAX_SSPP];
struct mdss_mdp_validate_info_t *validate_info_list;
+ struct mdss_mdp_ctl *sctl = NULL;
mdp5_data = mfd_to_mdp5_data(mfd);
if (!mdp5_data || !mdp5_data->ctl)
return -EINVAL;
-
if (commit->output_layer) {
ret = __is_cwb_requested(commit->output_layer->flags);
if (IS_ERR_VALUE(ret)) {
@@ -2267,6 +2318,18 @@ int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
}
}
+ ret = mdss_mdp_avr_validate(mfd, commit);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("AVR validate failed\n");
+ return -EINVAL;
+ }
+
+ __update_avr_info(mdp5_data->ctl, commit);
+
+ sctl = mdss_mdp_get_split_ctl(mdp5_data->ctl);
+ if (sctl)
+ __update_avr_info(sctl, commit);
+
layer_list = commit->input_layers;
/* handle null commit */
@@ -2428,6 +2491,12 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
}
}
+ rc = mdss_mdp_avr_validate(mfd, commit);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("AVR validate failed\n");
+ return -EINVAL;
+ }
+
return __validate_layers(mfd, file, commit);
}
diff --git a/drivers/video/fbdev/msm/mdss_panel.c b/drivers/video/fbdev/msm/mdss_panel.c
index 97025b3a9c23..61911810b2c0 100644
--- a/drivers/video/fbdev/msm/mdss_panel.c
+++ b/drivers/video/fbdev/msm/mdss_panel.c
@@ -644,6 +644,7 @@ void mdss_panel_info_from_timing(struct mdss_panel_timing *pt,
pinfo->dsc_enc_total = pt->dsc_enc_total;
pinfo->fbc = pt->fbc;
pinfo->compression_mode = pt->compression_mode;
+ pinfo->default_fps = pinfo->mipi.frame_rate;
pinfo->roi_alignment = pt->roi_alignment;
pinfo->te = pt->te;
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 8e5cf194cc0b..4469202eaa8e 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -17,6 +17,7 @@
*
*/
+#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"
@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
vp_iowrite8(0, &vp_dev->common->device_status);
- /* Flush out the status write, and flush in device writes,
- * including MSI-X interrupts, if any. */
- vp_ioread8(&vp_dev->common->device_status);
+ /* After writing 0 to device_status, the driver MUST wait for a read of
+ * device_status to return 0 before reinitializing the device.
+ * This will flush out the status write, and flush in device writes,
+ * including MSI-X interrupts, if any.
+ */
+ while (vp_ioread8(&vp_dev->common->device_status))
+ msleep(1);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
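
A small standalone model of the reset handshake added above: after writing 0 to device_status, the driver must keep reading until the device reports 0 before it reinitializes anything. The "device" here is simulated by a counter that takes a few reads to finish its reset; this is an illustration of the polling requirement, not driver code.

    #include <stdio.h>

    static unsigned char fake_device_status = 0x0f;
    static int pending_reset_reads = 3;

    static void write_status(unsigned char val)
    {
        if (val == 0)
            pending_reset_reads = 3;   /* reset takes a little while */
        fake_device_status = val;
    }

    static unsigned char read_status(void)
    {
        if (pending_reset_reads > 0) {
            pending_reset_reads--;
            return 0xff;               /* reset not finished yet */
        }
        return fake_device_status;
    }

    int main(void)
    {
        int polls = 0;

        write_status(0);               /* 0 status means a reset */
        while (read_status() != 0)     /* the real driver msleep()s here */
            polls++;

        printf("device reset after %d extra polls\n", polls);
        return 0;
    }
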
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 71e78ef4b736..3a75f3b53452 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -237,7 +237,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
/* Fall through */
case WDIOC_GETTIMEOUT:
- return copy_to_user(argp, &timeout, sizeof(int));
+ return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
default:
return -ENOTTY;
}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 524c22146429..44367783f07a 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;
- irq_move_irq(data);
+ if (!VALID_EVTCHN(evtchn))
+ return;
- if (VALID_EVTCHN(evtchn))
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+ int masked = test_and_set_mask(evtchn);
+
+ clear_evtchn(evtchn);
+
+ irq_move_masked_irq(data);
+
+ if (!masked)
+ unmask_evtchn(evtchn);
+ } else
clear_evtchn(evtchn);
if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
- irq_move_irq(data);
+ if (!VALID_EVTCHN(evtchn))
+ return;
- if (VALID_EVTCHN(evtchn))
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+ int masked = test_and_set_mask(evtchn);
+
+ clear_evtchn(evtchn);
+
+ irq_move_masked_irq(data);
+
+ if (!masked)
+ unmask_evtchn(evtchn);
+ } else
clear_evtchn(evtchn);
}