Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/acpi_platform.c | 8
-rw-r--r--  drivers/acpi/acpi_video.c | 3
-rw-r--r--  drivers/acpi/blacklist.c | 28
-rw-r--r--  drivers/acpi/nfit.c | 6
-rw-r--r--  drivers/acpi/power.c | 1
-rw-r--r--  drivers/base/power/wakeup.c | 4
-rw-r--r--  drivers/block/zram/zram_drv.c | 6
-rw-r--r--  drivers/bluetooth/btfm_slim.h | 2
-rw-r--r--  drivers/bluetooth/btfm_slim_wcn3990.c | 31
-rw-r--r--  drivers/char/Kconfig | 6
-rw-r--r--  drivers/char/adsprpc.c | 16
-rw-r--r--  drivers/char/diag/diag_memorydevice.c | 57
-rw-r--r--  drivers/char/diag/diagchar_core.c | 7
-rw-r--r--  drivers/char/diag/diagfwd_glink.c | 15
-rw-r--r--  drivers/char/diag/diagfwd_glink.h | 1
-rw-r--r--  drivers/char/mem.c | 82
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 2
-rw-r--r--  drivers/char/virtio_console.c | 12
-rw-r--r--  drivers/clk/clk.c | 55
-rw-r--r--  drivers/clk/msm/clock-cpu-8996.c | 25
-rw-r--r--  drivers/clk/msm/clock-gcc-8996.c | 8
-rw-r--r--  drivers/clk/sunxi/clk-simple-gates.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c | 8
-rw-r--r--  drivers/crypto/caam/ctrl.c | 3
-rw-r--r--  drivers/esoc/esoc-mdm-4x.c | 278
-rw-r--r--  drivers/esoc/esoc-mdm-drv.c | 44
-rw-r--r--  drivers/esoc/esoc-mdm-pon.c | 84
-rw-r--r--  drivers/esoc/esoc-mdm.h | 5
-rw-r--r--  drivers/esoc/esoc.h | 16
-rw-r--r--  drivers/esoc/esoc_bus.c | 2
-rw-r--r--  drivers/esoc/esoc_dev.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 73
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 12
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 18
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_snapshot.c | 32
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 37
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dba_bridge.c | 357
-rw-r--r--  drivers/gpu/drm/msm/dba_bridge.h | 67
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c | 29
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_defs.h | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_display.c | 379
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_display.h | 13
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_drm.c | 119
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_panel.c | 59
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_panel.h | 16
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c | 188
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h | 37
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c | 52
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 23
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 33
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 20
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 13
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 206
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 271
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 41
-rw-r--r--  drivers/gpu/drm/msm/msm_prop.c | 67
-rw-r--r--  drivers/gpu/drm/msm/msm_prop.h | 41
-rw-r--r--  drivers/gpu/drm/msm/msm_smmu.c | 152
-rw-r--r--  drivers/gpu/drm/msm/msm_snapshot.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_snapshot_api.h | 13
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_backlight.c | 12
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.c | 12
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_formats.c | 6
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c | 14
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_rm.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 79
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 31
-rw-r--r--  drivers/gpu/msm/kgsl_sharedmem.c | 133
-rw-r--r--  drivers/hv/channel.c | 16
-rw-r--r--  drivers/hv/connection.c | 8
-rw-r--r--  drivers/hv/hv.c | 11
-rw-r--r--  drivers/hv/hv_balloon.c | 140
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 2
-rw-r--r--  drivers/hv/vmbus_drv.c | 8
-rw-r--r--  drivers/iio/adc/ti_am335x_adc.c | 13
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-trigger.c | 6
-rw-r--r--  drivers/iio/gyro/bmg160_core.c | 12
-rw-r--r--  drivers/input/joystick/iforce/iforce-usb.c | 3
-rw-r--r--  drivers/input/joystick/xpad.c | 2
-rw-r--r--  drivers/input/misc/cm109.c | 4
-rw-r--r--  drivers/input/misc/hbtp_input.c | 37
-rw-r--r--  drivers/input/misc/ims-pcu.c | 4
-rw-r--r--  drivers/input/misc/qpnp-power-on.c | 61
-rw-r--r--  drivers/input/misc/yealink.c | 4
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 20
-rw-r--r--  drivers/input/mouse/elantech.c | 8
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 14
-rw-r--r--  drivers/input/tablet/hanwang.c | 3
-rw-r--r--  drivers/input/tablet/kbtab.c | 3
-rw-r--r--  drivers/input/touchscreen/st/fts.c | 2
-rw-r--r--  drivers/input/touchscreen/st/fts_lib/ftsFlash.c | 2
-rw-r--r--  drivers/input/touchscreen/st/fts_lib/ftsGesture.c | 12
-rw-r--r--  drivers/input/touchscreen/st/fts_lib/ftsTest.c | 12
-rw-r--r--  drivers/input/touchscreen/sur40.c | 3
-rw-r--r--  drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c | 52
-rw-r--r--  drivers/iommu/Kconfig | 2
-rw-r--r--  drivers/iommu/arm-smmu.c | 24
-rw-r--r--  drivers/iommu/intel-iommu.c | 2
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 14
-rw-r--r--  drivers/iommu/io-pgtable.h | 2
-rw-r--r--  drivers/iommu/iommu-debug.c | 541
-rw-r--r--  drivers/iommu/iommu.c | 9
-rw-r--r--  drivers/irqchip/irq-imx-gpcv2.c | 2
-rw-r--r--  drivers/isdn/gigaset/bas-gigaset.c | 3
-rw-r--r--  drivers/md/Kconfig | 17
-rw-r--r--  drivers/md/Makefile | 4
-rw-r--r--  drivers/md/dm-android-verity.c | 55
-rw-r--r--  drivers/md/dm-verity-avb.c | 217
-rw-r--r--  drivers/md/dm-verity-target.c | 6
-rw-r--r--  drivers/md/dm-verity.h | 1
-rw-r--r--  drivers/md/dm.c | 29
-rw-r--r--  drivers/md/raid1.c | 5
-rw-r--r--  drivers/md/raid10.c | 25
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c | 2
-rw-r--r--  drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c | 35
-rw-r--r--  drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h | 3
-rw-r--r--  drivers/media/platform/msm/camera_v2/msm.c | 7
-rw-r--r--  drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c | 82
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c | 9
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c | 4
-rw-r--r--  drivers/media/usb/dvb-usb-v2/dvb_usb_core.c | 9
-rw-r--r--  drivers/media/usb/dvb-usb/dvb-usb-firmware.c | 33
-rw-r--r--  drivers/media/usb/uvc/uvc_driver.c | 118
-rw-r--r--  drivers/misc/Kconfig | 6
-rw-r--r--  drivers/misc/Makefile | 2
-rw-r--r--  drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c | 65
-rw-r--r--  drivers/misc/qcom/qdsp6v2/audio_utils_aio.c | 6
-rw-r--r--  drivers/misc/uid_cputime.c | 240
-rw-r--r--  drivers/misc/uid_sys_stats.c | 505
-rw-r--r--  drivers/mmc/card/queue.c | 7
-rw-r--r--  drivers/mmc/core/core.c | 13
-rw-r--r--  drivers/mmc/core/host.c | 12
-rw-r--r--  drivers/mmc/core/mmc.c | 57
-rw-r--r--  drivers/mmc/host/cmdq_hci.c | 15
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 1
-rw-r--r--  drivers/mmc/host/ushc.c | 3
-rw-r--r--  drivers/mtd/bcm47xxpart.c | 10
-rw-r--r--  drivers/mtd/ubi/upd.c | 8
-rw-r--r--  drivers/net/can/spi/rh850.c | 615
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 6
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 20
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 102
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 15
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 9
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 73
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 67
-rw-r--r--  drivers/net/macvlan.c | 11
-rw-r--r--  drivers/net/phy/dp83640.c | 2
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/ppp/pppolac.c | 4
-rw-r--r--  drivers/net/ppp/pppopns.c | 4
-rw-r--r--  drivers/net/usb/catc.c | 56
-rw-r--r--  drivers/net/usb/pegasus.c | 29
-rw-r--r--  drivers/net/usb/rtl8150.c | 34
-rw-r--r--  drivers/net/vrf.c | 3
-rw-r--r--  drivers/net/vxlan.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi.c | 21
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-spectral.c | 8
-rw-r--r--  drivers/net/wireless/ath/wil6210/Kconfig | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 21
-rw-r--r--  drivers/net/wireless/ath/wil6210/fw_inc.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 32
-rw-r--r--  drivers/net/wireless/ath/wil6210/pmc.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/rx_reorder.c | 12
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 24
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 6
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 3
-rw-r--r--  drivers/net/wireless/cnss/Kconfig | 9
-rw-r--r--  drivers/net/wireless/cnss/cnss_pci.c | 5
-rw-r--r--  drivers/net/wireless/cnss_prealloc/cnss_prealloc.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 15
-rw-r--r--  drivers/nvdimm/bus.c | 6
-rw-r--r--  drivers/parport/share.c | 6
-rw-r--r--  drivers/pci/host/pci-msm.c | 7
-rw-r--r--  drivers/pci/iov.c | 70
-rw-r--r--  drivers/pci/pci.c | 34
-rw-r--r--  drivers/pci/pci.h | 7
-rw-r--r--  drivers/pci/probe.c | 3
-rw-r--r--  drivers/pci/rom.c | 5
-rw-r--r--  drivers/pci/setup-res.c | 48
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 5
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 4
-rw-r--r--  drivers/platform/msm/gpio-usbdetect.c | 142
-rw-r--r--  drivers/platform/msm/gsi/gsi_dbg.c | 17
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 20
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 1
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_rt.c | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc.c | 15
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h | 45
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 41
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 24
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 17
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 15
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 63
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h | 12
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 8
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 29
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c | 18
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 4
-rw-r--r--  drivers/platform/msm/mhi/mhi.h | 17
-rw-r--r--  drivers/platform/msm/mhi/mhi_main.c | 28
-rw-r--r--  drivers/platform/msm/msm_ext_display.c | 17
-rw-r--r--  drivers/platform/msm/seemp_core/seemp_logk.c | 2
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 22
-rw-r--r--  drivers/power/power_supply_sysfs.c | 4
-rw-r--r--  drivers/power/reset/at91-poweroff.c | 54
-rw-r--r--  drivers/power/reset/msm-poweroff.c | 6
-rw-r--r--  drivers/power/supply/qcom/battery.c | 57
-rw-r--r--  drivers/power/supply/qcom/battery.h | 17
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 10
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 226
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 162
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 14
-rw-r--r--  drivers/power/supply/qcom/smb-reg.h | 10
-rw-r--r--  drivers/power/supply/qcom/smb138x-charger.c | 258
-rw-r--r--  drivers/regulator/Kconfig | 9
-rw-r--r--  drivers/regulator/Makefile | 1
-rw-r--r--  drivers/regulator/onsemi-ncp6335d.c | 775
-rw-r--r--  drivers/rtc/rtc-s35390a.c | 167
-rw-r--r--  drivers/rtc/rtc-tegra.c | 28
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 3
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 1
-rw-r--r--  drivers/scsi/cxlflash/common.h | 8
-rw-r--r--  drivers/scsi/cxlflash/main.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 26
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 1
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 12
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 40
-rw-r--r--  drivers/scsi/sd.c | 23
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/sr.c | 6
-rw-r--r--  drivers/scsi/ufs/Kconfig | 12
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 202
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 46
-rw-r--r--  drivers/soc/qcom/Kconfig | 42
-rw-r--r--  drivers/soc/qcom/Makefile | 2
-rw-r--r--  drivers/soc/qcom/boot_marker.c | 183
-rw-r--r--  drivers/soc/qcom/boot_stats.c | 60
-rw-r--r--  drivers/soc/qcom/cache_m4m_erp64.c | 635
-rw-r--r--  drivers/soc/qcom/icnss.c | 24
-rw-r--r--  drivers/soc/qcom/ipc_router_mhi_xprt.c | 34
-rw-r--r--  drivers/soc/qcom/memory_dump_v2.c | 4
-rw-r--r--  drivers/soc/qcom/memshare/msm_memshare.c | 11
-rw-r--r--  drivers/soc/qcom/memshare/msm_memshare.h | 3
-rw-r--r--  drivers/soc/qcom/msm_minidump.c | 153
-rw-r--r--  drivers/soc/qcom/perf_event_kryo.c | 13
-rw-r--r--  drivers/soc/qcom/peripheral-loader.c | 4
-rw-r--r--  drivers/soc/qcom/qbt1000.c | 21
-rw-r--r--  drivers/soc/qcom/qdsp6v2/apr.c | 29
-rw-r--r--  drivers/soc/qcom/scm-boot.c | 19
-rw-r--r--  drivers/soc/qcom/service-notifier.c | 13
-rw-r--r--  drivers/soc/qcom/socinfo.c | 36
-rw-r--r--  drivers/soc/qcom/subsystem_restart.c | 74
-rw-r--r--  drivers/staging/android/ashmem.c | 3
-rw-r--r--  drivers/staging/android/ion/ion.c | 71
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 16
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 12
-rw-r--r--  drivers/target/target_core_pscsi.c | 47
-rw-r--r--  drivers/target/target_core_sbc.c | 10
-rw-r--r--  drivers/tty/nozomi.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_pci.c | 23
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 19
-rw-r--r--  drivers/tty/serial/msm_serial.c | 38
-rw-r--r--  drivers/usb/class/usbtmc.c | 9
-rw-r--r--  drivers/usb/core/config.c | 10
-rw-r--r--  drivers/usb/core/hcd.c | 33
-rw-r--r--  drivers/usb/core/hub.c | 21
-rw-r--r--  drivers/usb/core/quirks.c | 8
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 21
-rw-r--r--  drivers/usb/gadget/function/f_accessory.c | 37
-rw-r--r--  drivers/usb/gadget/function/f_acm.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_midi.c | 59
-rw-r--r--  drivers/usb/gadget/function/f_mtp.c | 9
-rw-r--r--  drivers/usb/gadget/function/f_uvc.c | 2
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 27
-rw-r--r--  drivers/usb/misc/Kconfig | 10
-rw-r--r--  drivers/usb/misc/Makefile | 2
-rw-r--r--  drivers/usb/misc/idmouse.c | 3
-rw-r--r--  drivers/usb/misc/ks_bridge.c | 1105
-rw-r--r--  drivers/usb/misc/lvstest.c | 4
-rw-r--r--  drivers/usb/misc/uss720.c | 5
-rw-r--r--  drivers/usb/musb/musb_cppi41.c | 23
-rw-r--r--  drivers/usb/pd/policy_engine.c | 2
-rw-r--r--  drivers/usb/serial/option.c | 17
-rw-r--r--  drivers/usb/serial/qcserial.c | 2
-rw-r--r--  drivers/usb/wusbcore/wa-hc.c | 3
-rw-r--r--  drivers/uwb/hwa-rc.c | 3
-rw-r--r--  drivers/uwb/i1480/dfu/usb.c | 3
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 33
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 2
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 20
-rw-r--r--  drivers/video/console/fbcon.c | 67
-rw-r--r--  drivers/video/fbdev/Makefile | 6
-rw-r--r--  drivers/video/fbdev/msm/dsi_status_6g.c | 4
-rw-r--r--  drivers/video/fbdev/msm/mdss.h | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_compat_utils.c | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_compat_utils.h | 3
-rw-r--r--  drivers/video/fbdev/msm/mdss_debug.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_debug_xlog.c | 53
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi.c | 13
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi.h | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_panel.c | 120
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_phy.h | 11
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_phy_v3.c | 17
-rw-r--r--  drivers/video/fbdev/msm/mdss_fb.c | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_edid.c | 42
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_edid.h | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.c | 8
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c | 11
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.h | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c | 30
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_debug.c | 4
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_hwio.h | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_video.c | 61
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_layer.c | 219
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_overlay.c | 17
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pp.c | 9
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_util.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_wfd.c | 8
-rw-r--r--  drivers/video/fbdev/msm/mdss_rotator.c | 20
-rw-r--r--  drivers/video/fbdev/msm/mdss_smmu.c | 8
-rw-r--r--  drivers/video/fbdev/msm/msm_dba/adv7533.c | 8
-rw-r--r--  drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c | 4
-rw-r--r--  drivers/video/fbdev/msm/msm_mdss_io_8974.c | 45
-rw-r--r--  drivers/video/fbdev/xen-fbfront.c | 4
-rw-r--r--  drivers/virtio/virtio_balloon.c | 2
-rw-r--r--  drivers/xen/xen-acpi-processor.c | 34
364 files changed, 11341 insertions, 3121 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 675eaf337178..b9cebca376f9 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,7 +2,6 @@
# Makefile for the Linux ACPI interpreter
#
-ccflags-y := -Os
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
#
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 296b7a14893a..5365ff6e69c1 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -24,9 +24,11 @@
ACPI_MODULE_NAME("platform");
static const struct acpi_device_id forbidden_id_list[] = {
- {"PNP0000", 0}, /* PIC */
- {"PNP0100", 0}, /* Timer */
- {"PNP0200", 0}, /* AT DMA Controller */
+ {"PNP0000", 0}, /* PIC */
+ {"PNP0100", 0}, /* Timer */
+ {"PNP0200", 0}, /* AT DMA Controller */
+ {"ACPI0009", 0}, /* IOxAPIC */
+ {"ACPI000A", 0}, /* IOAPIC */
{"", 0},
};
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 5fdac394207a..549cdbed7b0e 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1211,6 +1211,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
union acpi_object *dod = NULL;
union acpi_object *obj;
+ if (!video->cap._DOD)
+ return AE_NOT_EXIST;
+
status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
if (!ACPI_SUCCESS(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 96809cd99ace..2f24b578bcaf 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -346,6 +346,34 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
},
},
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Precision 5520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
+ },
+ },
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Precision 3520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
+ },
+ },
+ /*
+ * Resolves a quirk with the Dell Latitude 3350 that
+ * causes the ethernet adapter to not function.
+ */
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Latitude 3350",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
+ },
+ },
#endif
{}
};
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 14c2a07c9f3f..67d7489ced01 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -979,7 +979,11 @@ static int cmp_map(const void *m0, const void *m1)
const struct nfit_set_info_map *map0 = m0;
const struct nfit_set_info_map *map1 = m1;
- return map0->region_offset - map1->region_offset;
+ if (map0->region_offset < map1->region_offset)
+ return -1;
+ else if (map0->region_offset > map1->region_offset)
+ return 1;
+ return 0;
}
/* Retrieve the nth entry referencing this spa */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index fcd4ce6f78d5..1c2b846c5776 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
+ cur_state = 0;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
acpi_handle handle = resource->device.handle;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 09c07f519952..0e494108c20c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -1042,7 +1042,7 @@ static int print_wakeup_source_stats(struct seq_file *m,
active_time = ktime_set(0, 0);
}
- seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+ seq_printf(m, "%-32s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
ws->name, active_count, ws->event_count,
ws->wakeup_count, ws->expire_count,
ktime_to_ms(active_time), ktime_to_ms(total_time),
@@ -1062,7 +1062,7 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
struct wakeup_source *ws;
- seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+ seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t"
"expire_count\tactive_since\ttotal_time\tmax_time\t"
"last_change\tprevent_suspend_time\n");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 76ac3179f25c..c5a2057ef668 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -581,13 +581,13 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- clear_page(mem);
+ memset(mem, 0, PAGE_SIZE);
return 0;
}
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE)
- copy_page(mem, cmem);
+ memcpy(mem, cmem, PAGE_SIZE);
else
ret = zcomp_decompress(zram->comp, cmem, size, mem);
zs_unmap_object(meta->mem_pool, handle);
@@ -750,7 +750,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
src = kmap_atomic(page);
- copy_page(cmem, src);
+ memcpy(cmem, src, PAGE_SIZE);
kunmap_atomic(src);
} else {
memcpy(cmem, src, clen);
diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h
index 5d105fba2193..e67c6964ee65 100644
--- a/drivers/bluetooth/btfm_slim.h
+++ b/drivers/bluetooth/btfm_slim.h
@@ -13,7 +13,7 @@
#define BTFM_SLIM_H
#include <linux/slimbus/slimbus.h>
-#define BTFMSLIM_DBG(fmt, arg...) pr_debug(fmt "\n", ## arg)
+#define BTFMSLIM_DBG(fmt, arg...) pr_debug("%s: " fmt "\n", __func__, ## arg)
#define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
#define BTFMSLIM_ERR(fmt, arg...) pr_err("%s: " fmt "\n", __func__, ## arg)
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index c93b29281e35..a451ff33103c 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -83,19 +83,34 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
{
int ret = 0;
uint8_t reg_val = 0;
+ uint8_t port_bit = 0;
uint16_t reg;
BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
if (rxport) {
+ if (enable) {
+ /* For SCO Rx, A2DP Rx */
+ reg_val = 0x1;
+ port_bit = port_num - 0x10;
+ reg = CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_0(port_bit);
+ BTFMSLIM_DBG("writing reg_val (%d) to reg(%x)",
+ reg_val, reg);
+ ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
+ if (ret) {
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x",
+ ret, reg);
+ goto error;
+ }
+ }
/* Port enable */
reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
goto enable_disable_rxport;
}
- /* txport */
if (!enable)
goto enable_disable_txport;
- /* Multiple Channel Setting - only for FM Tx */
+ /* txport */
+ /* Multiple Channel Setting */
if (is_fm_port(port_num)) {
reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) |
(0x1 << CHRK_SB_PGD_PORT_TX2_FM);
@@ -105,6 +120,18 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
goto error;
}
+ } else if (port_num == CHRK_SB_PGD_PORT_TX_SCO) {
+ /* SCO Tx */
+ reg_val = 0x1 << CHRK_SB_PGD_PORT_TX_SCO;
+ reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
+ BTFMSLIM_DBG("writing reg_val (%d) to reg(%x)",
+ reg_val, reg);
+ ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
+ if (ret) {
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x",
+ ret, reg);
+ goto error;
+ }
}
/* Enable Tx port hw auto recovery for underrun or overrun error */
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 4bbe4e5f9a6d..db8eb7ccd744 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -585,10 +585,12 @@ config TELCLOCK
controlling the behavior of this hardware.
config DEVPORT
- bool
- depends on !M68K
+ bool "/dev/port character device"
depends on ISA || PCI
default y
+ help
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
source "drivers/s390/char/Kconfig"
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 10c4d8ce2410..b1432ccf5358 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1465,6 +1465,7 @@ static void fastrpc_init(struct fastrpc_apps *me)
{
int i;
INIT_HLIST_HEAD(&me->drivers);
+ INIT_HLIST_HEAD(&me->maps);
spin_lock_init(&me->hlock);
mutex_init(&me->smd_mutex);
me->channel = &gcinfo[0];
@@ -2125,11 +2126,9 @@ void fastrpc_glink_notify_state(void *handle, const void *priv, unsigned event)
link->port_state = FASTRPC_LINK_DISCONNECTED;
break;
case GLINK_REMOTE_DISCONNECTED:
- if (me->channel[cid].chan &&
- link->link_state == FASTRPC_LINK_STATE_UP) {
+ if (me->channel[cid].chan) {
fastrpc_glink_close(me->channel[cid].chan, cid);
me->channel[cid].chan = 0;
- link->port_state = FASTRPC_LINK_DISCONNECTED;
}
break;
default:
@@ -2295,10 +2294,9 @@ static int fastrpc_glink_open(int cid)
if (err)
goto bail;
- if (link->port_state == FASTRPC_LINK_CONNECTED ||
- link->port_state == FASTRPC_LINK_CONNECTING) {
+ VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
+ if (err)
goto bail;
- }
link->port_state = FASTRPC_LINK_CONNECTING;
cfg->priv = (void *)(uintptr_t)cid;
@@ -2457,7 +2455,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
(me->channel[cid].chan == 0)) {
if (me->glink) {
- fastrpc_glink_register(cid, me);
+ VERIFY(err, 0 == fastrpc_glink_register(cid, me));
+ if (err)
+ goto bail;
VERIFY(err, 0 == fastrpc_glink_open(cid));
} else {
VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
@@ -2791,7 +2791,7 @@ static int fastrpc_cb_probe(struct device *dev)
start = 0x60000000;
VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
arm_iommu_create_mapping(&platform_bus_type,
- start, 0x7fffffff)));
+ start, 0x70000000)));
if (err)
goto bail;
iommu_set_fault_handler(sess->smmu.mapping->domain,
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index bd34e6cceec0..a5d92c51cc0b 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -252,6 +252,7 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
uint8_t drain_again = 0;
uint8_t peripheral = 0;
struct diag_md_session_t *session_info = NULL;
+ struct pid *pid_struct = NULL;
mutex_lock(&driver->diagfwd_untag_mutex);
@@ -278,6 +279,14 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
if ((info && (info->peripheral_mask &
MD_PERIPHERAL_MASK(peripheral)) == 0))
goto drop_data;
+ pid_struct = find_get_pid(session_info->pid);
+ if (!pid_struct) {
+ err = -ESRCH;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: No such md_session_map[%d] with pid = %d err=%d exists..\n",
+ peripheral, session_info->pid, err);
+ goto drop_data;
+ }
/*
* If the data is from remote processor, copy the remote
* token first
@@ -297,27 +306,35 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
}
if (i > 0) {
remote_token = diag_get_remote(i);
- err = copy_to_user(buf + ret, &remote_token,
- sizeof(int));
+ if (get_pid_task(pid_struct, PIDTYPE_PID)) {
+ err = copy_to_user(buf + ret,
+ &remote_token,
+ sizeof(int));
+ if (err)
+ goto drop_data;
+ ret += sizeof(int);
+ }
+ }
+
+ /* Copy the length of data being passed */
+ if (get_pid_task(pid_struct, PIDTYPE_PID)) {
+ err = copy_to_user(buf + ret,
+ (void *)&(entry->len),
+ sizeof(int));
if (err)
goto drop_data;
ret += sizeof(int);
}
- /* Copy the length of data being passed */
- err = copy_to_user(buf + ret, (void *)&(entry->len),
- sizeof(int));
- if (err)
- goto drop_data;
- ret += sizeof(int);
-
/* Copy the actual data being passed */
- err = copy_to_user(buf + ret, (void *)entry->buf,
- entry->len);
- if (err)
- goto drop_data;
- ret += entry->len;
-
+ if (get_pid_task(pid_struct, PIDTYPE_PID)) {
+ err = copy_to_user(buf + ret,
+ (void *)entry->buf,
+ entry->len);
+ if (err)
+ goto drop_data;
+ ret += entry->len;
+ }
/*
* The data is now copied to the user space client,
* Notify that the write is complete and delete its
@@ -339,7 +356,15 @@ drop_data:
}
*pret = ret;
- err = copy_to_user(buf + sizeof(int), (void *)&num_data, sizeof(int));
+ if (pid_struct && get_pid_task(pid_struct, PIDTYPE_PID)) {
+ err = copy_to_user(buf + sizeof(int),
+ (void *)&num_data,
+ sizeof(int));
+ } else {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: md_session_map[%d] with pid = %d Exited..\n",
+ peripheral, driver->md_session_map[peripheral]->pid);
+ }
diag_ws_on_copy_complete(DIAG_WS_MUX);
if (drain_again)
chk_logging_wakeup();
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 574a13de6a0d..dc2d9fc4282c 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -996,6 +996,11 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
hdlc_disabled = driver->hdlc_disabled;
if (hdlc_disabled) {
payload = *(uint16_t *)(buf + 2);
+ if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
+ pr_err("diag: Dropping packet, payload size is %d\n",
+ payload);
+ return -EBADMSG;
+ }
driver->hdlc_encode_buf_len = payload;
/*
* Adding 4 bytes for start (1 byte), version (1 byte) and
@@ -2336,7 +2341,9 @@ long diagchar_ioctl(struct file *filp,
mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_EVENT_STATUS:
+ mutex_lock(&driver->dci_mutex);
result = diag_ioctl_dci_event_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
break;
case DIAG_IOCTL_DCI_CLEAR_LOGS:
mutex_lock(&driver->dci_mutex);
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index 2784cf71cc2b..03d496c2dd91 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -487,6 +487,18 @@ static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
atomic_set(&glink_info->tx_intent_ready, 0);
}
+static void diag_glink_late_init_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ late_init_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+}
+
static void diag_glink_transport_notify_state(void *handle, const void *priv,
unsigned event)
{
@@ -617,7 +629,7 @@ static void glink_late_init(struct diag_glink_info *glink_info)
glink_info->inited = 1;
if (atomic_read(&glink_info->opened))
- diagfwd_channel_open(glink_info->fwd_ctxt);
+ queue_work(glink_info->wq, &(glink_info->late_init_work));
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
glink_info->name);
@@ -665,6 +677,7 @@ static void __diag_glink_init(struct diag_glink_info *glink_info)
INIT_WORK(&(glink_info->connect_work), diag_glink_connect_work_fn);
INIT_WORK(&(glink_info->remote_disconnect_work),
diag_glink_remote_disconnect_work_fn);
+ INIT_WORK(&(glink_info->late_init_work), diag_glink_late_init_work_fn);
link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
link_info.transport = NULL;
link_info.edge = glink_info->edge;
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
index 5c1abeffd498..a84fa4edfca0 100644
--- a/drivers/char/diag/diagfwd_glink.h
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -37,6 +37,7 @@ struct diag_glink_info {
struct work_struct read_work;
struct work_struct connect_work;
struct work_struct remote_disconnect_work;
+ struct work_struct late_init_work;
struct diagfwd_info *fwd_ctxt;
};
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6b1721f978c2..e901463d4972 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -59,6 +59,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
#endif
#ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return devmem_is_allowed(pfn);
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -78,6 +82,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
}
#else
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return 1;
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
@@ -125,23 +133,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
while (count > 0) {
unsigned long remaining;
+ int allowed;
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, count))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
+ if (allowed == 2) {
+ /* Show zeros for restricted memory. */
+ remaining = clear_user(buf, sz);
+ } else {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr)
+ return -EFAULT;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr)
- return -EFAULT;
+ remaining = copy_to_user(buf, ptr, sz);
+
+ unxlate_dev_mem_ptr(p, ptr);
+ }
- remaining = copy_to_user(buf, ptr, sz);
- unxlate_dev_mem_ptr(p, ptr);
if (remaining)
return -EFAULT;
@@ -184,30 +200,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
#endif
while (count > 0) {
+ int allowed;
+
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr) {
- if (written)
- break;
- return -EFAULT;
- }
+ /* Skip actual writing when a page is marked as restricted. */
+ if (allowed == 1) {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr) {
+ if (written)
+ break;
+ return -EFAULT;
+ }
- copied = copy_from_user(ptr, buf, sz);
- unxlate_dev_mem_ptr(p, ptr);
- if (copied) {
- written += sz - copied;
- if (written)
- break;
- return -EFAULT;
+ copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
}
buf += sz;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 65f7eecc45b0..f10a107614b4 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip)
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
- free_irq(chip->vendor.irq, chip);
+ devm_free_irq(chip->pdev, chip->vendor.irq, chip);
chip->vendor.irq = 0;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 090183f812be..31e8ae916ba0 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1130,6 +1130,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
{
struct port *port;
struct scatterlist sg[1];
+ void *data;
+ int ret;
if (unlikely(early_put_chars))
return early_put_chars(vtermno, buf, count);
@@ -1138,8 +1140,14 @@ static int put_chars(u32 vtermno, const char *buf, int count)
if (!port)
return -EPIPE;
- sg_init_one(sg, buf, count);
- return __send_to_port(port, sg, 1, count, (void *)buf, false);
+ data = kmemdup(buf, count, GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+
+ sg_init_one(sg, data, count);
+ ret = __send_to_port(port, sg, 1, count, data, false);
+ kfree(data);
+ return ret;
}
/*
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 4a9e034f939f..4996f4f312f4 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2326,6 +2326,56 @@ static struct hlist_head *orphan_list[] = {
NULL,
};
+static void clk_state_subtree(struct clk_core *c)
+{
+ int vdd_level = 0;
+ struct clk_core *child;
+
+ if (!c)
+ return;
+
+ if (c->vdd_class) {
+ vdd_level = clk_find_vdd_level(c, c->rate);
+ if (vdd_level < 0)
+ vdd_level = 0;
+ }
+
+ trace_clk_state(c->name, c->prepare_count, c->enable_count,
+ c->rate, vdd_level);
+
+ hlist_for_each_entry(child, &c->children, child_node)
+ clk_state_subtree(child);
+}
+
+static int clk_state_show(struct seq_file *s, void *data)
+{
+ struct clk_core *c;
+ struct hlist_head **lists = (struct hlist_head **)s->private;
+
+ clk_prepare_lock();
+
+ for (; *lists; lists++)
+ hlist_for_each_entry(c, *lists, child_node)
+ clk_state_subtree(c);
+
+ clk_prepare_unlock();
+
+ return 0;
+}
+
+
+static int clk_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_state_show, inode->i_private);
+}
+
+static const struct file_operations clk_state_fops = {
+ .open = clk_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
@@ -3002,6 +3052,11 @@ static int __init clk_debug_init(void)
if (!d)
return -ENOMEM;
+ d = debugfs_create_file("trace_clocks", S_IRUGO, rootdir, &all_lists,
+ &clk_state_fops);
+ if (!d)
+ return -ENOMEM;
+
mutex_lock(&clk_debug_lock);
hlist_for_each_entry(core, &clk_debug_list, debug_node)
clk_debug_create_one(core, rootdir);
diff --git a/drivers/clk/msm/clock-cpu-8996.c b/drivers/clk/msm/clock-cpu-8996.c
index bcda6f31d6f5..bca8ada97f7d 100644
--- a/drivers/clk/msm/clock-cpu-8996.c
+++ b/drivers/clk/msm/clock-cpu-8996.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1308,6 +1308,7 @@ static int cpu_clock_8996_driver_probe(struct platform_device *pdev)
unsigned long pwrclrate, perfclrate, cbfrate;
int pvs_ver = 0;
u32 pte_efuse;
+ u32 clk_rate;
char perfclspeedbinstr[] = "qcom,perfcl-speedbinXX-vXX";
char pwrclspeedbinstr[] = "qcom,pwrcl-speedbinXX-vXX";
char cbfspeedbinstr[] = "qcom,cbf-speedbinXX-vXX";
@@ -1435,6 +1436,18 @@ static int cpu_clock_8996_driver_probe(struct platform_device *pdev)
clk_prepare_enable(&pwrcl_alt_pll.c);
clk_prepare_enable(&cbf_pll.c);
+ /* Override the existing early boot frequency for power cluster */
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,pwrcl-early-boot-freq", &clk_rate);
+ if (!ret)
+ pwrcl_early_boot_rate = clk_rate;
+
+ /* Override the existing early boot frequency for perf cluster */
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,perfcl-early-boot-freq", &clk_rate);
+ if (!ret)
+ perfcl_early_boot_rate = clk_rate;
+
/* Set the early boot rate. This may also switch us to the ACD leg */
clk_set_rate(&pwrcl_clk.c, pwrcl_early_boot_rate);
clk_set_rate(&perfcl_clk.c, perfcl_early_boot_rate);
@@ -1450,6 +1463,7 @@ static struct of_device_id match_table[] = {
{ .compatible = "qcom,cpu-clock-8996" },
{ .compatible = "qcom,cpu-clock-8996-v3" },
{ .compatible = "qcom,cpu-clock-8996-pro" },
+ { .compatible = "qcom,cpu-clock-8996-auto" },
{}
};
@@ -1499,6 +1513,9 @@ module_exit(cpu_clock_8996_exit);
#define HF_MUX_SEL_LF_MUX 0x1
#define LF_MUX_SEL_ALT_PLL 0x1
+#define PWRCL_EARLY_BOOT_RATE 1286400000
+#define PERFCL_EARLY_BOOT_RATE 1363200000
+
static int use_alt_pll;
module_param(use_alt_pll, int, 0444);
@@ -1537,6 +1554,12 @@ int __init cpu_clock_8996_early_init(void)
cpu_clocks_v3 = true;
cpu_clocks_pro = true;
} else if (of_find_compatible_node(NULL, NULL,
+ "qcom,cpu-clock-8996-auto")) {
+ cpu_clocks_v3 = true;
+ cpu_clocks_pro = true;
+ pwrcl_early_boot_rate = PWRCL_EARLY_BOOT_RATE;
+ perfcl_early_boot_rate = PERFCL_EARLY_BOOT_RATE;
+ } else if (of_find_compatible_node(NULL, NULL,
"qcom,cpu-clock-8996-v3")) {
cpu_clocks_v3 = true;
} else if (!of_find_compatible_node(NULL, NULL,
diff --git a/drivers/clk/msm/clock-gcc-8996.c b/drivers/clk/msm/clock-gcc-8996.c
index e93e9c494023..6dd2cf879c49 100644
--- a/drivers/clk/msm/clock-gcc-8996.c
+++ b/drivers/clk/msm/clock-gcc-8996.c
@@ -3670,14 +3670,6 @@ static int msm_gcc_8996_probe(struct platform_device *pdev)
regval |= BIT(21);
writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
- /*
- * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
- * turned off by hardware during certain apps low power modes.
- */
- regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
- regval |= BIT(21);
- writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
-
vdd_dig.vdd_uv[1] = RPM_REGULATOR_CORNER_SVS_KRAIT;
vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
if (IS_ERR(vdd_dig.regulator[0])) {
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
index 0214c6548afd..97cb4221de25 100644
--- a/drivers/clk/sunxi/clk-simple-gates.c
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -98,6 +98,8 @@ static void __init sunxi_simple_gates_init(struct device_node *node)
sunxi_simple_gates_setup(node, NULL, 0);
}
+CLK_OF_DECLARE(sun4i_a10_gates, "allwinner,sun4i-a10-gates-clk",
+ sunxi_simple_gates_init);
CLK_OF_DECLARE(sun4i_a10_apb0, "allwinner,sun4i-a10-apb0-gates-clk",
sunxi_simple_gates_init);
CLK_OF_DECLARE(sun4i_a10_apb1, "allwinner,sun4i-a10-apb1-gates-clk",
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e4d9aef1dda4..a0dba9beac05 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -698,9 +698,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy);
- if (!cur_freq)
- return sprintf(buf, "<unknown>");
- return sprintf(buf, "%u\n", cur_freq);
+
+ if (cur_freq)
+ return sprintf(buf, "%u\n", cur_freq);
+
+ return sprintf(buf, "<unknown>\n");
}
/**
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 24ac49019b29..b91e115462ae 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -419,13 +419,13 @@ static u64 update_load(int cpu)
ppol->policy->governor_data;
u64 now;
u64 now_idle;
- unsigned int delta_idle;
- unsigned int delta_time;
+ u64 delta_idle;
+ u64 delta_time;
u64 active_time;
now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
- delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
- delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+ delta_idle = (now_idle - pcpu->time_in_idle);
+ delta_time = (now - pcpu->time_in_idle_timestamp);
if (delta_time <= delta_idle)
active_time = 0;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 69d4a1326fee..53e61459c69f 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -278,7 +278,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
/* Try to run it through DECO0 */
ret = run_descriptor_deco0(ctrldev, desc, &status);
- if (ret || status) {
+ if (ret ||
+ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
dev_err(ctrldev,
"Failed to deinstantiate RNG4 SH%d\n",
sh_idx);
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index 1e5f35d8422d..26f69fa61ba1 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -179,26 +179,48 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
struct device *dev = mdm->dev;
int ret;
bool graceful_shutdown = false;
+ u32 status, err_fatal;
switch (cmd) {
case ESOC_PWR_ON:
+ if (esoc->auto_boot) {
+ /*
+ * If esoc has already booted, we would have missed
+ * status change interrupt. Read status and err_fatal
+ * signals to arrive at the state of esoc.
+ */
+ esoc->clink_ops->get_status(&status, esoc);
+ esoc->clink_ops->get_err_fatal(&err_fatal, esoc);
+ if (err_fatal)
+ return -EIO;
+ if (status && !mdm->ready) {
+ mdm->ready = true;
+ esoc->clink_ops->notify(ESOC_BOOT_DONE, esoc);
+ }
+ }
gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
- mdm_enable_irqs(mdm);
mdm->init = 1;
mdm_do_first_power_on(mdm);
+ mdm_enable_irqs(mdm);
break;
case ESOC_PWR_OFF:
mdm_disable_irqs(mdm);
mdm->debug = 0;
mdm->ready = false;
mdm->trig_cnt = 0;
+ if (esoc->primary)
+ break;
graceful_shutdown = true;
- ret = sysmon_send_shutdown(&esoc->subsys);
- if (ret) {
- dev_err(mdm->dev, "sysmon shutdown fail, ret = %d\n",
- ret);
- graceful_shutdown = false;
- goto force_poff;
+ if (!esoc->userspace_handle_shutdown) {
+ ret = sysmon_send_shutdown(&esoc->subsys);
+ if (ret) {
+ dev_err(mdm->dev,
+ "sysmon shutdown fail, ret = %d\n", ret);
+ graceful_shutdown = false;
+ goto force_poff;
+ }
+ } else {
+ esoc_clink_queue_request(ESOC_REQ_SEND_SHUTDOWN, esoc);
}
dev_dbg(mdm->dev, "Waiting for status gpio go low\n");
status_down = false;
@@ -228,12 +250,17 @@ force_poff:
esoc->subsys.sysmon_shutdown_ret);
}
+ if (esoc->primary)
+ break;
/*
* Force a shutdown of the mdm. This is required in order
* to prevent the mdm from immediately powering back on
- * after the shutdown
+ * after the shutdown. Avoid setting status to 0, if line is
+ * monitored by multiple mdms(might be wrongly interpreted as
+ * a primary crash).
*/
- gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
+ if (esoc->statusline_not_a_powersource == false)
+ gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
mdm_power_down(mdm);
mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
@@ -249,9 +276,12 @@ force_poff:
*/
mdm->ready = false;
cancel_delayed_work(&mdm->mdm2ap_status_check_work);
- gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
- dev_dbg(mdm->dev, "set ap2mdm errfatal to force reset\n");
- msleep(mdm->ramdump_delay_ms);
+ if (!mdm->esoc->auto_boot) {
+ gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+ dev_dbg(mdm->dev,
+ "set ap2mdm errfatal to force reset\n");
+ msleep(mdm->ramdump_delay_ms);
+ }
break;
case ESOC_EXE_DEBUG:
mdm->debug = 1;
@@ -378,6 +408,8 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
status_down = false;
dev_dbg(dev, "signal apq err fatal for graceful restart\n");
gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+ if (esoc->primary)
+ break;
timeout = local_clock();
do_div(timeout, NSEC_PER_MSEC);
timeout += MDM_MODEM_TIMEOUT;
@@ -420,7 +452,8 @@ static irqreturn_t mdm_errfatal(int irq, void *dev_id)
goto mdm_pwroff_irq;
esoc = mdm->esoc;
dev_err(dev, "%s: mdm sent errfatal interrupt\n",
- __func__);
+ __func__);
+ subsys_set_crash_status(esoc->subsys_dev, true);
/* disable irq ?*/
esoc_clink_evt_notify(ESOC_ERR_FATAL, esoc);
return IRQ_HANDLED;
@@ -441,11 +474,26 @@ static irqreturn_t mdm_status_change(int irq, void *dev_id)
return IRQ_HANDLED;
dev = mdm->dev;
esoc = mdm->esoc;
+ /*
+ * On auto boot devices, there is a possibility of receiving
+ * status change interrupt before esoc_clink structure is
+ * initialized. Ignore them.
+ */
+ if (!esoc)
+ return IRQ_HANDLED;
value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
if (value == 0 && mdm->ready) {
dev_err(dev, "unexpected reset external modem\n");
+ subsys_set_crash_status(esoc->subsys_dev, true);
esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
} else if (value == 1) {
+ /*
+ * In auto_boot cases, bailout early if mdm
+ * is up already.
+ */
+ if (esoc->auto_boot && mdm->ready)
+ return IRQ_HANDLED;
+
cancel_delayed_work(&mdm->mdm2ap_status_check_work);
dev_dbg(dev, "status = 1: mdm is now ready\n");
mdm->ready = true;
@@ -453,6 +501,8 @@ static irqreturn_t mdm_status_change(int irq, void *dev_id)
queue_work(mdm->mdm_queue, &mdm->mdm_status_work);
if (mdm->get_restart_reason)
queue_work(mdm->mdm_queue, &mdm->restart_reason_work);
+ if (esoc->auto_boot)
+ esoc->clink_ops->notify(ESOC_BOOT_DONE, esoc);
}
return IRQ_HANDLED;
}
@@ -481,7 +531,7 @@ static irqreturn_t mdm_pblrdy_change(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mdm_get_status(u32 *status, struct esoc_clink *esoc)
+static void mdm_get_status(u32 *status, struct esoc_clink *esoc)
{
struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
@@ -489,7 +539,16 @@ static int mdm_get_status(u32 *status, struct esoc_clink *esoc)
*status = 0;
else
*status = 1;
- return 0;
+}
+
+static void mdm_get_err_fatal(u32 *status, struct esoc_clink *esoc)
+{
+ struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+
+ if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_ERRFATAL)) == 0)
+ *status = 0;
+ else
+ *status = 1;
}
static void mdm_configure_debug(struct mdm_ctrl *mdm)
@@ -573,13 +632,21 @@ static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
&mdm->ramdump_delay_ms);
if (ret)
mdm->ramdump_delay_ms = DEF_RAMDUMP_DELAY;
- /* Multilple gpio_request calls are allowed */
+ /*
+ * In certain scenarios, multiple esoc devices are monitoring
+ * same AP2MDM_STATUS line. But only one of them will have a
+ * successful gpio_request call. Initialize gpio only if request
+ * succeeds.
+ */
if (gpio_request(MDM_GPIO(mdm, AP2MDM_STATUS), "AP2MDM_STATUS"))
dev_err(dev, "Failed to configure AP2MDM_STATUS gpio\n");
- /* Multilple gpio_request calls are allowed */
+ else
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
if (gpio_request(MDM_GPIO(mdm, AP2MDM_ERRFATAL), "AP2MDM_ERRFATAL"))
dev_err(dev, "%s Failed to configure AP2MDM_ERRFATAL gpio\n",
__func__);
+ else
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
if (gpio_request(MDM_GPIO(mdm, MDM2AP_STATUS), "MDM2AP_STATUS")) {
dev_err(dev, "%s Failed to configure MDM2AP_STATUS gpio\n",
__func__);
@@ -612,9 +679,6 @@ static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
}
}
- gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
- gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
-
if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY)))
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_CHNLRDY), 0);
@@ -748,6 +812,7 @@ static int mdm9x25_setup_hw(struct mdm_ctrl *mdm,
dev_err(mdm->dev, "cannot allocate esoc device\n");
return PTR_ERR(esoc);
}
+ esoc->pdev = pdev;
mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
if (!mdm->mdm_queue) {
dev_err(mdm->dev, "could not create mdm_queue\n");
@@ -818,6 +883,7 @@ static int mdm9x35_setup_hw(struct mdm_ctrl *mdm,
dev_err(mdm->dev, "cannot allocate esoc device\n");
return PTR_ERR(esoc);
}
+ esoc->pdev = pdev;
mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
if (!mdm->mdm_queue) {
dev_err(mdm->dev, "could not create mdm_queue\n");
@@ -888,6 +954,84 @@ static int mdm9x35_setup_hw(struct mdm_ctrl *mdm,
return 0;
}
+static int mdm9x45_setup_hw(struct mdm_ctrl *mdm,
+ const struct mdm_ops *ops,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct esoc_clink *esoc;
+ const struct esoc_clink_ops *const clink_ops = ops->clink_ops;
+ const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+ mdm->dev = &pdev->dev;
+ mdm->pon_ops = pon_ops;
+ esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(esoc)) {
+ dev_err(mdm->dev, "cannot allocate esoc device\n");
+ return PTR_ERR(esoc);
+ }
+ esoc->pdev = pdev;
+ mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+ if (!mdm->mdm_queue) {
+ dev_err(mdm->dev, "could not create mdm_queue\n");
+ return -ENOMEM;
+ }
+ mdm->irq_mask = 0;
+ mdm->ready = false;
+ ret = mdm_dt_parse_gpios(mdm);
+ if (ret)
+ return ret;
+ dev_err(mdm->dev, "parsing gpio done\n");
+ ret = mdm_pon_dt_init(mdm);
+ if (ret)
+ return ret;
+ dev_dbg(mdm->dev, "pon dt init done\n");
+ ret = mdm_pinctrl_init(mdm);
+ if (ret)
+ return ret;
+ dev_err(mdm->dev, "pinctrl init done\n");
+ ret = mdm_pon_setup(mdm);
+ if (ret)
+ return ret;
+ dev_dbg(mdm->dev, "pon setup done\n");
+ ret = mdm_configure_ipc(mdm, pdev);
+ if (ret)
+ return ret;
+ mdm_configure_debug(mdm);
+ dev_err(mdm->dev, "ipc configure done\n");
+ esoc->name = MDM9x45_LABEL;
+ esoc->link_name = MDM9x45_PCIE;
+ esoc->clink_ops = clink_ops;
+ esoc->parent = mdm->dev;
+ esoc->owner = THIS_MODULE;
+ esoc->np = pdev->dev.of_node;
+
+ esoc->auto_boot = of_property_read_bool(esoc->np,
+ "qcom,mdm-auto-boot");
+ esoc->statusline_not_a_powersource = of_property_read_bool(esoc->np,
+ "qcom,mdm-statusline-not-a-powersource");
+ esoc->userspace_handle_shutdown = of_property_read_bool(esoc->np,
+ "qcom,mdm-userspace-handle-shutdown");
+ set_esoc_clink_data(esoc, mdm);
+ ret = esoc_clink_register(esoc);
+ if (ret) {
+ dev_err(mdm->dev, "esoc registration failed\n");
+ return ret;
+ }
+ dev_dbg(mdm->dev, "esoc registration done\n");
+ init_completion(&mdm->debug_done);
+ INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+ INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+ INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+ mdm->get_restart_reason = false;
+ mdm->debug_fail = false;
+ mdm->esoc = esoc;
+ mdm->init = 0;
+ if (esoc->auto_boot)
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 1);
+ return 0;
+}
+
static int mdm9x55_setup_hw(struct mdm_ctrl *mdm,
const struct mdm_ops *ops,
struct platform_device *pdev)
@@ -906,6 +1050,7 @@ static int mdm9x55_setup_hw(struct mdm_ctrl *mdm,
dev_err(mdm->dev, "cannot allocate esoc device\n");
return PTR_ERR(esoc);
}
+ esoc->pdev = pdev;
mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
if (!mdm->mdm_queue) {
dev_err(mdm->dev, "could not create mdm_queue\n");
@@ -963,9 +1108,86 @@ static int mdm9x55_setup_hw(struct mdm_ctrl *mdm,
return 0;
}
+static int apq8096_setup_hw(struct mdm_ctrl *mdm,
+ const struct mdm_ops *ops,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *node;
+ struct esoc_clink *esoc;
+ const struct esoc_clink_ops *const clink_ops = ops->clink_ops;
+ const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+ mdm->dev = &pdev->dev;
+ mdm->pon_ops = pon_ops;
+ node = pdev->dev.of_node;
+ esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(esoc)) {
+ dev_err(mdm->dev, "cannot allocate esoc device\n");
+ return PTR_ERR(esoc);
+ }
+ esoc->pdev = pdev;
+ mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+ if (!mdm->mdm_queue) {
+ dev_err(mdm->dev, "could not create mdm_queue\n");
+ return -ENOMEM;
+ }
+ mdm->irq_mask = 0;
+ mdm->ready = false;
+ ret = mdm_dt_parse_gpios(mdm);
+ if (ret)
+ return ret;
+ dev_dbg(mdm->dev, "parsing gpio done\n");
+ ret = mdm_pon_dt_init(mdm);
+ if (ret)
+ return ret;
+ dev_dbg(mdm->dev, "pon dt init done\n");
+ ret = mdm_pinctrl_init(mdm);
+ if (ret)
+ return ret;
+ dev_dbg(mdm->dev, "pinctrl init done\n");
+ ret = mdm_pon_setup(mdm);
+ if (ret)
+ return ret;
+ dev_dbg(mdm->dev, "pon setup done\n");
+ ret = mdm_configure_ipc(mdm, pdev);
+ if (ret)
+ return ret;
+ dev_dbg(mdm->dev, "ipc configure done\n");
+ esoc->name = APQ8096_LABEL;
+ esoc->link_name = APQ8096_PCIE;
+ esoc->clink_ops = clink_ops;
+ esoc->parent = mdm->dev;
+ esoc->owner = THIS_MODULE;
+ esoc->np = pdev->dev.of_node;
+ esoc->auto_boot = of_property_read_bool(esoc->np,
+ "qcom,mdm-auto-boot");
+ esoc->primary = of_property_read_bool(esoc->np,
+ "qcom,mdm-primary");
+ set_esoc_clink_data(esoc, mdm);
+ ret = esoc_clink_register(esoc);
+ if (ret) {
+ dev_err(mdm->dev, "esoc registration failed\n");
+ return ret;
+ }
+ dev_dbg(mdm->dev, "esoc registration done\n");
+ init_completion(&mdm->debug_done);
+ INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+ INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+ INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+ mdm->get_restart_reason = false;
+ mdm->debug_fail = false;
+ mdm->esoc = esoc;
+ mdm->init = 0;
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 1);
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
+ return 0;
+}
+
static struct esoc_clink_ops mdm_cops = {
.cmd_exe = mdm_cmd_exe,
.get_status = mdm_get_status,
+ .get_err_fatal = mdm_get_err_fatal,
.notify = mdm_notify,
};
@@ -981,6 +1203,18 @@ static struct mdm_ops mdm9x35_ops = {
.pon_ops = &mdm9x35_pon_ops,
};
+static struct mdm_ops mdm9x45_ops = {
+ .clink_ops = &mdm_cops,
+ .config_hw = mdm9x45_setup_hw,
+ .pon_ops = &mdm9x45_pon_ops,
+};
+
+static struct mdm_ops apq8096_ops = {
+ .clink_ops = &mdm_cops,
+ .config_hw = apq8096_setup_hw,
+ .pon_ops = &apq8096_pon_ops,
+};
+
static struct mdm_ops mdm9x55_ops = {
.clink_ops = &mdm_cops,
.config_hw = mdm9x55_setup_hw,
@@ -992,8 +1226,12 @@ static const struct of_device_id mdm_dt_match[] = {
.data = &mdm9x25_ops, },
{ .compatible = "qcom,ext-mdm9x35",
.data = &mdm9x35_ops, },
+ { .compatible = "qcom,ext-mdm9x45",
+ .data = &mdm9x45_ops, },
{ .compatible = "qcom,ext-mdm9x55",
.data = &mdm9x55_ops, },
+ { .compatible = "qcom,ext-apq8096",
+ .data = &apq8096_ops, },
{},
};
MODULE_DEVICE_TABLE(of, mdm_dt_match);
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 8697428eceb2..9c2c68dfef65 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>
+#include <linux/of.h>
#include "esoc.h"
#include "mdm-dbg.h"
@@ -72,7 +73,14 @@ static void mdm_handle_clink_evt(enum esoc_evt evt,
break;
case ESOC_UNEXPECTED_RESET:
case ESOC_ERR_FATAL:
- if (mdm_drv->mode == CRASH)
+ /*
+ * Modem can crash while we are waiting for boot_done during
+ * a subsystem_get(). Setting mode to CRASH will prevent a
+ * subsequent subsystem_get() from entering poweron ops. Avoid
+ * this by setting mode to CRASH only if the device was up and
+ * running.
+ */
+ if (mdm_drv->mode == CRASH || mdm_drv->mode != RUN)
return;
mdm_drv->mode = CRASH;
queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
@@ -161,8 +169,9 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
subsys);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
const struct esoc_clink_ops const *clink_ops = esoc_clink->clink_ops;
+ int timeout = INT_MAX;
- if (!esoc_req_eng_enabled(esoc_clink)) {
+ if (!esoc_clink->auto_boot && !esoc_req_eng_enabled(esoc_clink)) {
dev_dbg(&esoc_clink->dev, "Wait for req eng registration\n");
wait_for_completion(&mdm_drv->req_eng_wait);
}
@@ -187,8 +196,17 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
return ret;
}
}
- wait_for_completion(&mdm_drv->boot_done);
- if (mdm_drv->boot_fail) {
+
+ /*
+ * In the autoboot case, we could end up waiting forever for boot
+ * completion if the esoc fails to boot, because there is no helper
+ * application that can alert the esoc driver about the boot failure.
+ * Bound the wait with a timeout in that case.
+ */
+ if (esoc_clink->auto_boot)
+ timeout = 10 * HZ;
+ ret = wait_for_completion_timeout(&mdm_drv->boot_done, timeout);
+ if (mdm_drv->boot_fail || ret <= 0) {
dev_err(&esoc_clink->dev, "booting failed\n");
return -EIO;
}
@@ -216,10 +234,12 @@ static int mdm_subsys_ramdumps(int want_dumps,
static int mdm_register_ssr(struct esoc_clink *esoc_clink)
{
- esoc_clink->subsys.shutdown = mdm_subsys_shutdown;
- esoc_clink->subsys.ramdump = mdm_subsys_ramdumps;
- esoc_clink->subsys.powerup = mdm_subsys_powerup;
- esoc_clink->subsys.crash_shutdown = mdm_crash_shutdown;
+ struct subsys_desc *subsys = &esoc_clink->subsys;
+
+ subsys->shutdown = mdm_subsys_shutdown;
+ subsys->ramdump = mdm_subsys_ramdumps;
+ subsys->powerup = mdm_subsys_powerup;
+ subsys->crash_shutdown = mdm_crash_shutdown;
return esoc_clink_register_ssr(esoc_clink);
}
@@ -286,6 +306,14 @@ static struct esoc_compat compat_table[] = {
.name = "MDM9x55",
.data = NULL,
},
+ {
+ .name = "MDM9x45",
+ .data = NULL,
+ },
+ {
+ .name = "APQ8096",
+ .data = NULL,
+ },
};
static struct esoc_drv esoc_ssr_drv = {
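A minimal sketch, not part of the patch, of the bounded-wait pattern used above for auto-boot targets; the helper name is illustrative, only the completion API and the 10 s bound mirror the hunk.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * wait_for_completion_timeout() returns 0 on timeout and the remaining
 * jiffies otherwise, so a failed auto-boot cannot hang the powerup path.
 */
static int example_wait_for_boot(struct completion *boot_done, bool auto_boot)
{
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;

	if (auto_boot)
		timeout = 10 * HZ;	/* no helper app exists to flag failure */

	return wait_for_completion_timeout(boot_done, timeout) ? 0 : -ETIMEDOUT;
}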
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index acda06485364..4ae3b7520f77 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,29 @@ static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
return 0;
}
+/* This function can be called from atomic context. */
+static int mdm9x45_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+ int soft_reset_direction_assert = 0,
+ soft_reset_direction_de_assert = 1;
+
+ if (mdm->soft_reset_inverted) {
+ soft_reset_direction_assert = 1;
+ soft_reset_direction_de_assert = 0;
+ }
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+ soft_reset_direction_assert);
+ /*
+ * Allow PS hold assert to be detected
+ */
+ if (!atomic)
+ usleep_range(1000000, 1005000);
+ else
+ mdelay(1000);
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+ soft_reset_direction_de_assert);
+ return 0;
+}
static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm)
{
@@ -68,6 +91,9 @@ static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm)
struct device *dev = mdm->dev;
dev_dbg(dev, "Powering on modem for the first time\n");
+ if (mdm->esoc->auto_boot)
+ return 0;
+
mdm_toggle_soft_reset(mdm, false);
/* Add a delay to allow PON sequence to complete*/
msleep(50);
@@ -132,8 +158,31 @@ static int mdm9x55_power_down(struct mdm_ctrl *mdm)
return 0;
}
+static int mdm9x45_power_down(struct mdm_ctrl *mdm)
+{
+ int soft_reset_direction_assert = 0,
+ soft_reset_direction_de_assert = 1;
+
+ if (mdm->soft_reset_inverted) {
+ soft_reset_direction_assert = 1;
+ soft_reset_direction_de_assert = 0;
+ }
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+ soft_reset_direction_assert);
+ /*
+ * Allow PS hold assert to be detected
+ */
+ msleep(3003);
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+ soft_reset_direction_de_assert);
+ return 0;
+}
+
static void mdm4x_cold_reset(struct mdm_ctrl *mdm)
{
+ if (!gpio_is_valid(MDM_GPIO(mdm, AP2MDM_SOFT_RESET)))
+ return;
+
dev_dbg(mdm->dev, "Triggering mdm cold reset");
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
!!mdm->soft_reset_inverted);
@@ -152,6 +201,11 @@ static void mdm9x55_cold_reset(struct mdm_ctrl *mdm)
!mdm->soft_reset_inverted);
}
+static int apq8096_pon_dt_init(struct mdm_ctrl *mdm)
+{
+ return 0;
+}
+
static int mdm4x_pon_dt_init(struct mdm_ctrl *mdm)
{
int val;
@@ -183,6 +237,21 @@ static int mdm4x_pon_setup(struct mdm_ctrl *mdm)
return 0;
}
+/* This function can be called from atomic context. */
+static int apq8096_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+ return 0;
+}
+
+static int apq8096_power_down(struct mdm_ctrl *mdm)
+{
+ return 0;
+}
+
+static void apq8096_cold_reset(struct mdm_ctrl *mdm)
+{
+}
+
struct mdm_pon_ops mdm9x25_pon_ops = {
.pon = mdm4x_do_first_power_on,
.soft_reset = mdm4x_toggle_soft_reset,
@@ -203,8 +272,8 @@ struct mdm_pon_ops mdm9x35_pon_ops = {
struct mdm_pon_ops mdm9x45_pon_ops = {
.pon = mdm4x_do_first_power_on,
- .soft_reset = mdm4x_toggle_soft_reset,
- .poff_force = mdm4x_power_down,
+ .soft_reset = mdm9x45_toggle_soft_reset,
+ .poff_force = mdm9x45_power_down,
.cold_reset = mdm4x_cold_reset,
.dt_init = mdm4x_pon_dt_init,
.setup = mdm4x_pon_setup,
@@ -218,3 +287,12 @@ struct mdm_pon_ops mdm9x55_pon_ops = {
.dt_init = mdm4x_pon_dt_init,
.setup = mdm4x_pon_setup,
};
+
+struct mdm_pon_ops apq8096_pon_ops = {
+ .pon = mdm4x_do_first_power_on,
+ .soft_reset = apq8096_toggle_soft_reset,
+ .poff_force = apq8096_power_down,
+ .cold_reset = apq8096_cold_reset,
+ .dt_init = apq8096_pon_dt_init,
+ .setup = mdm4x_pon_setup,
+};
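The *_toggle_soft_reset helpers above all share one shape: assert (honouring polarity inversion), hold long enough for PS_HOLD detection, then de-assert, busy-waiting only when called from atomic context. A minimal sketch of that pattern, with the GPIO number and 1 s hold as placeholders:

#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/types.h>

/* Illustrative only; values are placeholders, not defined by this patch. */
static void example_toggle_soft_reset(unsigned int gpio, bool inverted,
				      bool atomic)
{
	int assert_level = inverted ? 1 : 0;

	gpio_direction_output(gpio, assert_level);
	if (atomic)
		mdelay(1000);			/* busy-wait: cannot sleep */
	else
		usleep_range(1000000, 1005000);	/* sleeping wait is allowed */
	gpio_direction_output(gpio, !assert_level);
}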
diff --git a/drivers/esoc/esoc-mdm.h b/drivers/esoc/esoc-mdm.h
index ac811720b035..9343e49559f2 100644
--- a/drivers/esoc/esoc-mdm.h
+++ b/drivers/esoc/esoc-mdm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -37,6 +37,8 @@
#define MDM9x45_PCIE "PCIe"
#define MDM9x55_LABEL "MDM9x55"
#define MDM9x55_PCIE "PCIe"
+#define APQ8096_LABEL "APQ8096"
+#define APQ8096_PCIE "PCIe"
#define MDM2AP_STATUS_TIMEOUT_MS 120000L
#define MDM_MODEM_TIMEOUT 3000
#define DEF_RAMDUMP_TIMEOUT 120000
@@ -153,4 +155,5 @@ extern struct mdm_pon_ops mdm9x25_pon_ops;
extern struct mdm_pon_ops mdm9x35_pon_ops;
extern struct mdm_pon_ops mdm9x45_pon_ops;
extern struct mdm_pon_ops mdm9x55_pon_ops;
+extern struct mdm_pon_ops apq8096_pon_ops;
#endif
diff --git a/drivers/esoc/esoc.h b/drivers/esoc/esoc.h
index 755fb24bd60a..ee54908ce486 100644
--- a/drivers/esoc/esoc.h
+++ b/drivers/esoc/esoc.h
@@ -49,6 +49,7 @@ struct esoc_eng {
* @link_info: additional info about the physical link.
* @parent: parent device.
* @dev: device for userspace interface.
+ * @pdev: platform device to interface with SSR driver.
* @id: id of the external device.
* @owner: owner of the device.
* @clink_ops: control operations for the control link
@@ -59,6 +60,12 @@ struct esoc_eng {
* @subsys_desc: descriptor for subsystem restart
* @subsys_dev: ssr device handle.
* @np: device tree node for esoc_clink.
+ * @auto_boot: boots independently.
+ * @primary: primary esoc controls (reset/poweroff) all secondary
+ * esocs, but not the other way around.
+ * @statusline_not_a_powersource: True if status line to esoc is not a
+ * power source.
+ * @userspace_handle_shutdown: True if user space handles shutdown requests.
*/
struct esoc_clink {
const char *name;
@@ -66,6 +73,7 @@ struct esoc_clink {
const char *link_info;
struct device *parent;
struct device dev;
+ struct platform_device *pdev;
unsigned int id;
struct module *owner;
const struct esoc_clink_ops const *clink_ops;
@@ -77,17 +85,23 @@ struct esoc_clink {
struct subsys_desc subsys;
struct subsys_device *subsys_dev;
struct device_node *np;
+ bool auto_boot;
+ bool primary;
+ bool statusline_not_a_powersource;
+ bool userspace_handle_shutdown;
};
/**
* struct esoc_clink_ops: Operations to control external soc
* @cmd_exe: Execute control command
* @get_status: Get current status, or response to previous command
+ * @get_err_fatal: Get status of err fatal signal
* @notify_esoc: notify external soc of events
*/
struct esoc_clink_ops {
int (*cmd_exe)(enum esoc_cmd cmd, struct esoc_clink *dev);
- int (*get_status)(u32 *status, struct esoc_clink *dev);
+ void (*get_status)(u32 *status, struct esoc_clink *dev);
+ void (*get_err_fatal)(u32 *status, struct esoc_clink *dev);
void (*notify)(enum esoc_notify notify, struct esoc_clink *dev);
};
diff --git a/drivers/esoc/esoc_bus.c b/drivers/esoc/esoc_bus.c
index f925607511ba..94f52764c8c3 100644
--- a/drivers/esoc/esoc_bus.c
+++ b/drivers/esoc/esoc_bus.c
@@ -189,7 +189,7 @@ int esoc_clink_register_ssr(struct esoc_clink *esoc_clink)
snprintf(subsys_name, len, "esoc%d", esoc_clink->id);
esoc_clink->subsys.name = subsys_name;
esoc_clink->dev.of_node = esoc_clink->np;
- esoc_clink->subsys.dev = &esoc_clink->dev;
+ esoc_clink->subsys.dev = &esoc_clink->pdev->dev;
esoc_clink->subsys_dev = subsys_register(&esoc_clink->subsys);
if (IS_ERR_OR_NULL(esoc_clink->subsys_dev)) {
dev_err(&esoc_clink->dev, "failed to register ssr node\n");
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
index bbe1d24fb1f6..a1e7a52a8c26 100644
--- a/drivers/esoc/esoc_dev.c
+++ b/drivers/esoc/esoc_dev.c
@@ -224,9 +224,11 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
clink_ops->notify(esoc_cmd, esoc_clink);
break;
case ESOC_GET_STATUS:
- err = clink_ops->get_status(&status, esoc_clink);
- if (err)
- return err;
+ clink_ops->get_status(&status, esoc_clink);
+ put_user(status, (unsigned int __user *)uarg);
+ break;
+ case ESOC_GET_ERR_FATAL:
+ clink_ops->get_err_fatal(&status, esoc_clink);
put_user(status, (unsigned int __user *)uarg);
break;
case ESOC_WAIT_FOR_CRASH:
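A rough userspace sketch of consuming the new ESOC_GET_ERR_FATAL request alongside ESOC_GET_STATUS; the device node path and the uapi header name are assumptions for illustration only, not defined by this patch.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/esoc_ctrl.h>	/* assumed location of the ioctl codes */

int main(void)
{
	unsigned int status = 0, err_fatal = 0;
	int fd = open("/dev/esoc-0", O_RDONLY);	/* assumed node name */

	if (fd < 0)
		return 1;
	if (!ioctl(fd, ESOC_GET_STATUS, &status) &&
	    !ioctl(fd, ESOC_GET_ERR_FATAL, &err_fatal))
		printf("status=%u err_fatal=%u\n", status, err_fatal);
	close(fd);
	return 0;
}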
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 7c42ff670080..a0924330d125 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7e9154c7f1db..d1c9525d81eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2258,7 +2258,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
if (pi->caps_stable_p_state) {
stable_p_state_sclk = (max_limits->sclk * 75) / 100;
- for (i = table->count - 1; i >= 0; i++) {
+ for (i = table->count - 1; i >= 0; i--) {
if (stable_p_state_sclk >= table->entries[i].clk) {
stable_p_state_sclk = table->entries[i].clk;
break;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fb9f647bb5cd..5044f2257e89 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1159,7 +1159,7 @@ struct intel_gen6_power_mgmt {
struct intel_rps_client semaphores, mmioflips;
/* manual wa residency calculations */
- struct intel_rps_ei up_ei, down_ei;
+ struct intel_rps_ei ei;
/*
* Protects RPS/RC6 register access and PCU communication.
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0f42a2782afc..b7b0a38acd67 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -994,68 +994,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
- const struct intel_rps_ei *old,
- const struct intel_rps_ei *now,
- int threshold)
-{
- u64 time, c0;
- unsigned int mul = 100;
-
- if (old->cz_clock == 0)
- return false;
-
- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
- mul <<= 8;
-
- time = now->cz_clock - old->cz_clock;
- time *= threshold * dev_priv->czclk_freq;
-
- /* Workload can be split between render + media, e.g. SwapBuffers
- * being blitted in X after being rendered in mesa. To account for
- * this we need to combine both engines into our activity counter.
- */
- c0 = now->render_c0 - old->render_c0;
- c0 += now->media_c0 - old->media_c0;
- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
- return c0 >= time;
-}
-
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
- dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ const struct intel_rps_ei *prev = &dev_priv->rps.ei;
struct intel_rps_ei now;
u32 events = 0;
- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
return 0;
vlv_c0_read(dev_priv, &now);
if (now.cz_clock == 0)
return 0;
- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
- if (!vlv_c0_above(dev_priv,
- &dev_priv->rps.down_ei, &now,
- dev_priv->rps.down_threshold))
- events |= GEN6_PM_RP_DOWN_THRESHOLD;
- dev_priv->rps.down_ei = now;
- }
+ if (prev->cz_clock) {
+ u64 time, c0;
+ unsigned int mul;
- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- if (vlv_c0_above(dev_priv,
- &dev_priv->rps.up_ei, &now,
- dev_priv->rps.up_threshold))
- events |= GEN6_PM_RP_UP_THRESHOLD;
- dev_priv->rps.up_ei = now;
+ mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+ mul <<= 8;
+
+ time = now.cz_clock - prev->cz_clock;
+ time *= dev_priv->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ c0 = now.render_c0 - prev->render_c0;
+ c0 += now.media_c0 - prev->media_c0;
+ c0 *= mul;
+
+ if (c0 > time * dev_priv->rps.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * dev_priv->rps.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
}
+ dev_priv->rps.ei = now;
return events;
}
@@ -4390,7 +4373,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
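The rewritten vlv_wa_c0_ei() above reduces to one comparison: busy cycles (render + media C0 deltas, after the mul scaling) against elapsed cz time multiplied by the threshold percentage. A standalone restatement under illustrative names, not i915 API:

#include <linux/types.h>

enum example_rps_event { EX_RPS_NONE, EX_RPS_UP, EX_RPS_DOWN };

/* busy_scaled corresponds to c0 after the mul scaling in the hunk,
 * elapsed to time (cz clock delta * czclk frequency). */
static enum example_rps_event example_rps_decide(u64 busy_scaled, u64 elapsed,
						 unsigned int up_threshold,
						 unsigned int down_threshold)
{
	if (busy_scaled > elapsed * up_threshold)
		return EX_RPS_UP;
	if (busy_scaled < elapsed * down_threshold)
		return EX_RPS_DOWN;
	return EX_RPS_NONE;
}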
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e7c18519274a..fd4690ed93c0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4376,6 +4376,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
break;
}
+ /* When byt can survive without system hang with dynamic
+ * sw freq adjustments, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(dev_priv))
+ goto skip_hw_write;
+
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4394,6 +4400,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
+skip_hw_write:
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
@@ -4404,8 +4411,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
u32 mask = 0;
+ /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
@@ -4509,7 +4517,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 3d0617dbc514..f3a8a8416c7a 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -49,6 +49,7 @@ msm_drm-y := \
sde/sde_vbif.o \
sde_dbg_evtlog.o \
sde_io_util.o \
+ dba_bridge.o \
sde_edid_parser.o
# use drm gpu driver only if qcom_kgsl driver not available
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index c4f886fd6037..a417e42944fc 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -466,6 +466,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_gpu_config a3xx_config = { 0 };
int ret;
if (!pdev) {
@@ -491,7 +492,13 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ a3xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+ a3xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+ a3xx_config.nr_rings = 1;
+ a3xx_config.va_start = 0x300000;
+ a3xx_config.va_end = 0xffffffff;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a3xx_config);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 534a7c3fbdca..069823f054f7 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -543,6 +543,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_gpu_config a4xx_config = { 0 };
int ret;
if (!pdev) {
@@ -568,7 +569,13 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ a4xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+ a4xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+ a4xx_config.nr_rings = 1;
+ a4xx_config.va_start = 0x300000;
+ a4xx_config.va_end = 0xffffffff;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a4xx_config);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 9ceef8f437b5..de2ee1ffb735 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1163,7 +1163,7 @@ static const u32 a5xx_registers[] = {
0xe9c0, 0xe9c7, 0xe9d0, 0xe9d1, 0xea00, 0xea01, 0xea10, 0xea1c,
0xea40, 0xea68, 0xea80, 0xea80, 0xea82, 0xeaa3, 0xeaa5, 0xeac2,
0xeb80, 0xeb8f, 0xebb0, 0xebb0, 0xec00, 0xec05, 0xec08, 0xece9,
- 0xecf0, 0xecf0, 0xf400, 0xf400, 0xf800, 0xf807,
+ 0xecf0, 0xecf0, 0xf800, 0xf807,
~0
};
@@ -1368,6 +1368,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
struct a5xx_gpu *a5xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
+ struct msm_gpu_config a5xx_config = { 0 };
int ret;
if (!pdev) {
@@ -1391,7 +1392,20 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
/* Check the efuses for some configuration */
a5xx_efuses_read(pdev, adreno_gpu);
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+ a5xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+ a5xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+
+ /* Set the number of rings to 4 - yay preemption */
+ a5xx_config.nr_rings = 4;
+
+ /*
+ * Set the user domain range to fall into the TTBR1 region for global
+ * objects
+ */
+ a5xx_config.va_start = 0x800000000;
+ a5xx_config.va_end = 0x8ffffffff;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a5xx_config);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
index 5a2edb0ea518..690e6f546e60 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
@@ -733,6 +733,35 @@ static void a5xx_snapshot_indexed_registers(struct msm_gpu *gpu,
}
}
+static void a5xx_snapshot_preemption(struct msm_gpu *gpu, struct msm_snapshot
+ *snapshot)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_snapshot_gpu_object header = {
+ .type = SNAPSHOT_GPU_OBJECT_GLOBAL,
+ .size = A5XX_PREEMPT_RECORD_SIZE >> 2,
+ .pt_base = 0,
+ };
+ int index;
+
+ if (gpu->nr_rings <= 1)
+ return;
+
+ for (index = 0; index < gpu->nr_rings; index++) {
+
+ header.gpuaddr = a5xx_gpu->preempt_iova[index];
+
+ if (!SNAPSHOT_HEADER(snapshot, header,
+ SNAPSHOT_SECTION_GPU_OBJECT_V2,
+ A5XX_PREEMPT_RECORD_SIZE >> 2))
+ return;
+
+ SNAPSHOT_MEMCPY(snapshot, a5xx_gpu->preempt[index],
+ A5XX_PREEMPT_RECORD_SIZE);
+ }
+}
+
int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
{
struct crashdump crashdump = { 0 };
@@ -787,6 +816,9 @@ int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
/* CP MERCIU */
a5xx_snapshot_cp_merciu(gpu, snapshot);
+ /* Preemption records */
+ a5xx_snapshot_preemption(gpu, snapshot);
+
crashdump_destroy(gpu, &crashdump);
snapshot->priv = NULL;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 19267b2a3b49..9952fa8dcda5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -405,10 +405,6 @@ void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
ring->gpu->name, ring->id);
}
-static const char *iommu_ports[] = {
- "gfx3d_user",
-};
-
/* Read the set of powerlevels */
static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
{
@@ -524,10 +520,10 @@ static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
- const struct adreno_gpu_funcs *funcs, int nr_rings)
+ const struct adreno_gpu_funcs *funcs,
+ struct msm_gpu_config *gpu_config)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
- struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
struct msm_mmu *mmu;
int ret;
@@ -541,26 +537,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
/* Get the rest of the target configuration from the device tree */
adreno_of_parse(pdev, gpu);
- adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
- adreno_gpu_config.irqname = "kgsl_3d0_irq";
- adreno_gpu_config.nr_rings = nr_rings;
-
- adreno_gpu_config.va_start = SZ_16M;
- adreno_gpu_config.va_end = 0xffffffff;
-
- if (adreno_gpu->revn >= 500) {
- /* 5XX targets use a 64 bit region */
- adreno_gpu_config.va_start = 0x800000000;
- adreno_gpu_config.va_end = 0x8ffffffff;
- } else {
- adreno_gpu_config.va_start = 0x300000;
- adreno_gpu_config.va_end = 0xffffffff;
- }
-
- adreno_gpu_config.nr_rings = nr_rings;
-
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
- adreno_gpu->info->name, &adreno_gpu_config);
+ adreno_gpu->info->name, gpu_config);
if (ret)
return ret;
@@ -580,8 +558,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
mmu = gpu->aspace->mmu;
if (mmu) {
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = mmu->funcs->attach(mmu, NULL, 0);
if (ret)
return ret;
}
@@ -722,7 +699,7 @@ static struct adreno_counter_group *get_counter_group(struct msm_gpu *gpu,
return ERR_PTR(-ENODEV);
if (groupid >= adreno_gpu->nr_counter_groups)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENODEV);
return (struct adreno_counter_group *)
adreno_gpu->counter_groups[groupid];
@@ -745,7 +722,7 @@ u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
struct adreno_counter_group *group =
get_counter_group(gpu, groupid);
- if (!IS_ERR(group) && group->funcs.read)
+ if (!IS_ERR_OR_NULL(group) && group->funcs.read)
return group->funcs.read(gpu, group, counterid);
return 0;
@@ -756,6 +733,6 @@ void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
struct adreno_counter_group *group =
get_counter_group(gpu, groupid);
- if (!IS_ERR(group) && group->funcs.put)
+ if (!IS_ERR_OR_NULL(group) && group->funcs.put)
group->funcs.put(gpu, group, counterid);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 8e8f3e5182d6..3f9bc655c383 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -257,7 +257,7 @@ struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
- int nr_rings);
+ struct msm_gpu_config *config);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
diff --git a/drivers/gpu/drm/msm/dba_bridge.c b/drivers/gpu/drm/msm/dba_bridge.c
new file mode 100644
index 000000000000..f933a7f3dcfb
--- /dev/null
+++ b/drivers/gpu/drm/msm/dba_bridge.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <video/msm_dba.h>
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "dba_bridge.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "dba_bridge:[%s] " fmt, __func__
+
+/**
+ * struct dba_bridge - DBA bridge information
+ * @base: drm_bridge base
+ * @client_name: Client's name who calls the init
+ * @chip_name: Bridge chip name
+ * @name: Bridge chip name
+ * @id: Bridge driver index
+ * @display: Private display handle
+ * @list: Bridge chip driver list node
+ * @ops: DBA operation container
+ * @dba_ctx: DBA context
+ * @mode: DRM mode info
+ * @hdmi_mode: HDMI or DVI mode for the sink
+ * @num_of_input_lanes: Number of input lanes in case of DSI/LVDS
+ * @pluggable: If it's pluggable
+ * @panel_count: Number of panels attached to this display
+ */
+struct dba_bridge {
+ struct drm_bridge base;
+ char client_name[MSM_DBA_CLIENT_NAME_LEN];
+ char chip_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+ u32 id;
+ void *display;
+ struct list_head list;
+ struct msm_dba_ops ops;
+ void *dba_ctx;
+ struct drm_display_mode mode;
+ bool hdmi_mode;
+ u32 num_of_input_lanes;
+ bool pluggable;
+ u32 panel_count;
+};
+#define to_dba_bridge(x) container_of((x), struct dba_bridge, base)
+
+static void _dba_bridge_cb(void *data, enum msm_dba_callback_event event)
+{
+ struct dba_bridge *d_bridge = data;
+
+ if (!d_bridge) {
+ SDE_ERROR("Invalid data\n");
+ return;
+ }
+
+ DRM_DEBUG("event: %d\n", event);
+
+ switch (event) {
+ case MSM_DBA_CB_HPD_CONNECT:
+ DRM_DEBUG("HPD CONNECT\n");
+ break;
+ case MSM_DBA_CB_HPD_DISCONNECT:
+ DRM_DEBUG("HPD DISCONNECT\n");
+ break;
+ default:
+ DRM_DEBUG("event:%d is not supported\n", event);
+ break;
+ }
+}
+
+static int _dba_bridge_attach(struct drm_bridge *bridge)
+{
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+ struct msm_dba_reg_info info;
+ int ret = 0;
+
+ if (!bridge) {
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+
+ memset(&info, 0, sizeof(info));
+ /* initialize DBA registration data */
+ strlcpy(info.client_name, d_bridge->client_name,
+ MSM_DBA_CLIENT_NAME_LEN);
+ strlcpy(info.chip_name, d_bridge->chip_name,
+ MSM_DBA_CHIP_NAME_MAX_LEN);
+ info.instance_id = d_bridge->id;
+ info.cb = _dba_bridge_cb;
+ info.cb_data = d_bridge;
+
+ /* register client with DBA and get device's ops */
+ if (IS_ENABLED(CONFIG_MSM_DBA)) {
+ d_bridge->dba_ctx = msm_dba_register_client(&info,
+ &d_bridge->ops);
+ if (IS_ERR_OR_NULL(d_bridge->dba_ctx)) {
+ SDE_ERROR("dba register failed\n");
+ ret = PTR_ERR(d_bridge->dba_ctx);
+ goto error;
+ }
+ } else {
+ SDE_ERROR("DBA not enabled\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ DRM_INFO("client:%s bridge:[%s:%d] attached\n",
+ d_bridge->client_name, d_bridge->chip_name, d_bridge->id);
+
+error:
+ return ret;
+}
+
+static void _dba_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ if (!bridge) {
+ SDE_ERROR("Invalid params\n");
+ return;
+ }
+}
+
+static void _dba_bridge_enable(struct drm_bridge *bridge)
+{
+ int rc = 0;
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+ struct msm_dba_video_cfg video_cfg;
+ struct drm_display_mode *mode;
+ struct hdmi_avi_infoframe avi_frame;
+
+ if (!bridge) {
+ SDE_ERROR("Invalid params\n");
+ return;
+ }
+
+ memset(&video_cfg, 0, sizeof(video_cfg));
+ memset(&avi_frame, 0, sizeof(avi_frame));
+ mode = &d_bridge->mode;
+ video_cfg.h_active = mode->hdisplay;
+ video_cfg.v_active = mode->vdisplay;
+ video_cfg.h_front_porch = mode->hsync_start - mode->hdisplay;
+ video_cfg.v_front_porch = mode->vsync_start - mode->vdisplay;
+ video_cfg.h_back_porch = mode->htotal - mode->hsync_end;
+ video_cfg.v_back_porch = mode->vtotal - mode->vsync_end;
+ video_cfg.h_pulse_width = mode->hsync_end - mode->hsync_start;
+ video_cfg.v_pulse_width = mode->vsync_end - mode->vsync_start;
+ video_cfg.pclk_khz = mode->clock;
+ video_cfg.hdmi_mode = d_bridge->hdmi_mode;
+ video_cfg.num_of_input_lanes = d_bridge->num_of_input_lanes;
+
+ SDE_DEBUG(
+ "video=h[%d,%d,%d,%d] v[%d,%d,%d,%d] pclk=%d hdmi=%d lane=%d\n",
+ video_cfg.h_active, video_cfg.h_front_porch,
+ video_cfg.h_pulse_width, video_cfg.h_back_porch,
+ video_cfg.v_active, video_cfg.v_front_porch,
+ video_cfg.v_pulse_width, video_cfg.v_back_porch,
+ video_cfg.pclk_khz, video_cfg.hdmi_mode,
+ video_cfg.num_of_input_lanes);
+
+ rc = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, mode);
+ if (rc) {
+ SDE_ERROR("get avi frame failed ret=%d\n", rc);
+ } else {
+ video_cfg.scaninfo = avi_frame.scan_mode;
+ switch (avi_frame.picture_aspect) {
+ case HDMI_PICTURE_ASPECT_4_3:
+ video_cfg.ar = MSM_DBA_AR_4_3;
+ break;
+ case HDMI_PICTURE_ASPECT_16_9:
+ video_cfg.ar = MSM_DBA_AR_16_9;
+ break;
+ default:
+ break;
+ }
+ video_cfg.vic = avi_frame.video_code;
+ DRM_INFO("scaninfo=%d ar=%d vic=%d\n",
+ video_cfg.scaninfo, video_cfg.ar, video_cfg.vic);
+ }
+
+ if (d_bridge->ops.video_on) {
+ rc = d_bridge->ops.video_on(d_bridge->dba_ctx, true,
+ &video_cfg, 0);
+ if (rc)
+ SDE_ERROR("video on failed ret=%d\n", rc);
+ }
+}
+
+static void _dba_bridge_disable(struct drm_bridge *bridge)
+{
+ int rc = 0;
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+ if (!bridge) {
+ SDE_ERROR("Invalid params\n");
+ return;
+ }
+
+ if (d_bridge->ops.video_on) {
+ rc = d_bridge->ops.video_on(d_bridge->dba_ctx, false, NULL, 0);
+ if (rc)
+ SDE_ERROR("video off failed ret=%d\n", rc);
+ }
+}
+
+static void _dba_bridge_post_disable(struct drm_bridge *bridge)
+{
+ if (!bridge) {
+ SDE_ERROR("Invalid params\n");
+ return;
+ }
+}
+
+static void _dba_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+ if (!bridge || !mode || !adjusted_mode || !d_bridge) {
+ SDE_ERROR("Invalid params\n");
+ return;
+ } else if (!d_bridge->panel_count) {
+ SDE_ERROR("Panel count is 0\n");
+ return;
+ }
+
+ d_bridge->mode = *adjusted_mode;
+ /* Adjust mode according to number of panels */
+ d_bridge->mode.hdisplay /= d_bridge->panel_count;
+ d_bridge->mode.hsync_start /= d_bridge->panel_count;
+ d_bridge->mode.hsync_end /= d_bridge->panel_count;
+ d_bridge->mode.htotal /= d_bridge->panel_count;
+ d_bridge->mode.clock /= d_bridge->panel_count;
+}
+
+static bool _dba_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ bool ret = true;
+
+ if (!bridge || !mode || !adjusted_mode) {
+ SDE_ERROR("Invalid params\n");
+ return false;
+ }
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs _dba_bridge_ops = {
+ .attach = _dba_bridge_attach,
+ .mode_fixup = _dba_bridge_mode_fixup,
+ .pre_enable = _dba_bridge_pre_enable,
+ .enable = _dba_bridge_enable,
+ .disable = _dba_bridge_disable,
+ .post_disable = _dba_bridge_post_disable,
+ .mode_set = _dba_bridge_mode_set,
+};
+
+struct drm_bridge *dba_bridge_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct dba_bridge_init *data)
+{
+ int rc = 0;
+ struct dba_bridge *bridge;
+ struct msm_drm_private *priv = NULL;
+
+ if (!dev || !encoder || !data) {
+ SDE_ERROR("dev=%pK or encoder=%pK or data=%pK is NULL\n",
+ dev, encoder, data);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ SDE_ERROR("Private data is not present\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+ SDE_ERROR("out of memory\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&bridge->list);
+ strlcpy(bridge->client_name, data->client_name,
+ MSM_DBA_CLIENT_NAME_LEN);
+ strlcpy(bridge->chip_name, data->chip_name,
+ MSM_DBA_CHIP_NAME_MAX_LEN);
+ bridge->id = data->id;
+ bridge->display = data->display;
+ bridge->hdmi_mode = data->hdmi_mode;
+ bridge->num_of_input_lanes = data->num_of_input_lanes;
+ bridge->pluggable = data->pluggable;
+ bridge->panel_count = data->panel_count;
+ bridge->base.funcs = &_dba_bridge_ops;
+ bridge->base.encoder = encoder;
+
+ rc = drm_bridge_attach(dev, &bridge->base);
+ if (rc) {
+ SDE_ERROR("failed to attach bridge, rc=%d\n", rc);
+ goto error_free_bridge;
+ }
+
+ if (data->precede_bridge) {
+ /* Insert current bridge */
+ bridge->base.next = data->precede_bridge->next;
+ data->precede_bridge->next = &bridge->base;
+ } else {
+ encoder->bridge = &bridge->base;
+ }
+
+ if (!bridge->pluggable) {
+ if (bridge->ops.power_on)
+ bridge->ops.power_on(bridge->dba_ctx, true, 0);
+ if (bridge->ops.check_hpd)
+ bridge->ops.check_hpd(bridge->dba_ctx, 0);
+ }
+
+ return &bridge->base;
+
+error_free_bridge:
+ kfree(bridge);
+error:
+ return ERR_PTR(rc);
+}
+
+void dba_bridge_cleanup(struct drm_bridge *bridge)
+{
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+ if (!bridge)
+ return;
+
+ if (IS_ENABLED(CONFIG_MSM_DBA)) {
+ if (!IS_ERR_OR_NULL(d_bridge->dba_ctx))
+ msm_dba_deregister_client(d_bridge->dba_ctx);
+ }
+
+ if (d_bridge->base.encoder)
+ d_bridge->base.encoder->bridge = NULL;
+
+ kfree(bridge);
+}
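For clarity, the per-panel adjustment in _dba_bridge_mode_set() simply divides the horizontal timings and pixel clock by the panel count, e.g. a 3840x1080 source mode over two panels becomes 1920x1080 per bridge at half the clock. A minimal standalone illustration; the struct below is a stand-in, not a DRM type:

struct example_h_timing {
	int hdisplay, hsync_start, hsync_end, htotal, clock_khz;
};

static void example_split_timing(struct example_h_timing *t,
				 unsigned int panel_count)
{
	if (!panel_count)
		return;
	t->hdisplay /= panel_count;
	t->hsync_start /= panel_count;
	t->hsync_end /= panel_count;
	t->htotal /= panel_count;
	t->clock_khz /= panel_count;
}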
diff --git a/drivers/gpu/drm/msm/dba_bridge.h b/drivers/gpu/drm/msm/dba_bridge.h
new file mode 100644
index 000000000000..5562d2b2aef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dba_bridge.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DBA_BRIDGE_H_
+#define _DBA_BRIDGE_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+
+/**
+ * struct dba_bridge_init - Init parameters for DBA bridge
+ * @client_name: Name of the client that calls the init
+ * @chip_name: Bridge chip name
+ * @id: Bridge driver index
+ * @display: Private display handle
+ * @hdmi_mode: HDMI or DVI mode for the sink
+ * @num_of_input_lanes: Number of input lanes in case of DSI/LVDS
+ * @precede_bridge: Preceding bridge chip in the chain
+ * @pluggable: If it's pluggable
+ * @panel_count: Number of panels attached to this display
+ */
+struct dba_bridge_init {
+ const char *client_name;
+ const char *chip_name;
+ u32 id;
+ void *display;
+ bool hdmi_mode;
+ u32 num_of_input_lanes;
+ struct drm_bridge *precede_bridge;
+ bool pluggable;
+ u32 panel_count;
+};
+
+/**
+ * dba_bridge_init - Initialize the DBA bridge
+ * @dev: Pointer to drm device handle
+ * @encoder: Pointer to drm encoder handle
+ * @data: Pointer to init data
+ * Returns: pointer of struct drm_bridge
+ */
+struct drm_bridge *dba_bridge_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct dba_bridge_init *data);
+
+/**
+ * dba_bridge_cleanup - Clean up the DBA bridge
+ * @bridge: Pointer to DBA bridge handle
+ * Returns: void
+ */
+void dba_bridge_cleanup(struct drm_bridge *bridge);
+
+#endif /* _DBA_BRIDGE_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
index ca04eedd6af1..2f0f6c2f1b01 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -85,6 +85,14 @@ void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
DSI_W32(ctrl, DSI_CTRL, reg_value);
+ /* Force clock lane in HS */
+ reg_value = DSI_R32(ctrl, DSI_LANE_CTRL);
+ if (cfg->force_clk_lane_hs)
+ reg_value |= BIT(28);
+ else
+ reg_value &= ~BIT(28);
+ DSI_W32(ctrl, DSI_LANE_CTRL, reg_value);
+
pr_debug("[DSI_%d]Host configuration complete\n", ctrl->index);
}
@@ -604,8 +612,9 @@ void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
{
u32 reg = 0;
+ reg = DSI_R32(ctrl, DSI_LANE_CTRL);
if (lanes & DSI_CLOCK_LANE)
- reg = BIT(4);
+ reg |= BIT(4);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(0);
if (lanes & DSI_DATA_LANE_1)
@@ -664,7 +673,8 @@ void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
u32 reg = 0;
reg = DSI_R32(ctrl, DSI_LANE_CTRL);
- reg &= ~BIT(4); /* clock lane */
+ if (lanes & DSI_CLOCK_LANE)
+ reg &= ~BIT(4); /* clock lane */
if (lanes & DSI_DATA_LANE_0)
reg &= ~BIT(0);
if (lanes & DSI_DATA_LANE_1)
@@ -679,7 +689,18 @@ void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
* HPG recommends separate writes for clearing ULPS_REQUEST and
* ULPS_EXIT.
*/
- DSI_W32(ctrl, DSI_LANE_CTRL, 0x0);
+ reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+ if (lanes & DSI_CLOCK_LANE)
+ reg &= ~BIT(12);
+ if (lanes & DSI_DATA_LANE_0)
+ reg &= ~BIT(8);
+ if (lanes & DSI_DATA_LANE_1)
+ reg &= ~BIT(9);
+ if (lanes & DSI_DATA_LANE_2)
+ reg &= ~BIT(10);
+ if (lanes & DSI_DATA_LANE_3)
+ reg &= ~BIT(11);
+ DSI_W32(ctrl, DSI_LANE_CTRL, reg);
pr_debug("[DSI_%d] ULPS request cleared\n", ctrl->index);
}
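A compact view of the DSI_LANE_CTRL lane-to-bit mapping used by the ULPS hunks above: data lanes 0-3 map to request bits 0-3 and exit bits 8-11, the clock lane to bits 4 and 12. The EX_* lane flags below are placeholders standing in for the driver's dsi_defs.h enums:

#include <linux/types.h>
#include <linux/bitops.h>

#define EX_DSI_DATA_LANE_0	BIT(0)
#define EX_DSI_DATA_LANE_1	BIT(1)
#define EX_DSI_DATA_LANE_2	BIT(2)
#define EX_DSI_DATA_LANE_3	BIT(3)
#define EX_DSI_CLOCK_LANE	BIT(4)

/* Illustrative only: exit bits are the request bits shifted left by 8. */
static u32 example_ulps_lane_bits(u32 lanes, bool exit)
{
	u32 mask = 0;

	if (lanes & EX_DSI_DATA_LANE_0)
		mask |= BIT(0);
	if (lanes & EX_DSI_DATA_LANE_1)
		mask |= BIT(1);
	if (lanes & EX_DSI_DATA_LANE_2)
		mask |= BIT(2);
	if (lanes & EX_DSI_DATA_LANE_3)
		mask |= BIT(3);
	if (lanes & EX_DSI_CLOCK_LANE)
		mask |= BIT(4);

	return exit ? mask << 8 : mask;
}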
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index 2caa32ea8f0c..d9fcec60693d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -259,6 +259,7 @@ struct dsi_lane_mapping {
* @ignore_rx_eot: Ignore Rx EOT packets if set to true.
* @append_tx_eot: Append EOT packets for forward transmissions if set to
* true.
+ * @force_clk_lane_hs: Force clock lane in high speed mode.
*/
struct dsi_host_common_cfg {
enum dsi_pixel_format dst_format;
@@ -277,6 +278,7 @@ struct dsi_host_common_cfg {
u32 t_clk_pre;
bool ignore_rx_eot;
bool append_tx_eot;
+ bool force_clk_lane_hs;
};
/**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 5a166a4bae93..f2412daee8b6 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,13 +18,16 @@
#include <linux/of.h>
#include "msm_drv.h"
+#include "sde_kms.h"
#include "dsi_display.h"
#include "dsi_panel.h"
#include "dsi_ctrl.h"
#include "dsi_ctrl_hw.h"
#include "dsi_drm.h"
+#include "dba_bridge.h"
#define to_dsi_display(x) container_of(x, struct dsi_display, host)
+#define DSI_DBA_CLIENT_NAME "dsi"
static DEFINE_MUTEX(dsi_display_list_lock);
static LIST_HEAD(dsi_display_list);
@@ -45,7 +48,7 @@ int dsi_display_set_backlight(void *display, u32 bl_lvl)
if (dsi_display == NULL)
return -EINVAL;
- panel = dsi_display->panel;
+ panel = dsi_display->panel[0];
rc = dsi_panel_set_backlight(panel, bl_lvl);
if (rc)
@@ -87,8 +90,9 @@ static ssize_t debugfs_dump_info_read(struct file *file,
display->ctrl[i].phy->name);
}
- len += snprintf(buf + len, (SZ_4K - len),
- "\tPanel = %s\n", display->panel->name);
+ for (i = 0; i < display->panel_count; i++)
+ len += snprintf(buf + len, (SZ_4K - len),
+ "\tPanel_%d = %s\n", i, display->panel[i]->name);
len += snprintf(buf + len, (SZ_4K - len),
"\tClock master = %s\n",
@@ -1108,7 +1112,7 @@ static int dsi_display_parse_lane_map(struct dsi_display *display)
static int dsi_display_parse_dt(struct dsi_display *display)
{
int rc = 0;
- int i;
+ int i, size;
u32 phy_count = 0;
struct device_node *of_node;
@@ -1151,14 +1155,69 @@ static int dsi_display_parse_dt(struct dsi_display *display)
goto error;
}
- of_node = of_parse_phandle(display->pdev->dev.of_node,
- "qcom,dsi-panel", 0);
- if (!of_node) {
- pr_err("No Panel device present\n");
+ if (of_get_property(display->pdev->dev.of_node, "qcom,dsi-panel",
+ &size)) {
+ display->panel_count = size / sizeof(int);
+ display->panel_of = devm_kzalloc(&display->pdev->dev,
+ sizeof(struct device_node *) * display->panel_count,
+ GFP_KERNEL);
+ if (!display->panel_of) {
+ SDE_ERROR("out of memory for panel_of\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+ display->panel = devm_kzalloc(&display->pdev->dev,
+ sizeof(struct dsi_panel *) * display->panel_count,
+ GFP_KERNEL);
+ if (!display->panel) {
+ SDE_ERROR("out of memory for panel\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+ for (i = 0; i < display->panel_count; i++) {
+ display->panel_of[i] =
+ of_parse_phandle(display->pdev->dev.of_node,
+ "qcom,dsi-panel", i);
+ if (!display->panel_of[i]) {
+ SDE_ERROR("of_parse dsi-panel failed\n");
+ rc = -ENODEV;
+ goto error;
+ }
+ }
+ } else {
+ SDE_ERROR("No qcom,dsi-panel of node\n");
rc = -ENODEV;
goto error;
- } else {
- display->panel_of = of_node;
+ }
+
+ if (of_get_property(display->pdev->dev.of_node, "qcom,bridge-index",
+ &size)) {
+ if (size / sizeof(int) != display->panel_count) {
+ SDE_ERROR("size=%lu is different than count=%u\n",
+ size / sizeof(int), display->panel_count);
+ rc = -EINVAL;
+ goto error;
+ }
+ display->bridge_idx = devm_kzalloc(&display->pdev->dev,
+ sizeof(u32) * display->panel_count, GFP_KERNEL);
+ if (!display->bridge_idx) {
+ SDE_ERROR("out of memory for bridge_idx\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+ for (i = 0; i < display->panel_count; i++) {
+ rc = of_property_read_u32_index(
+ display->pdev->dev.of_node,
+ "qcom,bridge-index", i,
+ &(display->bridge_idx[i]));
+ if (rc) {
+ SDE_ERROR(
+ "read bridge-index error,i=%d rc=%d\n",
+ i, rc);
+ rc = -ENODEV;
+ goto error;
+ }
+ }
}
rc = dsi_display_parse_lane_map(display);
@@ -1167,6 +1226,16 @@ static int dsi_display_parse_dt(struct dsi_display *display)
goto error;
}
error:
+ if (rc) {
+ if (display->panel_of)
+ for (i = 0; i < display->panel_count; i++)
+ if (display->panel_of[i])
+ of_node_put(display->panel_of[i]);
+ devm_kfree(&display->pdev->dev, display->panel_of);
+ devm_kfree(&display->pdev->dev, display->panel);
+ devm_kfree(&display->pdev->dev, display->bridge_idx);
+ display->panel_count = 0;
+ }
return rc;
}
@@ -1196,12 +1265,15 @@ static int dsi_display_res_init(struct dsi_display *display)
}
}
- display->panel = dsi_panel_get(&display->pdev->dev, display->panel_of);
- if (IS_ERR_OR_NULL(display->panel)) {
- rc = PTR_ERR(display->panel);
- pr_err("failed to get panel, rc=%d\n", rc);
- display->panel = NULL;
- goto error_ctrl_put;
+ for (i = 0; i < display->panel_count; i++) {
+ display->panel[i] = dsi_panel_get(&display->pdev->dev,
+ display->panel_of[i]);
+ if (IS_ERR_OR_NULL(display->panel[i])) {
+ rc = PTR_ERR(display->panel[i]);
+ pr_err("failed to get panel, rc=%d\n", rc);
+ display->panel[i] = NULL;
+ goto error_ctrl_put;
+ }
}
rc = dsi_display_clocks_init(display);
@@ -1230,6 +1302,9 @@ static int dsi_display_res_deinit(struct dsi_display *display)
if (rc)
pr_err("clocks deinit failed, rc=%d\n", rc);
+ for (i = 0; i < display->panel_count; i++)
+ dsi_panel_put(display->panel[i]);
+
for (i = 0; i < display->ctrl_count; i++) {
ctrl = &display->ctrl[i];
dsi_phy_put(ctrl->phy);
@@ -1279,7 +1354,7 @@ static bool dsi_display_is_seamless_dfps_possible(
return false;
}
- cur = &display->panel->mode;
+ cur = &display->panel[0]->mode;
if (cur->timing.h_active != tgt->timing.h_active) {
pr_debug("timing.h_active differs %d %d\n",
@@ -1388,7 +1463,7 @@ static int dsi_display_dfps_update(struct dsi_display *display,
}
timing = &dsi_mode->timing;
- dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+ dsi_panel_get_dfps_caps(display->panel[0], &dfps_caps);
if (!dfps_caps.dfps_support) {
pr_err("dfps not supported\n");
return -ENOTSUPP;
@@ -1425,7 +1500,7 @@ static int dsi_display_dfps_update(struct dsi_display *display,
}
}
- panel_mode = &display->panel->mode;
+ panel_mode = &display->panel[0]->mode;
memcpy(panel_mode, dsi_mode, sizeof(*panel_mode));
error:
@@ -1493,7 +1568,8 @@ static int dsi_display_get_dfps_timing(struct dsi_display *display,
}
m_ctrl = display->ctrl[display->clk_master_idx].ctrl;
- dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+ /* Only check the first panel */
+ dsi_panel_get_dfps_caps(display->panel[0], &dfps_caps);
if (!dfps_caps.dfps_support) {
pr_err("dfps not supported by panel\n");
return -EINVAL;
@@ -1574,7 +1650,7 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
int i;
struct dsi_display_ctrl *ctrl;
- rc = dsi_panel_get_host_cfg_for_mode(display->panel,
+ rc = dsi_panel_get_host_cfg_for_mode(display->panel[0],
mode,
&display->config);
if (rc) {
@@ -1687,7 +1763,7 @@ static int dsi_display_bind(struct device *dev,
struct drm_device *drm;
struct dsi_display *display;
struct platform_device *pdev = to_platform_device(dev);
- int i, rc = 0;
+ int i, j, rc = 0;
if (!dev || !pdev || !master) {
pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
@@ -1737,15 +1813,19 @@ static int dsi_display_bind(struct device *dev,
goto error_ctrl_deinit;
}
- rc = dsi_panel_drv_init(display->panel, &display->host);
- if (rc) {
- if (rc != -EPROBE_DEFER)
- pr_err("[%s] failed to initialize panel driver, rc=%d\n",
- display->name, rc);
- goto error_host_deinit;
+ for (j = 0; j < display->panel_count; j++) {
+ rc = dsi_panel_drv_init(display->panel[j], &display->host);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ SDE_ERROR(
+ "[%s]Failed to init panel driver, rc=%d\n",
+ display->name, rc);
+ goto error_panel_deinit;
+ }
}
- rc = dsi_panel_get_mode_count(display->panel, &display->num_of_modes);
+ rc = dsi_panel_get_mode_count(display->panel[0],
+ &display->num_of_modes);
if (rc) {
pr_err("[%s] failed to get mode count, rc=%d\n",
display->name, rc);
@@ -1756,8 +1836,8 @@ static int dsi_display_bind(struct device *dev,
goto error;
error_panel_deinit:
- (void)dsi_panel_drv_deinit(display->panel);
-error_host_deinit:
+ for (j--; j >= 0; j--)
+ (void)dsi_panel_drv_deinit(display->panel[j]);
(void)dsi_display_mipi_host_deinit(display);
error_ctrl_deinit:
for (i = i - 1; i >= 0; i--) {
@@ -1798,10 +1878,12 @@ static void dsi_display_unbind(struct device *dev,
mutex_lock(&display->display_lock);
- rc = dsi_panel_drv_deinit(display->panel);
- if (rc)
- pr_err("[%s] failed to deinit panel driver, rc=%d\n",
- display->name, rc);
+ for (i = 0; i < display->panel_count; i++) {
+ rc = dsi_panel_drv_deinit(display->panel[i]);
+ if (rc)
+ SDE_ERROR("[%s] failed to deinit panel driver, rc=%d\n",
+ display->name, rc);
+ }
rc = dsi_display_mipi_host_deinit(display);
if (rc)
@@ -1870,7 +1952,7 @@ int dsi_display_dev_probe(struct platform_device *pdev)
display->pdev = pdev;
platform_set_drvdata(pdev, display);
mutex_lock(&dsi_display_list_lock);
- list_add(&display->list, &dsi_display_list);
+ list_add_tail(&display->list, &dsi_display_list);
mutex_unlock(&dsi_display_list_lock);
if (display->is_active) {
@@ -1890,7 +1972,7 @@ int dsi_display_dev_probe(struct platform_device *pdev)
int dsi_display_dev_remove(struct platform_device *pdev)
{
- int rc = 0;
+ int rc = 0, i;
struct dsi_display *display;
struct dsi_display *pos, *tmp;
@@ -1913,6 +1995,13 @@ int dsi_display_dev_remove(struct platform_device *pdev)
mutex_unlock(&dsi_display_list_lock);
platform_set_drvdata(pdev, NULL);
+ if (display->panel_of)
+ for (i = 0; i < display->panel_count; i++)
+ if (display->panel_of[i])
+ of_node_put(display->panel_of[i]);
+ devm_kfree(&pdev->dev, display->panel_of);
+ devm_kfree(&pdev->dev, display->panel);
+ devm_kfree(&pdev->dev, display->bridge_idx);
devm_kfree(&pdev->dev, display);
return rc;
}
@@ -1984,9 +2073,15 @@ void dsi_display_set_active_state(struct dsi_display *display, bool is_active)
int dsi_display_drm_bridge_init(struct dsi_display *display,
struct drm_encoder *enc)
{
- int rc = 0;
+ int rc = 0, i;
struct dsi_bridge *bridge;
+ struct drm_bridge *dba_bridge;
+ struct dba_bridge_init init_data;
+ struct drm_bridge *precede_bridge;
struct msm_drm_private *priv = NULL;
+ struct dsi_panel *panel;
+ u32 *bridge_idx;
+ u32 num_of_lanes = 0;
if (!display || !display->drm_dev || !enc) {
pr_err("invalid param(s)\n");
@@ -1997,44 +2092,112 @@ int dsi_display_drm_bridge_init(struct dsi_display *display,
priv = display->drm_dev->dev_private;
if (!priv) {
- pr_err("Private data is not present\n");
+ SDE_ERROR("Private data is not present\n");
rc = -EINVAL;
- goto error;
+ goto out;
}
if (display->bridge) {
- pr_err("display is already initialize\n");
- goto error;
+ SDE_ERROR("display is already initialize\n");
+ goto out;
}
bridge = dsi_drm_bridge_init(display, display->drm_dev, enc);
if (IS_ERR_OR_NULL(bridge)) {
rc = PTR_ERR(bridge);
- pr_err("[%s] brige init failed, %d\n", display->name, rc);
- goto error;
+ SDE_ERROR("[%s] brige init failed, %d\n", display->name, rc);
+ goto out;
}
display->bridge = bridge;
priv->bridges[priv->num_bridges++] = &bridge->base;
+ precede_bridge = &bridge->base;
+
+ if (display->panel_count >= MAX_BRIDGES - 1) {
+ SDE_ERROR("too many bridge chips=%d\n", display->panel_count);
+ goto error_bridge;
+ }
+
+ for (i = 0; i < display->panel_count; i++) {
+ panel = display->panel[i];
+ if (panel && display->bridge_idx &&
+ panel->dba_config.dba_panel) {
+ bridge_idx = display->bridge_idx + i;
+ num_of_lanes = 0;
+ memset(&init_data, 0x00, sizeof(init_data));
+ if (panel->host_config.data_lanes & DSI_DATA_LANE_0)
+ num_of_lanes++;
+ if (panel->host_config.data_lanes & DSI_DATA_LANE_1)
+ num_of_lanes++;
+ if (panel->host_config.data_lanes & DSI_DATA_LANE_2)
+ num_of_lanes++;
+ if (panel->host_config.data_lanes & DSI_DATA_LANE_3)
+ num_of_lanes++;
+ init_data.client_name = DSI_DBA_CLIENT_NAME;
+ init_data.chip_name = panel->dba_config.bridge_name;
+ init_data.id = *bridge_idx;
+ init_data.display = display;
+ init_data.hdmi_mode = panel->dba_config.hdmi_mode;
+ init_data.num_of_input_lanes = num_of_lanes;
+ init_data.precede_bridge = precede_bridge;
+ init_data.panel_count = display->panel_count;
+ dba_bridge = dba_bridge_init(display->drm_dev, enc,
+ &init_data);
+ if (IS_ERR_OR_NULL(dba_bridge)) {
+ rc = PTR_ERR(dba_bridge);
+ SDE_ERROR("[%s:%d] dba brige init failed, %d\n",
+ init_data.chip_name, init_data.id, rc);
+ goto error_dba_bridge;
+ }
+ priv->bridges[priv->num_bridges++] = dba_bridge;
+ precede_bridge = dba_bridge;
+ }
+ }
-error:
+ goto out;
+
+error_dba_bridge:
+ for (i = 1; i < MAX_BRIDGES; i++) {
+ dba_bridge_cleanup(priv->bridges[i]);
+ priv->bridges[i] = NULL;
+ }
+error_bridge:
+ dsi_drm_bridge_cleanup(display->bridge);
+ display->bridge = NULL;
+ priv->bridges[0] = NULL;
+ priv->num_bridges = 0;
+out:
mutex_unlock(&display->display_lock);
return rc;
}
int dsi_display_drm_bridge_deinit(struct dsi_display *display)
{
- int rc = 0;
+ int rc = 0, i;
+ struct msm_drm_private *priv = NULL;
if (!display) {
- pr_err("Invalid params\n");
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+ priv = display->drm_dev->dev_private;
+
+ if (!priv) {
+ SDE_ERROR("Private data is not present\n");
return -EINVAL;
}
mutex_lock(&display->display_lock);
+ for (i = 1; i < MAX_BRIDGES; i++) {
+ dba_bridge_cleanup(priv->bridges[i]);
+ priv->bridges[i] = NULL;
+ }
+
dsi_drm_bridge_cleanup(display->bridge);
display->bridge = NULL;
+ priv->bridges[0] = NULL;
+ priv->num_bridges = 0;
mutex_unlock(&display->display_lock);
return rc;
@@ -2053,7 +2216,7 @@ int dsi_display_get_info(struct msm_display_info *info, void *disp)
display = disp;
mutex_lock(&display->display_lock);
- rc = dsi_panel_get_phy_props(display->panel, &phy_props);
+ rc = dsi_panel_get_phy_props(display->panel[0], &phy_props);
if (rc) {
pr_err("[%s] failed to get panel phy props, rc=%d\n",
display->name, rc);
@@ -2073,7 +2236,7 @@ int dsi_display_get_info(struct msm_display_info *info, void *disp)
info->max_height = 1080;
info->compression = MSM_DISPLAY_COMPRESS_NONE;
- switch (display->panel->mode.panel_mode) {
+ switch (display->panel[0]->mode.panel_mode) {
case DSI_OP_VIDEO_MODE:
info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
break;
@@ -2082,7 +2245,7 @@ int dsi_display_get_info(struct msm_display_info *info, void *disp)
break;
default:
pr_err("unknwown dsi panel mode %d\n",
- display->panel->mode.panel_mode);
+ display->panel[0]->mode.panel_mode);
break;
}
error:
@@ -2106,7 +2269,7 @@ int dsi_display_get_modes(struct dsi_display *display,
mutex_lock(&display->display_lock);
- rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+ rc = dsi_panel_get_dfps_caps(display->panel[0], &dfps_caps);
if (rc) {
pr_err("[%s] failed to get dfps caps from panel\n",
display->name);
@@ -2127,7 +2290,8 @@ int dsi_display_get_modes(struct dsi_display *display,
/* Insert the dfps "sub-modes" between main panel modes */
int panel_mode_idx = i / num_dfps_rates;
- rc = dsi_panel_get_mode(display->panel, panel_mode_idx, modes);
+ rc = dsi_panel_get_mode(display->panel[0], panel_mode_idx,
+ modes);
if (rc) {
pr_err("[%s] failed to get mode from panel\n",
display->name);
@@ -2178,7 +2342,7 @@ int dsi_display_validate_mode(struct dsi_display *display,
adj_mode = *mode;
adjust_timing_by_ctrl_count(display, &adj_mode);
- rc = dsi_panel_validate_mode(display->panel, &adj_mode);
+ rc = dsi_panel_validate_mode(display->panel[0], &adj_mode);
if (rc) {
pr_err("[%s] panel mode validation failed, rc=%d\n",
display->name, rc);
@@ -2278,7 +2442,7 @@ error:
int dsi_display_prepare(struct dsi_display *display)
{
- int rc = 0;
+ int rc = 0, i, j;
if (!display) {
pr_err("Invalid params\n");
@@ -2287,11 +2451,13 @@ int dsi_display_prepare(struct dsi_display *display)
mutex_lock(&display->display_lock);
- rc = dsi_panel_pre_prepare(display->panel);
- if (rc) {
- pr_err("[%s] panel pre-prepare failed, rc=%d\n",
- display->name, rc);
- goto error;
+ for (i = 0; i < display->panel_count; i++) {
+ rc = dsi_panel_pre_prepare(display->panel[i]);
+ if (rc) {
+ SDE_ERROR("[%s] panel pre-prepare failed, rc=%d\n",
+ display->name, rc);
+ goto error_panel_post_unprep;
+ }
}
rc = dsi_display_ctrl_power_on(display);
@@ -2349,15 +2515,20 @@ int dsi_display_prepare(struct dsi_display *display)
goto error_ctrl_link_off;
}
- rc = dsi_panel_prepare(display->panel);
- if (rc) {
- pr_err("[%s] panel prepare failed, rc=%d\n", display->name, rc);
- goto error_host_engine_off;
+ for (j = 0; j < display->panel_count; j++) {
+ rc = dsi_panel_prepare(display->panel[j]);
+ if (rc) {
+ SDE_ERROR("[%s] panel prepare failed, rc=%d\n",
+ display->name, rc);
+ goto error_panel_unprep;
+ }
}
goto error;
-error_host_engine_off:
+error_panel_unprep:
+ for (j--; j >= 0; j--)
+ (void)dsi_panel_unprepare(display->panel[j]);
(void)dsi_display_ctrl_host_disable(display);
error_ctrl_link_off:
(void)dsi_display_ctrl_link_clk_off(display);
@@ -2372,7 +2543,8 @@ error_phy_pwr_off:
error_ctrl_pwr_off:
(void)dsi_display_ctrl_power_off(display);
error_panel_post_unprep:
- (void)dsi_panel_post_unprepare(display->panel);
+ for (i--; i >= 0; i--)
+ (void)dsi_panel_post_unprepare(display->panel[i]);
error:
mutex_unlock(&display->display_lock);
return rc;
@@ -2380,7 +2552,7 @@ error:
int dsi_display_enable(struct dsi_display *display)
{
- int rc = 0;
+ int rc = 0, i;
if (!display) {
pr_err("Invalid params\n");
@@ -2389,11 +2561,13 @@ int dsi_display_enable(struct dsi_display *display)
mutex_lock(&display->display_lock);
- rc = dsi_panel_enable(display->panel);
- if (rc) {
- pr_err("[%s] failed to enable DSI panel, rc=%d\n",
- display->name, rc);
- goto error;
+ for (i = 0; i < display->panel_count; i++) {
+ rc = dsi_panel_enable(display->panel[i]);
+ if (rc) {
+ SDE_ERROR("[%s] failed to enable DSI panel, rc=%d\n",
+ display->name, rc);
+ goto error_disable_panel;
+ }
}
if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
@@ -2419,7 +2593,8 @@ int dsi_display_enable(struct dsi_display *display)
goto error;
error_disable_panel:
- (void)dsi_panel_disable(display->panel);
+ for (i--; i >= 0; i--)
+ (void)dsi_panel_disable(display->panel[i]);
error:
mutex_unlock(&display->display_lock);
return rc;
@@ -2427,7 +2602,7 @@ error:
int dsi_display_post_enable(struct dsi_display *display)
{
- int rc = 0;
+ int rc = 0, i;
if (!display) {
pr_err("Invalid params\n");
@@ -2436,10 +2611,12 @@ int dsi_display_post_enable(struct dsi_display *display)
mutex_lock(&display->display_lock);
- rc = dsi_panel_post_enable(display->panel);
- if (rc)
- pr_err("[%s] panel post-enable failed, rc=%d\n",
- display->name, rc);
+ for (i = 0; i < display->panel_count; i++) {
+ rc = dsi_panel_post_enable(display->panel[i]);
+ if (rc)
+ SDE_ERROR("[%s] panel post-enable failed, rc=%d\n",
+ display->name, rc);
+ }
mutex_unlock(&display->display_lock);
return rc;
@@ -2447,7 +2624,7 @@ int dsi_display_post_enable(struct dsi_display *display)
int dsi_display_pre_disable(struct dsi_display *display)
{
- int rc = 0;
+ int rc = 0, i;
if (!display) {
pr_err("Invalid params\n");
@@ -2456,10 +2633,12 @@ int dsi_display_pre_disable(struct dsi_display *display)
mutex_lock(&display->display_lock);
- rc = dsi_panel_pre_disable(display->panel);
- if (rc)
- pr_err("[%s] panel pre-disable failed, rc=%d\n",
- display->name, rc);
+ for (i = 0; i < display->panel_count; i++) {
+ rc = dsi_panel_pre_disable(display->panel[i]);
+ if (rc)
+ SDE_ERROR("[%s] panel pre-disable failed, rc=%d\n",
+ display->name, rc);
+ }
mutex_unlock(&display->display_lock);
return rc;
@@ -2467,7 +2646,7 @@ int dsi_display_pre_disable(struct dsi_display *display)
int dsi_display_disable(struct dsi_display *display)
{
- int rc = 0;
+ int rc = 0, i;
if (!display) {
pr_err("Invalid params\n");
@@ -2481,10 +2660,12 @@ int dsi_display_disable(struct dsi_display *display)
pr_err("[%s] display wake up failed, rc=%d\n",
display->name, rc);
- rc = dsi_panel_disable(display->panel);
- if (rc)
- pr_err("[%s] failed to disable DSI panel, rc=%d\n",
- display->name, rc);
+ for (i = 0; i < display->panel_count; i++) {
+ rc = dsi_panel_disable(display->panel[i]);
+ if (rc)
+ SDE_ERROR("[%s] failed to disable DSI panel, rc=%d\n",
+ display->name, rc);
+ }
if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
rc = dsi_display_vid_engine_disable(display);
@@ -2507,7 +2688,7 @@ int dsi_display_disable(struct dsi_display *display)
int dsi_display_unprepare(struct dsi_display *display)
{
- int rc = 0;
+ int rc = 0, i;
if (!display) {
pr_err("Invalid params\n");
@@ -2521,10 +2702,12 @@ int dsi_display_unprepare(struct dsi_display *display)
pr_err("[%s] display wake up failed, rc=%d\n",
display->name, rc);
- rc = dsi_panel_unprepare(display->panel);
- if (rc)
- pr_err("[%s] panel unprepare failed, rc=%d\n",
- display->name, rc);
+ for (i = 0; i < display->panel_count; i++) {
+ rc = dsi_panel_unprepare(display->panel[i]);
+ if (rc)
+ SDE_ERROR("[%s] panel unprepare failed, rc=%d\n",
+ display->name, rc);
+ }
rc = dsi_display_ctrl_host_disable(display);
if (rc)
@@ -2561,10 +2744,12 @@ int dsi_display_unprepare(struct dsi_display *display)
pr_err("[%s] failed to power DSI vregs, rc=%d\n",
display->name, rc);
- rc = dsi_panel_post_unprepare(display->panel);
- if (rc)
- pr_err("[%s] panel post-unprepare failed, rc=%d\n",
- display->name, rc);
+ for (i = 0; i < display->panel_count; i++) {
+ rc = dsi_panel_post_unprepare(display->panel[i]);
+ if (rc)
+ pr_err("[%s] panel post-unprepare failed, rc=%d\n",
+ display->name, rc);
+ }
mutex_unlock(&display->display_lock);
return rc;
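
The per-panel loops above all rely on the same unwind idiom: iterate display->panel[] forward, and on failure walk the same index back down so that only panels which actually succeeded are undone. A minimal sketch of that idiom, assuming a hypothetical prepare_all_panels() helper that is not part of this patch and only mirrors what dsi_display_prepare()/dsi_display_enable() do inline:

    static int prepare_all_panels(struct dsi_display *display)
    {
        int rc = 0, i;

        /* Walk the panel array forward... */
        for (i = 0; i < display->panel_count; i++) {
            rc = dsi_panel_prepare(display->panel[i]);
            if (rc)
                goto unwind;
        }
        return 0;

    unwind:
        /* ...and on failure undo only the panels that were prepared. */
        for (i--; i >= 0; i--)
            (void)dsi_panel_unprepare(display->panel[i]);
        return rc;
    }
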
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index b77bf268dbd1..210b8d00850b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -102,8 +102,11 @@ struct dsi_display_clk_info {
* @display_lock: Mutex for dsi_display interface.
* @ctrl_count: Number of DSI interfaces required by panel.
* @ctrl: Controller information for DSI display.
+ * @panel_count: Number of DSI panels.
* @panel: Handle to DSI panel.
- * @panel_of: pHandle to DSI panel.
+ * @panel_of: Array of panel_count struct device_node pointers, one
+ * phandle per DSI panel.
+ * @bridge_idx: Bridge chip index for each entry in @panel_of.
* @type: DSI display type.
* @clk_master_idx: The master controller for controlling clocks. This is an
* index into the ctrl[MAX_DSI_CTRLS_PER_DISPLAY] array.
@@ -133,8 +136,10 @@ struct dsi_display {
struct dsi_display_ctrl ctrl[MAX_DSI_CTRLS_PER_DISPLAY];
/* panel info */
- struct dsi_panel *panel;
- struct device_node *panel_of;
+ u32 panel_count;
+ struct dsi_panel **panel;
+ struct device_node **panel_of;
+ u32 *bridge_idx;
enum dsi_display_type type;
u32 clk_master_idx;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index a1adecf81cc0..995cda97a2f0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -213,18 +213,21 @@ static void dsi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *adjusted_mode)
{
struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+ struct dsi_panel *panel;
- if (!bridge || !mode || !adjusted_mode) {
+ if (!bridge || !mode || !adjusted_mode || !c_bridge->display ||
+ !c_bridge->display->panel[0]) {
pr_err("Invalid params\n");
return;
}
+ /* the dsi drm bridge always corresponds to the first panel */
+ panel = c_bridge->display->panel[0];
memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
pr_debug("note: using panel cmd/vid mode instead of user val\n");
- c_bridge->dsi_mode.panel_mode =
- c_bridge->display->panel->mode.panel_mode;
+ c_bridge->dsi_mode.panel_mode = panel->mode.panel_mode;
}
static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -271,6 +274,7 @@ int dsi_conn_post_init(struct drm_connector *connector,
{
struct dsi_display *dsi_display = display;
struct dsi_panel *panel;
+ int i;
if (!info || !dsi_display)
return -EINVAL;
@@ -299,60 +303,65 @@ int dsi_conn_post_init(struct drm_connector *connector,
break;
}
- if (!dsi_display->panel) {
- pr_debug("invalid panel data\n");
- goto end;
- }
-
- panel = dsi_display->panel;
- sde_kms_info_add_keystr(info, "panel name", panel->name);
-
- switch (panel->mode.panel_mode) {
- case DSI_OP_VIDEO_MODE:
- sde_kms_info_add_keystr(info, "panel mode", "video");
- break;
- case DSI_OP_CMD_MODE:
- sde_kms_info_add_keystr(info, "panel mode", "command");
- sde_kms_info_add_keyint(info, "mdp_transfer_time_us",
- panel->cmd_config.mdp_transfer_time_us);
- break;
- default:
- pr_debug("invalid panel type:%d\n", panel->mode.panel_mode);
- break;
- }
- sde_kms_info_add_keystr(info, "dfps support",
- panel->dfps_caps.dfps_support ? "true" : "false");
+ for (i = 0; i < dsi_display->panel_count; i++) {
+ if (!dsi_display->panel[i]) {
+ pr_debug("invalid panel data\n");
+ goto end;
+ }
- switch (panel->phy_props.rotation) {
- case DSI_PANEL_ROTATE_NONE:
- sde_kms_info_add_keystr(info, "panel orientation", "none");
- break;
- case DSI_PANEL_ROTATE_H_FLIP:
- sde_kms_info_add_keystr(info, "panel orientation", "horz flip");
- break;
- case DSI_PANEL_ROTATE_V_FLIP:
- sde_kms_info_add_keystr(info, "panel orientation", "vert flip");
- break;
- default:
- pr_debug("invalid panel rotation:%d\n",
+ panel = dsi_display->panel[i];
+ sde_kms_info_add_keystr(info, "panel name", panel->name);
+
+ switch (panel->mode.panel_mode) {
+ case DSI_OP_VIDEO_MODE:
+ sde_kms_info_add_keystr(info, "panel mode", "video");
+ break;
+ case DSI_OP_CMD_MODE:
+ sde_kms_info_add_keystr(info, "panel mode", "command");
+ break;
+ default:
+ pr_debug("invalid panel type:%d\n",
+ panel->mode.panel_mode);
+ break;
+ }
+ sde_kms_info_add_keystr(info, "dfps support",
+ panel->dfps_caps.dfps_support ?
+ "true" : "false");
+
+ switch (panel->phy_props.rotation) {
+ case DSI_PANEL_ROTATE_NONE:
+ sde_kms_info_add_keystr(info, "panel orientation",
+ "none");
+ break;
+ case DSI_PANEL_ROTATE_H_FLIP:
+ sde_kms_info_add_keystr(info, "panel orientation",
+ "horz flip");
+ break;
+ case DSI_PANEL_ROTATE_V_FLIP:
+ sde_kms_info_add_keystr(info, "panel orientation",
+ "vert flip");
+ break;
+ default:
+ pr_debug("invalid panel rotation:%d\n",
panel->phy_props.rotation);
- break;
- }
+ break;
+ }
- switch (panel->bl_config.type) {
- case DSI_BACKLIGHT_PWM:
- sde_kms_info_add_keystr(info, "backlight type", "pwm");
- break;
- case DSI_BACKLIGHT_WLED:
- sde_kms_info_add_keystr(info, "backlight type", "wled");
- break;
- case DSI_BACKLIGHT_DCS:
- sde_kms_info_add_keystr(info, "backlight type", "dcs");
- break;
- default:
- pr_debug("invalid panel backlight type:%d\n",
- panel->bl_config.type);
- break;
+ switch (panel->bl_config.type) {
+ case DSI_BACKLIGHT_PWM:
+ sde_kms_info_add_keystr(info, "backlight type", "pwm");
+ break;
+ case DSI_BACKLIGHT_WLED:
+ sde_kms_info_add_keystr(info, "backlight type", "wled");
+ break;
+ case DSI_BACKLIGHT_DCS:
+ sde_kms_info_add_keystr(info, "backlight type", "dcs");
+ break;
+ default:
+ pr_debug("invalid panel backlight type:%d\n",
+ panel->bl_config.type);
+ break;
+ }
}
end:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index a7a39e685d4d..b1319a68429f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include "sde_kms.h"
#include "dsi_panel.h"
#include "dsi_ctrl_hw.h"
@@ -171,10 +172,12 @@ static int dsi_panel_set_pinctrl_state(struct dsi_panel *panel, bool enable)
else
state = panel->pinctrl.suspend;
- rc = pinctrl_select_state(panel->pinctrl.pinctrl, state);
- if (rc)
- pr_err("[%s] failed to set pin state, rc=%d\n", panel->name,
- rc);
+ if (panel->pinctrl.pinctrl && state) {
+ rc = pinctrl_select_state(panel->pinctrl.pinctrl, state);
+ if (rc)
+ pr_err("[%s] failed to set pin state, rc=%d\n",
+ panel->name, rc);
+ }
return rc;
}
@@ -386,6 +389,9 @@ static int dsi_panel_bl_register(struct dsi_panel *panel)
case DSI_BACKLIGHT_WLED:
rc = dsi_panel_led_bl_register(panel, bl);
break;
+ case DSI_BACKLIGHT_UNKNOWN:
+ DRM_INFO("backlight type is unknown\n");
+ break;
default:
pr_err("Backlight type(%d) not supported\n", bl->type);
rc = -ENOTSUPP;
@@ -704,6 +710,8 @@ static int dsi_panel_parse_misc_host_config(struct dsi_host_common_cfg *host,
host->append_tx_eot = of_property_read_bool(of_node,
"qcom,mdss-dsi-tx-eot-append");
+ host->force_clk_lane_hs = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-force-clock-lane-hs");
return 0;
}
@@ -1348,6 +1356,8 @@ static int dsi_panel_parse_gpios(struct dsi_panel *panel,
{
int rc = 0;
+ /* Default the GPIO to -1, since 0 is a valid GPIO number */
+ panel->reset_config.disp_en_gpio = -1;
panel->reset_config.reset_gpio = of_get_named_gpio(of_node,
"qcom,platform-reset-gpio",
0);
@@ -1496,6 +1506,33 @@ error:
return rc;
}
+static int dsi_panel_parse_dba_config(struct dsi_panel *panel,
+ struct device_node *of_node)
+{
+ int rc = 0, len = 0;
+
+ panel->dba_config.dba_panel = of_property_read_bool(of_node,
+ "qcom,dba-panel");
+
+ if (panel->dba_config.dba_panel) {
+ panel->dba_config.hdmi_mode = of_property_read_bool(of_node,
+ "qcom,hdmi-mode");
+
+ panel->dba_config.bridge_name = of_get_property(of_node,
+ "qcom,bridge-name", &len);
+ if (!panel->dba_config.bridge_name || len <= 0) {
+ SDE_ERROR(
+ "%s:%d Unable to read bridge_name, data=%pK,len=%d\n",
+ __func__, __LINE__, panel->dba_config.bridge_name, len);
+ rc = -EINVAL;
+ goto error;
+ }
+ }
+
+error:
+ return rc;
+}
+
struct dsi_panel *dsi_panel_get(struct device *parent,
struct device_node *of_node)
{
@@ -1560,6 +1597,10 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
if (rc)
pr_err("failed to parse backlight config, rc=%d\n", rc);
+ rc = dsi_panel_parse_dba_config(panel, of_node);
+ if (rc)
+ pr_err("failed to parse dba config, rc=%d\n", rc);
+
panel->panel_of_node = of_node;
drm_panel_init(&panel->drm_panel);
mutex_init(&panel->panel_lock);
@@ -1574,6 +1615,9 @@ void dsi_panel_put(struct dsi_panel *panel)
{
u32 i;
+ if (!panel)
+ return;
+
for (i = 0; i < DSI_CMD_SET_MAX; i++)
dsi_panel_destroy_cmd_packets(&panel->cmd_sets[i]);
@@ -1614,10 +1658,8 @@ int dsi_panel_drv_init(struct dsi_panel *panel,
}
rc = dsi_panel_pinctrl_init(panel);
- if (rc) {
+ if (rc)
pr_err("[%s] failed to init pinctrl, rc=%d\n", panel->name, rc);
- goto error_vreg_put;
- }
rc = dsi_panel_gpio_request(panel);
if (rc) {
@@ -1640,7 +1682,6 @@ error_gpio_release:
(void)dsi_panel_gpio_release(panel);
error_pinctrl_deinit:
(void)dsi_panel_pinctrl_deinit(panel);
-error_vreg_put:
(void)dsi_panel_vreg_put(panel);
exit:
mutex_unlock(&panel->panel_lock);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 4d21a4cf6428..8106ed1261b4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -132,6 +132,18 @@ struct dsi_panel_reset_config {
int disp_en_gpio;
};
+/**
+ * struct dsi_panel_dba - DSI DBA panel information
+ * @dba_panel: True if this panel is connected through a DBA bridge chip.
+ * @bridge_name: Bridge chip name.
+ * @hdmi_mode: True if the bridge chip operates in HDMI mode.
+ */
+struct dsi_panel_dba {
+ bool dba_panel;
+ const char *bridge_name;
+ bool hdmi_mode;
+};
+
struct dsi_panel {
const char *name;
struct device_node *panel_of_node;
@@ -158,6 +170,8 @@ struct dsi_panel {
struct dsi_panel_reset_config reset_config;
struct dsi_pinctrl_info pinctrl;
+ struct dsi_panel_dba dba_config;
+
bool lp11_init;
};
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index 6a6d02c5444d..c377f3759e67 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -401,12 +401,111 @@ static const struct file_operations edid_vendor_name_fops = {
.read = _sde_hdmi_edid_vendor_name_read,
};
+static u64 _sde_hdmi_clip_valid_pclk(struct drm_display_mode *mode, u64 pclk_in)
+{
+ u32 pclk_delta, pclk;
+ u64 pclk_clip = pclk_in;
+
+ /* as per standard, 0.5% of deviation is allowed */
+ pclk = mode->clock * HDMI_KHZ_TO_HZ;
+ pclk_delta = pclk * 5 / 1000;
+
+ if (pclk_in < (pclk - pclk_delta))
+ pclk_clip = pclk - pclk_delta;
+ else if (pclk_in > (pclk + pclk_delta))
+ pclk_clip = pclk + pclk_delta;
+
+ if (pclk_in != pclk_clip)
+ pr_warn("clip pclk from %lld to %lld\n", pclk_in, pclk_clip);
+
+ return pclk_clip;
+}
+
+/**
+ * _sde_hdmi_update_pll_delta() - Update the HDMI pixel clock as per input ppm
+ *
+ * @display: Handle to the sde_hdmi display.
+ * @ppm: Requested deviation in parts per million, multiplied by 1000.
+ * Return: 0 on success, non-zero in case of failure.
+ *
+ * The resulting pixel clock is clipped so that it stays within 0.5% of the
+ * TMDS clock rate of the current mode, as allowed by the HDMI spec.
+ */
+static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm)
+{
+ struct hdmi *hdmi = display->ctrl.ctrl;
+ struct drm_display_mode *current_mode = &display->mode;
+ u64 cur_pclk, dst_pclk;
+ u64 clip_pclk;
+ int rc = 0;
+
+ if (!hdmi->power_on || !display->connected) {
+ SDE_ERROR("HDMI display is not ready\n");
+ return -EINVAL;
+ }
+
+ /* get current pclk */
+ cur_pclk = hdmi->pixclock;
+ /* get desired pclk */
+ dst_pclk = cur_pclk * (1000000000 + ppm);
+ do_div(dst_pclk, 1000000000);
+
+ clip_pclk = _sde_hdmi_clip_valid_pclk(current_mode, dst_pclk);
+
+ /* update pclk */
+ if (clip_pclk != cur_pclk) {
+ SDE_DEBUG("PCLK changes from %llu to %llu when delta is %d\n",
+ cur_pclk, clip_pclk, ppm);
+
+ rc = clk_set_rate(hdmi->pwr_clks[0], clip_pclk);
+ if (rc < 0) {
+ SDE_ERROR("PLL update failed, reset clock rate\n");
+ return rc;
+ }
+
+ hdmi->pixclock = clip_pclk;
+ }
+
+ return rc;
+}
+
+static ssize_t _sde_hdmi_debugfs_pll_delta_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[10];
+ int ppm = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &ppm))
+ return -EFAULT;
+
+ if (ppm)
+ _sde_hdmi_update_pll_delta(display, ppm);
+
+ return count;
+}
+
+static const struct file_operations pll_delta_fops = {
+ .open = simple_open,
+ .write = _sde_hdmi_debugfs_pll_delta_write,
+};
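
To make the arithmetic above concrete, here is a standalone userspace-style sketch with illustrative numbers only: a 148.5 MHz 1080p60 pixel clock and a ppm argument of 2000, i.e. 2 ppm, since the interface value is ppm multiplied by 1000. It reproduces the scaling in _sde_hdmi_update_pll_delta() and the 0.5% window in _sde_hdmi_clip_valid_pclk():

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t cur_pclk = 148500000ULL;   /* nominal pixel clock, Hz */
        int32_t ppm = 2000;                 /* interface unit: ppm * 1000 */
        uint64_t dst_pclk, delta, lo, hi;

        /* same scaling as the driver: dst = cur * (1e9 + ppm) / 1e9 */
        dst_pclk = cur_pclk * (1000000000ULL + ppm) / 1000000000ULL;

        /* clip to +/- 0.5% of the nominal rate */
        delta = cur_pclk * 5 / 1000;
        lo = cur_pclk - delta;
        hi = cur_pclk + delta;
        if (dst_pclk < lo)
            dst_pclk = lo;
        else if (dst_pclk > hi)
            dst_pclk = hi;

        /* prints 148500297: +2 ppm moves the clock by 297 Hz, well inside the window */
        printf("%llu\n", (unsigned long long)dst_pclk);
        return 0;
    }

The same adjustment can be driven at runtime by writing the ppm value to the new pll_delta debugfs node, or later in this patch through the CONNECTOR_PROP_PLL_DELTA connector property.
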
+
static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
{
int rc = 0;
struct dentry *dir, *dump_file, *edid_modes;
struct dentry *edid_vsdb_info, *edid_hdr_info, *edid_hfvsdb_info;
- struct dentry *edid_vcdb_info, *edid_vendor_name;
+ struct dentry *edid_vcdb_info, *edid_vendor_name, *pll_file;
dir = debugfs_create_dir(display->name, NULL);
if (!dir) {
@@ -423,7 +522,19 @@ static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
&dump_info_fops);
if (IS_ERR_OR_NULL(dump_file)) {
rc = PTR_ERR(dump_file);
- SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+ SDE_ERROR("[%s]debugfs create dump_info file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ pll_file = debugfs_create_file("pll_delta",
+ 0644,
+ dir,
+ display,
+ &pll_delta_fops);
+ if (IS_ERR_OR_NULL(pll_file)) {
+ rc = PTR_ERR(pll_file);
+ SDE_ERROR("[%s]debugfs create pll_delta file failed, rc=%d\n",
display->name, rc);
goto error_remove_dir;
}
@@ -823,7 +934,6 @@ static void _sde_hdmi_hotplug_work(struct work_struct *work)
} else
sde_free_edid((void **)&sde_hdmi->edid_ctrl);
- sde_hdmi_notify_clients(connector, sde_hdmi->connected);
drm_helper_hpd_irq_event(connector->dev);
}
@@ -853,8 +963,7 @@ static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
- if (!sde_hdmi->non_pluggable)
- queue_work(hdmi->workq, &sde_hdmi->hpd_work);
+ queue_work(hdmi->workq, &sde_hdmi->hpd_work);
}
}
@@ -943,6 +1052,25 @@ static int _sde_hdmi_get_cable_status(struct platform_device *pdev, u32 vote)
return hdmi->power_on && display->connected;
}
+static void _sde_hdmi_audio_codec_ready(struct platform_device *pdev)
+{
+ struct sde_hdmi *display = platform_get_drvdata(pdev);
+
+ if (!display) {
+ SDE_ERROR("invalid param(s), display %pK\n", display);
+ return;
+ }
+
+ mutex_lock(&display->display_lock);
+ if (!display->codec_ready) {
+ display->codec_ready = true;
+
+ if (display->client_notify_pending)
+ sde_hdmi_notify_clients(display, display->connected);
+ }
+ mutex_unlock(&display->display_lock);
+}
+
static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
{
int rc = 0;
@@ -962,6 +1090,8 @@ static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
_sde_hdmi_get_audio_edid_blk;
display->ext_audio_data.codec_ops.cable_status =
_sde_hdmi_get_cable_status;
+ display->ext_audio_data.codec_ops.codec_ready =
+ _sde_hdmi_audio_codec_ready;
if (!display->pdev->dev.of_node) {
SDE_ERROR("[%s]cannot find sde_hdmi of_node\n", display->name);
@@ -990,17 +1120,14 @@ static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
return rc;
}
-void sde_hdmi_notify_clients(struct drm_connector *connector,
- bool connected)
+void sde_hdmi_notify_clients(struct sde_hdmi *display, bool connected)
{
- struct sde_connector *c_conn = to_sde_connector(connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
int state = connected ?
EXT_DISPLAY_CABLE_CONNECT : EXT_DISPLAY_CABLE_DISCONNECT;
if (display && display->ext_audio_data.intf_ops.hpd) {
struct hdmi *hdmi = display->ctrl.ctrl;
- u32 flags = MSM_EXT_DISP_HPD_VIDEO;
+ u32 flags = MSM_EXT_DISP_HPD_ASYNC_VIDEO;
if (hdmi->hdmi_mode)
flags |= MSM_EXT_DISP_HPD_AUDIO;
@@ -1010,21 +1137,6 @@ void sde_hdmi_notify_clients(struct drm_connector *connector,
}
}
-void sde_hdmi_ack_state(struct drm_connector *connector,
- enum drm_connector_status status)
-{
- struct sde_connector *c_conn = to_sde_connector(connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
-
- if (display) {
- struct hdmi *hdmi = display->ctrl.ctrl;
-
- if (hdmi->hdmi_mode && display->ext_audio_data.intf_ops.notify)
- display->ext_audio_data.intf_ops.notify(
- display->ext_pdev, status);
- }
-}
-
void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
{
uint32_t ctrl = 0;
@@ -1316,14 +1428,36 @@ int sde_hdmi_get_info(struct msm_display_info *info,
MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_VID_MODE;
}
info->is_connected = hdmi_display->connected;
- info->max_width = 1920;
- info->max_height = 1080;
+ info->max_width = 4096;
+ info->max_height = 2160;
info->compression = MSM_DISPLAY_COMPRESS_NONE;
mutex_unlock(&hdmi_display->display_lock);
return rc;
}
+int sde_hdmi_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display)
+{
+ int rc = 0;
+
+ if (!connector || !display) {
+ SDE_ERROR("connector=%pK or display=%pK is NULL\n",
+ connector, display);
+ return 0;
+ }
+
+ SDE_DEBUG("\n");
+
+ if (property_index == CONNECTOR_PROP_PLL_DELTA)
+ rc = _sde_hdmi_update_pll_delta(display, value);
+
+ return rc;
+}
+
u32 sde_hdmi_get_num_of_displays(void)
{
u32 count = 0;
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index bb3061a6ed00..ffa9a27e7dfe 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -80,9 +80,12 @@ struct sde_hdmi_ctrl {
* @non_pluggable: If HDMI display is non pluggable
* @num_of_modes: Number of modes supported by display if non pluggable.
* @mode_list: Mode list if non pluggable.
+ * @mode: Current display mode.
* @connected: If HDMI display is connected.
* @is_tpg_enabled: TPG state.
* @hpd_work: HPD work structure.
+ * @codec_ready: True if the audio codec is ready.
+ * @client_notify_pending: True if a client notification was deferred until
+ * the codec is ready.
* @root: Debug fs root entry.
*/
struct sde_hdmi {
@@ -103,10 +106,13 @@ struct sde_hdmi {
bool non_pluggable;
u32 num_of_modes;
struct list_head mode_list;
+ struct drm_display_mode mode;
bool connected;
bool is_tpg_enabled;
struct work_struct hpd_work;
+ bool codec_ready;
+ bool client_notify_pending;
/* DEBUG FS */
struct dentry *root;
@@ -270,6 +276,22 @@ int sde_hdmi_get_info(struct msm_display_info *info,
void *display);
/**
+ * sde_hdmi_set_property() - set the connector properties
+ * @connector: Handle to the connector.
+ * @state: Handle to the connector state.
+ * @property_index: property index.
+ * @value: property value.
+ * @display: Handle to the display.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int sde_hdmi_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display);
+
+/**
* sde_hdmi_bridge_init() - init sde hdmi bridge
* @hdmi: Handle to the hdmi.
*
@@ -361,13 +383,12 @@ int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set);
/**
* sde_hdmi_notify_clients() - notify hdmi clients of the connection status.
- * @connector: Handle to the drm_connector.
+ * @display: Handle to sde_hdmi.
* @connected: connection status.
*
* Return: void.
*/
-void sde_hdmi_notify_clients(struct drm_connector *connector,
- bool connected);
+void sde_hdmi_notify_clients(struct sde_hdmi *display, bool connected);
/**
* sde_hdmi_ack_state() - acknowledge the connection status.
@@ -453,5 +474,15 @@ static inline int sde_hdmi_get_info(struct msm_display_info *info,
{
return 0;
}
+
+static inline int sde_hdmi_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display)
+{
+ return 0;
+}
+
#endif /*#else of CONFIG_DRM_SDE_HDMI*/
#endif /* _SDE_HDMI_H_ */
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
index 13ea49cfa42d..48a3a9316a41 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
@@ -30,8 +30,6 @@
#define HDMI_AUDIO_INFO_FRAME_PACKET_VERSION 0x1
#define HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH 0x0A
-#define HDMI_KHZ_TO_HZ 1000
-#define HDMI_MHZ_TO_HZ 1000000
#define HDMI_ACR_N_MULTIPLIER 128
#define DEFAULT_AUDIO_SAMPLE_RATE_HZ 48000
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index c76e42c67885..34268aaedfc0 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -372,6 +372,8 @@ static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
+ struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
DRM_DEBUG("power up");
@@ -388,41 +390,20 @@ static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
- sde_hdmi_ack_state(hdmi->connector, EXT_DISPLAY_CABLE_CONNECT);
-}
-
-static void sde_hdmi_force_update_audio(struct drm_connector *connector,
- enum drm_connector_status status)
-{
- struct sde_connector *c_conn = to_sde_connector(connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
-
- if (display && display->non_pluggable) {
- display->ext_audio_data.intf_ops.hpd(display->ext_pdev,
- display->ext_audio_data.type,
- status,
- MSM_EXT_DISP_HPD_AUDIO);
- }
+ mutex_lock(&display->display_lock);
+ if (display->codec_ready)
+ sde_hdmi_notify_clients(display, display->connected);
+ else
+ display->client_notify_pending = true;
+ mutex_unlock(&display->display_lock);
}
static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
{
- struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
- struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
-
- /* force update audio ops when there's no HPD event */
- sde_hdmi_force_update_audio(hdmi->connector,
- EXT_DISPLAY_CABLE_CONNECT);
}
static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
{
- struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
- struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
-
- /* force update audio ops when there's no HPD event */
- sde_hdmi_force_update_audio(hdmi->connector,
- EXT_DISPLAY_CABLE_DISCONNECT);
}
static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
@@ -430,6 +411,10 @@ static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
+ struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ sde_hdmi_notify_clients(display, display->connected);
if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
hdmi_hdcp_ctrl_off(hdmi->hdcp_ctrl);
@@ -446,8 +431,6 @@ static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
_sde_hdmi_bridge_power_off(bridge);
hdmi->power_on = false;
}
-
- sde_hdmi_ack_state(hdmi->connector, EXT_DISPLAY_CABLE_DISCONNECT);
}
static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi,
@@ -568,6 +551,15 @@ static void _sde_hdmi_bridge_set_spd_infoframe(struct hdmi *hdmi,
hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, packet_control);
}
+static inline void _sde_hdmi_save_mode(struct hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ drm_mode_copy(&display->mode, mode);
+}
+
static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -640,6 +632,8 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
DRM_DEBUG("hdmi setup info frame\n");
}
_sde_hdmi_bridge_setup_scrambler(hdmi, mode);
+
+ _sde_hdmi_save_mode(hdmi, mode);
}
static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index b6cddee0cf34..119221eacb43 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -184,8 +184,7 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
+ aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_destroy(aspace);
}
}
@@ -202,8 +201,7 @@ static void mdp4_destroy(struct msm_kms *kms)
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
+ aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
@@ -416,10 +414,6 @@ fail:
return ret;
}
-static const char *iommu_ports[] = {
- "mdp_port0_cb0", "mdp_port1_cb0",
-};
-
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
@@ -515,15 +509,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
-
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
- goto fail;
- }
+ config->iommu->geometry.aperture_start = 0x1000;
+ config->iommu->geometry.aperture_end = 0xffffffff;
aspace = msm_gem_address_space_create(&pdev->dev,
- mmu, "mdp4", 0x1000, 0xffffffff);
+ config->iommu, MSM_IOMMU_DOMAIN_DEFAULT, "mdp4");
if (IS_ERR(aspace)) {
ret = PTR_ERR(aspace);
goto fail;
@@ -531,8 +521,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdp4_kms->aspace = aspace;
- ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index e4e69ebd116e..4dbf456504b7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -22,10 +22,6 @@
#include "msm_mmu.h"
#include "mdp5_kms.h"
-static const char *iommu_ports[] = {
- "mdp_0",
-};
-
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -613,8 +609,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_kms->aspace = aspace;
- ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
if (ret) {
dev_err(&pdev->dev, "failed to attach iommu: %d\n",
ret);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 276329b7b10c..7d378f7ebaa4 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1537,6 +1537,37 @@ static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
return 0;
}
+static int msm_ioctl_gem_sync(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_sync *arg = data;
+ int i;
+
+ for (i = 0; i < arg->nr_ops; i++) {
+ struct drm_msm_gem_syncop syncop;
+ struct drm_gem_object *obj;
+ int ret;
+ void __user *ptr =
+ (void __user *)(uintptr_t)
+ (arg->ops + (i * sizeof(syncop)));
+
+ ret = copy_from_user(&syncop, ptr, sizeof(syncop));
+ if (ret)
+ return -EFAULT;
+
+ obj = drm_gem_object_lookup(dev, file, syncop.handle);
+ if (!obj)
+ return -ENOENT;
+
+ msm_gem_sync(obj, syncop.op);
+
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
+ return 0;
+}
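
For context, a hypothetical userspace caller of the new ioctl might look like the sketch below. The drm_msm_gem_sync/drm_msm_gem_syncop layout, the MSM_GEM_SYNC_TO_CPU flag and the DRM_IOCTL_MSM_GEM_SYNC macro live in the msm_drm.h uapi header, which is not part of this hunk, so the field names here are inferred from the kernel side above:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <xf86drm.h>        /* drmIoctl() */
    #include <drm/msm_drm.h>    /* assumed to carry the GEM_SYNC uapi */

    /* Ask the kernel to make a BO's contents visible to the CPU. */
    static int msm_bo_sync_to_cpu(int drm_fd, uint32_t handle)
    {
        struct drm_msm_gem_syncop op = {
            .handle = handle,
            .op = MSM_GEM_SYNC_TO_CPU,
        };
        struct drm_msm_gem_sync sync = {
            .nr_ops = 1,
            .ops = (uintptr_t)&op,   /* user pointer to the op array */
        };

        return drmIoctl(drm_fd, DRM_IOCTL_MSM_GEM_SYNC, &sync);
    }

On the kernel side each handle resolves to a GEM object and msm_gem_sync() performs dma_sync_sg_for_cpu()/dma_sync_sg_for_device() on cached BOs only; write-combined and uncached buffers are skipped.
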
+
void msm_send_crtc_notification(struct drm_crtc *crtc,
struct drm_event *event, u8 *payload)
{
@@ -1665,6 +1696,8 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_COUNTER_READ, msm_ioctl_counter_read,
DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SYNC, msm_ioctl_gem_sync,
+ DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d2d118cf7e07..bd75a1ba1f8b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -148,6 +148,7 @@ enum msm_mdp_conn_property {
CONNECTOR_PROP_DST_Y,
CONNECTOR_PROP_DST_W,
CONNECTOR_PROP_DST_H,
+ CONNECTOR_PROP_PLL_DELTA,
/* enum/bitmask properties */
CONNECTOR_PROP_TOPOLOGY_NAME,
@@ -412,7 +413,7 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
/* For GPU and legacy display */
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
- const char *name);
+ int type, const char *name);
struct msm_gem_address_space *
msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
uint64_t start, uint64_t end);
@@ -469,6 +470,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt);
+void msm_gem_sync(struct drm_gem_object *obj, u32 op);
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d1455fbc980e..d35d03c2935d 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -546,6 +546,26 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
return 0;
}
+void msm_gem_sync(struct drm_gem_object *obj, u32 op)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ return;
+
+ switch (op) {
+ case MSM_GEM_SYNC_TO_CPU:
+ dma_sync_sg_for_cpu(dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ break;
+ case MSM_GEM_SYNC_TO_DEV:
+ dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ break;
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2045dc34c20a..0b19d11bc666 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -25,21 +25,12 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
-struct msm_gem_aspace_ops {
- int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
- struct sg_table *sgt, void *priv, unsigned int flags);
-
- void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
- struct sg_table *sgt, void *priv);
-
- void (*destroy)(struct msm_gem_address_space *);
-};
-
struct msm_gem_address_space {
const char *name;
struct msm_mmu *mmu;
- const struct msm_gem_aspace_ops *ops;
struct kref kref;
+ struct drm_mm mm;
+ u64 va_len;
};
struct msm_gem_vma {
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 52fc81420690..8e0f15c416fd 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -82,13 +82,16 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
to_user_ptr(args->bos + (i * sizeof(submit_bo)));
- ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
- if (unlikely(ret)) {
+ if (copy_from_user_inatomic(&submit_bo, userptr,
+ sizeof(submit_bo))) {
pagefault_enable();
spin_unlock(&file->table_lock);
- ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
- if (ret)
+ if (copy_from_user(&submit_bo, userptr,
+ sizeof(submit_bo))) {
+ ret = -EFAULT;
goto out;
+ }
+
spin_lock(&file->table_lock);
pagefault_disable();
}
@@ -283,8 +286,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
uint32_t off;
bool valid;
- ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
- if (ret)
+ if (copy_from_user(&submit_reloc, userptr,
+ sizeof(submit_reloc)))
return -EFAULT;
if (submit_reloc.submit_offset % 4) {
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index a227f1ba0573..47f7436854fb 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -25,8 +25,10 @@ msm_gem_address_space_destroy(struct kref *kref)
struct msm_gem_address_space *aspace = container_of(kref,
struct msm_gem_address_space, kref);
- if (aspace->ops->destroy)
- aspace->ops->destroy(aspace);
+ if (aspace->va_len)
+ drm_mm_takedown(&aspace->mm);
+
+ aspace->mmu->funcs->destroy(aspace->mmu);
kfree(aspace);
}
@@ -37,57 +39,9 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
-/* SDE address space operations */
-static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv)
-{
- struct dma_buf *buf = priv;
-
- if (buf)
- aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
- sgt, buf, DMA_BIDIRECTIONAL);
- else
- aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
- DMA_BIDIRECTIONAL);
-
- vma->iova = 0;
-
- msm_gem_address_space_put(aspace);
-}
-
-
-static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv, unsigned int flags)
-{
- struct dma_buf *buf = priv;
- int ret;
-
- if (buf)
- ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
- DMA_BIDIRECTIONAL);
- else
- ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
- DMA_BIDIRECTIONAL);
-
- if (!ret)
- vma->iova = sg_dma_address(sgt->sgl);
-
- /* Get a reference to the aspace to keep it around */
- kref_get(&aspace->kref);
-
- return ret;
-}
-
-static const struct msm_gem_aspace_ops smmu_aspace_ops = {
- .map = smmu_aspace_map_vma,
- .unmap = smmu_aspace_unmap_vma,
-};
-
-struct msm_gem_address_space *
-msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
- const char *name)
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+ uint64_t start, uint64_t end)
{
struct msm_gem_address_space *aspace;
@@ -100,57 +54,28 @@ msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
aspace->name = name;
aspace->mmu = mmu;
- aspace->ops = &smmu_aspace_ops;
-
- kref_init(&aspace->kref);
-
- return aspace;
-}
-/* GPU address space operations */
-struct msm_iommu_aspace {
- struct msm_gem_address_space base;
- struct drm_mm mm;
-};
+ aspace->va_len = end - start;
-#define to_iommu_aspace(aspace) \
- ((struct msm_iommu_aspace *) \
- container_of(aspace, struct msm_iommu_aspace, base))
+ if (aspace->va_len)
+ drm_mm_init(&aspace->mm, (start >> PAGE_SHIFT),
+ (end >> PAGE_SHIFT) - 1);
-static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
-{
- if (!vma->iova)
- return;
-
- if (aspace->mmu)
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt);
-
- drm_mm_remove_node(&vma->node);
-
- vma->iova = 0;
+ kref_init(&aspace->kref);
- msm_gem_address_space_put(aspace);
+ return aspace;
}
-static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, void *priv,
- unsigned int flags)
+static int allocate_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ u64 *iova)
{
- struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
- size_t size = 0;
struct scatterlist *sg;
+ size_t size = 0;
int ret, i;
- int iommu_flags = IOMMU_READ;
-
- if (!(flags & MSM_BO_GPU_READONLY))
- iommu_flags |= IOMMU_WRITE;
- if (flags & MSM_BO_PRIVILEGED)
- iommu_flags |= IOMMU_PRIV;
-
- if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(aspace->mmu))
- iommu_flags |= IOMMU_CACHE;
+ if (!aspace->va_len)
+ return 0;
if (WARN_ON(drm_mm_node_allocated(&vma->node)))
return 0;
@@ -158,84 +83,73 @@ static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
for_each_sg(sgt->sgl, sg, sgt->nents, i)
size += sg->length + sg->offset;
- ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
+ ret = drm_mm_insert_node(&aspace->mm, &vma->node, size >> PAGE_SHIFT,
0, DRM_MM_SEARCH_DEFAULT);
- if (ret)
- return ret;
-
- vma->iova = vma->node.start << PAGE_SHIFT;
-
- if (aspace->mmu)
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
- iommu_flags);
- /* Get a reference to the aspace to keep it around */
- kref_get(&aspace->kref);
+ if (!ret && iova)
+ *iova = vma->node.start << PAGE_SHIFT;
return ret;
}
-static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags)
{
- struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
-
- drm_mm_takedown(&local->mm);
- aspace->mmu->funcs->destroy(aspace->mmu);
-}
-
-static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
- .map = iommu_aspace_map_vma,
- .unmap = iommu_aspace_unmap_vma,
- .destroy = iommu_aspace_destroy,
-};
+ u64 iova = 0;
+ int ret;
-static struct msm_gem_address_space *
-msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
- uint64_t start, uint64_t end)
-{
- struct msm_iommu_aspace *local;
+ if (!aspace)
+ return -EINVAL;
- if (!mmu)
- return ERR_PTR(-EINVAL);
+ ret = allocate_iova(aspace, vma, sgt, &iova);
+ if (ret)
+ return ret;
- local = kzalloc(sizeof(*local), GFP_KERNEL);
- if (!local)
- return ERR_PTR(-ENOMEM);
+ ret = aspace->mmu->funcs->map(aspace->mmu, iova, sgt,
+ flags, priv);
- drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
- (end >> PAGE_SHIFT) - 1);
+ if (ret) {
+ if (drm_mm_node_allocated(&vma->node))
+ drm_mm_remove_node(&vma->node);
- local->base.name = name;
- local->base.mmu = mmu;
- local->base.ops = &msm_iommu_aspace_ops;
+ return ret;
+ }
- kref_init(&local->base.kref);
+ vma->iova = sg_dma_address(sgt->sgl);
+ kref_get(&aspace->kref);
- return &local->base;
+ return 0;
}
-int msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv, unsigned int flags)
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
{
- if (aspace && aspace->ops->map)
- return aspace->ops->map(aspace, vma, sgt, priv, flags);
+ if (!aspace || !vma->iova)
+ return;
- return -EINVAL;
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);
+
+ if (drm_mm_node_allocated(&vma->node))
+ drm_mm_remove_node(&vma->node);
+
+ vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
}
-void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+ const char *name)
{
- if (aspace && aspace->ops->unmap)
- aspace->ops->unmap(aspace, vma, sgt, priv);
+ return msm_gem_address_space_new(mmu, name, 0, 0);
}
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
- const char *name)
+ int type, const char *name)
{
- struct msm_mmu *mmu = msm_iommu_new(dev, domain);
+ struct msm_mmu *mmu = msm_iommu_new(dev, type, domain);
if (IS_ERR(mmu))
return (struct msm_gem_address_space *) mmu;
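
With the smmu/iommu aspace split removed, both the display and GPU paths now go through the same msm_gem_address_space; the only difference is whether a drm_mm range allocator is attached (va_len != 0). A hedged sketch of the two constructors side by side — smmu_mmu, gpu_domain and the name strings are placeholders for objects the SDE SMMU driver and GPU probe path already provide:

    static int demo_create_aspaces(struct device *dev, struct platform_device *pdev,
                                   struct msm_mmu *smmu_mmu,
                                   struct iommu_domain *gpu_domain)
    {
        struct msm_gem_address_space *disp, *gpu;

        /* Display/SMMU: va_len == 0, IOVAs come from the DMA-mapped sglist. */
        disp = msm_gem_smmu_address_space_create(dev, smmu_mmu, "mdp_ns");
        if (IS_ERR(disp))
            return PTR_ERR(disp);

        /* GPU: a drm_mm hands out IOVAs over the range set up for the domain. */
        gpu = msm_gem_address_space_create(&pdev->dev, gpu_domain,
                                           MSM_IOMMU_DOMAIN_USER, "gpu");
        if (IS_ERR(gpu)) {
            msm_gem_address_space_put(disp);
            return PTR_ERR(gpu);
        }

        return 0;
    }
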
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5a505a8bf328..2f01db8b08c3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -183,6 +183,9 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
+ if (gpu->aspace && gpu->aspace->mmu)
+ msm_mmu_enable(gpu->aspace->mmu);
+
return 0;
}
@@ -203,6 +206,9 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
if (WARN_ON(gpu->active_cnt < 0))
return -EINVAL;
+ if (gpu->aspace && gpu->aspace->mmu)
+ msm_mmu_disable(gpu->aspace->mmu);
+
ret = disable_axi(gpu);
if (ret)
return ret;
@@ -837,7 +843,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
dev_info(drm->dev, "%s: using IOMMU\n", name);
gpu->aspace = msm_gem_address_space_create(&pdev->dev,
- iommu, "gpu");
+ iommu, MSM_IOMMU_DOMAIN_USER, "gpu");
if (IS_ERR(gpu->aspace)) {
ret = PTR_ERR(gpu->aspace);
dev_err(drm->dev, "failed to init iommu: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 3fac423929c5..29e2e59b580b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -29,6 +29,9 @@
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+#define MSM_GPU_DEFAULT_IONAME "kgsl_3d0_reg_memory"
+#define MSM_GPU_DEFAULT_IRQNAME "kgsl_3d0_irq"
+
struct msm_gpu_config {
const char *ioname;
const char *irqname;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 3af24646f4f1..b52c4752c5fe 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <soc/qcom/secure_buffer.h>
#include "msm_drv.h"
#include "msm_iommu.h"
@@ -27,31 +28,17 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
return 0;
}
-/*
- * Get and enable the IOMMU clocks so that we can make
- * sure they stay on the entire duration so that we can
- * safely change the pagetable from the GPU
- */
-static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
+static void iommu_get_clocks(struct msm_iommu *iommu, struct device *dev)
{
- struct msm_iommu *iommu = to_msm_iommu(mmu);
- struct device *dev;
struct property *prop;
const char *name;
int i = 0;
- if (WARN_ON(!pdev))
- return;
-
- dev = &pdev->dev;
-
iommu->nr_clocks =
of_property_count_strings(dev->of_node, "clock-names");
- if (iommu->nr_clocks < 0) {
- iommu->nr_clocks = 0;
+ if (iommu->nr_clocks < 0)
return;
- }
if (WARN_ON(iommu->nr_clocks > ARRAY_SIZE(iommu->clocks)))
iommu->nr_clocks = ARRAY_SIZE(iommu->clocks);
@@ -60,78 +47,58 @@ static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
if (i == iommu->nr_clocks)
break;
- iommu->clocks[i] = clk_get(dev, name);
- if (iommu->clocks[i])
- clk_prepare_enable(iommu->clocks[i]);
-
- i++;
+ iommu->clocks[i++] = clk_get(dev, name);
}
}
-static int _attach_iommu_device(struct msm_mmu *mmu,
- struct iommu_domain *domain, const char **names, int cnt)
+
+static void msm_iommu_clocks_enable(struct msm_mmu *mmu)
{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
int i;
- /* See if there is a iommus member in the current device. If not, look
- * for the names and see if there is one in there.
- */
+ if (!iommu->nr_clocks)
+ iommu_get_clocks(iommu, mmu->dev->parent);
- if (of_find_property(mmu->dev->of_node, "iommus", NULL))
- return iommu_attach_device(domain, mmu->dev);
-
- /* Look through the list of names for a target */
- for (i = 0; i < cnt; i++) {
- struct device_node *node =
- of_find_node_by_name(mmu->dev->of_node, names[i]);
-
- if (!node)
- continue;
-
- if (of_find_property(node, "iommus", NULL)) {
- struct platform_device *pdev;
-
- /* Get the platform device for the node */
- of_platform_populate(node->parent, NULL, NULL,
- mmu->dev);
-
- pdev = of_find_device_by_node(node);
-
- if (!pdev)
- continue;
-
- _get_iommu_clocks(mmu,
- of_find_device_by_node(node->parent));
+ for (i = 0; i < iommu->nr_clocks; i++) {
+ if (iommu->clocks[i])
+ clk_prepare_enable(iommu->clocks[i]);
+ }
+}
- mmu->dev = &pdev->dev;
+static void msm_iommu_clocks_disable(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
- return iommu_attach_device(domain, mmu->dev);
- }
+ for (i = 0; i < iommu->nr_clocks; i++) {
+ if (iommu->clocks[i])
+ clk_disable_unprepare(iommu->clocks[i]);
}
+}
- dev_err(mmu->dev, "Couldn't find a IOMMU device\n");
- return -ENODEV;
+static int msm_iommu_attach(struct msm_mmu *mmu, const char **names,
+ int cnt)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ return iommu_attach_device(iommu->domain, mmu->dev);
}
-static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+static int msm_iommu_attach_user(struct msm_mmu *mmu, const char **names,
+ int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int val = 1, ret;
+ int ret, val = 1;
/* Hope springs eternal */
- iommu->allow_dynamic = true;
-
- /* per-instance pagetables need TTBR1 support in the IOMMU driver */
- ret = iommu_domain_set_attr(iommu->domain,
- DOMAIN_ATTR_ENABLE_TTBR1, &val);
- if (ret)
- iommu->allow_dynamic = false;
+ iommu->allow_dynamic = !iommu_domain_set_attr(iommu->domain,
+ DOMAIN_ATTR_ENABLE_TTBR1, &val);
/* Mark the GPU as I/O coherent if it is supported */
iommu->is_coherent = of_dma_is_coherent(mmu->dev->of_node);
- /* Attach the device to the domain */
- ret = _attach_iommu_device(mmu, iommu->domain, names, cnt);
+ ret = iommu_attach_device(iommu->domain, mmu->dev);
if (ret)
return ret;
@@ -176,17 +143,25 @@ static int msm_iommu_attach_dynamic(struct msm_mmu *mmu, const char **names,
return 0;
}
+static int msm_iommu_attach_secure(struct msm_mmu *mmu, const char **names,
+ int cnt)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int ret, vmid = VMID_CP_PIXEL;
+
+ ret = iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_SECURE_VMID,
+ &vmid);
+ if (ret)
+ return ret;
+
+ return iommu_attach_device(iommu->domain, mmu->dev);
+}
+
static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int i;
iommu_detach_device(iommu->domain, mmu->dev);
-
- for (i = 0; i < iommu->nr_clocks; i++) {
- if (iommu->clocks[i])
- clk_disable(iommu->clocks[i]);
- }
}
static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
@@ -196,69 +171,50 @@ static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, int prot)
+ struct sg_table *sgt, u32 flags, void *priv)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
- uint64_t da = iova;
- unsigned int i, j;
int ret;
+ u32 prot = IOMMU_READ;
if (!domain || !sgt)
return -EINVAL;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- phys_addr_t pa = sg_phys(sg) - sg->offset;
- size_t bytes = sg->length + sg->offset;
+ if (!(flags & MSM_BO_GPU_READONLY))
+ prot |= IOMMU_WRITE;
- VERB("map[%d]: %016llx %pa(%zx)", i, iova, &pa, bytes);
+ if (flags & MSM_BO_PRIVILEGED)
+ prot |= IOMMU_PRIV;
- ret = iommu_map(domain, da, pa, bytes, prot);
- if (ret)
- goto fail;
+ if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(mmu))
+ prot |= IOMMU_CACHE;
- da += bytes;
- }
-
- return 0;
-
-fail:
- da = iova;
+ /* iommu_map_sg returns the number of bytes mapped */
+ ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, prot);
+ if (ret)
+ sgt->sgl->dma_address = iova;
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg->length + sg->offset;
- iommu_unmap(domain, da, bytes);
- da += bytes;
- }
- return ret;
+ return ret ? 0 : -ENOMEM;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt)
+static void msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, void *priv)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- uint64_t da = iova;
- int i;
+ size_t len = 0;
+ int ret, i;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = sg->length + sg->offset;
- size_t unmapped;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ len += sg->length;
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- return unmapped;
+ ret = iommu_unmap(domain, iova, len);
+ if (ret != len)
+ dev_warn(mmu->dev, "could not unmap iova %llx\n", iova);
- VERB("unmap[%d]: %016llx(%zx)", i, iova, bytes);
-
- BUG_ON(!PAGE_ALIGNED(bytes));
-
- da += bytes;
- }
-
- return 0;
+ sgt->sgl->dma_address = 0;
}
static void msm_iommu_destroy(struct msm_mmu *mmu)
@@ -268,7 +224,30 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
kfree(iommu);
}
-static const struct msm_mmu_funcs funcs = {
+static struct device *find_context_bank(const char *name)
+{
+ struct device_node *node = of_find_node_by_name(NULL, name);
+ struct platform_device *pdev, *parent;
+
+ if (!node)
+ return ERR_PTR(-ENODEV);
+
+ if (!of_find_property(node, "iommus", NULL))
+ return ERR_PTR(-ENODEV);
+
+ /* Get the parent device */
+ parent = of_find_device_by_node(node->parent);
+
+ /* Populate the sub nodes */
+ of_platform_populate(parent->dev.of_node, NULL, NULL, &parent->dev);
+
+ /* Get the context bank device */
+ pdev = of_find_device_by_node(node);
+
+ return pdev ? &pdev->dev : ERR_PTR(-ENODEV);
+}
+
+static const struct msm_mmu_funcs default_funcs = {
.attach = msm_iommu_attach,
.detach = msm_iommu_detach,
.map = msm_iommu_map,
@@ -276,6 +255,24 @@ static const struct msm_mmu_funcs funcs = {
.destroy = msm_iommu_destroy,
};
+static const struct msm_mmu_funcs user_funcs = {
+ .attach = msm_iommu_attach_user,
+ .detach = msm_iommu_detach,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+ .enable = msm_iommu_clocks_enable,
+ .disable = msm_iommu_clocks_disable,
+};
+
+static const struct msm_mmu_funcs secure_funcs = {
+ .attach = msm_iommu_attach_secure,
+ .detach = msm_iommu_detach,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+};
+
static const struct msm_mmu_funcs dynamic_funcs = {
.attach = msm_iommu_attach_dynamic,
.detach = msm_iommu_detach_dynamic,
@@ -284,8 +281,26 @@ static const struct msm_mmu_funcs dynamic_funcs = {
.destroy = msm_iommu_destroy,
};
-struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
- const struct msm_mmu_funcs *funcs)
+static const struct {
+ const char *cbname;
+ const struct msm_mmu_funcs *funcs;
+} msm_iommu_domains[] = {
+ [MSM_IOMMU_DOMAIN_DEFAULT] = {
+ .cbname = NULL,
+ .funcs = &default_funcs,
+ },
+ [MSM_IOMMU_DOMAIN_USER] = {
+ .cbname = "gfx3d_user",
+ .funcs = &user_funcs,
+ },
+ [MSM_IOMMU_DOMAIN_SECURE] = {
+ .cbname = "gfx3d_secure",
+ .funcs = &secure_funcs
+ },
+};
+
+static struct msm_mmu *iommu_create(struct device *dev,
+ struct iommu_domain *domain, const struct msm_mmu_funcs *funcs)
{
struct msm_iommu *iommu;
@@ -299,9 +314,23 @@ struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
return &iommu->base;
}
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+
+struct msm_mmu *msm_iommu_new(struct device *parent,
+ enum msm_iommu_domain_type type, struct iommu_domain *domain)
{
- return _msm_iommu_new(dev, domain, &funcs);
+ struct device *dev = parent;
+
+ if (type >= ARRAY_SIZE(msm_iommu_domains) ||
+ !msm_iommu_domains[type].funcs)
+ return ERR_PTR(-ENODEV);
+
+ if (msm_iommu_domains[type].cbname) {
+ dev = find_context_bank(msm_iommu_domains[type].cbname);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+ }
+
+ return iommu_create(dev, domain, msm_iommu_domains[type].funcs);
}
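
A hedged usage sketch of the reworked constructor: a GPU probe path (the exact call site is assumed here) now selects the context bank by domain type instead of passing a context-bank device directly:

	/* illustrative only: create the gfx3d_user backed MMU */
	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
	struct msm_mmu *mmu;

	if (!domain)
		return -ENODEV;

	mmu = msm_iommu_new(&pdev->dev, MSM_IOMMU_DOMAIN_USER, domain);
	if (IS_ERR(mmu)) {
		iommu_domain_free(domain);
		return PTR_ERR(mmu);
	}
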
/*
@@ -326,7 +355,7 @@ struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base)
if (!domain)
return ERR_PTR(-ENODEV);
- mmu = _msm_iommu_new(base->dev, domain, &dynamic_funcs);
+ mmu = iommu_create(base->dev, domain, &dynamic_funcs);
if (IS_ERR(mmu)) {
if (domain)
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 501f12bef00d..033370ccbe24 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -30,21 +30,22 @@ enum msm_mmu_domain_type {
MSM_SMMU_DOMAIN_MAX,
};
+enum msm_iommu_domain_type {
+ MSM_IOMMU_DOMAIN_DEFAULT,
+ MSM_IOMMU_DOMAIN_USER,
+ MSM_IOMMU_DOMAIN_SECURE,
+};
+
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- int prot);
- int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
- int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
- enum dma_data_direction dir);
- void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
- enum dma_data_direction dir);
- int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
- struct dma_buf *dma_buf, int dir);
- void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
- struct dma_buf *dma_buf, int dir);
+ u32 flags, void *priv);
+ void (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
+ void *priv);
void (*destroy)(struct msm_mmu *mmu);
+ void (*enable)(struct msm_mmu *mmu);
+ void (*disable)(struct msm_mmu *mmu);
};
struct msm_mmu {
@@ -59,9 +60,27 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
mmu->funcs = funcs;
}
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+/* Create a new SDE mmu device */
struct msm_mmu *msm_smmu_new(struct device *dev,
enum msm_mmu_domain_type domain);
+
+/* Create a new legacy MDP4 or GPU mmu device */
+struct msm_mmu *msm_iommu_new(struct device *parent,
+ enum msm_iommu_domain_type type, struct iommu_domain *domain);
+
+/* Create a new dynamic domain for GPU */
struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *orig);
+static inline void msm_mmu_enable(struct msm_mmu *mmu)
+{
+ if (mmu->funcs->enable)
+ mmu->funcs->enable(mmu);
+}
+
+static inline void msm_mmu_disable(struct msm_mmu *mmu)
+{
+ if (mmu->funcs->disable)
+ mmu->funcs->disable(mmu);
+}
+
#endif /* __MSM_MMU_H__ */
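
A short sketch of the intended enable/disable bracketing around translation activity; only user_funcs wires up the hooks, so for the other domains these helpers are no-ops (the surrounding call site is an assumption):

	msm_mmu_enable(mmu);
	ret = mmu->funcs->map(mmu, iova, sgt, flags, NULL);
	msm_mmu_disable(mmu);
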
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index 5a9e472ea59b..5f3d1b6356aa 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -183,6 +183,55 @@ static void _msm_property_install_integer(struct msm_property_info *info,
}
}
+/**
+ * _msm_property_install_signed_integer - install signed drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ * @force_dirty: Whether or not to filter 'dirty' status on unchanged values
+ */
+static void _msm_property_install_signed_integer(struct msm_property_info *info,
+ const char *name, int flags, int64_t min, int64_t max,
+ int64_t init, uint32_t property_idx, bool force_dirty)
+{
+ struct drm_property **prop;
+
+ if (!info)
+ return;
+
+ ++info->install_request;
+
+ if (!name || (property_idx >= info->property_count)) {
+ DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+ } else {
+ prop = &info->property_array[property_idx];
+ /*
+ * Properties need to be attached to each drm object that
+ * uses them, but only need to be created once
+ */
+ if (*prop == 0) {
+ *prop = drm_property_create_signed_range(info->dev,
+ flags, name, min, max);
+ if (*prop == 0)
+ DRM_ERROR("create %s property failed\n", name);
+ }
+
+ /* save init value for later */
+ info->property_data[property_idx].default_value = I642U64(init);
+ info->property_data[property_idx].force_dirty = force_dirty;
+
+ /* always attach property, if created */
+ if (*prop) {
+ drm_object_attach_property(info->base, *prop, init);
+ ++info->install_count;
+ }
+ }
+}
+
void msm_property_install_range(struct msm_property_info *info,
const char *name, int flags, uint64_t min, uint64_t max,
uint64_t init, uint32_t property_idx)
@@ -199,6 +248,22 @@ void msm_property_install_volatile_range(struct msm_property_info *info,
min, max, init, property_idx, true);
}
+void msm_property_install_signed_range(struct msm_property_info *info,
+ const char *name, int flags, int64_t min, int64_t max,
+ int64_t init, uint32_t property_idx)
+{
+ _msm_property_install_signed_integer(info, name, flags,
+ min, max, init, property_idx, false);
+}
+
+void msm_property_install_volatile_signed_range(struct msm_property_info *info,
+ const char *name, int flags, int64_t min, int64_t max,
+ int64_t init, uint32_t property_idx)
+{
+ _msm_property_install_signed_integer(info, name, flags,
+ min, max, init, property_idx, true);
+}
+
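
As a usage sketch, the new volatile signed-range helper is exercised by the connector hunk later in this patch; the call is reproduced here for convenience and nothing beyond that hunk is assumed:

	msm_property_install_volatile_signed_range(&c_conn->property_info,
			"PLL_DELTA", 0x0, INT_MIN, INT_MAX, 0,
			CONNECTOR_PROP_PLL_DELTA);
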
void msm_property_install_rotation(struct msm_property_info *info,
unsigned int supported_rotations, uint32_t property_idx)
{
diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h
index dbe28bdf5638..1430551700c7 100644
--- a/drivers/gpu/drm/msm/msm_prop.h
+++ b/drivers/gpu/drm/msm/msm_prop.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -208,6 +208,45 @@ void msm_property_install_volatile_range(struct msm_property_info *info,
uint32_t property_idx);
/**
+ * msm_property_install_signed_range - install signed drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_signed_range(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ int64_t min,
+ int64_t max,
+ int64_t init,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_volatile_signed_range - install signed range property
+ * This function is similar to msm_property_install_signed_range, but assumes
+ * that the property is meant for holding user pointers or descriptors
+ * that may reference volatile data without having an updated value.
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_volatile_signed_range(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ int64_t min,
+ int64_t max,
+ int64_t init,
+ uint32_t property_idx);
+
+/**
* msm_property_install_rotation - install standard drm rotation property
* @info: Pointer to property info container struct
* @supported_rotations: Bitmask of supported rotation values (see
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index c99f51e09700..c2dd5f96521e 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -53,6 +53,17 @@ struct msm_smmu_domain {
#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
#define msm_smmu_to_client(smmu) (smmu->client)
+
+static int msm_smmu_fault_handler(struct iommu_domain *iommu,
+ struct device *dev, unsigned long iova, int flags, void *arg)
+{
+ dev_info(dev, "%s: iova=0x%08lx, flags=0x%x, iommu=%pK\n", __func__,
+ iova, flags, iommu);
+ return 0;
+}
+
static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
const struct msm_smmu_domain *domain);
@@ -105,106 +116,34 @@ static void msm_smmu_detach(struct msm_mmu *mmu)
}
static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, int prot)
+ struct sg_table *sgt, u32 flags, void *priv)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
- struct iommu_domain *domain;
- struct scatterlist *sg;
- uint64_t da = iova;
- unsigned int i, j;
int ret;
- if (!client)
- return -ENODEV;
-
- domain = client->mmu_mapping->domain;
- if (!domain || !sgt)
- return -EINVAL;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
- size_t bytes = sg->length + sg->offset;
-
- VERB("map[%d]: %16llx %08x(%zx)", i, iova, pa, bytes);
-
- ret = iommu_map(domain, da, pa, bytes, prot);
- if (ret)
- goto fail;
-
- da += bytes;
- }
-
- return 0;
-
-fail:
- da = iova;
+ if (priv)
+ ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL, priv);
+ else
+ ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL);
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg->length + sg->offset;
-
- iommu_unmap(domain, da, bytes);
- da += bytes;
- }
- return ret;
+ return (ret != sgt->nents) ? -ENOMEM : 0;
}
-static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt,
- enum dma_data_direction dir)
+static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, void *priv)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
- int ret;
- ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir);
- if (ret != sgt->nents)
- return -ENOMEM;
-
- return 0;
-}
-
-static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- struct msm_smmu *smmu = to_msm_smmu(mmu);
- struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-
- dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
-}
-
-static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt)
-{
- struct msm_smmu *smmu = to_msm_smmu(mmu);
- struct msm_smmu_client *client = msm_smmu_to_client(smmu);
- struct iommu_domain *domain;
- struct scatterlist *sg;
- uint64_t da = iova;
- int i;
-
- if (!client)
- return -ENODEV;
-
- domain = client->mmu_mapping->domain;
- if (!domain || !sgt)
- return -EINVAL;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = sg->length + sg->offset;
- size_t unmapped;
-
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- return unmapped;
-
- VERB("unmap[%d]: %16llx(%zx)", i, iova, bytes);
-
- WARN_ON(!PAGE_ALIGNED(bytes));
-
- da += bytes;
- }
-
- return 0;
+ if (priv)
+ msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL, priv);
+ else
+ dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL);
}
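
A hedged sketch of how the priv argument now folds the old dma-buf helpers into map()/unmap(); the dma_buf pointer below stands in for a hypothetical import handle:

	/* dma-buf import: take the lazy msm_dma_map_sg_lazy() path */
	ret = mmu->funcs->map(mmu, iova, sgt, 0, dma_buf);

	/* ordinary GEM pages: plain dma_map_sg() */
	ret = mmu->funcs->map(mmu, iova, sgt, 0, NULL);
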
static void msm_smmu_destroy(struct msm_mmu *mmu)
@@ -217,42 +156,11 @@ static void msm_smmu_destroy(struct msm_mmu *mmu)
kfree(smmu);
}
-static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
- struct dma_buf *dma_buf, int dir)
-{
- struct msm_smmu *smmu = to_msm_smmu(mmu);
- struct msm_smmu_client *client = msm_smmu_to_client(smmu);
- int ret;
-
- ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir,
- dma_buf);
- if (ret != sgt->nents) {
- DRM_ERROR("dma map sg failed\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-
-static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
- struct dma_buf *dma_buf, int dir)
-{
- struct msm_smmu *smmu = to_msm_smmu(mmu);
- struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-
- msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
-}
-
static const struct msm_mmu_funcs funcs = {
.attach = msm_smmu_attach,
.detach = msm_smmu_detach,
.map = msm_smmu_map,
- .map_sg = msm_smmu_map_sg,
- .unmap_sg = msm_smmu_unmap_sg,
.unmap = msm_smmu_unmap,
- .map_dma_buf = msm_smmu_map_dma_buf,
- .unmap_dma_buf = msm_smmu_unmap_dma_buf,
.destroy = msm_smmu_destroy,
};
@@ -362,6 +270,7 @@ struct msm_mmu *msm_smmu_new(struct device *dev,
{
struct msm_smmu *smmu;
struct device *client_dev;
+ struct msm_smmu_client *client;
smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
if (!smmu)
@@ -376,6 +285,11 @@ struct msm_mmu *msm_smmu_new(struct device *dev,
smmu->client_dev = client_dev;
msm_mmu_init(&smmu->base, dev, &funcs);
+ client = msm_smmu_to_client(smmu);
+ if (client)
+ iommu_set_fault_handler(client->mmu_mapping->domain,
+ msm_smmu_fault_handler, dev);
+
return &smmu->base;
}
diff --git a/drivers/gpu/drm/msm/msm_snapshot.h b/drivers/gpu/drm/msm/msm_snapshot.h
index 247e1358c885..fd560b2129f1 100644
--- a/drivers/gpu/drm/msm/msm_snapshot.h
+++ b/drivers/gpu/drm/msm/msm_snapshot.h
@@ -71,8 +71,8 @@ static inline bool _snapshot_header(struct msm_snapshot *snapshot,
*/
#define SNAPSHOT_HEADER(_snapshot, _header, _id, _dwords) \
_snapshot_header((_snapshot), \
- (struct msm_snapshot_section_header *) &(header), \
- sizeof(header), (_dwords) << 2, (_id))
+ (struct msm_snapshot_section_header *) &(_header), \
+ sizeof(_header), (_dwords) << 2, (_id))
struct msm_gpu;
diff --git a/drivers/gpu/drm/msm/msm_snapshot_api.h b/drivers/gpu/drm/msm/msm_snapshot_api.h
index 9f0adb9ee784..7ad6f0498423 100644
--- a/drivers/gpu/drm/msm/msm_snapshot_api.h
+++ b/drivers/gpu/drm/msm/msm_snapshot_api.h
@@ -118,4 +118,17 @@ struct msm_snapshot_shader {
__u32 size;
} __packed;
+#define SNAPSHOT_GPU_OBJECT_SHADER 1
+#define SNAPSHOT_GPU_OBJECT_IB 2
+#define SNAPSHOT_GPU_OBJECT_GENERIC 3
+#define SNAPSHOT_GPU_OBJECT_DRAW 4
+#define SNAPSHOT_GPU_OBJECT_GLOBAL 5
+
+struct msm_snapshot_gpu_object {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u64 gpuaddr;
+ __u64 pt_base;
+ __u64 size;
+} __packed;
#endif
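
A sketch of how a GPU-object section could be emitted through the SNAPSHOT_HEADER() macro fixed earlier in this patch; obj_section_id and the local variables are placeholders, only the struct layout and type constants come from this header:

	struct msm_snapshot_gpu_object obj = {
		.type = SNAPSHOT_GPU_OBJECT_IB,
		.gpuaddr = gpuaddr,	/* placeholder: object GPU address */
		.pt_base = pt_base,	/* placeholder: pagetable base */
		.size = size,		/* placeholder: object size */
	};

	if (!SNAPSHOT_HEADER(snapshot, obj, obj_section_id, dwords))
		return;
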
diff --git a/drivers/gpu/drm/msm/sde/sde_backlight.c b/drivers/gpu/drm/msm/sde/sde_backlight.c
index 9034eeb944fe..78df28a0016b 100644
--- a/drivers/gpu/drm/msm/sde/sde_backlight.c
+++ b/drivers/gpu/drm/msm/sde/sde_backlight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -37,15 +37,15 @@ static int sde_backlight_device_update_status(struct backlight_device *bd)
connector = bl_get_data(bd);
c_conn = to_sde_connector(connector);
display = (struct dsi_display *) c_conn->display;
- if (brightness > display->panel->bl_config.bl_max_level)
- brightness = display->panel->bl_config.bl_max_level;
+ if (brightness > display->panel[0]->bl_config.bl_max_level)
+ brightness = display->panel[0]->bl_config.bl_max_level;
/* This maps UI brightness into driver backlight level with
* rounding
*/
SDE_BRIGHT_TO_BL(bl_lvl, brightness,
- display->panel->bl_config.bl_max_level,
- display->panel->bl_config.brightness_max_level);
+ display->panel[0]->bl_config.bl_max_level,
+ display->panel[0]->bl_config.brightness_max_level);
if (!bl_lvl && brightness)
bl_lvl = 1;
@@ -85,7 +85,7 @@ int sde_backlight_setup(struct drm_connector *connector)
switch (c_conn->connector_type) {
case DRM_MODE_CONNECTOR_DSI:
display = (struct dsi_display *) c_conn->display;
- bl_config = &display->panel->bl_config;
+ bl_config = &display->panel[0]->bl_config;
props.max_brightness = bl_config->brightness_max_level;
props.brightness = bl_config->brightness_max_level;
bd = backlight_device_register("sde-backlight",
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 31cf25ab5691..7538927a4993 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -540,14 +540,6 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
goto error_unregister_conn;
}
- if (c_conn->ops.set_backlight) {
- rc = sde_backlight_setup(&c_conn->base);
- if (rc) {
- pr_err("failed to setup backlight, rc=%d\n", rc);
- goto error_unregister_conn;
- }
- }
-
/* create properties */
msm_property_init(&c_conn->property_info, &c_conn->base.base, dev,
priv->conn_property, c_conn->property_data,
@@ -586,6 +578,10 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
+ msm_property_install_volatile_signed_range(&c_conn->property_info,
+ "PLL_DELTA", 0x0, INT_MIN, INT_MAX, 0,
+ CONNECTOR_PROP_PLL_DELTA);
+
/* enum/bitmask properties */
msm_property_install_enum(&c_conn->property_info, "topology_name",
DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name,
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 23ffa9b554dd..a59ec31ba276 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -718,12 +718,12 @@ static int _sde_format_populate_addrs_linear(
{
unsigned int i;
- /* Can now check the pitches given vs pitches expected */
+ /* Update layout pitches from fb */
for (i = 0; i < layout->num_planes; ++i) {
if (layout->plane_pitch[i] != fb->pitches[i]) {
- DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+ SDE_DEBUG("plane %u expected pitch %u, fb %u\n",
i, layout->plane_pitch[i], fb->pitches[i]);
- return -EINVAL;
+ layout->plane_pitch[i] = fb->pitches[i];
}
}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 581918da183f..709c9970b357 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -41,10 +41,6 @@
#define CREATE_TRACE_POINTS
#include "sde_trace.h"
-static const char * const iommu_ports[] = {
- "mdp_0",
-};
-
/**
* Controls size of event log buffer. Specified as a power of 2.
*/
@@ -416,6 +412,10 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
return;
}
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ return;
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
@@ -431,6 +431,8 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
break;
}
}
+
+ drm_crtc_vblank_put(crtc);
}
static void sde_kms_prepare_fence(struct msm_kms *kms,
@@ -598,6 +600,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.get_modes = sde_hdmi_connector_get_modes,
.mode_valid = sde_hdmi_mode_valid,
.get_info = sde_hdmi_get_info,
+ .set_property = sde_hdmi_set_property,
};
struct msm_display_info info = {0};
struct drm_encoder *encoder;
@@ -1076,8 +1079,7 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
sde_kms->aspace[i] = aspace;
- ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = mmu->funcs->attach(mmu, NULL, 0);
if (ret) {
SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
msm_gem_address_space_put(aspace);
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 1d27b27d265c..fca0768e2734 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1074,12 +1074,6 @@ void _sde_rm_release_rsvp(
}
kfree(rsvp);
-
- (void) msm_property_set_property(
- sde_connector_get_propinfo(conn),
- sde_connector_get_property_values(conn->state),
- CONNECTOR_PROP_TOPOLOGY_NAME,
- SDE_RM_TOPOLOGY_UNKNOWN);
}
void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
@@ -1115,6 +1109,12 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
rsvp->enc_id);
_sde_rm_release_rsvp(rm, rsvp, conn);
+
+ (void) msm_property_set_property(
+ sde_connector_get_propinfo(conn),
+ sde_connector_get_property_values(conn->state),
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ SDE_RM_TOPOLOGY_UNKNOWN);
}
}
@@ -1132,8 +1132,12 @@ static int _sde_rm_commit_rsvp(
sde_connector_get_property_values(conn_state),
CONNECTOR_PROP_TOPOLOGY_NAME,
rsvp->topology);
- if (ret)
+ if (ret) {
+ SDE_ERROR("failed to set topology name property, ret %d\n",
+ ret);
_sde_rm_release_rsvp(rm, rsvp, conn_state->connector);
+ return ret;
+ }
/* Swap next rsvp to be the active */
for (type = 0; type < SDE_HW_BLK_MAX; type++) {
@@ -1226,6 +1230,12 @@ int sde_rm_reserve(
_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
rsvp_cur = NULL;
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
+ (void) msm_property_set_property(
+ sde_connector_get_propinfo(
+ conn_state->connector),
+ sde_connector_get_property_values(conn_state),
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ SDE_RM_TOPOLOGY_UNKNOWN);
}
/* Check the proposed reservation, store it in hw's "next" field */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index ece9f4102c0e..7f8acb3ebfcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -714,7 +714,7 @@ nv4a_chipset = {
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
.mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
+ .mmu = nv04_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
.timer = nv41_timer_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index d4d8942b1347..e55f8302d08a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
}
if (type == 0x00000010) {
- if (!nv31_mpeg_mthd(mpeg, mthd, data))
+ if (nv31_mpeg_mthd(mpeg, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index d433cfa4a8ab..36af0a8927fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
}
if (type == 0x00000010) {
- if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+ if (nv44_mpeg_mthd(subdev->device, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 35310336dd0a..d684e2b79d2b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -213,8 +213,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo->placement.num_busy_placement = 0;
for (i = 0; i < rbo->placement.num_placement; i++) {
if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
- if (rbo->placements[0].fpfn < fpfn)
- rbo->placements[0].fpfn = fpfn;
+ if (rbo->placements[i].fpfn < fpfn)
+ rbo->placements[i].fpfn = fpfn;
} else {
rbo->placement.busy_placement =
&rbo->placements[i];
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 4f5fa8d65fe9..144367c0c28f 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
if (unlikely(ret != 0))
goto out_err0;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed)
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
}
rcu_read_unlock();
+ if (require_existed)
+ return -EPERM;
+
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
@@ -635,7 +639,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->hash.key;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf);
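
To make the new flag concrete, a hedged sketch of both behaviours (the caller shown is hypothetical): with require_existed false the call may create a fresh reference, with true it only succeeds if this file already holds one and otherwise returns -EPERM:

	/* create-or-reference: the legacy behaviour */
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);

	/* reference only if previously opened by this client */
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, true);
	if (ret == -EPERM)
		DRM_ERROR("object was not opened by this client\n");
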
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 8e689b439890..6c649f7b5929 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -539,7 +539,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL))
@@ -702,6 +702,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
}
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+ if (!base) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (base->refcount_release != vmw_user_fence_base_release) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ ttm_base_object_unref(&base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return base;
+}
+
+
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -727,13 +762,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
arg->kernel_cookie = jiffies + wait_timeout;
}
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Wait invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
@@ -772,13 +803,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Fence signaled invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fman_from_fence(fence);
@@ -1093,6 +1120,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_event_arg *) data;
struct vmw_fence_obj *fence = NULL;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct ttm_object_file *tfile = vmw_fp->tfile;
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
@@ -1106,24 +1134,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup_for_ref(dev_priv->tdev,
- arg->handle);
-
- if (unlikely(base == NULL)) {
- DRM_ERROR("Fence event invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ vmw_fence_obj_lookup(tfile, arg->handle);
+
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
fence = &(container_of(base, struct vmw_user_fence,
base)->fence);
(void) vmw_fence_obj_reference(fence);
if (user_fence_rep != NULL) {
- bool existed;
-
ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, &existed);
+ TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
@@ -1166,8 +1188,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
return 0;
out_no_create:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03c8c54..5ec24fd801cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = dev_priv->has_dx;
break;
default:
- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
- param->param);
return -EINVAL;
}
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- if (unlikely(arg->pad64 != 0)) {
+ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e57667ca7557..dbca128a9aa6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -591,7 +591,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed);
+ TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->dma.base);
@@ -775,7 +775,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
*handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL);
+ TTM_REF_USAGE, NULL, false);
}
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7d620e82e000..c9c04ccccdd9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -715,11 +715,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
128;
num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
num_sizes += req->mip_levels[i];
+ }
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+ num_sizes == 0)
return -EINVAL;
size = vmw_user_surface_size + 128 +
@@ -904,17 +907,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
uint32_t handle;
struct ttm_base_object *base;
int ret;
+ bool require_exist = false;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
- if (unlikely(drm_is_render_client(file_priv))) {
- DRM_ERROR("Render client refused legacy "
- "surface reference.\n");
- return -EACCES;
- }
+ if (unlikely(drm_is_render_client(file_priv)))
+ require_exist = true;
+
if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
@@ -942,17 +944,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
/*
* Make sure the surface creator has the same
- * authenticating master.
+ * authenticating master, or is already registered with us.
*/
if (drm_is_primary_client(file_priv) &&
- user_srf->master != file_priv->master) {
- DRM_ERROR("Trying to reference surface outside of"
- " master domain.\n");
- ret = -EACCES;
- goto out_bad_resource;
- }
+ user_srf->master != file_priv->master)
+ require_exist = true;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+ require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 6e2a0e3f2645..fa95b4dfe718 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -560,16 +560,70 @@ static inline unsigned int _fixup_cache_range_op(unsigned int op)
}
#endif
-int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
- uint64_t size, unsigned int op)
+static int kgsl_do_cache_op(struct page *page, void *addr,
+ uint64_t offset, uint64_t size, unsigned int op)
{
+ void (*cache_op)(const void *, const void *);
+
/*
- * If the buffer is mapped in the kernel operate on that address
- * otherwise use the user address
+ * The dmac_xxx_range functions handle addresses and sizes that
+ * are not aligned to the cacheline size correctly.
*/
+ switch (_fixup_cache_range_op(op)) {
+ case KGSL_CACHE_OP_FLUSH:
+ cache_op = dmac_flush_range;
+ break;
+ case KGSL_CACHE_OP_CLEAN:
+ cache_op = dmac_clean_range;
+ break;
+ case KGSL_CACHE_OP_INV:
+ cache_op = dmac_inv_range;
+ break;
+ default:
+ return -EINVAL;
+ }
- void *addr = (memdesc->hostptr) ?
- memdesc->hostptr : (void *) memdesc->useraddr;
+ if (page != NULL) {
+ unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+ /*
+ * page_address() returns the kernel virtual address of page.
+ * For high memory kernel virtual address exists only if page
+ * has been mapped. So use a version of kmap rather than
+ * page_address() for high memory.
+ */
+ if (PageHighMem(page)) {
+ offset &= ~PAGE_MASK;
+
+ do {
+ unsigned int len = size;
+
+ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+
+ page = pfn_to_page(pfn++);
+ addr = kmap_atomic(page);
+ cache_op(addr + offset, addr + offset + len);
+ kunmap_atomic(addr);
+
+ size -= len;
+ offset = 0;
+ } while (size);
+
+ return 0;
+ }
+
+ addr = page_address(page);
+ }
+
+ cache_op(addr + offset, addr + offset + (size_t) size);
+ return 0;
+}
+
+int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
+ uint64_t size, unsigned int op)
+{
+ void *addr = NULL;
+ int ret = 0;
if (size == 0 || size > UINT_MAX)
return -EINVAL;
@@ -578,38 +632,59 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
if ((offset + size < offset) || (offset + size < size))
return -ERANGE;
- /* Make sure the offset + size do not overflow the address */
- if (addr + ((size_t) offset + (size_t) size) < addr)
- return -ERANGE;
-
/* Check that offset+length does not exceed memdesc->size */
if (offset + size > memdesc->size)
return -ERANGE;
- /* Return quietly if the buffer isn't mapped on the CPU */
- if (addr == NULL)
- return 0;
+ if (memdesc->hostptr) {
+ addr = memdesc->hostptr;
+ /* Make sure the offset + size do not overflow the address */
+ if (addr + ((size_t) offset + (size_t) size) < addr)
+ return -ERANGE;
- addr = addr + offset;
+ ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
+ return ret;
+ }
/*
- * The dmac_xxx_range functions handle addresses and sizes that
- * are not aligned to the cacheline size correctly.
+ * If the buffer is not mapped in the kernel, perform the cache
+ * operations after temporarily mapping it into the kernel.
*/
+ if (memdesc->sgt != NULL) {
+ struct scatterlist *sg;
+ unsigned int i, pos = 0;
- switch (_fixup_cache_range_op(op)) {
- case KGSL_CACHE_OP_FLUSH:
- dmac_flush_range(addr, addr + (size_t) size);
- break;
- case KGSL_CACHE_OP_CLEAN:
- dmac_clean_range(addr, addr + (size_t) size);
- break;
- case KGSL_CACHE_OP_INV:
- dmac_inv_range(addr, addr + (size_t) size);
- break;
- }
+ for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
+ uint64_t sg_offset, sg_left;
- return 0;
+ if (offset >= (pos + sg->length)) {
+ pos += sg->length;
+ continue;
+ }
+ sg_offset = offset > pos ? offset - pos : 0;
+ sg_left = (sg->length - sg_offset > size) ? size :
+ sg->length - sg_offset;
+ ret = kgsl_do_cache_op(sg_page(sg), NULL, sg_offset,
+ sg_left, op);
+ size -= sg_left;
+ if (size == 0)
+ break;
+ pos += sg->length;
+ }
+ } else if (memdesc->pages != NULL) {
+ addr = vmap(memdesc->pages, memdesc->page_count,
+ VM_IOREMAP, pgprot_writecombine(PAGE_KERNEL));
+ if (addr == NULL)
+ return -ENOMEM;
+
+ /* Make sure the offset + size do not overflow the address */
+ if (addr + ((size_t) offset + (size_t) size) < addr)
+ return -ERANGE;
+
+ ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
+ vunmap(addr);
+ }
+ return ret;
}
EXPORT_SYMBOL(kgsl_cache_range_op);
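
A usage sketch under the reworked paths (the memdesc below is hypothetical); the same call now works whether the buffer has a kernel hostptr, only an sgt, or only a pages array:

	ret = kgsl_cache_range_op(memdesc, 0, memdesc->size,
			KGSL_CACHE_OP_FLUSH);
	if (ret)
		return ret;
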
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 1ef37c727572..d037454fe7b8 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -73,7 +73,6 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
void *in, *out;
unsigned long flags;
int ret, err = 0;
- unsigned long t;
struct page *page;
spin_lock_irqsave(&newchannel->lock, flags);
@@ -183,11 +182,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
goto error1;
}
- t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
- if (t == 0) {
- err = -ETIMEDOUT;
- goto error1;
- }
+ wait_for_completion(&open_info->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
@@ -375,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_msginfo *submsginfo;
+ struct vmbus_channel_msginfo *submsginfo, *tmp;
u32 msgcount;
struct list_head *curr;
u32 next_gpadl_handle;
@@ -437,6 +432,13 @@ cleanup:
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ if (msgcount > 1) {
+ list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
+ msglistentry) {
+ kfree(submsginfo);
+ }
+ }
+
kfree(msginfo);
return ret;
}
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 4fc2e8836e60..2bbc53025549 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -429,7 +429,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
union hv_connection_id conn_id;
int ret = 0;
int retries = 0;
- u32 msec = 1;
+ u32 usec = 1;
conn_id.asu32 = 0;
conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
@@ -462,9 +462,9 @@ int vmbus_post_msg(void *buffer, size_t buflen)
}
retries++;
- msleep(msec);
- if (msec < 2048)
- msec *= 2;
+ udelay(usec);
+ if (usec < 2048)
+ usec *= 2;
}
return ret;
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 57c191798699..8ce1f2e22912 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -274,7 +274,7 @@ cleanup:
*
* This routine is called normally during driver unloading or exiting.
*/
-void hv_cleanup(void)
+void hv_cleanup(bool crash)
{
union hv_x64_msr_hypercall_contents hypercall_msr;
@@ -284,7 +284,8 @@ void hv_cleanup(void)
if (hv_context.hypercall_page) {
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
- vfree(hv_context.hypercall_page);
+ if (!crash)
+ vfree(hv_context.hypercall_page);
hv_context.hypercall_page = NULL;
}
@@ -304,8 +305,10 @@ void hv_cleanup(void)
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
- vfree(hv_context.tsc_page);
- hv_context.tsc_page = NULL;
+ if (!crash) {
+ vfree(hv_context.tsc_page);
+ hv_context.tsc_page = NULL;
+ }
}
#endif
}
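
The boolean maps directly onto the call sites updated below; a compressed sketch of the distinction:

	hv_cleanup(false);	/* unload/kexec: the hypercall and TSC pages are vfree()d */
	hv_cleanup(true);	/* crash/kdump: the pages are intentionally left allocated */
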
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b853b4b083bd..354da7f207b7 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -430,16 +430,27 @@ struct dm_info_msg {
* currently hot added. We hot add in multiples of 128M
* chunks; it is possible that we may not be able to bring
* online all the pages in the region. The range
- * covered_end_pfn defines the pages that can
+ * covered_start_pfn:covered_end_pfn defines the pages that can
* be brought online.
*/
struct hv_hotadd_state {
struct list_head list;
unsigned long start_pfn;
+ unsigned long covered_start_pfn;
unsigned long covered_end_pfn;
unsigned long ha_end_pfn;
unsigned long end_pfn;
+ /*
+ * A list of gaps.
+ */
+ struct list_head gap_list;
+};
+
+struct hv_hotadd_gap {
+ struct list_head list;
+ unsigned long start_pfn;
+ unsigned long end_pfn;
};
struct balloon_state {
@@ -595,18 +606,46 @@ static struct notifier_block hv_memory_nb = {
.priority = 0
};
+/* Check if the particular page is backed and can be onlined and online it. */
+static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
+{
+ unsigned long cur_start_pgp;
+ unsigned long cur_end_pgp;
+ struct hv_hotadd_gap *gap;
+
+ cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
+ cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+
+ /* The page is not backed. */
+ if (((unsigned long)pg < cur_start_pgp) ||
+ ((unsigned long)pg >= cur_end_pgp))
+ return;
+
+ /* Check for gaps. */
+ list_for_each_entry(gap, &has->gap_list, list) {
+ cur_start_pgp = (unsigned long)
+ pfn_to_page(gap->start_pfn);
+ cur_end_pgp = (unsigned long)
+ pfn_to_page(gap->end_pfn);
+ if (((unsigned long)pg >= cur_start_pgp) &&
+ ((unsigned long)pg < cur_end_pgp)) {
+ return;
+ }
+ }
-static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
+ /* This frame is currently backed; online the page. */
+ __online_page_set_limits(pg);
+ __online_page_increment_counters(pg);
+ __online_page_free(pg);
+}
+
+static void hv_bring_pgs_online(struct hv_hotadd_state *has,
+ unsigned long start_pfn, unsigned long size)
{
int i;
- for (i = 0; i < size; i++) {
- struct page *pg;
- pg = pfn_to_page(start_pfn + i);
- __online_page_set_limits(pg);
- __online_page_increment_counters(pg);
- __online_page_free(pg);
- }
+ for (i = 0; i < size; i++)
+ hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
static void hv_mem_hot_add(unsigned long start, unsigned long size,
@@ -682,26 +721,25 @@ static void hv_online_page(struct page *pg)
list_for_each(cur, &dm_device.ha_region_list) {
has = list_entry(cur, struct hv_hotadd_state, list);
- cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
- cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+ cur_start_pgp = (unsigned long)
+ pfn_to_page(has->start_pfn);
+ cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
- if (((unsigned long)pg >= cur_start_pgp) &&
- ((unsigned long)pg < cur_end_pgp)) {
- /*
- * This frame is currently backed; online the
- * page.
- */
- __online_page_set_limits(pg);
- __online_page_increment_counters(pg);
- __online_page_free(pg);
- }
+ /* The page belongs to a different HAS. */
+ if (((unsigned long)pg < cur_start_pgp) ||
+ ((unsigned long)pg >= cur_end_pgp))
+ continue;
+
+ hv_page_online_one(has, pg);
+ break;
}
}
-static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
struct list_head *cur;
struct hv_hotadd_state *has;
+ struct hv_hotadd_gap *gap;
unsigned long residual, new_inc;
if (list_empty(&dm_device.ha_region_list))
@@ -714,8 +752,26 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
- if ((start_pfn >= has->end_pfn))
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
+
+ /*
+ * If the current start pfn is not where the covered_end
+ * is, create a gap and update covered_end_pfn.
+ */
+ if (has->covered_end_pfn != start_pfn) {
+ gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
+ if (!gap)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&gap->list);
+ gap->start_pfn = has->covered_end_pfn;
+ gap->end_pfn = start_pfn;
+ list_add_tail(&gap->list, &has->gap_list);
+
+ has->covered_end_pfn = start_pfn;
+ }
+
/*
* If the current hot add-request extends beyond
* our current limit; extend it.
@@ -732,19 +788,10 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
has->end_pfn += new_inc;
}
- /*
- * If the current start pfn is not where the covered_end
- * is, update it.
- */
-
- if (has->covered_end_pfn != start_pfn)
- has->covered_end_pfn = start_pfn;
-
- return true;
-
+ return 1;
}
- return false;
+ return 0;
}
static unsigned long handle_pg_range(unsigned long pg_start,
@@ -768,7 +815,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
- if ((start_pfn >= has->end_pfn))
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
old_covered_state = has->covered_end_pfn;
@@ -783,6 +830,8 @@ static unsigned long handle_pg_range(unsigned long pg_start,
if (pgs_ol > pfn_cnt)
pgs_ol = pfn_cnt;
+ has->covered_end_pfn += pgs_ol;
+ pfn_cnt -= pgs_ol;
/*
* Check if the corresponding memory block is already
* online by checking its last previously backed page.
@@ -791,10 +840,8 @@ static unsigned long handle_pg_range(unsigned long pg_start,
*/
if (start_pfn > has->start_pfn &&
!PageReserved(pfn_to_page(start_pfn - 1)))
- hv_bring_pgs_online(start_pfn, pgs_ol);
+ hv_bring_pgs_online(has, start_pfn, pgs_ol);
- has->covered_end_pfn += pgs_ol;
- pfn_cnt -= pgs_ol;
}
if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
@@ -832,13 +879,19 @@ static unsigned long process_hot_add(unsigned long pg_start,
unsigned long rg_size)
{
struct hv_hotadd_state *ha_region = NULL;
+ int covered;
if (pfn_cnt == 0)
return 0;
- if (!dm_device.host_specified_ha_region)
- if (pfn_covered(pg_start, pfn_cnt))
+ if (!dm_device.host_specified_ha_region) {
+ covered = pfn_covered(pg_start, pfn_cnt);
+ if (covered < 0)
+ return 0;
+
+ if (covered)
goto do_pg_range;
+ }
/*
* If the host has specified a hot-add range; deal with it first.
@@ -850,10 +903,12 @@ static unsigned long process_hot_add(unsigned long pg_start,
return 0;
INIT_LIST_HEAD(&ha_region->list);
+ INIT_LIST_HEAD(&ha_region->gap_list);
list_add_tail(&ha_region->list, &dm_device.ha_region_list);
ha_region->start_pfn = rg_start;
ha_region->ha_end_pfn = rg_start;
+ ha_region->covered_start_pfn = pg_start;
ha_region->covered_end_pfn = pg_start;
ha_region->end_pfn = rg_start + rg_size;
}
@@ -1581,6 +1636,7 @@ static int balloon_remove(struct hv_device *dev)
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
struct list_head *cur, *tmp;
struct hv_hotadd_state *has;
+ struct hv_hotadd_gap *gap, *tmp_gap;
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -1597,6 +1653,10 @@ static int balloon_remove(struct hv_device *dev)
#endif
list_for_each_safe(cur, tmp, &dm->ha_region_list) {
has = list_entry(cur, struct hv_hotadd_state, list);
+ list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
+ list_del(&gap->list);
+ kfree(gap);
+ }
list_del(&has->list);
kfree(has);
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 12156db2e88e..75e383e6d03d 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -581,7 +581,7 @@ struct hv_ring_buffer_debug_info {
extern int hv_init(void);
-extern void hv_cleanup(void);
+extern void hv_cleanup(bool crash);
extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 509ed9731630..802dcb409030 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -889,7 +889,7 @@ err_alloc:
bus_unregister(&hv_bus);
err_cleanup:
- hv_cleanup();
+ hv_cleanup(false);
return ret;
}
@@ -1254,7 +1254,7 @@ static void hv_kexec_handler(void)
vmbus_initiate_unload();
for_each_online_cpu(cpu)
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
- hv_cleanup();
+ hv_cleanup(false);
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -1266,7 +1266,7 @@ static void hv_crash_handler(struct pt_regs *regs)
* for kdump.
*/
hv_synic_cleanup(NULL);
- hv_cleanup();
+ hv_cleanup(true);
};
static int __init hv_acpi_init(void)
@@ -1330,7 +1330,7 @@ static void __exit vmbus_exit(void)
&hyperv_panic_block);
}
bus_unregister(&hv_bus);
- hv_cleanup();
+ hv_cleanup(false);
for_each_online_cpu(cpu) {
tasklet_kill(hv_context.event_dpc[cpu]);
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 0470fc843d4e..9b6854607d73 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -151,7 +151,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct tiadc_device *adc_dev = iio_priv(indio_dev);
- unsigned int status, config;
+ unsigned int status, config, adc_fsm;
+ unsigned short count = 0;
+
status = tiadc_readl(adc_dev, REG_IRQSTATUS);
/*
@@ -165,6 +167,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
tiadc_writel(adc_dev, REG_CTRL, config);
tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
| IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
+
+ /*
+ * Wait for idle state. The ADC needs to finish the current
+ * conversion before disabling the module.
+ */
+ do {
+ adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
+ } while (adc_fsm != 0x10 && count++ < 100);
+
tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
return IRQ_HANDLED;
} else if (status & IRQENB_FIFO1THRES) {
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 595511022795..0a86ef43e781 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,8 +51,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
st->report_state.report_id,
st->report_state.index,
HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
-
- poll_value = hid_sensor_read_poll_value(st);
} else {
int val;
@@ -89,7 +87,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
sizeof(state_val), &state_val);
- if (state && poll_value)
+ if (state)
+ poll_value = hid_sensor_read_poll_value(st);
+ if (poll_value > 0)
msleep_interruptible(poll_value * 2);
return 0;
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index acb3b303d800..90841abd3ce4 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -28,6 +28,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include "bmg160.h"
#define BMG160_IRQ_NAME "bmg160_event"
@@ -53,6 +54,9 @@
#define BMG160_NO_FILTER 0
#define BMG160_DEF_BW 100
+#define BMG160_GYRO_REG_RESET 0x14
+#define BMG160_GYRO_RESET_VAL 0xb6
+
#define BMG160_REG_INT_MAP_0 0x17
#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
@@ -186,6 +190,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
int ret;
unsigned int val;
+ /*
+ * Reset chip to get it in a known good state. A delay of 30ms after
+ * reset is required according to the datasheet.
+ */
+ regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
+ BMG160_GYRO_RESET_VAL);
+ usleep_range(30000, 30700);
+
ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
if (ret < 0) {
dev_err(data->dev, "Error reading reg_chip_id\n");
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index d96aa27dfcdc..db64adfbe1af 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf,
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
epirq = &interface->endpoint[0].desc;
epout = &interface->endpoint[1].desc;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 16f000a76de5..3258baf3282e 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -189,6 +189,7 @@ static const struct xpad_device {
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -310,6 +311,7 @@ static struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
+ XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
{ }
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 9365535ba7f1..50a7faa504f7 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -675,6 +675,10 @@ static int cm109_usb_probe(struct usb_interface *intf,
int error = -ENOMEM;
interface = intf->cur_altsetting;
+
+ if (interface->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index 21898c308075..0362095d4c38 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -62,7 +62,7 @@ struct hbtp_data {
u32 ts_pinctrl_seq_delay;
u32 ddic_pinctrl_seq_delay[HBTP_PINCTRL_DDIC_SEQ_NUM];
u32 fb_resume_seq_delay;
- bool lcd_on;
+ int lcd_state;
bool power_suspended;
bool power_sync_enabled;
bool power_sig_enabled;
@@ -108,6 +108,7 @@ static int fb_notifier_callback(struct notifier_block *self,
unsigned long event, void *data)
{
int blank;
+ int lcd_state;
struct fb_event *evdata = data;
struct fb_info *fbi = NULL;
struct hbtp_data *hbtp_data =
@@ -133,27 +134,32 @@ static int fb_notifier_callback(struct notifier_block *self,
(event == FB_EARLY_EVENT_BLANK ||
event == FB_R_EARLY_EVENT_BLANK)) {
blank = *(int *)(evdata->data);
+ lcd_state = hbtp->lcd_state;
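+		/* Compare against the last recorded LCD state so repeated
+		 * blank/unblank notifications are only logged, not re-handled.
+		 */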
if (event == FB_EARLY_EVENT_BLANK) {
- if (blank == FB_BLANK_UNBLANK) {
+ if (blank <= FB_BLANK_NORMAL &&
+ lcd_state == FB_BLANK_POWERDOWN) {
pr_debug("%s: receives EARLY_BLANK:UNBLANK\n",
__func__);
- hbtp_data->lcd_on = true;
hbtp_fb_early_resume(hbtp_data);
- } else if (blank == FB_BLANK_POWERDOWN) {
+ } else if (blank == FB_BLANK_POWERDOWN &&
+ lcd_state <= FB_BLANK_NORMAL) {
pr_debug("%s: receives EARLY_BLANK:POWERDOWN\n",
__func__);
- hbtp_data->lcd_on = false;
+ } else {
+ pr_debug("%s: receives EARLY_BLANK:%d in %d state\n",
+ __func__, blank, lcd_state);
}
} else if (event == FB_R_EARLY_EVENT_BLANK) {
- if (blank == FB_BLANK_UNBLANK) {
+ if (blank <= FB_BLANK_NORMAL) {
pr_debug("%s: receives R_EARLY_BALNK:UNBLANK\n",
__func__);
- hbtp_data->lcd_on = false;
hbtp_fb_suspend(hbtp_data);
} else if (blank == FB_BLANK_POWERDOWN) {
pr_debug("%s: receives R_EARLY_BALNK:POWERDOWN\n",
__func__);
- hbtp_data->lcd_on = true;
+ } else {
+ pr_debug("%s: receives R_EARLY_BALNK:%d in %d state\n",
+ __func__, blank, lcd_state);
}
}
}
@@ -161,13 +167,20 @@ static int fb_notifier_callback(struct notifier_block *self,
if (evdata->data && hbtp_data &&
event == FB_EVENT_BLANK) {
blank = *(int *)(evdata->data);
- if (blank == FB_BLANK_POWERDOWN) {
+ lcd_state = hbtp->lcd_state;
+ if (blank == FB_BLANK_POWERDOWN &&
+ lcd_state <= FB_BLANK_NORMAL) {
pr_debug("%s: receives BLANK:POWERDOWN\n", __func__);
hbtp_fb_suspend(hbtp_data);
- } else if (blank == FB_BLANK_UNBLANK) {
+ } else if (blank <= FB_BLANK_NORMAL &&
+ lcd_state == FB_BLANK_POWERDOWN) {
pr_debug("%s: receives BLANK:UNBLANK\n", __func__);
hbtp_fb_resume(hbtp_data);
+ } else {
+ pr_debug("%s: receives BLANK:%d in %d state\n",
+ __func__, blank, lcd_state);
}
+ hbtp_data->lcd_state = blank;
}
return 0;
}
@@ -1439,8 +1452,10 @@ static int __init hbtp_init(void)
hbtp->sensor_data = kzalloc(sizeof(struct hbtp_sensor_data),
GFP_KERNEL);
- if (!hbtp->sensor_data)
+ if (!hbtp->sensor_data) {
+ error = -ENOMEM;
goto err_sensordata;
+ }
mutex_init(&hbtp->mutex);
mutex_init(&hbtp->sensormutex);
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 9c0ea36913b4..f4e8fbec6a94 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
return -EINVAL;
alt = pcu->ctrl_intf->cur_altsetting;
+
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
pcu->ep_ctrl = &alt->endpoint[0].desc;
pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
diff --git a/drivers/input/misc/qpnp-power-on.c b/drivers/input/misc/qpnp-power-on.c
index add11d47ea2f..339f94c072f4 100644
--- a/drivers/input/misc/qpnp-power-on.c
+++ b/drivers/input/misc/qpnp-power-on.c
@@ -207,7 +207,7 @@ struct qpnp_pon {
int pon_power_off_reason;
int num_pon_reg;
int num_pon_config;
- u32 dbc;
+ u32 dbc_time_us;
u32 uvlo;
int warm_reset_poff_type;
int hard_reset_poff_type;
@@ -219,6 +219,8 @@ struct qpnp_pon {
u8 warm_reset_reason2;
bool is_spon;
bool store_hard_reset_reason;
+ bool kpdpwr_dbc_enable;
+ ktime_t kpdpwr_last_release_time;
};
static int pon_ship_mode_en;
@@ -381,7 +383,7 @@ static int qpnp_pon_set_dbc(struct qpnp_pon *pon, u32 delay)
int rc = 0;
u32 val;
- if (delay == pon->dbc)
+ if (delay == pon->dbc_time_us)
goto out;
if (pon->pon_input)
@@ -409,7 +411,7 @@ static int qpnp_pon_set_dbc(struct qpnp_pon *pon, u32 delay)
goto unlock;
}
- pon->dbc = delay;
+ pon->dbc_time_us = delay;
unlock:
if (pon->pon_input)
@@ -418,12 +420,34 @@ out:
return rc;
}
+static int qpnp_pon_get_dbc(struct qpnp_pon *pon, u32 *delay)
+{
+ int rc;
+ unsigned int val;
+
+ rc = regmap_read(pon->regmap, QPNP_PON_DBC_CTL(pon), &val);
+ if (rc) {
+ pr_err("Unable to read pon_dbc_ctl rc=%d\n", rc);
+ return rc;
+ }
+ val &= QPNP_PON_DBC_DELAY_MASK(pon);
+
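+	/*
+	 * The register field is an exponent: debounce time in us is
+	 * USEC_PER_SEC / 2^(DELAY_BIT_SHIFT - val).
+	 */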
+ if (is_pon_gen2(pon))
+ *delay = USEC_PER_SEC /
+ (1 << (QPNP_PON_GEN2_DELAY_BIT_SHIFT - val));
+ else
+ *delay = USEC_PER_SEC /
+ (1 << (QPNP_PON_DELAY_BIT_SHIFT - val));
+
+ return rc;
+}
+
static ssize_t qpnp_pon_dbc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qpnp_pon *pon = dev_get_drvdata(dev);
- return snprintf(buf, QPNP_PON_BUFFER_SIZE, "%d\n", pon->dbc);
+ return snprintf(buf, QPNP_PON_BUFFER_SIZE, "%d\n", pon->dbc_time_us);
}
static ssize_t qpnp_pon_dbc_store(struct device *dev,
@@ -777,6 +801,7 @@ qpnp_pon_input_dispatch(struct qpnp_pon *pon, u32 pon_type)
u8 pon_rt_bit = 0;
u32 key_status;
uint pon_rt_sts;
+ u64 elapsed_us;
cfg = qpnp_get_cfg(pon, pon_type);
if (!cfg)
@@ -786,6 +811,15 @@ qpnp_pon_input_dispatch(struct qpnp_pon *pon, u32 pon_type)
if (!cfg->key_code)
return 0;
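+	/* Software debounce: drop KPDPWR events that arrive within
+	 * dbc_time_us of the previous key release.
+	 */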
+ if (pon->kpdpwr_dbc_enable && cfg->pon_type == PON_KPDPWR) {
+ elapsed_us = ktime_us_delta(ktime_get(),
+ pon->kpdpwr_last_release_time);
+ if (elapsed_us < pon->dbc_time_us) {
+ pr_debug("Ignoring kpdpwr event - within debounce time\n");
+ return 0;
+ }
+ }
+
/* check the RT status to get the current status of the line */
rc = regmap_read(pon->regmap, QPNP_PON_RT_STS(pon), &pon_rt_sts);
if (rc) {
@@ -814,6 +848,11 @@ qpnp_pon_input_dispatch(struct qpnp_pon *pon, u32 pon_type)
cfg->key_code, pon_rt_sts);
key_status = pon_rt_sts & pon_rt_bit;
+ if (pon->kpdpwr_dbc_enable && cfg->pon_type == PON_KPDPWR) {
+ if (!key_status)
+ pon->kpdpwr_last_release_time = ktime_get();
+ }
+
/*
* simulate press event in case release event occurred
* without a press event
@@ -2233,8 +2272,22 @@ static int qpnp_pon_probe(struct platform_device *pdev)
}
} else {
rc = qpnp_pon_set_dbc(pon, delay);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to set PON debounce delay rc=%d\n", rc);
+ return rc;
+ }
+ }
+ rc = qpnp_pon_get_dbc(pon, &pon->dbc_time_us);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to get PON debounce delay rc=%d\n", rc);
+ return rc;
}
+ pon->kpdpwr_dbc_enable = of_property_read_bool(pon->pdev->dev.of_node,
+ "qcom,kpdpwr-sw-debounce");
+
rc = of_property_read_u32(pon->pdev->dev.of_node,
"qcom,warm-reset-poweroff-type",
&pon->warm_reset_poff_type);
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 79c964c075f1..6e7ff9561d92 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
int ret, pipe, i;
interface = intf->cur_altsetting;
+
+ if (interface->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -ENODEV;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index ed1935f300a7..da5458dfb1e3 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data)
static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
{
- if (data->ic_type != 0x0E)
- return false;
-
- switch (data->product_id) {
- case 0x05 ... 0x07:
- case 0x09:
- case 0x13:
+ if (data->ic_type == 0x0E) {
+ switch (data->product_id) {
+ case 0x05 ... 0x07:
+ case 0x09:
+ case 0x13:
+ return true;
+ }
+ } else if (data->ic_type == 0x08 && data->product_id == 0x26) {
+ /* ASUS EeeBook X205TA */
return true;
- default:
- return false;
}
+
+ return false;
}
static int __elan_initialize(struct elan_tp_data *data)
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 43482ae1e049..1a2b2620421e 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1122,6 +1122,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
@@ -1528,6 +1529,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
+ /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 0cdd95801a25..e7b96f1ac2c5 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Dell Embedded Box PC 3000 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ },
+ },
+ {
/* OQO Model 01 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
@@ -678,6 +685,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
},
},
+ {
+ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ },
+ },
{ }
};
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index cd852059b99e..df4bea96d7ed 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
int error;
int i;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
input_dev = input_allocate_device();
if (!hanwang || !input_dev) {
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index d2ac7c2b5b82..2812f9236b7d 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
struct input_dev *input_dev;
int error = -ENOMEM;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
input_dev = input_allocate_device();
if (!kbtab || !input_dev)
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index bbe872001407..78bdd24af28b 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -1252,7 +1252,7 @@ static void fts_event_handler(struct work_struct *work)
static int cx_crc_check(void)
{
unsigned char regAdd1[3] = {FTS_CMD_HW_REG_R, ADDR_CRC_BYTE0, ADDR_CRC_BYTE1};
- unsigned char val;
+ unsigned char val = 0;
unsigned char crc_status;
unsigned int error;
diff --git a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
index 59c73f4c4edb..f3becac79102 100644
--- a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
+++ b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
@@ -93,7 +93,7 @@ int getFirmwareVersion(u16 *fw_vers, u16 *config_id)
int flash_status(void)
{
u8 cmd[2] = {FLASH_CMD_READSTATUS, 0x00};
- u8 readData;
+ u8 readData = 0;
logError(0, "%s Reading flash_status...\n", tag);
if (fts_readCmd(cmd, 2, &readData, FLASH_STATUS_BYTES) < 0) {
diff --git a/drivers/input/touchscreen/st/fts_lib/ftsGesture.c b/drivers/input/touchscreen/st/fts_lib/ftsGesture.c
index fda4ab281948..ee97a417d4cb 100644
--- a/drivers/input/touchscreen/st/fts_lib/ftsGesture.c
+++ b/drivers/input/touchscreen/st/fts_lib/ftsGesture.c
@@ -28,7 +28,7 @@ static u8 custom_gesture_index[GESTURE_CUSTOM_NUMBER] = { 0 };
int enableGesture(u8 *mask, int size)
{
u8 cmd[size+2];
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
int i, res;
int event_to_search[4] = {EVENTID_GESTURE, EVENT_TYPE_ENB, 0x00, GESTURE_ENABLE};
@@ -82,7 +82,7 @@ int enableGesture(u8 *mask, int size)
int disableGesture(u8 *mask, int size)
{
u8 cmd[2+GESTURE_MASK_SIZE];
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
u8 temp;
int i, res;
int event_to_search[4] = { EVENTID_GESTURE, EVENT_TYPE_ENB, 0x00, GESTURE_DISABLE };
@@ -141,7 +141,7 @@ int startAddCustomGesture(u8 gestureID)
{
u8 cmd[3] = { FTS_CMD_GESTURE_CMD, GESTURE_START_ADD, gestureID };
int res;
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
int event_to_search[4] = { EVENTID_GESTURE, EVENT_TYPE_ENB, gestureID, GESTURE_START_ADD };
res = fts_writeFwCmd(cmd, 3);
@@ -168,7 +168,7 @@ int finishAddCustomGesture(u8 gestureID)
{
u8 cmd[3] = { FTS_CMD_GESTURE_CMD, GESTURE_FINISH_ADD, gestureID };
int res;
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
int event_to_search[4] = { EVENTID_GESTURE, EVENT_TYPE_ENB, gestureID, GESTURE_FINISH_ADD };
res = fts_writeFwCmd(cmd, 3);
@@ -199,7 +199,7 @@ int loadCustomGesture(u8 *template, u8 gestureID)
int toWrite, offset = 0;
u8 cmd[TEMPLATE_CHUNK + 5];
int event_to_search[4] = { EVENTID_GESTURE, EVENT_TYPE_ENB, gestureID, GESTURE_DATA_ADD };
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
logError(0, "%s Starting adding custom gesture procedure...\n", tag);
@@ -359,7 +359,7 @@ int removeCustomGesture(u8 gestureID)
int res, index;
u8 cmd[3] = { FTS_CMD_GESTURE_CMD, GETURE_REMOVE_CUSTOM, gestureID };
int event_to_search[4] = {EVENTID_GESTURE, EVENT_TYPE_ENB, gestureID, GETURE_REMOVE_CUSTOM };
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
index = gestureID - GESTURE_CUSTOM_OFFSET;
diff --git a/drivers/input/touchscreen/st/fts_lib/ftsTest.c b/drivers/input/touchscreen/st/fts_lib/ftsTest.c
index 68bd04eff316..3810fd02001a 100644
--- a/drivers/input/touchscreen/st/fts_lib/ftsTest.c
+++ b/drivers/input/touchscreen/st/fts_lib/ftsTest.c
@@ -301,7 +301,7 @@ int production_test_ito(void)
{
int res = OK;
u8 cmd;
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
int eventToSearch[2] = {EVENTID_ERROR_EVENT, EVENT_TYPE_ITO}; /* look for ito event */
logError(0, "%s ITO Production test is starting...\n", tag);
@@ -347,7 +347,7 @@ int production_test_initialization(void)
{
int res;
u8 cmd;
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
int eventToSearch[2] = {EVENTID_STATUS_UPDATE, EVENT_TYPE_FULL_INITIALIZATION};
logError(0, "%s INITIALIZATION Production test is starting...\n", tag);
@@ -397,7 +397,7 @@ int ms_compensation_tuning(void)
{
int res;
u8 cmd;
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
int eventToSearch[2] = {EVENTID_STATUS_UPDATE, EVENT_TYPE_MS_TUNING_CMPL};
logError(0, "%s MS INITIALIZATION command sent...\n", tag);
@@ -429,7 +429,7 @@ int ss_compensation_tuning(void)
{
int res;
u8 cmd;
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
int eventToSearch[2] = {EVENTID_STATUS_UPDATE, EVENT_TYPE_SS_TUNING_CMPL};
logError(0, "%s SS INITIALIZATION command sent...\n", tag);
@@ -461,7 +461,7 @@ int lp_timer_calibration(void)
{
int res;
u8 cmd;
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
int eventToSearch[2] = {EVENTID_STATUS_UPDATE, EVENT_TYPE_LPTIMER_TUNING_CMPL};
logError(0, "%s LP TIMER CALIBRATION command sent...\n", tag);
@@ -493,7 +493,7 @@ int save_cx_tuning(void)
{
int res;
u8 cmd;
- u8 readData[FIFO_EVENT_SIZE];
+ u8 readData[FIFO_EVENT_SIZE] = {0};
int eventToSearch[2] = {EVENTID_STATUS_UPDATE, EVENT_TYPE_COMP_DATA_SAVED};
logError(0, "%s SAVE CX command sent...\n", tag);
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 45b466e3bbe8..0146e2c74649 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -500,6 +500,9 @@ static int sur40_probe(struct usb_interface *interface,
if (iface_desc->desc.bInterfaceClass != 0xFF)
return -ENODEV;
+ if (iface_desc->desc.bNumEndpoints < 5)
+ return -ENODEV;
+
/* Use endpoint #4 (0x86). */
endpoint = &iface_desc->endpoint[4].desc;
if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
index 9d61eb110e2f..c1cbec81d7d6 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
@@ -355,18 +355,25 @@ static ssize_t rmidev_read(struct file *filp, char __user *buf,
return -EBADF;
}
- if (count == 0)
- return 0;
+ mutex_lock(&(dev_data->file_mutex));
if (count > (REG_ADDR_LIMIT - *f_pos))
count = REG_ADDR_LIMIT - *f_pos;
- tmpbuf = kzalloc(count + 1, GFP_KERNEL);
- if (!tmpbuf)
- return -ENOMEM;
-
- mutex_lock(&(dev_data->file_mutex));
+ if (count == 0) {
+ retval = 0;
+ goto unlock;
+ }
+ if (*f_pos > REG_ADDR_LIMIT) {
+ retval = -EFAULT;
+ goto unlock;
+ }
+ tmpbuf = kzalloc(count + 1, GFP_KERNEL);
+ if (!tmpbuf) {
+ retval = -ENOMEM;
+ goto unlock;
+ }
retval = synaptics_rmi4_reg_read(rmidev->rmi4_data,
*f_pos,
tmpbuf,
@@ -380,8 +387,9 @@ static ssize_t rmidev_read(struct file *filp, char __user *buf,
*f_pos += retval;
clean_up:
- mutex_unlock(&(dev_data->file_mutex));
kfree(tmpbuf);
+unlock:
+ mutex_unlock(&(dev_data->file_mutex));
return retval;
}
@@ -405,21 +413,31 @@ static ssize_t rmidev_write(struct file *filp, const char __user *buf,
return -EBADF;
}
- if (count == 0)
- return 0;
+ mutex_lock(&(dev_data->file_mutex));
+
+ if (*f_pos > REG_ADDR_LIMIT) {
+ retval = -EFAULT;
+ goto unlock;
+ }
if (count > (REG_ADDR_LIMIT - *f_pos))
count = REG_ADDR_LIMIT - *f_pos;
+ if (count == 0) {
+ retval = 0;
+ goto unlock;
+ }
+
tmpbuf = kzalloc(count + 1, GFP_KERNEL);
- if (!tmpbuf)
- return -ENOMEM;
+ if (!tmpbuf) {
+ retval = -ENOMEM;
+ goto unlock;
+ }
if (copy_from_user(tmpbuf, buf, count)) {
- kfree(tmpbuf);
- return -EFAULT;
+ retval = -EFAULT;
+ goto clean_up;
}
- mutex_lock(&(dev_data->file_mutex));
retval = synaptics_rmi4_reg_write(rmidev->rmi4_data,
*f_pos,
@@ -428,8 +446,10 @@ static ssize_t rmidev_write(struct file *filp, const char __user *buf,
if (retval >= 0)
*f_pos += retval;
- mutex_unlock(&(dev_data->file_mutex));
+clean_up:
kfree(tmpbuf);
+unlock:
+ mutex_unlock(&(dev_data->file_mutex));
return retval;
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index f64111886584..7a504b1ad94d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -41,7 +41,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
config IOMMU_IO_PGTABLE_FAST
bool "Fast ARMv7/v8 Long Descriptor Format"
- select IOMMU_IO_PGTABLE
+ depends on ARM64_DMA_USE_IOMMU
help
Enable support for a subset of the ARM long descriptor pagetable
format. This allocator achieves fast performance by
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 7922608287d7..dc44b40a85f3 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -522,6 +522,8 @@ static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
dma_addr_t iova);
+static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova);
static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
@@ -2190,8 +2192,8 @@ static int arm_smmu_attach_dynamic(struct iommu_domain *domain,
smmu->num_context_banks + 2,
MAX_ASID + 1, GFP_KERNEL);
if (ret < 0) {
- dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
- ret);
+ dev_err_ratelimited(smmu->dev,
+ "dynamic ASID allocation failed: %d\n", ret);
goto out;
}
@@ -2536,6 +2538,23 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ uint64_t ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ flags = arm_smmu_pgtbl_lock(smmu_domain);
+ ret = ops->iova_to_pte(ops, iova);
+ arm_smmu_pgtbl_unlock(smmu_domain, flags);
+ return ret;
+}
+
static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
@@ -3437,6 +3456,7 @@ static struct iommu_ops arm_smmu_ops = {
.enable_config_clocks = arm_smmu_enable_config_clocks,
.disable_config_clocks = arm_smmu_disable_config_clocks,
.is_iova_coherent = arm_smmu_is_iova_coherent,
+ .iova_to_pte = arm_smmu_iova_to_pte,
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f0fc6f7b5d98..0628372f3591 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -908,7 +908,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
* which we used for the IOMMU lookup. Strictly speaking
* we could do this for all PCI devices; we only need to
* get the BDF# from the scope table for ACPI matches. */
- if (pdev->is_virtfn)
+ if (pdev && pdev->is_virtfn)
goto got_pdev;
*bus = drhd->devices[i].bus;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 6a8a9492c771..2d2583c78bdb 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -903,6 +903,19 @@ found_translation:
return 0;
}
+static uint64_t arm_lpae_iova_get_pte(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte pte;
+ int lvl;
+
+ if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte))
+ return pte;
+
+ return 0;
+}
+
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
@@ -1033,6 +1046,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.unmap = arm_lpae_unmap,
.iova_to_phys = arm_lpae_iova_to_phys,
.is_iova_coherent = arm_lpae_is_iova_coherent,
+ .iova_to_pte = arm_lpae_iova_get_pte,
};
return data;
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index e6939c2212d4..2cf213514221 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -124,6 +124,8 @@ struct io_pgtable_ops {
unsigned long iova);
bool (*is_iova_coherent)(struct io_pgtable_ops *ops,
unsigned long iova);
+ uint64_t (*iova_to_pte)(struct io_pgtable_ops *ops,
+ unsigned long iova);
};
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index ffeb47c6b367..776e06facc11 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -470,6 +470,7 @@ static inline void iommu_debug_destroy_tracking(void) { }
static LIST_HEAD(iommu_debug_devices);
static struct dentry *debugfs_tests_dir;
static u32 iters_per_op = 1;
+static void *virt_addr;
struct iommu_debug_device {
struct device *dev;
@@ -1537,6 +1538,68 @@ out_domain_free:
return -EIO;
}
+static ssize_t __iommu_debug_dma_attach_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ struct dma_iommu_mapping *dma_mapping;
+ ssize_t retval = -EINVAL;
+ int val;
+
+ if (kstrtoint_from_user(ubuf, count, 0, &val)) {
+ pr_err("Invalid format. Expected a hex or decimal integer");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (val) {
+		if (dev->archdata.mapping && dev->archdata.mapping->domain) {
+			pr_err("Already attached.\n");
+			retval = -EINVAL;
+			goto out;
+		}
+ if (WARN(dev->archdata.iommu,
+ "Attachment tracking out of sync with device\n")) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+ (SZ_1G * 4ULL));
+
+ if (!dma_mapping)
+ goto out;
+
+ if (arm_iommu_attach_device(dev, dma_mapping))
+ goto out_release_mapping;
+ pr_err("Attached\n");
+ } else {
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(dev->archdata.mapping);
+ pr_err("Detached\n");
+ }
+ retval = count;
+ return retval;
+
+out_release_mapping:
+ arm_iommu_release_mapping(dma_mapping);
+out:
+ return retval;
+}
+
static ssize_t __iommu_debug_attach_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset,
@@ -1585,6 +1648,79 @@ out:
return retval;
}
+static ssize_t iommu_debug_dma_attach_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ return __iommu_debug_dma_attach_write(file, ubuf, count, offset);
+
+}
+
+static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ char c[2];
+
+ if (*offset)
+ return 0;
+
+ if (!dev->archdata.mapping)
+ c[0] = '0';
+ else
+ c[0] = dev->archdata.mapping->domain ? '1' : '0';
+
+ c[1] = '\n';
+ if (copy_to_user(ubuf, &c, 2)) {
+ pr_err("copy_to_user failed\n");
+ return -EFAULT;
+ }
+ *offset = 1; /* non-zero means we're done */
+
+ return 2;
+}
+
+static const struct file_operations iommu_debug_dma_attach_fops = {
+ .open = simple_open,
+ .write = iommu_debug_dma_attach_write,
+ .read = iommu_debug_dma_attach_read,
+};
+
+static ssize_t iommu_debug_virt_addr_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, 100);
+
+ if (!virt_addr)
+ strlcpy(buf, "FAIL\n", 100);
+ else
+ snprintf(buf, 100, "0x%pK\n", virt_addr);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_virt_addr_fops = {
+ .open = simple_open,
+ .read = iommu_debug_virt_addr_read,
+};
+
static ssize_t iommu_debug_attach_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1635,6 +1771,75 @@ static const struct file_operations iommu_debug_secure_attach_fops = {
.read = iommu_debug_attach_read,
};
+static ssize_t iommu_debug_pte_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ dma_addr_t iova;
+
+ if (kstrtox_from_user(ubuf, count, 0, &iova)) {
+ pr_err("Invalid format for iova\n");
+ ddev->iova = 0;
+ return -EINVAL;
+ }
+
+ ddev->iova = iova;
+ pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+ return count;
+}
+
+
+static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ uint64_t pte;
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ return -EINVAL;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ return -EINVAL;
+ }
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, 100);
+
+ pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
+ ddev->iova);
+
+ if (!pte)
+ strlcpy(buf, "FAIL\n", 100);
+ else
+ snprintf(buf, 100, "pte=%016llx\n", pte);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_pte_fops = {
+ .open = simple_open,
+ .write = iommu_debug_pte_write,
+ .read = iommu_debug_pte_read,
+};
+
static ssize_t iommu_debug_atos_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1673,10 +1878,14 @@ static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
memset(buf, 0, 100);
phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
- if (!phys)
+ if (!phys) {
strlcpy(buf, "FAIL\n", 100);
- else
+ phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
+ dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
+ &ddev->iova, &phys);
+ } else {
snprintf(buf, 100, "%pa\n", &phys);
+ }
buflen = strlen(buf);
if (copy_to_user(ubuf, buf, buflen)) {
@@ -1696,6 +1905,55 @@ static const struct file_operations iommu_debug_atos_fops = {
.read = iommu_debug_atos_read,
};
+static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ phys_addr_t phys;
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ return -EINVAL;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ return -EINVAL;
+ }
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, 100);
+
+ phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
+ ddev->iova);
+ if (!phys)
+ strlcpy(buf, "FAIL\n", 100);
+ else
+ snprintf(buf, 100, "%pa\n", &phys);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_dma_atos_fops = {
+ .open = simple_open,
+ .write = iommu_debug_atos_write,
+ .read = iommu_debug_dma_atos_read,
+};
+
static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *offset)
{
@@ -1776,6 +2034,152 @@ static const struct file_operations iommu_debug_map_fops = {
.write = iommu_debug_map_write,
};
+static ssize_t iommu_debug_dma_map_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *offset)
+{
+ ssize_t retval = -EINVAL;
+ int ret;
+ char *comma1, *comma2;
+ char buf[100];
+ unsigned long addr;
+ void *v_addr;
+ dma_addr_t iova;
+ size_t size;
+ unsigned int attr;
+ struct dma_attrs coherent_attr;
+ struct dma_attrs *dma_attrs = &coherent_attr;
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+
+ init_dma_attrs(dma_attrs);
+
+ if (count >= 100) {
+ pr_err("Value too large\n");
+ return -EINVAL;
+ }
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ memset(buf, 0, 100);
+
+ if (copy_from_user(buf, ubuf, count)) {
+ pr_err("Couldn't copy from user\n");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ comma1 = strnchr(buf, count, ',');
+ if (!comma1)
+ goto invalid_format;
+
+ comma2 = strnchr(comma1 + 1, count, ',');
+ if (!comma2)
+ goto invalid_format;
+
+ *comma1 = *comma2 = '\0';
+
+ if (kstrtoul(buf, 0, &addr))
+ goto invalid_format;
+ v_addr = (void *)addr;
+
+ if (kstrtosize_t(comma1 + 1, 0, &size))
+ goto invalid_format;
+
+ if (kstrtouint(comma2 + 1, 0, &attr))
+ goto invalid_format;
+
+ if (v_addr < virt_addr || v_addr > (virt_addr + SZ_1M - 1))
+ goto invalid_addr;
+
+ if (attr == 0)
+ dma_attrs = NULL;
+ else if (attr == 1)
+ dma_set_attr(DMA_ATTR_FORCE_COHERENT, dma_attrs);
+ else if (attr == 2)
+ dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT, dma_attrs);
+ else
+ goto invalid_format;
+
+ iova = dma_map_single_attrs(dev, v_addr, size,
+ DMA_TO_DEVICE, dma_attrs);
+
+ if (dma_mapping_error(dev, iova)) {
+ pr_err("Failed to perform dma_map_single\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ retval = count;
+ pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
+ v_addr, &iova, size);
+ ddev->iova = iova;
+ pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+out:
+ return retval;
+
+invalid_format:
+ pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n");
+ return retval;
+
+invalid_addr:
+ pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat virt_addr'.\n");
+ return retval;
+}
+
+static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+ char buf[100];
+ ssize_t retval;
+ size_t buflen;
+ dma_addr_t iova;
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ return -EINVAL;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ return -EINVAL;
+ }
+
+ if (*offset)
+ return 0;
+
+ memset(buf, 0, 100);
+
+ iova = ddev->iova;
+ snprintf(buf, 100, "%pa\n", &iova);
+
+ buflen = strlen(buf);
+ if (copy_to_user(ubuf, buf, buflen)) {
+ pr_err("Couldn't copy_to_user\n");
+ retval = -EFAULT;
+ } else {
+ *offset = 1; /* non-zero means we're done */
+ retval = buflen;
+ }
+
+ return retval;
+}
+
+static const struct file_operations iommu_debug_dma_map_fops = {
+ .open = simple_open,
+ .write = iommu_debug_dma_map_write,
+ .read = iommu_debug_dma_map_read,
+};
+
static ssize_t iommu_debug_unmap_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1841,6 +2245,92 @@ static const struct file_operations iommu_debug_unmap_fops = {
.write = iommu_debug_unmap_write,
};
+static ssize_t iommu_debug_dma_unmap_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ ssize_t retval = 0;
+ char *comma1, *comma2;
+ char buf[100];
+ size_t size;
+ unsigned int attr;
+ dma_addr_t iova;
+ struct dma_attrs coherent_attr;
+ struct dma_attrs *dma_attrs = &coherent_attr;
+ struct iommu_debug_device *ddev = file->private_data;
+ struct device *dev = ddev->dev;
+
+ init_dma_attrs(dma_attrs);
+
+ if (count >= 100) {
+ pr_err("Value too large\n");
+ return -EINVAL;
+ }
+
+ if (!dev->archdata.mapping) {
+ pr_err("No mapping. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if (!dev->archdata.mapping->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ memset(buf, 0, 100);
+
+ if (copy_from_user(buf, ubuf, count)) {
+ pr_err("Couldn't copy from user\n");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ comma1 = strnchr(buf, count, ',');
+ if (!comma1)
+ goto invalid_format;
+
+ comma2 = strnchr(comma1 + 1, count, ',');
+ if (!comma2)
+ goto invalid_format;
+
+ *comma1 = *comma2 = '\0';
+
+ if (kstrtoux(buf, 0, &iova))
+ goto invalid_format;
+
+ if (kstrtosize_t(comma1 + 1, 0, &size))
+ goto invalid_format;
+
+ if (kstrtouint(comma2 + 1, 0, &attr))
+ goto invalid_format;
+
+ if (attr == 0)
+ dma_attrs = NULL;
+ else if (attr == 1)
+ dma_set_attr(DMA_ATTR_FORCE_COHERENT, dma_attrs);
+ else if (attr == 2)
+ dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT, dma_attrs);
+ else
+ goto invalid_format;
+
+ dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
+
+ retval = count;
+ pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+out:
+ return retval;
+
+invalid_format:
+ pr_err("Invalid format. Expected: iova,len, dma attr\n");
+ return retval;
+}
+
+static const struct file_operations iommu_debug_dma_unmap_fops = {
+ .open = simple_open,
+ .write = iommu_debug_dma_unmap_write,
+};
+
static ssize_t iommu_debug_config_clocks_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *offset)
@@ -1919,6 +2409,13 @@ static int snarf_iommu_devices(struct device *dev, const char *name)
goto err_rmdir;
}
+ if (!debugfs_create_file("virt_addr", S_IRUSR, dir, ddev,
+ &iommu_debug_virt_addr_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/virt_addr debugfs file\n",
+ name);
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
&iommu_debug_profiling_fops)) {
pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
@@ -1961,6 +2458,13 @@ static int snarf_iommu_devices(struct device *dev, const char *name)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_attach", S_IRUSR, dir, ddev,
+ &iommu_debug_dma_attach_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
+ name);
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
&iommu_debug_attach_fops)) {
pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
@@ -1982,6 +2486,13 @@ static int snarf_iommu_devices(struct device *dev, const char *name)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_atos", S_IWUSR, dir, ddev,
+ &iommu_debug_dma_atos_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
+ name);
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
&iommu_debug_map_fops)) {
pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
@@ -1989,6 +2500,13 @@ static int snarf_iommu_devices(struct device *dev, const char *name)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_map", S_IWUSR, dir, ddev,
+ &iommu_debug_dma_map_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
+ name);
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
&iommu_debug_unmap_fops)) {
pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
@@ -1996,6 +2514,20 @@ static int snarf_iommu_devices(struct device *dev, const char *name)
goto err_rmdir;
}
+ if (!debugfs_create_file("dma_unmap", S_IWUSR, dir, ddev,
+ &iommu_debug_dma_unmap_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
+ name);
+ goto err_rmdir;
+ }
+
+ if (!debugfs_create_file("pte", S_IWUSR, dir, ddev,
+ &iommu_debug_pte_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
+ name);
+ goto err_rmdir;
+ }
+
if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
&iommu_debug_config_clocks_fops)) {
pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
@@ -2050,6 +2582,11 @@ static int iommu_debug_init_tests(void)
return -ENODEV;
}
+ virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
+
+ if (!virt_addr)
+ return -ENOMEM;
+
return iommu_debug_populate_devices();
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index a77a45088b9d..b831796b5b7d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1336,6 +1336,15 @@ phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
return domain->ops->iova_to_phys_hard(domain, iova);
}
+uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ if (unlikely(domain->ops->iova_to_pte == NULL))
+ return 0;
+
+ return domain->ops->iova_to_pte(domain, iova);
+}
+
bool iommu_is_iova_coherent(struct iommu_domain *domain, dma_addr_t iova)
{
if (unlikely(domain->ops->is_iova_coherent == NULL))
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9753e5..2d203b422129 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
return -ENOMEM;
}
+ raw_spin_lock_init(&cd->rlock);
+
cd->gpc_base = of_iomap(node, 0);
if (!cd->gpc_base) {
pr_err("fsl-gpcv2: unable to map gpc registers\n");
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index aecec6d32463..7f1c625b08ec 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
return -ENODEV;
}
+ if (hostif->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
dev_info(&udev->dev,
"%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
__func__, le16_to_cpu(udev->descriptor.idVendor),
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 36ca4e4cbfb7..4c15dee0857b 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -532,16 +532,27 @@ config DM_LOG_WRITES
If unsure, say N.
-config DM_ANDROID_VERITY
- tristate "Android verity target support"
+config DM_VERITY_AVB
+ tristate "Support AVB specific verity error behavior"
depends on DM_VERITY
+ ---help---
+ Enables Android Verified Boot platform-specific error
+ behavior. In particular, it will modify the vbmeta partition
+	  specified on the kernel command line when a non-transient error
+	  occurs (followed by a panic).
+
+ If unsure, say N.
+
+config DM_ANDROID_VERITY
+ bool "Android verity target support"
+ depends on DM_VERITY=y
depends on X509_CERTIFICATE_PARSER
depends on SYSTEM_TRUSTED_KEYRING
depends on PUBLIC_KEY_ALGO_RSA
depends on KEYS
depends on ASYMMETRIC_KEY_TYPE
depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
- depends on MD_LINEAR
+ depends on MD_LINEAR=y
select DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
---help---
This device-mapper target is virtually a VERITY target. This
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 2b2ba36638cd..41ba86576d04 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -71,6 +71,6 @@ ifeq ($(CONFIG_DM_VERITY_FEC),y)
dm-verity-objs += dm-verity-fec.o
endif
-ifeq ($(CONFIG_DM_ANDROID_VERITY),y)
-dm-verity-objs += dm-android-verity.o
+ifeq ($(CONFIG_DM_VERITY_AVB),y)
+dm-verity-objs += dm-verity-avb.o
endif
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
index 13c60bee8af5..7cef735a01a7 100644
--- a/drivers/md/dm-android-verity.c
+++ b/drivers/md/dm-android-verity.c
@@ -115,6 +115,12 @@ static inline bool is_userdebug(void)
return !strncmp(buildvariant, typeuserdebug, sizeof(typeuserdebug));
}
+static inline bool is_unlocked(void)
+{
+ static const char unlocked[] = "orange";
+
+ return !strncmp(verifiedbootstate, unlocked, sizeof(unlocked));
+}
static int table_extract_mpi_array(struct public_key_signature *pks,
const void *data, size_t len)
@@ -585,6 +591,8 @@ static int verify_verity_signature(char *key_id,
if (IS_ERR(pks)) {
DMERR("hashing failed");
+ retval = PTR_ERR(pks);
+ pks = NULL;
goto error;
}
@@ -648,6 +656,28 @@ static int add_as_linear_device(struct dm_target *ti, char *dev)
return err;
}
+static int create_linear_device(struct dm_target *ti, dev_t dev,
+ char *target_device)
+{
+ u64 device_size = 0;
+ int err = find_size(dev, &device_size);
+
+ if (err) {
+ DMERR("error finding bdev size");
+ handle_error();
+ return err;
+ }
+
+ ti->len = device_size;
+ err = add_as_linear_device(ti, target_device);
+ if (err) {
+ handle_error();
+ return err;
+ }
+ verity_enabled = false;
+ return 0;
+}
+
/*
* Target parameters:
* <key id> Key id of the public key in the system keyring.
@@ -671,7 +701,6 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct fec_ecc_metadata uninitialized_var(ecc);
char buf[FEC_ARG_LENGTH], *buf_ptr;
unsigned long long tmpll;
- u64 uninitialized_var(device_size);
if (argc == 1) {
/* Use the default keyid */
@@ -699,23 +728,8 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
return -EINVAL;
}
- if (is_eng()) {
- err = find_size(dev, &device_size);
- if (err) {
- DMERR("error finding bdev size");
- handle_error();
- return err;
- }
-
- ti->len = device_size;
- err = add_as_linear_device(ti, target_device);
- if (err) {
- handle_error();
- return err;
- }
- verity_enabled = false;
- return 0;
- }
+ if (is_eng())
+ return create_linear_device(ti, dev, target_device);
strreplace(key_id, '#', ' ');
@@ -730,6 +744,11 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
err = extract_metadata(dev, &fec, &metadata, &verity_enabled);
if (err) {
+ /* Allow invalid metadata when the device is unlocked */
+ if (is_unlocked()) {
+ DMWARN("Allow invalid metadata when unlocked");
+ return create_linear_device(ti, dev, target_device);
+ }
DMERR("Error while extracting metadata");
handle_error();
goto free_metadata;
diff --git a/drivers/md/dm-verity-avb.c b/drivers/md/dm-verity-avb.c
new file mode 100644
index 000000000000..88487346c4c6
--- /dev/null
+++ b/drivers/md/dm-verity-avb.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2017 Google.
+ *
+ * This file is released under the GPLv2.
+ *
+ * Based on drivers/md/dm-verity-chromeos.c
+ */
+
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+
+#define DM_MSG_PREFIX "verity-avb"
+
+/* Set via module parameter. */
+static char avb_vbmeta_device[64];
+
+static void invalidate_vbmeta_endio(struct bio *bio)
+{
+ complete(bio->bi_private);
+}
+
+static int invalidate_vbmeta_submit(struct bio *bio,
+ struct block_device *bdev,
+ int rw, int access_last_sector,
+ struct page *page)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ bio->bi_private = &wait;
+ bio->bi_end_io = invalidate_vbmeta_endio;
+ bio->bi_bdev = bdev;
+
+ bio->bi_iter.bi_sector = 0;
+ if (access_last_sector) {
+ sector_t last_sector = (i_size_read(bdev->bd_inode)>>SECTOR_SHIFT) - 1;
+ bio->bi_iter.bi_sector = last_sector;
+ }
+ bio->bi_vcnt = 1;
+ bio->bi_iter.bi_idx = 0;
+ bio->bi_iter.bi_size = 512;
+ bio->bi_iter.bi_bvec_done = 0;
+ bio->bi_rw = rw;
+ bio->bi_io_vec[0].bv_page = page;
+ bio->bi_io_vec[0].bv_len = 512;
+ bio->bi_io_vec[0].bv_offset = 0;
+
+ submit_bio(rw, bio);
+ /* Wait up to 2 seconds for completion or fail. */
+ if (!wait_for_completion_timeout(&wait, msecs_to_jiffies(2000)))
+ return -EIO;
+ return 0;
+}
+
+static int invalidate_vbmeta(dev_t vbmeta_devt)
+{
+ int ret = 0;
+ struct block_device *bdev;
+ struct bio *bio;
+ struct page *page;
+ fmode_t dev_mode;
+	/* Ensure we do synchronous unblocked I/O. We may also need
+	 * sync_bdev() on completion, but it really shouldn't be necessary.
+	 */
+ int rw = REQ_SYNC | REQ_SOFTBARRIER | REQ_NOIDLE;
+ int access_last_sector = 0;
+
+ /* First we open the device for reading. */
+ dev_mode = FMODE_READ | FMODE_EXCL;
+ bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode,
+ invalidate_vbmeta);
+ if (IS_ERR(bdev)) {
+ DMERR("invalidate_kernel: could not open device for reading");
+ dev_mode = 0;
+ ret = -ENOENT;
+ goto failed_to_read;
+ }
+
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (!bio) {
+ ret = -ENOMEM;
+ goto failed_bio_alloc;
+ }
+
+ page = alloc_page(GFP_NOIO);
+ if (!page) {
+ ret = -ENOMEM;
+ goto failed_to_alloc_page;
+ }
+
+ access_last_sector = 0;
+ ret = invalidate_vbmeta_submit(bio, bdev, rw, access_last_sector, page);
+ if (ret) {
+ DMERR("invalidate_vbmeta: error reading");
+ goto failed_to_submit_read;
+ }
+
+ /* We have a page. Let's make sure it looks right. */
+ if (memcmp("AVB0", page_address(page), 4) == 0) {
+ /* Stamp it. */
+ memcpy(page_address(page), "AVE0", 4);
+ DMINFO("invalidate_vbmeta: found vbmeta partition");
+ } else {
+		/* This could be an AVB footer; check. Also, since the
+ * AVB footer is in the last 64 bytes, adjust for the fact that
+ * we're dealing with 512-byte sectors.
+ */
+ size_t offset = (1<<SECTOR_SHIFT) - 64;
+
+ access_last_sector = 1;
+ ret = invalidate_vbmeta_submit(bio, bdev, rw,
+ access_last_sector, page);
+ if (ret) {
+ DMERR("invalidate_vbmeta: error reading");
+ goto failed_to_submit_read;
+ }
+ if (memcmp("AVBf", page_address(page) + offset, 4) != 0) {
+ DMERR("invalidate_vbmeta called on non-vbmeta partition");
+ ret = -EINVAL;
+ goto invalid_header;
+ }
+ /* Stamp it. */
+ memcpy(page_address(page) + offset, "AVE0", 4);
+ DMINFO("invalidate_vbmeta: found vbmeta footer partition");
+ }
+
+	/* Now rewrite the changed page - the block dev was opened
+	 * read-only, so reopen it for writing.
+	 */
+ blkdev_put(bdev, dev_mode);
+ dev_mode = FMODE_WRITE | FMODE_EXCL;
+ bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode,
+ invalidate_vbmeta);
+ if (IS_ERR(bdev)) {
+ DMERR("invalidate_vbmeta: could not open device for writing");
+ dev_mode = 0;
+ ret = -ENOENT;
+ goto failed_to_write;
+ }
+
+ /* We re-use the same bio to do the write after the read. Need to reset
+ * it to initialize bio->bi_remaining.
+ */
+ bio_reset(bio);
+
+ rw |= REQ_WRITE;
+ ret = invalidate_vbmeta_submit(bio, bdev, rw, access_last_sector, page);
+ if (ret) {
+ DMERR("invalidate_vbmeta: error writing");
+ goto failed_to_submit_write;
+ }
+
+ DMERR("invalidate_vbmeta: completed.");
+ ret = 0;
+failed_to_submit_write:
+failed_to_write:
+invalid_header:
+ __free_page(page);
+failed_to_submit_read:
+ /* Technically, we'll leak a page with the pending bio, but
+ * we're about to reboot anyway.
+ */
+failed_to_alloc_page:
+ bio_put(bio);
+failed_bio_alloc:
+ if (dev_mode)
+ blkdev_put(bdev, dev_mode);
+failed_to_read:
+ return ret;
+}
+
+void dm_verity_avb_error_handler(void)
+{
+ dev_t dev;
+
+ DMINFO("AVB error handler called for %s", avb_vbmeta_device);
+
+ if (avb_vbmeta_device[0] == '\0') {
+ DMERR("avb_vbmeta_device parameter not set");
+ goto fail_no_dev;
+ }
+
+ dev = name_to_dev_t(avb_vbmeta_device);
+ if (!dev) {
+ DMERR("No matching partition for device: %s",
+ avb_vbmeta_device);
+ goto fail_no_dev;
+ }
+
+ invalidate_vbmeta(dev);
+
+fail_no_dev:
+ ;
+}
+
+static int __init dm_verity_avb_init(void)
+{
+ DMINFO("AVB error handler initialized with vbmeta device: %s",
+ avb_vbmeta_device);
+ return 0;
+}
+
+static void __exit dm_verity_avb_exit(void)
+{
+}
+
+module_init(dm_verity_avb_init);
+module_exit(dm_verity_avb_exit);
+
+MODULE_AUTHOR("David Zeuthen <zeuthen@google.com>");
+MODULE_DESCRIPTION("AVB-specific error handler for dm-verity");
+MODULE_LICENSE("GPL");
+
+/* Declare parameter with no module prefix */
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "androidboot.vbmeta."
+module_param_string(device, avb_vbmeta_device, sizeof(avb_vbmeta_device), 0);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index c7e97cf6e7fb..e34cf53bd068 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -233,8 +233,12 @@ out:
if (v->mode == DM_VERITY_MODE_LOGGING)
return 0;
- if (v->mode == DM_VERITY_MODE_RESTART)
+ if (v->mode == DM_VERITY_MODE_RESTART) {
+#ifdef CONFIG_DM_VERITY_AVB
+ dm_verity_avb_error_handler();
+#endif
kernel_restart("dm-verity device corrupted");
+ }
return 1;
}
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 75effca400a3..a90d1d416107 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -136,4 +136,5 @@ extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits);
extern void verity_dtr(struct dm_target *ti);
extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
extern int verity_map(struct dm_target *ti, struct bio *bio);
+extern void dm_verity_avb_error_handler(void);
#endif /* DM_VERITY_H */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e9b34de2319e..5d42d8f09421 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1481,26 +1481,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
struct dm_offload *o = container_of(cb, struct dm_offload, cb);
struct bio_list list;
struct bio *bio;
+ int i;
INIT_LIST_HEAD(&o->cb.list);
if (unlikely(!current->bio_list))
return;
- list = *current->bio_list;
- bio_list_init(current->bio_list);
-
- while ((bio = bio_list_pop(&list))) {
- struct bio_set *bs = bio->bi_pool;
- if (unlikely(!bs) || bs == fs_bio_set) {
- bio_list_add(current->bio_list, bio);
- continue;
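+	/* current->bio_list is an array of two lists; flush both of them. */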
+ for (i = 0; i < 2; i++) {
+ list = current->bio_list[i];
+ bio_list_init(&current->bio_list[i]);
+
+ while ((bio = bio_list_pop(&list))) {
+ struct bio_set *bs = bio->bi_pool;
+ if (unlikely(!bs) || bs == fs_bio_set) {
+ bio_list_add(&current->bio_list[i], bio);
+ continue;
+ }
+
+ spin_lock(&bs->rescue_lock);
+ bio_list_add(&bs->rescue_list, bio);
+ queue_work(bs->rescue_workqueue, &bs->rescue_work);
+ spin_unlock(&bs->rescue_lock);
}
-
- spin_lock(&bs->rescue_lock);
- bio_list_add(&bs->rescue_list, bio);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
- spin_unlock(&bs->rescue_lock);
}
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 515554c7365b..d81be5e471d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (best_dist_disk < 0) {
if (is_badblock(rdev, this_sector, sectors,
&first_bad, &bad_sectors)) {
- if (first_bad < this_sector)
+ if (first_bad <= this_sector)
/* Cannot use this */
continue;
best_good_sectors = first_bad - this_sector;
@@ -877,7 +877,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
((conf->start_next_window <
conf->next_resync + RESYNC_SECTORS) &&
current->bio_list &&
- !bio_list_empty(current->bio_list))),
+ (!bio_list_empty(&current->bio_list[0]) ||
+ !bio_list_empty(&current->bio_list[1])))),
conf->resync_lock);
conf->nr_waiting--;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ebb0dd612ebd..e5ee4e9e0ea5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -946,7 +946,8 @@ static void wait_barrier(struct r10conf *conf)
!conf->barrier ||
(conf->nr_pending &&
current->bio_list &&
- !bio_list_empty(current->bio_list)),
+ (!bio_list_empty(&current->bio_list[0]) ||
+ !bio_list_empty(&current->bio_list[1]))),
conf->resync_lock);
conf->nr_waiting--;
}
@@ -1072,6 +1073,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
int max_sectors;
int sectors;
+ md_write_start(mddev, bio);
+
/*
* Register the new request and wait if the reconstruction
* thread has put up a bar for new requests.
@@ -1455,8 +1458,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
return;
}
- md_write_start(mddev, bio);
-
do {
/*
@@ -1477,7 +1478,25 @@ static void make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}
+		/*
+		 * If a bio is split, the first part of the bio passes the
+		 * barrier but the bio is queued in current->bio_list (see
+		 * generic_make_request). If raise_barrier() is called here,
+		 * the second part of the bio can't pass the barrier. But since
+		 * the first part of the bio hasn't been dispatched to the
+		 * underlying disks yet, the barrier is never released, so
+		 * raise_barrier() will always wait: we have a deadlock.
+		 * Note, this only happens in the read path. For the write
+		 * path, the first part of the bio is dispatched in a
+		 * schedule() call (because of blk plug) or offloaded to
+		 * raid10d. Returning from the function immediately changes
+		 * the order the bios are queued in bio_list and avoids the
+		 * deadlock.
+		 */
__make_request(mddev, split);
+ if (split != bio && bio_data_dir(bio) == READ) {
+ generic_make_request(bio);
+ break;
+ }
} while (split != bio);
/* In case raid10d snuck in to freeze_array */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 2a9bb6e8e505..c8f5c051424e 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -2068,7 +2068,7 @@ static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
spin_lock_irqsave(&tasklet->tasklet_lock, flags);
queue_cmd = &tasklet->tasklet_queue_cmd[tasklet->taskletq_idx];
if (queue_cmd->cmd_used) {
- pr_err("%s: Tasklet queue overflow: %d\n",
+ pr_err_ratelimited("%s: Tasklet queue overflow: %d\n",
__func__, vfe_dev->pdev->id);
spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
return;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index caf6639f5151..24e3223a79d0 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -67,6 +67,10 @@ static int pix_overflow_error_count[VFE_MAX] = { 0 };
#define CDBG(fmt, args...)
#endif
+/* Backward interface compatibility for 3D THRESHOLD calculation */
+#define ISPIF_USE_DEFAULT_THRESHOLD (0)
+#define ISPIF_CALCULATE_THRESHOLD (1)
+
static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable);
static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
static long msm_ispif_subdev_ioctl_unlocked(struct v4l2_subdev *sd,
@@ -452,7 +456,7 @@ static int msm_ispif_reset_hw(struct ispif_device *ispif)
/* This is set when device is 8974 */
ispif->clk_idx = 1;
}
-
+ memset(ispif->stereo_configured, 0, sizeof(ispif->stereo_configured));
atomic_set(&ispif->reset_trig[VFE0], 1);
/* initiate reset of ISPIF */
msm_camera_io_w(ISPIF_RST_CMD_MASK,
@@ -1009,21 +1013,29 @@ static int msm_ispif_config(struct ispif_device *ispif,
}
static void msm_ispif_config_stereo(struct ispif_device *ispif,
- struct msm_ispif_param_data_ext *params) {
+ struct msm_ispif_param_data_ext *params, int use_line_width) {
int i;
enum msm_ispif_vfe_intf vfe_intf;
+ uint32_t stereo_3d_threshold = STEREO_DEFAULT_3D_THRESHOLD;
for (i = 0; i < params->num; i++) {
+ vfe_intf = params->entries[i].vfe_intf;
if (params->entries[i].intftype == PIX0 &&
- params->stereo_enable &&
- params->right_entries[i].csid < CSID_MAX) {
- vfe_intf = params->entries[i].vfe_intf;
+ params->stereo_enable &&
+ params->right_entries[i].csid < CSID_MAX &&
+ !ispif->stereo_configured[vfe_intf]) {
msm_camera_io_w_mb(0x3,
ispif->base + ISPIF_VFE_m_OUTPUT_SEL(vfe_intf));
- msm_camera_io_w_mb(STEREO_DEFAULT_3D_THRESHOLD,
+ if (use_line_width &&
+ (params->line_width[vfe_intf] > 0))
+ stereo_3d_threshold =
+ (params->line_width[vfe_intf] +
+ 2 * 6 - 1) / (2 * 6);
+ msm_camera_io_w_mb(stereo_3d_threshold,
ispif->base +
ISPIF_VFE_m_3D_THRESHOLD(vfe_intf));
+ ispif->stereo_configured[vfe_intf] = 1;
}
}
}
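With ISPIF_CALCULATE_THRESHOLD, a non-zero per-interface line width replaces the default threshold with the line width divided by 12 (the 2 * 6 in the hunk above), rounded up through the usual (n + d - 1) / d idiom. A standalone sketch of that arithmetic; the helper name is illustrative:

	#include <stdint.h>

	/* Stereo 3D threshold: ceil(line_width / 12), matching (w + 2*6 - 1) / (2*6). */
	static uint32_t stereo_threshold(uint32_t line_width)
	{
		const uint32_t div = 2 * 6;

		return (line_width + div - 1) / div;
	}

	/* e.g. stereo_threshold(4208) == 351, since 350 * 12 = 4200 < 4208 <= 351 * 12 */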
@@ -1132,6 +1144,8 @@ static int msm_ispif_stop_immediately(struct ispif_device *ispif,
msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
cid_mask, params->entries[i].vfe_intf, 0);
if (params->stereo_enable) {
+ ispif->stereo_configured[
+ params->entries[i].vfe_intf] = 0;
cid_mask = msm_ispif_get_right_cids_mask_from_cfg(
&params->right_entries[i],
params->entries[i].num_cids);
@@ -1162,7 +1176,8 @@ static int msm_ispif_start_frame_boundary(struct ispif_device *ispif,
rc = -EINVAL;
return rc;
}
- msm_ispif_config_stereo(ispif, params);
+
+ msm_ispif_config_stereo(ispif, params, ISPIF_USE_DEFAULT_THRESHOLD);
msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
return rc;
@@ -1392,6 +1407,8 @@ static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif,
if (rc < 0)
goto end;
if (cid_right_mask) {
+ ispif->stereo_configured[
+ params->entries[i].vfe_intf] = 0;
intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
rc = readl_poll_timeout(ispif->base + intf_addr,
stop_flag,
@@ -1807,6 +1824,10 @@ static long msm_ispif_dispatch_cmd(enum ispif_cfg_type_t cmd,
rc = msm_ispif_config2(ispif, params);
msm_ispif_io_dump_reg(ispif);
break;
+ case ISPIF_CFG_STEREO:
+ msm_ispif_config_stereo(ispif, params,
+ ISPIF_CALCULATE_THRESHOLD);
+ break;
default:
pr_err("%s: invalid cfg_type\n", __func__);
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index 61e8f1dd7aff..3e6680c63ee5 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -77,5 +77,6 @@ struct ispif_device {
int ispif_vdd_count;
struct regulator *vfe_vdd[ISPIF_VFE_VDD_INFO_MAX];
int vfe_vdd_count;
+ int stereo_configured[VFE_MAX];
};
#endif
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index c2b42a854d35..f95cc37f5c2c 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -707,6 +707,9 @@ static long msm_private_ioctl(struct file *file, void *fh,
return 0;
}
+ if (!event_data)
+ return -EINVAL;
+
memset(&event, 0, sizeof(struct v4l2_event));
session_id = event_data->session_id;
stream_id = event_data->stream_id;
@@ -1012,11 +1015,9 @@ static int msm_open(struct file *filep)
BUG_ON(!pvdev);
/* !!! only ONE open is allowed !!! */
- if (atomic_read(&pvdev->opened))
+ if (atomic_cmpxchg(&pvdev->opened, 0, 1))
return -EBUSY;
- atomic_set(&pvdev->opened, 1);
-
spin_lock_irqsave(&msm_pid_lock, flags);
msm_pid = get_pid(task_pid(current));
spin_unlock_irqrestore(&msm_pid_lock, flags);
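Replacing the separate atomic_read()/atomic_set() pair with atomic_cmpxchg() makes claiming the single open slot one indivisible step, so two racing openers can no longer both observe opened == 0 and both proceed. A minimal user-space sketch of the same pattern with C11 atomics; names are illustrative:

	#include <errno.h>
	#include <stdatomic.h>

	static atomic_int opened;

	/* Only the caller that flips 0 -> 1 wins; every other caller gets -EBUSY. */
	static int try_open_once(void)
	{
		int expected = 0;

		if (!atomic_compare_exchange_strong(&opened, &expected, 1))
			return -EBUSY;
		return 0;
	}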
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 8402e31364b9..b7feb126f707 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -2542,9 +2542,29 @@ static int msm_cpp_cfg_frame(struct cpp_device *cpp_dev,
return -EINVAL;
}
- if (stripe_base == UINT_MAX || new_frame->num_strips >
- (UINT_MAX - 1 - stripe_base) / stripe_size) {
- pr_err("Invalid frame message,num_strips %d is large\n",
+ /* Stripe index starts at zero */
+ if ((!new_frame->num_strips) ||
+ (new_frame->first_stripe_index >= new_frame->num_strips) ||
+ (new_frame->last_stripe_index >= new_frame->num_strips) ||
+ (new_frame->first_stripe_index >
+ new_frame->last_stripe_index)) {
+ pr_err("Invalid frame message, #stripes=%d, stripe indices=[%d,%d]\n",
+ new_frame->num_strips,
+ new_frame->first_stripe_index,
+ new_frame->last_stripe_index);
+ return -EINVAL;
+ }
+
+ if (!stripe_size) {
+ pr_err("Invalid frame message, invalid stripe_size (%d)!\n",
+ stripe_size);
+ return -EINVAL;
+ }
+
+ if ((stripe_base == UINT_MAX) ||
+ (new_frame->num_strips >
+ (UINT_MAX - 1 - stripe_base) / stripe_size)) {
+ pr_err("Invalid frame message, num_strips %d is large\n",
new_frame->num_strips);
return -EINVAL;
}
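The reworked validation first pins the stripe indices inside [0, num_strips), then rejects a zero stripe_size, and only then applies the division-based bound, which guarantees that stripe_base plus num_strips * stripe_size stays below UINT_MAX without performing the multiplication that could itself overflow. A self-contained sketch of that guard, with illustrative names:

	#include <stdbool.h>
	#include <stdint.h>

	/* True when stripe_base + num_strips * stripe_size fits in 32 bits. */
	static bool stripe_count_fits(uint32_t stripe_base, uint32_t stripe_size,
				      uint32_t num_strips)
	{
		if (stripe_base == UINT32_MAX || stripe_size == 0)
			return false;
		return num_strips <= (UINT32_MAX - 1 - stripe_base) / stripe_size;
	}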
@@ -2785,13 +2805,14 @@ static int msm_cpp_cfg(struct cpp_device *cpp_dev,
struct msm_cpp_frame_info_t *frame = NULL;
struct msm_cpp_frame_info_t k_frame_info;
int32_t rc = 0;
- int32_t i = 0;
- int32_t num_buff = sizeof(k_frame_info.output_buffer_info)/
+ uint32_t i = 0;
+ uint32_t num_buff = sizeof(k_frame_info.output_buffer_info) /
sizeof(struct msm_cpp_buffer_info_t);
+
if (copy_from_user(&k_frame_info,
(void __user *)ioctl_ptr->ioctl_ptr,
sizeof(k_frame_info)))
- return -EFAULT;
+ return -EFAULT;
frame = msm_cpp_get_frame(ioctl_ptr);
if (!frame) {
@@ -2953,8 +2974,9 @@ static int msm_cpp_validate_input(unsigned int cmd, void *arg,
}
*ioctl_ptr = arg;
- if ((*ioctl_ptr == NULL) ||
- ((*ioctl_ptr)->ioctl_ptr == NULL)) {
+ if (((*ioctl_ptr) == NULL) ||
+ ((*ioctl_ptr)->ioctl_ptr == NULL) ||
+ ((*ioctl_ptr)->len == 0)) {
pr_err("Error invalid ioctl argument cmd %u", cmd);
return -EINVAL;
}
@@ -3503,13 +3525,18 @@ STREAM_BUFF_END:
if (cpp_dev->iommu_state == CPP_IOMMU_STATE_DETACHED) {
struct msm_camera_smmu_attach_type cpp_attach_info;
+ if (ioctl_ptr->len !=
+ sizeof(struct msm_camera_smmu_attach_type)) {
+ rc = -EINVAL;
+ break;
+ }
+
memset(&cpp_attach_info, 0, sizeof(cpp_attach_info));
rc = msm_cpp_copy_from_ioctl_ptr(&cpp_attach_info,
ioctl_ptr);
if (rc < 0) {
pr_err("CPP_IOMMU_ATTACH copy from user fail");
- ERR_COPY_FROM_USER();
- return -EINVAL;
+ break;
}
cpp_dev->security_mode = cpp_attach_info.attach;
@@ -3538,16 +3565,20 @@ STREAM_BUFF_END:
case VIDIOC_MSM_CPP_IOMMU_DETACH: {
if ((cpp_dev->iommu_state == CPP_IOMMU_STATE_ATTACHED) &&
(cpp_dev->stream_cnt == 0)) {
-
struct msm_camera_smmu_attach_type cpp_attach_info;
+ if (ioctl_ptr->len !=
+ sizeof(struct msm_camera_smmu_attach_type)) {
+ rc = -EINVAL;
+ break;
+ }
+
memset(&cpp_attach_info, 0, sizeof(cpp_attach_info));
rc = msm_cpp_copy_from_ioctl_ptr(&cpp_attach_info,
ioctl_ptr);
if (rc < 0) {
pr_err("CPP_IOMMU_DETTACH copy from user fail");
- ERR_COPY_FROM_USER();
- return -EINVAL;
+ break;
}
cpp_dev->security_mode = cpp_attach_info.attach;
@@ -3568,6 +3599,7 @@ STREAM_BUFF_END:
} else {
pr_err("%s:%d IOMMMU attach triggered in invalid state\n",
__func__, __LINE__);
+ rc = -EINVAL;
}
break;
}
@@ -3883,6 +3915,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
struct msm_cpp_stream_buff_info_t k_cpp_buff_info;
struct msm_cpp_frame_info32_t k32_frame_info;
struct msm_cpp_frame_info_t k64_frame_info;
+ struct msm_camera_smmu_attach_type kb_cpp_smmu_attach_info;
uint32_t identity_k = 0;
bool is_copytouser_req = true;
void __user *up = (void __user *)arg;
@@ -4187,11 +4220,23 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
break;
}
case VIDIOC_MSM_CPP_IOMMU_ATTACH32:
- cmd = VIDIOC_MSM_CPP_IOMMU_ATTACH;
- break;
case VIDIOC_MSM_CPP_IOMMU_DETACH32:
- cmd = VIDIOC_MSM_CPP_IOMMU_DETACH;
+ {
+ if ((kp_ioctl.len != sizeof(struct msm_camera_smmu_attach_type))
+ || (copy_from_user(&kb_cpp_smmu_attach_info,
+ (void __user *)kp_ioctl.ioctl_ptr,
+ sizeof(kb_cpp_smmu_attach_info)))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ kp_ioctl.ioctl_ptr = (void *)&kb_cpp_smmu_attach_info;
+ is_copytouser_req = false;
+ cmd = (cmd == VIDIOC_MSM_CPP_IOMMU_ATTACH32) ?
+ VIDIOC_MSM_CPP_IOMMU_ATTACH :
+ VIDIOC_MSM_CPP_IOMMU_DETACH;
break;
+ }
case MSM_SD_NOTIFY_FREEZE:
break;
case MSM_SD_UNNOTIFY_FREEZE:
@@ -4202,7 +4247,8 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
default:
pr_err_ratelimited("%s: unsupported compat type :%x LOAD %lu\n",
__func__, cmd, VIDIOC_MSM_CPP_LOAD_FIRMWARE);
- break;
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
}
mutex_unlock(&cpp_dev->mutex);
@@ -4233,7 +4279,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
default:
pr_err_ratelimited("%s: unsupported compat type :%d\n",
__func__, cmd);
- break;
+ return -EINVAL;
}
if (is_copytouser_req) {
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
index 3b36b6bc76de..5eaa2910228e 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -507,10 +507,15 @@ struct sde_mdp_format_params *sde_get_format_params(u32 format)
if (!fmt_found) {
for (i = 0; i < ARRAY_SIZE(sde_mdp_format_ubwc_map); i++) {
fmt = &sde_mdp_format_ubwc_map[i].mdp_format;
- if (format == fmt->format)
+ if (format == fmt->format) {
+ fmt_found = true;
break;
+ }
}
}
+ /* If the format is not supported then return NULL */
+ if (!fmt_found)
+ fmt = NULL;
return fmt;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
index fef4a8585eaa..74f362839ccc 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -133,7 +133,7 @@ struct sde_mdp_mixer *sde_mdp_mixer_get(struct sde_mdp_ctl *ctl, int mux)
int sde_mdp_get_pipe_flush_bits(struct sde_mdp_pipe *pipe)
{
- u32 flush_bits;
+ u32 flush_bits = 0;
if (pipe->type == SDE_MDP_PIPE_TYPE_DMA)
flush_bits |= BIT(pipe->num) << 5;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index f5df9eaba04f..9757f35cd5f5 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -1010,8 +1010,8 @@ EXPORT_SYMBOL(dvb_usbv2_probe);
void dvb_usbv2_disconnect(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
- const char *name = d->name;
- struct device dev = d->udev->dev;
+ const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);
+ const char *drvname = d->name;
dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
@@ -1021,8 +1021,9 @@ void dvb_usbv2_disconnect(struct usb_interface *intf)
dvb_usbv2_exit(d);
- dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n",
- KBUILD_MODNAME, name);
+ pr_info("%s: '%s:%s' successfully deinitialized and disconnected\n",
+ KBUILD_MODNAME, drvname, devname);
+ kfree(devname);
}
EXPORT_SYMBOL(dvb_usbv2_disconnect);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index 733a7ff7b207..caad3b5c01ad 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -35,42 +35,51 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
- struct hexline hx;
- u8 reset;
- int ret,pos=0;
+ struct hexline *hx;
+ u8 *buf;
+ int ret, pos = 0;
+ u16 cpu_cs_register = cypress[type].cpu_cs_register;
+
+ buf = kmalloc(sizeof(*hx), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ hx = (struct hexline *)buf;
/* stop the CPU */
- reset = 1;
- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+ buf[0] = 1;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
err("could not stop the USB controller CPU.");
- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
+ while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
+ ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
- if (ret != hx.len) {
+ if (ret != hx->len) {
err("error while transferring firmware "
"(transferred size: %d, block size: %d)",
- ret,hx.len);
+ ret, hx->len);
ret = -EINVAL;
break;
}
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
+ kfree(buf);
return ret;
}
if (ret == 0) {
/* restart the CPU */
- reset = 0;
- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
+ buf[0] = 0;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
} else
ret = -EIO;
+ kfree(buf);
+
return ret;
}
EXPORT_SYMBOL(usb_cypress_load_firmware);
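The rewrite stages both the CPU reset byte and each firmware hexline through a single kmalloc()'d buffer rather than on-stack variables, since buffers handed to USB transfer helpers generally need to be heap memory (stack buffers are not reliably DMA-capable), and the buffer is freed on every exit path. A minimal kernel-context sketch of the staging pattern, reusing this file's usb_cypress_writemem() helper:

	/* Write one control byte through heap memory, as USB/DMA rules require. */
	static int write_ctrl_byte(struct usb_device *udev, u16 reg, u8 value)
	{
		u8 *buf = kmalloc(1, GFP_KERNEL);
		int ret;

		if (!buf)
			return -ENOMEM;
		buf[0] = value;
		ret = usb_cypress_writemem(udev, reg, buf, 1);
		kfree(buf);
		return ret == 1 ? 0 : -EIO;
	}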
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 5cefca95734e..885f689ac870 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1595,6 +1595,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain)
return buffer;
}
+static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (chain == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&chain->entities);
+ mutex_init(&chain->ctrl_mutex);
+ chain->dev = dev;
+ v4l2_prio_init(&chain->prio);
+
+ return chain;
+}
+
+/*
+ * Fallback heuristic for devices that don't connect units and terminals in a
+ * valid chain.
+ *
+ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
+ * to fail, but if we just take the entities we can find and put them together
+ * in the most sensible chain we can think of, turns out they do work anyway.
+ * Note: This heuristic assumes there is a single chain.
+ *
+ * At the time of writing, devices known to have such a broken chain are
+ * - Acer Integrated Camera (5986:055a)
+ * - Realtek rtl157a7 (0bda:57a7)
+ */
+static int uvc_scan_fallback(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+ struct uvc_entity *iterm = NULL;
+ struct uvc_entity *oterm = NULL;
+ struct uvc_entity *entity;
+ struct uvc_entity *prev;
+
+ /*
+ * Start by locating the input and output terminals. We only support
+ * devices with exactly one of each for now.
+ */
+ list_for_each_entry(entity, &dev->entities, list) {
+ if (UVC_ENTITY_IS_ITERM(entity)) {
+ if (iterm)
+ return -EINVAL;
+ iterm = entity;
+ }
+
+ if (UVC_ENTITY_IS_OTERM(entity)) {
+ if (oterm)
+ return -EINVAL;
+ oterm = entity;
+ }
+ }
+
+ if (iterm == NULL || oterm == NULL)
+ return -EINVAL;
+
+ /* Allocate the chain and fill it. */
+ chain = uvc_alloc_chain(dev);
+ if (chain == NULL)
+ return -ENOMEM;
+
+ if (uvc_scan_chain_entity(chain, oterm) < 0)
+ goto error;
+
+ prev = oterm;
+
+ /*
+ * Add all Processing and Extension Units with two pads. The order
+ * doesn't matter much, use reverse list traversal to connect units in
+ * UVC descriptor order as we build the chain from output to input. This
+ * leads to units appearing in the order meant by the manufacturer for
+ * the cameras known to require this heuristic.
+ */
+ list_for_each_entry_reverse(entity, &dev->entities, list) {
+ if (entity->type != UVC_VC_PROCESSING_UNIT &&
+ entity->type != UVC_VC_EXTENSION_UNIT)
+ continue;
+
+ if (entity->num_pads != 2)
+ continue;
+
+ if (uvc_scan_chain_entity(chain, entity) < 0)
+ goto error;
+
+ prev->baSourceID[0] = entity->id;
+ prev = entity;
+ }
+
+ if (uvc_scan_chain_entity(chain, iterm) < 0)
+ goto error;
+
+ prev->baSourceID[0] = iterm->id;
+
+ list_add_tail(&chain->list, &dev->chains);
+
+ uvc_trace(UVC_TRACE_PROBE,
+ "Found a video chain by fallback heuristic (%s).\n",
+ uvc_print_chain(chain));
+
+ return 0;
+
+error:
+ kfree(chain);
+ return -EINVAL;
+}
+
/*
* Scan the device for video chains and register video devices.
*
@@ -1617,15 +1725,10 @@ static int uvc_scan_device(struct uvc_device *dev)
if (term->chain.next || term->chain.prev)
continue;
- chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ chain = uvc_alloc_chain(dev);
if (chain == NULL)
return -ENOMEM;
- INIT_LIST_HEAD(&chain->entities);
- mutex_init(&chain->ctrl_mutex);
- chain->dev = dev;
- v4l2_prio_init(&chain->prio);
-
term->flags |= UVC_ENTITY_FLAG_DEFAULT;
if (uvc_scan_chain(chain, term) < 0) {
@@ -1639,6 +1742,9 @@ static int uvc_scan_device(struct uvc_device *dev)
list_add_tail(&chain->list, &dev->chains);
}
+ if (list_empty(&dev->chains))
+ uvc_scan_fallback(dev);
+
if (list_empty(&dev->chains)) {
uvc_printk(KERN_INFO, "No valid video chain found.\n");
return -1;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 12ffa4192605..2a4abf736d89 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -549,11 +549,13 @@ config VEXPRESS_SYSCFG
bus. System Configuration interface is one of the possible means
of generating transactions on this bus.
-config UID_CPUTIME
- bool "Per-UID cpu time statistics"
+config UID_SYS_STATS
+ bool "Per-UID statistics"
depends on PROFILING
help
Per UID based cpu time statistics exported to /proc/uid_cputime
+ Per UID based io statistics exported to /proc/uid_io
+ Per UID based procstat control in /proc/uid_procstat
config QPNP_MISC
tristate "QPNP Misc Peripheral"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c2941afc961e..b0718228d2d9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -62,7 +62,7 @@ endif
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
-obj-$(CONFIG_UID_CPUTIME) += uid_cputime.o
+obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o
obj-y += qcom/
obj-$(CONFIG_QPNP_MISC) += qpnp-misc.o
obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o
diff --git a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
index 84761747f129..e602650c4cb5 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
@@ -17,7 +17,6 @@
#include "q6audio_common.h"
#include "audio_utils_aio.h"
#include <sound/msm-audio-effects-q6-v2.h>
-#include <sound/msm-dts-eagle.h>
#define MAX_CHANNELS_SUPPORTED 8
#define WAIT_TIMEDOUT_DURATION_SECS 1
@@ -53,31 +52,11 @@ static void audio_effects_init_pp(struct audio_client *ac)
pr_err("%s: audio client null to init pp\n", __func__);
return;
}
- switch (ac->topology) {
- case ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER:
-
- ret = q6asm_set_softvolume_v2(ac, &softvol,
- SOFT_VOLUME_INSTANCE_1);
- if (ret < 0)
- pr_err("%s: Send SoftVolume1 Param failed ret=%d\n",
- __func__, ret);
- ret = q6asm_set_softvolume_v2(ac, &softvol,
- SOFT_VOLUME_INSTANCE_2);
- if (ret < 0)
- pr_err("%s: Send SoftVolume2 Param failed ret=%d\n",
- __func__, ret);
-
- msm_dts_eagle_init_master_module(ac);
-
- break;
- default:
- ret = q6asm_set_softvolume_v2(ac, &softvol,
- SOFT_VOLUME_INSTANCE_1);
- if (ret < 0)
- pr_err("%s: Send SoftVolume Param failed ret=%d\n",
- __func__, ret);
- break;
- }
+ ret = q6asm_set_softvolume_v2(ac, &softvol,
+ SOFT_VOLUME_INSTANCE_1);
+ if (ret < 0)
+ pr_err("%s: Send SoftVolume Param failed ret=%d\n",
+ __func__, ret);
}
static void audio_effects_deinit_pp(struct audio_client *ac)
@@ -86,13 +65,6 @@ static void audio_effects_deinit_pp(struct audio_client *ac)
pr_err("%s: audio client null to deinit pp\n", __func__);
return;
}
- switch (ac->topology) {
- case ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER:
- msm_dts_eagle_deinit_master_module(ac);
- break;
- default:
- break;
- }
}
static void audio_effects_event_handler(uint32_t opcode, uint32_t token,
@@ -428,33 +400,6 @@ static long audio_effects_set_pp_param(struct q6audio_effects *effects,
&(effects->audio_effects.topo_switch_vol),
(long *)&values[1], SOFT_VOLUME_INSTANCE_2);
break;
- case DTS_EAGLE_MODULE_ENABLE:
- pr_debug("%s: DTS_EAGLE_MODULE_ENABLE\n", __func__);
- if (msm_audio_effects_is_effmodule_supp_in_top(
- effects_module, effects->ac->topology)) {
- /*
- * HPX->OFF: first disable HPX and then
- * enable SA+
- * HPX->ON: first disable SA+ and then
- * enable HPX
- */
- bool hpx_state = (bool)values[1];
- if (hpx_state)
- msm_audio_effects_enable_extn(effects->ac,
- &(effects->audio_effects),
- false);
- msm_dts_eagle_enable_asm(effects->ac,
- hpx_state,
- AUDPROC_MODULE_ID_DTS_HPX_PREMIX);
- msm_dts_eagle_enable_asm(effects->ac,
- hpx_state,
- AUDPROC_MODULE_ID_DTS_HPX_POSTMIX);
- if (!hpx_state)
- msm_audio_effects_enable_extn(effects->ac,
- &(effects->audio_effects),
- true);
- }
- break;
default:
pr_err("%s: Invalid effects config module\n", __func__);
rc = -EINVAL;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index b7af80854420..e3f23caac5b8 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -851,6 +851,7 @@ static long audio_aio_process_event_req_compat(struct q6audio_aio *audio,
long rc;
struct msm_audio_event32 usr_evt_32;
struct msm_audio_event usr_evt;
+ memset(&usr_evt, 0, sizeof(struct msm_audio_event));
if (copy_from_user(&usr_evt_32, arg,
sizeof(struct msm_audio_event32))) {
@@ -860,6 +861,11 @@ static long audio_aio_process_event_req_compat(struct q6audio_aio *audio,
usr_evt.timeout_ms = usr_evt_32.timeout_ms;
rc = audio_aio_process_event_req_common(audio, &usr_evt);
+ if (rc < 0) {
+ pr_err("%s: audio process event failed, rc = %ld",
+ __func__, rc);
+ return rc;
+ }
usr_evt_32.event_type = usr_evt.event_type;
switch (usr_evt_32.event_type) {
diff --git a/drivers/misc/uid_cputime.c b/drivers/misc/uid_cputime.c
deleted file mode 100644
index c1ad5246f564..000000000000
--- a/drivers/misc/uid_cputime.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/* drivers/misc/uid_cputime.c
- *
- * Copyright (C) 2014 - 2015 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/atomic.h>
-#include <linux/err.h>
-#include <linux/hashtable.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/proc_fs.h>
-#include <linux/profile.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#define UID_HASH_BITS 10
-DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
-
-static DEFINE_MUTEX(uid_lock);
-static struct proc_dir_entry *parent;
-
-struct uid_entry {
- uid_t uid;
- cputime_t utime;
- cputime_t stime;
- cputime_t active_utime;
- cputime_t active_stime;
- struct hlist_node hash;
-};
-
-static struct uid_entry *find_uid_entry(uid_t uid)
-{
- struct uid_entry *uid_entry;
- hash_for_each_possible(hash_table, uid_entry, hash, uid) {
- if (uid_entry->uid == uid)
- return uid_entry;
- }
- return NULL;
-}
-
-static struct uid_entry *find_or_register_uid(uid_t uid)
-{
- struct uid_entry *uid_entry;
-
- uid_entry = find_uid_entry(uid);
- if (uid_entry)
- return uid_entry;
-
- uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
- if (!uid_entry)
- return NULL;
-
- uid_entry->uid = uid;
-
- hash_add(hash_table, &uid_entry->hash, uid);
-
- return uid_entry;
-}
-
-static int uid_stat_show(struct seq_file *m, void *v)
-{
- struct uid_entry *uid_entry;
- struct task_struct *task, *temp;
- cputime_t utime;
- cputime_t stime;
- unsigned long bkt;
-
- mutex_lock(&uid_lock);
-
- hash_for_each(hash_table, bkt, uid_entry, hash) {
- uid_entry->active_stime = 0;
- uid_entry->active_utime = 0;
- }
-
- read_lock(&tasklist_lock);
- do_each_thread(temp, task) {
- uid_entry = find_or_register_uid(from_kuid_munged(
- current_user_ns(), task_uid(task)));
- if (!uid_entry) {
- read_unlock(&tasklist_lock);
- mutex_unlock(&uid_lock);
- pr_err("%s: failed to find the uid_entry for uid %d\n",
- __func__, from_kuid_munged(current_user_ns(),
- task_uid(task)));
- return -ENOMEM;
- }
- task_cputime_adjusted(task, &utime, &stime);
- uid_entry->active_utime += utime;
- uid_entry->active_stime += stime;
- } while_each_thread(temp, task);
- read_unlock(&tasklist_lock);
-
- hash_for_each(hash_table, bkt, uid_entry, hash) {
- cputime_t total_utime = uid_entry->utime +
- uid_entry->active_utime;
- cputime_t total_stime = uid_entry->stime +
- uid_entry->active_stime;
- seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
- (unsigned long long)jiffies_to_msecs(
- cputime_to_jiffies(total_utime)) * USEC_PER_MSEC,
- (unsigned long long)jiffies_to_msecs(
- cputime_to_jiffies(total_stime)) * USEC_PER_MSEC);
- }
-
- mutex_unlock(&uid_lock);
- return 0;
-}
-
-static int uid_stat_open(struct inode *inode, struct file *file)
-{
- return single_open(file, uid_stat_show, PDE_DATA(inode));
-}
-
-static const struct file_operations uid_stat_fops = {
- .open = uid_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int uid_remove_open(struct inode *inode, struct file *file)
-{
- return single_open(file, NULL, NULL);
-}
-
-static ssize_t uid_remove_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos)
-{
- struct uid_entry *uid_entry;
- struct hlist_node *tmp;
- char uids[128];
- char *start_uid, *end_uid = NULL;
- long int uid_start = 0, uid_end = 0;
-
- if (count >= sizeof(uids))
- count = sizeof(uids) - 1;
-
- if (copy_from_user(uids, buffer, count))
- return -EFAULT;
-
- uids[count] = '\0';
- end_uid = uids;
- start_uid = strsep(&end_uid, "-");
-
- if (!start_uid || !end_uid)
- return -EINVAL;
-
- if (kstrtol(start_uid, 10, &uid_start) != 0 ||
- kstrtol(end_uid, 10, &uid_end) != 0) {
- return -EINVAL;
- }
- mutex_lock(&uid_lock);
-
- for (; uid_start <= uid_end; uid_start++) {
- hash_for_each_possible_safe(hash_table, uid_entry, tmp,
- hash, (uid_t)uid_start) {
- if (uid_start == uid_entry->uid) {
- hash_del(&uid_entry->hash);
- kfree(uid_entry);
- }
- }
- }
-
- mutex_unlock(&uid_lock);
- return count;
-}
-
-static const struct file_operations uid_remove_fops = {
- .open = uid_remove_open,
- .release = single_release,
- .write = uid_remove_write,
-};
-
-static int process_notifier(struct notifier_block *self,
- unsigned long cmd, void *v)
-{
- struct task_struct *task = v;
- struct uid_entry *uid_entry;
- cputime_t utime, stime;
- uid_t uid;
-
- if (!task)
- return NOTIFY_OK;
-
- mutex_lock(&uid_lock);
- uid = from_kuid_munged(current_user_ns(), task_uid(task));
- uid_entry = find_or_register_uid(uid);
- if (!uid_entry) {
- pr_err("%s: failed to find uid %d\n", __func__, uid);
- goto exit;
- }
-
- task_cputime_adjusted(task, &utime, &stime);
- uid_entry->utime += utime;
- uid_entry->stime += stime;
-
-exit:
- mutex_unlock(&uid_lock);
- return NOTIFY_OK;
-}
-
-static struct notifier_block process_notifier_block = {
- .notifier_call = process_notifier,
-};
-
-static int __init proc_uid_cputime_init(void)
-{
- hash_init(hash_table);
-
- parent = proc_mkdir("uid_cputime", NULL);
- if (!parent) {
- pr_err("%s: failed to create proc entry\n", __func__);
- return -ENOMEM;
- }
-
- proc_create_data("remove_uid_range", S_IWUGO, parent, &uid_remove_fops,
- NULL);
-
- proc_create_data("show_uid_stat", S_IRUGO, parent, &uid_stat_fops,
- NULL);
-
- profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
-
- return 0;
-}
-
-early_initcall(proc_uid_cputime_init);
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
new file mode 100644
index 000000000000..ad21276c8d9e
--- /dev/null
+++ b/drivers/misc/uid_sys_stats.c
@@ -0,0 +1,505 @@
+/* drivers/misc/uid_sys_stats.c
+ *
+ * Copyright (C) 2014 - 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#define UID_HASH_BITS 10
+DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
+
+static DEFINE_RT_MUTEX(uid_lock);
+static struct proc_dir_entry *cpu_parent;
+static struct proc_dir_entry *io_parent;
+static struct proc_dir_entry *proc_parent;
+
+struct io_stats {
+ u64 read_bytes;
+ u64 write_bytes;
+ u64 rchar;
+ u64 wchar;
+ u64 fsync;
+};
+
+#define UID_STATE_FOREGROUND 0
+#define UID_STATE_BACKGROUND 1
+#define UID_STATE_BUCKET_SIZE 2
+
+#define UID_STATE_TOTAL_CURR 2
+#define UID_STATE_TOTAL_LAST 3
+#define UID_STATE_SIZE 4
+
+struct uid_entry {
+ uid_t uid;
+ cputime_t utime;
+ cputime_t stime;
+ cputime_t active_utime;
+ cputime_t active_stime;
+ int state;
+ struct io_stats io[UID_STATE_SIZE];
+ struct hlist_node hash;
+};
+
+static struct uid_entry *find_uid_entry(uid_t uid)
+{
+ struct uid_entry *uid_entry;
+ hash_for_each_possible(hash_table, uid_entry, hash, uid) {
+ if (uid_entry->uid == uid)
+ return uid_entry;
+ }
+ return NULL;
+}
+
+static struct uid_entry *find_or_register_uid(uid_t uid)
+{
+ struct uid_entry *uid_entry;
+
+ uid_entry = find_uid_entry(uid);
+ if (uid_entry)
+ return uid_entry;
+
+ uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
+ if (!uid_entry)
+ return NULL;
+
+ uid_entry->uid = uid;
+
+ hash_add(hash_table, &uid_entry->hash, uid);
+
+ return uid_entry;
+}
+
+static int uid_cputime_show(struct seq_file *m, void *v)
+{
+ struct uid_entry *uid_entry;
+ struct task_struct *task, *temp;
+ struct user_namespace *user_ns = current_user_ns();
+ cputime_t utime;
+ cputime_t stime;
+ unsigned long bkt;
+ uid_t uid;
+
+ rt_mutex_lock(&uid_lock);
+
+ hash_for_each(hash_table, bkt, uid_entry, hash) {
+ uid_entry->active_stime = 0;
+ uid_entry->active_utime = 0;
+ }
+
+ read_lock(&tasklist_lock);
+ do_each_thread(temp, task) {
+ uid = from_kuid_munged(user_ns, task_uid(task));
+ uid_entry = find_or_register_uid(uid);
+ if (!uid_entry) {
+ read_unlock(&tasklist_lock);
+ rt_mutex_unlock(&uid_lock);
+ pr_err("%s: failed to find the uid_entry for uid %d\n",
+ __func__, uid);
+ return -ENOMEM;
+ }
+ task_cputime_adjusted(task, &utime, &stime);
+ uid_entry->active_utime += utime;
+ uid_entry->active_stime += stime;
+ } while_each_thread(temp, task);
+ read_unlock(&tasklist_lock);
+
+ hash_for_each(hash_table, bkt, uid_entry, hash) {
+ cputime_t total_utime = uid_entry->utime +
+ uid_entry->active_utime;
+ cputime_t total_stime = uid_entry->stime +
+ uid_entry->active_stime;
+ seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
+ (unsigned long long)jiffies_to_msecs(
+ cputime_to_jiffies(total_utime)) * USEC_PER_MSEC,
+ (unsigned long long)jiffies_to_msecs(
+ cputime_to_jiffies(total_stime)) * USEC_PER_MSEC);
+ }
+
+ rt_mutex_unlock(&uid_lock);
+ return 0;
+}
+
+static int uid_cputime_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uid_cputime_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_cputime_fops = {
+ .open = uid_cputime_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int uid_remove_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_remove_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ struct uid_entry *uid_entry;
+ struct hlist_node *tmp;
+ char uids[128];
+ char *start_uid, *end_uid = NULL;
+ long int uid_start = 0, uid_end = 0;
+
+ if (count >= sizeof(uids))
+ count = sizeof(uids) - 1;
+
+ if (copy_from_user(uids, buffer, count))
+ return -EFAULT;
+
+ uids[count] = '\0';
+ end_uid = uids;
+ start_uid = strsep(&end_uid, "-");
+
+ if (!start_uid || !end_uid)
+ return -EINVAL;
+
+ if (kstrtol(start_uid, 10, &uid_start) != 0 ||
+ kstrtol(end_uid, 10, &uid_end) != 0) {
+ return -EINVAL;
+ }
+ rt_mutex_lock(&uid_lock);
+
+ for (; uid_start <= uid_end; uid_start++) {
+ hash_for_each_possible_safe(hash_table, uid_entry, tmp,
+ hash, (uid_t)uid_start) {
+ if (uid_start == uid_entry->uid) {
+ hash_del(&uid_entry->hash);
+ kfree(uid_entry);
+ }
+ }
+ }
+
+ rt_mutex_unlock(&uid_lock);
+ return count;
+}
+
+static const struct file_operations uid_remove_fops = {
+ .open = uid_remove_open,
+ .release = single_release,
+ .write = uid_remove_write,
+};
+
+static u64 compute_write_bytes(struct task_struct *task)
+{
+ if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
+ return 0;
+
+ return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
+}
+
+static void add_uid_io_curr_stats(struct uid_entry *uid_entry,
+ struct task_struct *task)
+{
+ struct io_stats *io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+
+ io_curr->read_bytes += task->ioac.read_bytes;
+ io_curr->write_bytes += compute_write_bytes(task);
+ io_curr->rchar += task->ioac.rchar;
+ io_curr->wchar += task->ioac.wchar;
+ io_curr->fsync += task->ioac.syscfs;
+}
+
+static void clean_uid_io_last_stats(struct uid_entry *uid_entry,
+ struct task_struct *task)
+{
+ struct io_stats *io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+
+ io_last->read_bytes -= task->ioac.read_bytes;
+ io_last->write_bytes -= compute_write_bytes(task);
+ io_last->rchar -= task->ioac.rchar;
+ io_last->wchar -= task->ioac.wchar;
+ io_last->fsync -= task->ioac.syscfs;
+}
+
+static void update_io_stats_all_locked(void)
+{
+ struct uid_entry *uid_entry;
+ struct task_struct *task, *temp;
+ struct io_stats *io_bucket, *io_curr, *io_last;
+ struct user_namespace *user_ns = current_user_ns();
+ unsigned long bkt;
+ uid_t uid;
+
+ hash_for_each(hash_table, bkt, uid_entry, hash)
+ memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
+ sizeof(struct io_stats));
+
+ rcu_read_lock();
+ do_each_thread(temp, task) {
+ uid = from_kuid_munged(user_ns, task_uid(task));
+ uid_entry = find_or_register_uid(uid);
+ if (!uid_entry)
+ continue;
+ add_uid_io_curr_stats(uid_entry, task);
+ } while_each_thread(temp, task);
+ rcu_read_unlock();
+
+ hash_for_each(hash_table, bkt, uid_entry, hash) {
+ io_bucket = &uid_entry->io[uid_entry->state];
+ io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+ io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+
+ io_bucket->read_bytes +=
+ io_curr->read_bytes - io_last->read_bytes;
+ io_bucket->write_bytes +=
+ io_curr->write_bytes - io_last->write_bytes;
+ io_bucket->rchar += io_curr->rchar - io_last->rchar;
+ io_bucket->wchar += io_curr->wchar - io_last->wchar;
+ io_bucket->fsync += io_curr->fsync - io_last->fsync;
+
+ io_last->read_bytes = io_curr->read_bytes;
+ io_last->write_bytes = io_curr->write_bytes;
+ io_last->rchar = io_curr->rchar;
+ io_last->wchar = io_curr->wchar;
+ io_last->fsync = io_curr->fsync;
+ }
+}
+
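Both update paths use the same bookkeeping: the freshly sampled totals in TOTAL_CURR minus the totals recorded at the previous update in TOTAL_LAST give the I/O done since then, that delta is credited to the bucket for the uid's current state (foreground or background), and TOTAL_LAST is advanced so the next pass only sees new activity. A stripped-down sketch of that rollover for a single counter, with illustrative names:

	#include <stdint.h>

	enum { FOREGROUND, BACKGROUND, TOTAL_CURR, TOTAL_LAST, NR_BUCKETS };

	/* Credit everything counted since the last snapshot to the current state. */
	static void roll_over(uint64_t bytes[NR_BUCKETS], int state)
	{
		bytes[state] += bytes[TOTAL_CURR] - bytes[TOTAL_LAST];
		bytes[TOTAL_LAST] = bytes[TOTAL_CURR];
	}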
+static void update_io_stats_uid_locked(uid_t target_uid)
+{
+ struct uid_entry *uid_entry;
+ struct task_struct *task, *temp;
+ struct io_stats *io_bucket, *io_curr, *io_last;
+ struct user_namespace *user_ns = current_user_ns();
+
+ uid_entry = find_or_register_uid(target_uid);
+ if (!uid_entry)
+ return;
+
+ memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
+ sizeof(struct io_stats));
+
+ rcu_read_lock();
+ do_each_thread(temp, task) {
+ if (from_kuid_munged(user_ns, task_uid(task)) != target_uid)
+ continue;
+ add_uid_io_curr_stats(uid_entry, task);
+ } while_each_thread(temp, task);
+ rcu_read_unlock();
+
+ io_bucket = &uid_entry->io[uid_entry->state];
+ io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+ io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+
+ io_bucket->read_bytes +=
+ io_curr->read_bytes - io_last->read_bytes;
+ io_bucket->write_bytes +=
+ io_curr->write_bytes - io_last->write_bytes;
+ io_bucket->rchar += io_curr->rchar - io_last->rchar;
+ io_bucket->wchar += io_curr->wchar - io_last->wchar;
+ io_bucket->fsync += io_curr->fsync - io_last->fsync;
+
+ io_last->read_bytes = io_curr->read_bytes;
+ io_last->write_bytes = io_curr->write_bytes;
+ io_last->rchar = io_curr->rchar;
+ io_last->wchar = io_curr->wchar;
+ io_last->fsync = io_curr->fsync;
+}
+
+static int uid_io_show(struct seq_file *m, void *v)
+{
+ struct uid_entry *uid_entry;
+ unsigned long bkt;
+
+ rt_mutex_lock(&uid_lock);
+
+ update_io_stats_all_locked();
+
+ hash_for_each(hash_table, bkt, uid_entry, hash) {
+ seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+ uid_entry->uid,
+ uid_entry->io[UID_STATE_FOREGROUND].rchar,
+ uid_entry->io[UID_STATE_FOREGROUND].wchar,
+ uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
+ uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
+ uid_entry->io[UID_STATE_BACKGROUND].rchar,
+ uid_entry->io[UID_STATE_BACKGROUND].wchar,
+ uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
+ uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
+ uid_entry->io[UID_STATE_FOREGROUND].fsync,
+ uid_entry->io[UID_STATE_BACKGROUND].fsync);
+ }
+
+ rt_mutex_unlock(&uid_lock);
+
+ return 0;
+}
+
+static int uid_io_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uid_io_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_io_fops = {
+ .open = uid_io_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int uid_procstat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_procstat_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ struct uid_entry *uid_entry;
+ uid_t uid;
+ int argc, state;
+ char input[128];
+
+ if (count >= sizeof(input))
+ return -EINVAL;
+
+ if (copy_from_user(input, buffer, count))
+ return -EFAULT;
+
+ input[count] = '\0';
+
+ argc = sscanf(input, "%u %d", &uid, &state);
+ if (argc != 2)
+ return -EINVAL;
+
+ if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
+ return -EINVAL;
+
+ rt_mutex_lock(&uid_lock);
+
+ uid_entry = find_or_register_uid(uid);
+ if (!uid_entry) {
+ rt_mutex_unlock(&uid_lock);
+ return -EINVAL;
+ }
+
+ if (uid_entry->state == state) {
+ rt_mutex_unlock(&uid_lock);
+ return count;
+ }
+
+ update_io_stats_uid_locked(uid);
+
+ uid_entry->state = state;
+
+ rt_mutex_unlock(&uid_lock);
+
+ return count;
+}
+
+static const struct file_operations uid_procstat_fops = {
+ .open = uid_procstat_open,
+ .release = single_release,
+ .write = uid_procstat_write,
+};
+
+static int process_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ struct task_struct *task = v;
+ struct uid_entry *uid_entry;
+ cputime_t utime, stime;
+ uid_t uid;
+
+ if (!task)
+ return NOTIFY_OK;
+
+ rt_mutex_lock(&uid_lock);
+ uid = from_kuid_munged(current_user_ns(), task_uid(task));
+ uid_entry = find_or_register_uid(uid);
+ if (!uid_entry) {
+ pr_err("%s: failed to find uid %d\n", __func__, uid);
+ goto exit;
+ }
+
+ task_cputime_adjusted(task, &utime, &stime);
+ uid_entry->utime += utime;
+ uid_entry->stime += stime;
+
+ update_io_stats_uid_locked(uid);
+ clean_uid_io_last_stats(uid_entry, task);
+
+exit:
+ rt_mutex_unlock(&uid_lock);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block process_notifier_block = {
+ .notifier_call = process_notifier,
+};
+
+static int __init proc_uid_sys_stats_init(void)
+{
+ hash_init(hash_table);
+
+ cpu_parent = proc_mkdir("uid_cputime", NULL);
+ if (!cpu_parent) {
+ pr_err("%s: failed to create uid_cputime proc entry\n",
+ __func__);
+ goto err;
+ }
+
+ proc_create_data("remove_uid_range", 0222, cpu_parent,
+ &uid_remove_fops, NULL);
+ proc_create_data("show_uid_stat", 0444, cpu_parent,
+ &uid_cputime_fops, NULL);
+
+ io_parent = proc_mkdir("uid_io", NULL);
+ if (!io_parent) {
+ pr_err("%s: failed to create uid_io proc entry\n",
+ __func__);
+ goto err;
+ }
+
+ proc_create_data("stats", 0444, io_parent,
+ &uid_io_fops, NULL);
+
+ proc_parent = proc_mkdir("uid_procstat", NULL);
+ if (!proc_parent) {
+ pr_err("%s: failed to create uid_procstat proc entry\n",
+ __func__);
+ goto err;
+ }
+
+ proc_create_data("set", 0222, proc_parent,
+ &uid_procstat_fops, NULL);
+
+ profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
+
+ return 0;
+
+err:
+ remove_proc_subtree("uid_cputime", NULL);
+ remove_proc_subtree("uid_io", NULL);
+ remove_proc_subtree("uid_procstat", NULL);
+ return -ENOMEM;
+}
+
+early_initcall(proc_uid_sys_stats_init);
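From userspace the interface is driven by writing "<uid> <state>" to /proc/uid_procstat/set (0 for foreground, 1 for background, matching UID_STATE_* above) and reading /proc/uid_io/stats, where each line carries the uid followed by foreground rchar, wchar, read_bytes, write_bytes, background rchar, wchar, read_bytes, write_bytes, and finally the foreground and background fsync counts. A small user-space sketch of flipping a uid to background; error handling is trimmed and the uid is only an example:

	#include <stdio.h>

	/* Mark a uid as background so its subsequent I/O lands in the bg bucket. */
	static int set_uid_state(unsigned int uid, int state)
	{
		FILE *f = fopen("/proc/uid_procstat/set", "w");

		if (!f)
			return -1;
		fprintf(f, "%u %d\n", uid, state);
		return fclose(f);
	}

	/* set_uid_state(10057, 1);  1 == UID_STATE_BACKGROUND */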
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index f5dbb67ba929..ccf22eb5bdc0 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -128,14 +128,11 @@ static int mmc_cmdq_thread(void *d)
ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
/*
- * Don't requeue if issue_fn fails, just bug on.
- * We don't expect failure here and there is no recovery other
- * than fixing the actual issue if there is any.
+ * Don't requeue if issue_fn fails.
+ * Recovery will be handled by the completion softirq.
* Also we end the request if there is a partition switch error,
* so we should not requeue the request here.
*/
- if (ret)
- BUG_ON(1);
} /* loop */
return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 41f0935440fd..c462eee4a5f7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3944,12 +3944,10 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
*/
int mmc_cmdq_hw_reset(struct mmc_host *host)
{
- if (!host->bus_ops->power_restore)
- return -EOPNOTSUPP;
+ if (!host->bus_ops->reset)
+ return -EOPNOTSUPP;
- mmc_power_cycle(host, host->ocr_avail);
- mmc_select_voltage(host, host->card->ocr);
- return host->bus_ops->power_restore(host);
+ return host->bus_ops->reset(host);
}
EXPORT_SYMBOL(mmc_cmdq_hw_reset);
@@ -3969,8 +3967,9 @@ int mmc_hw_reset(struct mmc_host *host)
ret = host->bus_ops->reset(host);
mmc_bus_put(host);
- if (ret != -EOPNOTSUPP)
- pr_warn("%s: tried to reset card\n", mmc_hostname(host));
+ if (ret)
+ pr_warn("%s: tried to reset card, got error %d\n",
+ mmc_hostname(host), ret);
return ret;
}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index e8294502a701..9ca73a2b86db 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -653,19 +653,19 @@ static ssize_t store_enable(struct device *dev,
mmc_get_card(host->card);
if (!value) {
- /*turning off clock scaling*/
- mmc_exit_clk_scaling(host);
+ /* Suspend the clock scaling and mask host capability */
+ if (host->clk_scaling.enable)
+ mmc_suspend_clk_scaling(host);
host->caps2 &= ~MMC_CAP2_CLK_SCALE;
host->clk_scaling.state = MMC_LOAD_HIGH;
/* Set to max. frequency when disabling */
mmc_clk_update_freq(host, host->card->clk_scaling_highest,
host->clk_scaling.state);
} else if (value) {
- /* starting clock scaling, will restart in case started */
+ /* Unmask host capability and resume scaling */
host->caps2 |= MMC_CAP2_CLK_SCALE;
- if (host->clk_scaling.enable)
- mmc_exit_clk_scaling(host);
- mmc_init_clk_scaling(host);
+ if (!host->clk_scaling.enable)
+ mmc_resume_clk_scaling(host);
}
mmc_put_card(host->card);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3b79f514350e..691287125895 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -680,6 +680,12 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.ffu_capable =
(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
+
+ card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
+ card->ext_csd.device_life_time_est_typ_a =
+ ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
+ card->ext_csd.device_life_time_est_typ_b =
+ ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
}
out:
return err;
@@ -813,6 +819,11 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
+MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
+ card->ext_csd.device_life_time_est_typ_a,
+ card->ext_csd.device_life_time_est_typ_b);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
card->ext_csd.enhanced_area_offset);
@@ -851,6 +862,9 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_prv.attr,
+ &dev_attr_rev.attr,
+ &dev_attr_pre_eol_info.attr,
+ &dev_attr_life_time.attr,
&dev_attr_serial.attr,
&dev_attr_enhanced_area_offset.attr,
&dev_attr_enhanced_area_size.attr,
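The three added attributes expose the eMMC health fields decoded above: rev, PRE_EOL_INFO and the two DEVICE_LIFE_TIME_EST_TYP values, which the eMMC standard defines as coarse used-life estimates. They appear alongside the existing card attributes in sysfs; a user-space sketch of reading one of them, where the path is illustrative and depends on the host and card:

	#include <stdio.h>

	/* Reads a line such as "0x02 0x01" (type A and type B life-time estimates). */
	static int read_life_time(char *buf, size_t len)
	{
		FILE *f = fopen("/sys/class/mmc_host/mmc0/mmc0:0001/life_time", "r");

		if (!f)
			return -1;
		if (!fgets(buf, len, f)) {
			fclose(f);
			return -1;
		}
		return fclose(f);
	}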
@@ -2878,23 +2892,42 @@ EXPORT_SYMBOL(mmc_can_reset);
static int mmc_reset(struct mmc_host *host)
{
struct mmc_card *card = host->card;
+ int ret;
+
+ if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
+ mmc_can_reset(card)) {
+ /* If the card accepts the RST_n signal, send it. */
+ mmc_set_clock(host, host->f_init);
+ host->ops->hw_reset(host);
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+ } else {
+ /* Do a brute force power cycle */
+ mmc_power_cycle(host, card->ocr);
+ }
- if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
- return -EOPNOTSUPP;
-
- if (!mmc_can_reset(card))
- return -EOPNOTSUPP;
+ /* Suspend clk scaling to avoid switching frequencies intermittently */
- mmc_host_clk_hold(host);
- mmc_set_clock(host, host->f_init);
+ ret = mmc_suspend_clk_scaling(host);
+ if (ret) {
+ pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+ mmc_hostname(host), __func__, ret);
+ return ret;
+ }
- host->ops->hw_reset(host);
+ ret = mmc_init_card(host, host->card->ocr, host->card);
+ if (ret) {
+ pr_err("%s: %s: mmc_init_card failed (%d)\n",
+ mmc_hostname(host), __func__, ret);
+ return ret;
+ }
- /* Set initial state and call mmc_set_ios */
- mmc_set_initial_state(host);
- mmc_host_clk_release(host);
+ ret = mmc_resume_clk_scaling(host);
+ if (ret)
+ pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+ mmc_hostname(host), __func__, ret);
- return mmc_init_card(host, card->ocr, card);
+ return ret;
}
static const struct mmc_bus_ops mmc_ops = {
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index a83960fd474f..3f741f83a436 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -1102,12 +1102,17 @@ skip_cqterri:
* before setting doorbell, hence one is not needed here.
*/
for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
- /* complete the corresponding mrq */
- pr_debug("%s: completing tag -> %lu\n",
- mmc_hostname(mmc), tag);
- MMC_TRACE(mmc, "%s: completing tag -> %lu\n",
- __func__, tag);
+ mrq = get_req_by_tag(cq_host, tag);
+ if (!((mrq->cmd && mrq->cmd->error) ||
+ mrq->cmdq_req->resp_err ||
+ (mrq->data && mrq->data->error))) {
+ /* complete the corresponding mrq */
+ pr_debug("%s: completing tag -> %lu\n",
+ mmc_hostname(mmc), tag);
+ MMC_TRACE(mmc, "%s: completing tag -> %lu\n",
+ __func__, tag);
cmdq_finish_data(mmc, tag);
+ }
}
}
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1f1582f6cccb..8d838779fd1b 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -804,6 +804,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
switch (uhs) {
case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_DDR50:
pinctrl = imx_data->pins_100mhz;
break;
case MMC_TIMING_UHS_SDR104:
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index d2c386f09d69..1d843357422e 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct ushc_data *ushc;
int ret;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
if (mmc == NULL)
return -ENOMEM;
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index c0720c1ee4c9..5abab8800891 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -225,12 +225,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
last_trx_part = curr_part - 1;
- /*
- * We have whole TRX scanned, skip to the next part. Use
- * roundown (not roundup), as the loop will increase
- * offset in next step.
- */
- offset = rounddown(offset + trx->length, blocksize);
+ /* Jump to the end of TRX */
+ offset = roundup(offset + trx->length, blocksize);
+ /* Next loop iteration will increase the offset */
+ offset -= blocksize;
continue;
}
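The roundup-then-step-back form only differs from the old rounddown when the TRX image ends exactly on a block boundary: assuming the surrounding loop advances offset by one blocksize per iteration (as the comment above states), a TRX ending at 0x20000 with 0x10000 blocks used to be rounded down to 0x20000 and then advanced to 0x30000, skipping whatever starts at 0x20000, while the new form resumes scanning at 0x20000. A tiny self-contained sketch of the arithmetic:

	#include <stdint.h>

	#define ROUNDUP(x, a)  ((((x) + (a) - 1) / (a)) * (a))

	/* Offset the parser inspects next, after the loop adds one more block. */
	static uint32_t next_scan_offset(uint32_t trx_end, uint32_t blocksize)
	{
		uint32_t offset = ROUNDUP(trx_end, blocksize) - blocksize;

		return offset + blocksize;
	}

	/* next_scan_offset(0x20000, 0x10000) == 0x20000; rounddown + blocksize gave 0x30000 */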
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0134ba32a057..39712560b4c1 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
- if (bytes == 0) {
- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
- if (err)
- return err;
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ if (bytes == 0) {
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
diff --git a/drivers/net/can/spi/rh850.c b/drivers/net/can/spi/rh850.c
index c7a2182003bf..a93f979da9ed 100644
--- a/drivers/net/can/spi/rh850.c
+++ b/drivers/net/can/spi/rh850.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,7 @@
#include <linux/spi/spi.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/completion.h>
#define DEBUG_RH850 0
#if DEBUG_RH850 == 1
@@ -33,8 +34,11 @@
#define MAX_TX_BUFFERS 1
#define XFER_BUFFER_SIZE 64
#define RX_ASSEMBLY_BUFFER_SIZE 128
-#define RH850_CLOCK 80000000
+#define RH850_CLOCK 16000000
#define RH850_MAX_CHANNELS 4
+#define DRIVER_MODE_RAW_FRAMES 0
+#define DRIVER_MODE_PROPERTIES 1
+#define DRIVER_MODE_AMB 2
struct rh850_can {
struct net_device *netdev[RH850_MAX_CHANNELS];
@@ -50,6 +54,10 @@ struct rh850_can {
char *assembly_buffer;
u8 assembly_buffer_size;
atomic_t netif_queue_stop;
+ struct completion response_completion;
+ int wait_cmd;
+ int cmd_result;
+ int driver_mode;
};
struct rh850_netdev_privdata {
@@ -84,6 +92,36 @@ struct spi_miso { /* TLV for MISO line */
#define CMD_CAN_ADD_FILTER 0x83
#define CMD_CAN_REMOVE_FILTER 0x84
#define CMD_CAN_RECEIVE_FRAME 0x85
+#define CMD_CAN_CONFIG_BIT_TIMING 0x86
+
+#define CMD_CAN_DATA_BUFF_ADD 0x87
+#define CMD_CAN_DATA_BUFF_REMOVE 0X88
+#define CMD_CAN_RELEASE_BUFFER 0x89
+#define CMD_CAN_DATA_BUFF_REMOVE_ALL 0x8A
+#define CMD_PROPERTY_WRITE 0x8B
+#define CMD_PROPERTY_READ 0x8C
+
+#define CMD_GET_FW_BR_VERSION 0x95
+#define CMD_BEGIN_FIRMWARE_UPGRADE 0x96
+#define CMD_FIRMWARE_UPGRADE_DATA 0x97
+#define CMD_END_FIRMWARE_UPGRADE 0x98
+#define CMD_BEGIN_BOOT_ROM_UPGRADE 0x99
+#define CMD_BOOT_ROM_UPGRADE_DATA 0x9A
+#define CMD_END_BOOT_ROM_UPGRADE 0x9B
+
+#define IOCTL_RELEASE_CAN_BUFFER (SIOCDEVPRIVATE + 0)
+#define IOCTL_ENABLE_BUFFERING (SIOCDEVPRIVATE + 1)
+#define IOCTL_ADD_FRAME_FILTER (SIOCDEVPRIVATE + 2)
+#define IOCTL_REMOVE_FRAME_FILTER (SIOCDEVPRIVATE + 3)
+#define IOCTL_DISABLE_BUFFERING (SIOCDEVPRIVATE + 5)
+#define IOCTL_DISABLE_ALL_BUFFERING (SIOCDEVPRIVATE + 6)
+#define IOCTL_GET_FW_BR_VERSION (SIOCDEVPRIVATE + 7)
+#define IOCTL_BEGIN_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 8)
+#define IOCTL_FIRMWARE_UPGRADE_DATA (SIOCDEVPRIVATE + 9)
+#define IOCTL_END_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 10)
+#define IOCTL_BEGIN_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 11)
+#define IOCTL_BOOT_ROM_UPGRADE_DATA (SIOCDEVPRIVATE + 12)
+#define IOCTL_END_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 13)
struct can_fw_resp {
u8 maj;
@@ -126,15 +164,82 @@ struct can_receive_frame {
u8 data[];
} __packed;
+struct can_config_bit_timing {
+ u8 can_if;
+ u32 brp;
+ u32 tseg1;
+ u32 tseg2;
+ u32 sjw;
+} __packed;
+
+struct vehicle_property {
+ int id;
+ u64 ts;
+ int zone;
+ int val_type;
+ u32 data_len;
+ union {
+ u8 bval;
+ int val;
+ int val_arr[4];
+ float f_value;
+ float float_arr[4];
+ u8 str[36];
+ };
+} __packed;
+
+/* IOCTL messages */
+struct rh850_release_can_buffer {
+ u8 enable;
+} __packed;
+
+struct rh850_add_can_buffer {
+ u8 can_if;
+ u32 mid;
+ u32 mask;
+} __packed;
+
+struct rh850_delete_can_buffer {
+ u8 can_if;
+ u32 mid;
+ u32 mask;
+} __packed;
+
+struct can_fw_br_resp {
+ u8 maj;
+ u8 min;
+ u8 ver[32];
+ u8 br_maj;
+ u8 br_min;
+ u8 curr_exec_mode;
+} __packed;
+
+struct rh850_ioctl_req {
+ u8 len;
+ u8 data[];
+} __packed;
+
static struct can_bittiming_const rh850_bittiming_const = {
.name = "rh850",
- .tseg1_min = 4,
+ .tseg1_min = 1,
.tseg1_max = 16,
- .tseg2_min = 2,
- .tseg2_max = 8,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
.sjw_max = 4,
- .brp_min = 4,
- .brp_max = 1023,
+ .brp_min = 1,
+ .brp_max = 70,
+ .brp_inc = 1,
+};
+
+static struct can_bittiming_const rh850_data_bittiming_const = {
+ .name = "rh850",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 70,
.brp_inc = 1,
};
@@ -165,7 +270,7 @@ static void rh850_receive_frame(struct rh850_can *priv_data,
}
netdev = priv_data->netdev[frame->can_if];
skb = alloc_can_skb(netdev, &cf);
- if (skb == NULL) {
+ if (!skb) {
LOGDE("skb alloc failed. frame->can_if %d\n", frame->can_if);
return;
}
@@ -191,9 +296,51 @@ static void rh850_receive_frame(struct rh850_can *priv_data,
netdev->stats.rx_packets++;
}
-static void rh850_process_response(struct rh850_can *priv_data,
- struct spi_miso *resp, int length)
+static void rh850_receive_property(struct rh850_can *priv_data,
+ struct vehicle_property *property)
{
+ struct canfd_frame *cfd;
+ u8 *p;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *skt;
+ struct timeval tv;
+ static u64 nanosec;
+ struct net_device *netdev;
+ int i;
+
+ /* can0 as the channel with properties */
+ netdev = priv_data->netdev[0];
+ skb = alloc_canfd_skb(netdev, &cfd);
+ if (!skb) {
+ LOGDE("skb alloc failed. frame->can_if %d\n", 0);
+ return;
+ }
+
+ LOGDI("rcv property:0x%x data:%2x %2x %2x %2x",
+ property->id, property->str[0], property->str[1],
+ property->str[2], property->str[3]);
+ cfd->can_id = 0x00;
+ cfd->len = sizeof(struct vehicle_property);
+
+ p = (u8 *)property;
+ for (i = 0; i < cfd->len; i++)
+ cfd->data[i] = p[i];
+
+ nanosec = le64_to_cpu(property->ts);
+ tv.tv_sec = (int)(nanosec / 1000000000);
+ tv.tv_usec = (int)(nanosec - (u64)tv.tv_sec * 1000000000) / 1000;
+ skt = skb_hwtstamps(skb);
+ skt->hwtstamp = timeval_to_ktime(tv);
+ LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp));
+ skb->tstamp = timeval_to_ktime(tv);
+ netif_rx(skb);
+ netdev->stats.rx_packets++;
+}
+
+static int rh850_process_response(struct rh850_can *priv_data,
+ struct spi_miso *resp, int length)
+{
+ int ret = 0;
LOGDI("<%x %2d [%d]\n", resp->cmd, resp->len, resp->seq);
if (resp->cmd == CMD_CAN_RECEIVE_FRAME) {
struct can_receive_frame *frame =
@@ -208,19 +355,55 @@ static void rh850_process_response(struct rh850_can *priv_data,
} else {
rh850_receive_frame(priv_data, frame);
}
+ } else if (resp->cmd == CMD_PROPERTY_READ) {
+ struct vehicle_property *property =
+ (struct vehicle_property *)&resp->data;
+ if (resp->len > length) {
+ LOGDE("Error. This should never happen\n");
+ LOGDE("process_response: Saving %d bytes\n",
+ length);
+ memcpy(priv_data->assembly_buffer, (char *)resp,
+ length);
+ priv_data->assembly_buffer_size = length;
+ } else {
+ rh850_receive_property(priv_data, property);
+ }
} else if (resp->cmd == CMD_GET_FW_VERSION) {
struct can_fw_resp *fw_resp = (struct can_fw_resp *)resp->data;
dev_info(&priv_data->spidev->dev, "fw %d.%d",
fw_resp->maj, fw_resp->min);
dev_info(&priv_data->spidev->dev, "fw string %s",
fw_resp->ver);
+ } else if (resp->cmd == CMD_GET_FW_BR_VERSION) {
+ struct can_fw_br_resp *fw_resp =
+ (struct can_fw_br_resp *)resp->data;
+
+ dev_info(&priv_data->spidev->dev, "fw_can %d.%d",
+ fw_resp->maj, fw_resp->min);
+ dev_info(&priv_data->spidev->dev, "fw string %s",
+ fw_resp->ver);
+ dev_info(&priv_data->spidev->dev, "fw_br %d.%d exec_mode %d",
+ fw_resp->br_maj, fw_resp->br_min,
+ fw_resp->curr_exec_mode);
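+ /* Pack exec mode plus boot-ROM and firmware version numbers into the value handed back to the blocking ioctl */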
+ ret = fw_resp->curr_exec_mode << 28;
+ ret |= (fw_resp->br_maj & 0xF) << 24;
+ ret |= (fw_resp->br_min & 0xFF) << 16;
+ ret |= (fw_resp->maj & 0xF) << 8;
+ ret |= (fw_resp->min & 0xFF);
+ }
+
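+ /* If a blocking ioctl is waiting on this command, record the result and complete it */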
+ if (resp->cmd == priv_data->wait_cmd) {
+ priv_data->cmd_result = ret;
+ complete(&priv_data->response_completion);
}
+ return ret;
}
-static void rh850_process_rx(struct rh850_can *priv_data, char *rx_buf)
+static int rh850_process_rx(struct rh850_can *priv_data, char *rx_buf)
{
struct spi_miso *resp;
int length_processed = 0, actual_length = priv_data->xfer_length;
+ int ret = 0;
while (length_processed < actual_length) {
int length_left = actual_length - length_processed;
@@ -237,7 +420,8 @@ static void rh850_process_rx(struct rh850_can *priv_data, char *rx_buf)
rx_buf, 2);
data = priv_data->assembly_buffer;
resp = (struct spi_miso *)data;
- length = resp->len - priv_data->assembly_buffer_size;
+ length = resp->len + sizeof(*resp)
+ - priv_data->assembly_buffer_size;
if (length > 0)
memcpy(priv_data->assembly_buffer +
priv_data->assembly_buffer_size,
@@ -258,15 +442,11 @@ static void rh850_process_rx(struct rh850_can *priv_data, char *rx_buf)
length_processed, length_left, priv_data->xfer_length);
length_processed += length;
if (length_left >= sizeof(*resp) &&
- resp->len <= length_left) {
+ resp->len + sizeof(*resp) <= length_left) {
struct spi_miso *resp =
(struct spi_miso *)data;
- if (resp->len < sizeof(struct spi_miso)) {
- LOGDE("Error resp->len is %d). Abort.\n",
- resp->len);
- break;
- }
- rh850_process_response(priv_data, resp, length_left);
+ ret = rh850_process_response(priv_data, resp,
+ length_left);
} else if (length_left > 0) {
/* Not full message. Store however much we have for */
/* later assembly */
@@ -277,6 +457,7 @@ static void rh850_process_rx(struct rh850_can *priv_data, char *rx_buf)
break;
}
}
+ return ret;
}
static int rh850_do_spi_transaction(struct rh850_can *priv_data)
@@ -291,15 +472,20 @@ static int rh850_do_spi_transaction(struct rh850_can *priv_data)
msg = kzalloc(sizeof(*msg), GFP_KERNEL);
if (xfer == 0 || msg == 0)
return -ENOMEM;
+ LOGDI(">%x %2d [%d]\n", priv_data->tx_buf[0],
+ priv_data->tx_buf[1], priv_data->tx_buf[2]);
spi_message_init(msg);
spi_message_add_tail(xfer, msg);
xfer->tx_buf = priv_data->tx_buf;
xfer->rx_buf = priv_data->rx_buf;
xfer->len = priv_data->xfer_length;
ret = spi_sync(spi, msg);
- LOGDI("spi_sync ret %d\n", ret);
+ LOGDI("spi_sync ret %d data %x %x %x %x %x %x %x %x\n", ret,
+ priv_data->rx_buf[0], priv_data->rx_buf[1], priv_data->rx_buf[2],
+ priv_data->rx_buf[3], priv_data->rx_buf[4], priv_data->rx_buf[5],
+ priv_data->rx_buf[6], priv_data->rx_buf[7]);
if (ret == 0)
- rh850_process_rx(priv_data, priv_data->rx_buf);
+ ret = rh850_process_rx(priv_data, priv_data->rx_buf);
kfree(msg);
kfree(xfer);
return ret;
@@ -347,8 +533,54 @@ static int rh850_query_firmware_version(struct rh850_can *priv_data)
return ret;
}
+static int rh850_set_bitrate(struct net_device *netdev)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct can_config_bit_timing *req_d;
+ struct rh850_can *priv_data;
+ struct can_priv *priv = netdev_priv(netdev);
+ struct rh850_netdev_privdata *rh850_priv;
+
+ rh850_priv = netdev_priv(netdev);
+ priv_data = rh850_priv->rh850_can;
+
+ netdev_info(netdev, "ch%i, bitrate setting>%i",
+ rh850_priv->netdev_index, priv->bittiming.bitrate);
+ LOGNI("sjw>%i brp>%i ph_sg1>%i ph_sg2>%i smpl_pt>%i tq>%i pr_seg>%i",
+ priv->bittiming.sjw, priv->bittiming.brp,
+ priv->bittiming.phase_seg1,
+ priv->bittiming.phase_seg2,
+ priv->bittiming.sample_point,
+ priv->bittiming.tq, priv->bittiming.prop_seg);
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_CAN_CONFIG_BIT_TIMING;
+ req->len = sizeof(struct can_config_bit_timing);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+ req_d = (struct can_config_bit_timing *)req->data;
+ req_d->can_if = rh850_priv->netdev_index;
+ req_d->brp = priv->bittiming.brp;
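+ /* tseg1 sent to the firmware combines the propagation segment and phase segment 1 */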
+ req_d->tseg1 = priv->bittiming.phase_seg1 + priv->bittiming.prop_seg;
+ req_d->tseg2 = priv->bittiming.phase_seg2;
+ req_d->sjw = priv->bittiming.sjw;
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
static int rh850_can_write(struct rh850_can *priv_data,
- int can_channel, struct can_frame *cf)
+ int can_channel, struct canfd_frame *cf)
{
char *tx_buf, *rx_buf;
int ret, i;
@@ -369,16 +601,29 @@ static int rh850_can_write(struct rh850_can *priv_data,
priv_data->xfer_length = XFER_BUFFER_SIZE;
req = (struct spi_mosi *)tx_buf;
- req->cmd = CMD_CAN_SEND_FRAME;
- req->len = sizeof(struct can_write_req) + 8;
- req->seq = atomic_inc_return(&priv_data->msg_seq);
-
- req_d = (struct can_write_req *)req->data;
- req_d->can_if = can_channel;
- req_d->mid = cf->can_id;
- req_d->dlc = cf->can_dlc;
- for (i = 0; i < cf->can_dlc; i++)
- req_d->data[i] = cf->data[i];
+ if (priv_data->driver_mode == DRIVER_MODE_RAW_FRAMES) {
+ req->cmd = CMD_CAN_SEND_FRAME;
+ req->len = sizeof(struct can_write_req) + 8;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ req_d = (struct can_write_req *)req->data;
+ req_d->can_if = can_channel;
+ req_d->mid = cf->can_id;
+ req_d->dlc = cf->len;
+
+ for (i = 0; i < cf->len; i++)
+ req_d->data[i] = cf->data[i];
+ } else if (priv_data->driver_mode == DRIVER_MODE_PROPERTIES ||
+ priv_data->driver_mode == DRIVER_MODE_AMB) {
+ req->cmd = CMD_PROPERTY_WRITE;
+ req->len = sizeof(struct vehicle_property);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+ for (i = 0; i < cf->len; i++)
+ req->data[i] = cf->data[i];
+ } else {
+ LOGDE("rh850_can_write: wrong driver mode %i",
+ priv_data->driver_mode);
+ }
ret = rh850_do_spi_transaction(priv_data);
netdev = priv_data->netdev[can_channel];
@@ -414,7 +659,7 @@ static int rh850_netdev_close(struct net_device *netdev)
static void rh850_send_can_frame(struct work_struct *ws)
{
struct rh850_tx_work *tx_work;
- struct can_frame *cf;
+ struct canfd_frame *cf;
struct rh850_can *priv_data;
struct net_device *netdev;
struct rh850_netdev_privdata *netdev_priv_data;
@@ -428,7 +673,7 @@ static void rh850_send_can_frame(struct work_struct *ws)
LOGDI("send_can_frame ws %p\n", ws);
LOGDI("send_can_frame tx %p\n", tx_work);
- cf = (struct can_frame *)tx_work->skb->data;
+ cf = (struct canfd_frame *)tx_work->skb->data;
rh850_can_write(priv_data, can_channel, cf);
dev_kfree_skb(tx_work->skb);
@@ -458,10 +703,304 @@ static netdev_tx_t rh850_netdev_start_xmit(
return NETDEV_TX_OK;
}
+static int rh850_send_release_can_buffer_cmd(struct net_device *netdev)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+ int *mode;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_CAN_RELEASE_BUFFER;
+ req->len = sizeof(int);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+ mode = (int *)req->data;
+ *mode = priv_data->driver_mode;
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_data_buffering(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct rh850_add_can_buffer *enable_buffering;
+ struct rh850_add_can_buffer *add_request;
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ add_request = ifr->ifr_data;
+ req = (struct spi_mosi *)tx_buf;
+
+ if (cmd == IOCTL_ENABLE_BUFFERING)
+ req->cmd = CMD_CAN_DATA_BUFF_ADD;
+ else
+ req->cmd = CMD_CAN_DATA_BUFF_REMOVE;
+
+ req->len = sizeof(struct rh850_add_can_buffer);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ enable_buffering = (struct rh850_add_can_buffer *)req->data;
+ enable_buffering->can_if = add_request->can_if;
+ enable_buffering->mid = add_request->mid;
+ enable_buffering->mask = add_request->mask;
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_remove_all_buffering(struct net_device *netdev)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_CAN_DATA_BUFF_REMOVE_ALL;
+ req->len = 0;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_frame_filter(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct can_add_filter_req *add_filter;
+ struct can_add_filter_req *filter_request;
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ filter_request = ifr->ifr_data;
+ req = (struct spi_mosi *)tx_buf;
+
+ if (cmd == IOCTL_ADD_FRAME_FILTER)
+ req->cmd = CMD_CAN_ADD_FILTER;
+ else
+ req->cmd = CMD_CAN_REMOVE_FILTER;
+
+ req->len = sizeof(struct can_add_filter_req);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ add_filter = (struct can_add_filter_req *)req->data;
+ add_filter->can_if = filter_request->can_if;
+ add_filter->mid = filter_request->mid;
+ add_filter->mask = filter_request->mask;
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_send_spi_locked(struct rh850_can *priv_data, int cmd, int len,
+ u8 *data)
+{
+ char *tx_buf, *rx_buf;
+ struct spi_mosi *req;
+ int ret;
+
+ LOGDI("rh850_send_spi_locked\n");
+
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = cmd;
+ req->len = len;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
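+ /* Bound the copy: payloads larger than 64 bytes are rejected */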
+ if (unlikely(len > 64))
+ return -EINVAL;
+ memcpy(req->data, data, len);
+
+ ret = rh850_do_spi_transaction(priv_data);
+ return ret;
+}
+
+static int rh850_convert_ioctl_cmd_to_spi_cmd(int ioctl_cmd)
+{
+ switch (ioctl_cmd) {
+ case IOCTL_GET_FW_BR_VERSION:
+ return CMD_GET_FW_BR_VERSION;
+ case IOCTL_BEGIN_FIRMWARE_UPGRADE:
+ return CMD_BEGIN_FIRMWARE_UPGRADE;
+ case IOCTL_FIRMWARE_UPGRADE_DATA:
+ return CMD_FIRMWARE_UPGRADE_DATA;
+ case IOCTL_END_FIRMWARE_UPGRADE:
+ return CMD_END_FIRMWARE_UPGRADE;
+ case IOCTL_BEGIN_BOOT_ROM_UPGRADE:
+ return CMD_BEGIN_BOOT_ROM_UPGRADE;
+ case IOCTL_BOOT_ROM_UPGRADE_DATA:
+ return CMD_BOOT_ROM_UPGRADE_DATA;
+ case IOCTL_END_BOOT_ROM_UPGRADE:
+ return CMD_END_BOOT_ROM_UPGRADE;
+ }
+ return -EINVAL;
+}
+
+static int rh850_do_blocking_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ int spi_cmd, ret;
+
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+ struct rh850_ioctl_req *ioctl_data;
+ int len = 0;
+ u8 *data = NULL;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+
+ spi_cmd = rh850_convert_ioctl_cmd_to_spi_cmd(cmd);
+ LOGDI("rh850_do_blocking_ioctl spi_cmd %x\n", spi_cmd);
+ if (spi_cmd < 0) {
+ LOGDE("rh850_do_blocking_ioctl wrong command %d\n", cmd);
+ return spi_cmd;
+ }
+ if (!ifr)
+ return -EINVAL;
+ ioctl_data = ifr->ifr_data;
+ /* A plain NULL check is not enough here: userspace may pass a small offset value rather than a valid pointer */
+ if ((void *)ioctl_data > (void *)0x100) {
+ len = ioctl_data->len;
+ data = ioctl_data->data;
+ }
+ LOGDI("rh850_do_blocking_ioctl len %d\n", len);
+ mutex_lock(&priv_data->spi_lock);
+
+ priv_data->wait_cmd = spi_cmd;
+ priv_data->cmd_result = -1;
+ reinit_completion(&priv_data->response_completion);
+
+ ret = rh850_send_spi_locked(priv_data, spi_cmd, len, data);
+ mutex_unlock(&priv_data->spi_lock);
+
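+ /* Wait up to five seconds for the matching response; rh850_process_response() fills in cmd_result */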
+ if (ret == 0) {
+ LOGDI("rh850_do_blocking_ioctl ready to wait for response\n");
+ wait_for_completion_interruptible_timeout(
+ &priv_data->response_completion, 5 * HZ);
+ ret = priv_data->cmd_result;
+ }
+ return ret;
+}
+
+static int rh850_netdev_do_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+ int *mode;
+ int ret = -EINVAL;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+ LOGDI("rh850_netdev_do_ioctl %x\n", cmd);
+
+ switch (cmd) {
+ case IOCTL_RELEASE_CAN_BUFFER:
+ if (ifr->ifr_data > (void *)0x100) {
+ mode = ifr->ifr_data;
+ priv_data->driver_mode = *mode;
+ }
+ LOGDE("rh850_driver_mode %d\n", priv_data->driver_mode);
+ rh850_send_release_can_buffer_cmd(netdev);
+ ret = 0;
+ break;
+ case IOCTL_ENABLE_BUFFERING:
+ case IOCTL_DISABLE_BUFFERING:
+ rh850_data_buffering(netdev, ifr, cmd);
+ ret = 0;
+ break;
+ case IOCTL_DISABLE_ALL_BUFFERING:
+ rh850_remove_all_buffering(netdev);
+ ret = 0;
+ break;
+ case IOCTL_ADD_FRAME_FILTER:
+ case IOCTL_REMOVE_FRAME_FILTER:
+ rh850_frame_filter(netdev, ifr, cmd);
+ ret = 0;
+ break;
+ case IOCTL_GET_FW_BR_VERSION:
+ case IOCTL_BEGIN_FIRMWARE_UPGRADE:
+ case IOCTL_FIRMWARE_UPGRADE_DATA:
+ case IOCTL_END_FIRMWARE_UPGRADE:
+ case IOCTL_BEGIN_BOOT_ROM_UPGRADE:
+ case IOCTL_BOOT_ROM_UPGRADE_DATA:
+ case IOCTL_END_BOOT_ROM_UPGRADE:
+ ret = rh850_do_blocking_ioctl(netdev, ifr, cmd);
+ break;
+ }
+ LOGDI("rh850_netdev_do_ioctl ret %d\n", ret);
+
+ return ret;
+}
+
static const struct net_device_ops rh850_netdev_ops = {
.ndo_open = rh850_netdev_open,
.ndo_stop = rh850_netdev_close,
.ndo_start_xmit = rh850_netdev_start_xmit,
+ .ndo_do_ioctl = rh850_netdev_do_ioctl,
};
static int rh850_create_netdev(struct spi_device *spi,
@@ -490,9 +1029,13 @@ static int rh850_create_netdev(struct spi_device *spi,
netdev->netdev_ops = &rh850_netdev_ops;
SET_NETDEV_DEV(netdev, &spi->dev);
netdev_priv_data->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
- CAN_CTRLMODE_LISTENONLY;
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_FD;
netdev_priv_data->can.bittiming_const = &rh850_bittiming_const;
+ netdev_priv_data->can.data_bittiming_const =
+ &rh850_data_bittiming_const;
netdev_priv_data->can.clock.freq = RH850_CLOCK;
+ netdev_priv_data->can.do_set_bittiming = rh850_set_bitrate;
return 0;
}
@@ -534,9 +1077,11 @@ static struct rh850_can *rh850_create_priv_data(struct spi_device *spi)
goto cleanup_privdata;
}
priv_data->xfer_length = 0;
+ priv_data->driver_mode = DRIVER_MODE_RAW_FRAMES;
mutex_init(&priv_data->spi_lock);
atomic_set(&priv_data->msg_seq, 0);
+ init_completion(&priv_data->response_completion);
return priv_data;
cleanup_privdata:
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index b6fa89102526..66ba1e0ff37e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -913,8 +913,8 @@
#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1
#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
@@ -923,6 +923,8 @@
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f6a7161e3b85..5e6238e0b2bd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1658,10 +1658,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
/* Get the header length */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ FIRST, 1);
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL);
if (rdata->rx.hdr_len)
pdata->ext_stats.rx_split_header_packets++;
+ } else {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ FIRST, 0);
}
/* Get the RSS hash */
@@ -1684,19 +1689,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
}
}
- /* Get the packet length */
- rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
-
- if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
- /* Not all the data has been transferred for this packet */
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- INCOMPLETE, 1);
+ /* Not all the data has been transferred for this packet */
+ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
return 0;
- }
/* This is the last of the data for this packet */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- INCOMPLETE, 0);
+ LAST, 1);
+
+ /* Get the packet length */
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
/* Set checksum done indicator as appropriate */
if (netdev->features & NETIF_F_RXCSUM)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 53ce1222b11d..865b7e0b133b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1760,13 +1760,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
{
struct sk_buff *skb;
u8 *packet;
- unsigned int copy_len;
skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
if (!skb)
return NULL;
- /* Start with the header buffer which may contain just the header
+ /* Pull in the header buffer which may contain just the header
* or the header plus data
*/
dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@@ -1775,30 +1774,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset;
- copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
- copy_len = min(rdata->rx.hdr.dma_len, copy_len);
- skb_copy_to_linear_data(skb, packet, copy_len);
- skb_put(skb, copy_len);
-
- len -= copy_len;
- if (len) {
- /* Add the remaining data as a frag */
- dma_sync_single_range_for_cpu(pdata->dev,
- rdata->rx.buf.dma_base,
- rdata->rx.buf.dma_off,
- rdata->rx.buf.dma_len,
- DMA_FROM_DEVICE);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rdata->rx.buf.pa.pages,
- rdata->rx.buf.pa.pages_offset,
- len, rdata->rx.buf.dma_len);
- rdata->rx.buf.pa.pages = NULL;
- }
+ skb_copy_to_linear_data(skb, packet, len);
+ skb_put(skb, len);
return skb;
}
+static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
+ struct xgbe_packet_data *packet)
+{
+ /* Always zero if not the first descriptor */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
+ return 0;
+
+ /* First descriptor with split header, return header length */
+ if (rdata->rx.hdr_len)
+ return rdata->rx.hdr_len;
+
+ /* First descriptor but not the last descriptor and no split header,
+ * so the full buffer was used
+ */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+ return rdata->rx.hdr.dma_len;
+
+ /* First descriptor and last descriptor and no split header, so
+ * calculate how much of the buffer was used
+ */
+ return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
+}
+
+static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
+ struct xgbe_packet_data *packet,
+ unsigned int len)
+{
+ /* Always the full buffer if not the last descriptor */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+ return rdata->rx.buf.dma_len;
+
+ /* Last descriptor so calculate how much of the buffer was used
+ * for the last bit of data
+ */
+ return rdata->rx.len - len;
+}
+
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -1881,8 +1899,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct napi_struct *napi;
struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps;
- unsigned int incomplete, error, context_next, context;
- unsigned int len, rdesc_len, max_len;
+ unsigned int last, error, context_next, context;
+ unsigned int len, buf1_len, buf2_len, max_len;
unsigned int received = 0;
int packet_count = 0;
@@ -1892,7 +1910,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring)
return 0;
- incomplete = 0;
+ last = 0;
context_next = 0;
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@@ -1926,9 +1944,8 @@ read_again:
received++;
ring->cur++;
- incomplete = XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES,
- INCOMPLETE);
+ last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ LAST);
context_next = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT);
@@ -1937,7 +1954,7 @@ read_again:
CONTEXT);
/* Earlier error, just drain the remaining data */
- if ((incomplete || context_next) && error)
+ if ((!last || context_next) && error)
goto read_again;
if (error || packet->errors) {
@@ -1949,16 +1966,22 @@ read_again:
}
if (!context) {
- /* Length is cumulative, get this descriptor's length */
- rdesc_len = rdata->rx.len - len;
- len += rdesc_len;
+ /* Get the data length in the descriptor buffers */
+ buf1_len = xgbe_rx_buf1_len(rdata, packet);
+ len += buf1_len;
+ buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+ len += buf2_len;
- if (rdesc_len && !skb) {
+ if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
- rdesc_len);
- if (!skb)
+ buf1_len);
+ if (!skb) {
error = 1;
- } else if (rdesc_len) {
+ goto skip_data;
+ }
+ }
+
+ if (buf2_len) {
dma_sync_single_range_for_cpu(pdata->dev,
rdata->rx.buf.dma_base,
rdata->rx.buf.dma_off,
@@ -1968,13 +1991,14 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
rdata->rx.buf.pa.pages_offset,
- rdesc_len,
+ buf2_len,
rdata->rx.buf.dma_len);
rdata->rx.buf.pa.pages = NULL;
}
}
- if (incomplete || context_next)
+skip_data:
+ if (!last || context_next)
goto read_again;
if (!skb)
@@ -2033,7 +2057,7 @@ next_packet:
}
/* Check if we need to save state before leaving */
- if (received && (incomplete || context_next)) {
+ if (received && (!last || context_next)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdata->state_saved = 1;
rdata->state.skb = skb;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 91627561c58d..f971d92f7b41 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3495,7 +3495,8 @@ static int bcmgenet_suspend(struct device *d)
bcmgenet_netif_stop(dev);
- phy_suspend(priv->phydev);
+ if (!device_may_wakeup(d))
+ phy_suspend(priv->phydev);
netif_device_detach(dev);
@@ -3592,7 +3593,8 @@ static int bcmgenet_resume(struct device *d)
netif_device_attach(dev);
- phy_resume(priv->phydev);
+ if (!device_may_wakeup(d))
+ phy_resume(priv->phydev);
if (priv->eee.eee_enabled)
bcmgenet_eee_enable_set(dev, true);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 8bdfe53754ba..e96d1f95bb47 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -220,20 +220,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
udelay(60);
}
-static void bcmgenet_internal_phy_setup(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 reg;
-
- /* Power up PHY */
- bcmgenet_phy_power_set(dev, true);
- /* enable APD */
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_PWR_DN_EN_LD;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- bcmgenet_mii_reset(dev);
-}
-
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
u32 reg;
@@ -281,7 +267,6 @@ int bcmgenet_mii_config(struct net_device *dev)
if (priv->internal_phy) {
phy_name = "internal PHY";
- bcmgenet_internal_phy_setup(dev);
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
phy_name = "MoCA";
bcmgenet_moca_phy_setup(priv);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7445da218bd9..cc1725616f9d 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2823,7 +2823,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (!g) {
netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n");
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
}
cmdsetup.s.gather = 1;
@@ -2894,7 +2894,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
@@ -2908,12 +2908,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
+lio_xmit_dma_failed:
+ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+ ndata.datasize, DMA_TO_DEVICE);
lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
- ndata.datasize, DMA_TO_DEVICE);
recv_buffer_free(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 7af870a3c549..f9e4988ea30e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -58,7 +58,7 @@ static struct kobj_type ktype_veth_pool;
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.05"
+#define ibmveth_driver_version "1.06"
MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
@@ -137,6 +137,11 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}
+static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
+}
+
static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
@@ -1172,6 +1177,53 @@ map_failed:
goto retry_bounce;
}
+static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
+{
+ struct tcphdr *tcph;
+ int offset = 0;
+ int hdr_len;
+
+ /* only TCP packets will be aggregated */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_TCP) {
+ offset = iph->ihl * 4;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ } else {
+ return;
+ }
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
+
+ if (iph6->nexthdr == IPPROTO_TCP) {
+ offset = sizeof(struct ipv6hdr);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ } else {
+ return;
+ }
+ } else {
+ return;
+ }
+ /* if mss is not set through Large Packet bit/mss in rx buffer,
+ * expect that the mss will be written to the tcp header checksum.
+ */
+ tcph = (struct tcphdr *)(skb->data + offset);
+ if (lrg_pkt) {
+ skb_shinfo(skb)->gso_size = mss;
+ } else if (offset) {
+ skb_shinfo(skb)->gso_size = ntohs(tcph->check);
+ tcph->check = 0;
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ hdr_len = offset + tcph->doff * 4;
+ skb_shinfo(skb)->gso_segs =
+ DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+ }
+}
+
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
struct ibmveth_adapter *adapter =
@@ -1180,6 +1232,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
int frames_processed = 0;
unsigned long lpar_rc;
struct iphdr *iph;
+ u16 mss = 0;
restart_poll:
while (frames_processed < budget) {
@@ -1197,9 +1250,21 @@ restart_poll:
int length = ibmveth_rxq_frame_length(adapter);
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
+ int lrg_pkt = ibmveth_rxq_large_packet(adapter);
skb = ibmveth_rxq_get_buffer(adapter);
+ /* if the large packet bit is set in the rx queue
+ * descriptor, the mss will be written by PHYP eight
+ * bytes from the start of the rx buffer, which is
+ * skb->data at this stage
+ */
+ if (lrg_pkt) {
+ __be64 *rxmss = (__be64 *)(skb->data + 8);
+
+ mss = (u16)be64_to_cpu(*rxmss);
+ }
+
new_skb = NULL;
if (length < rx_copybreak)
new_skb = netdev_alloc_skb(netdev, length);
@@ -1233,11 +1298,15 @@ restart_poll:
if (iph->check == 0xffff) {
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
- adapter->rx_large_packets++;
}
}
}
+ if (length > netdev->mtu + ETH_HLEN) {
+ ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+ adapter->rx_large_packets++;
+ }
+
napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 4eade67fe30c..7acda04d034e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -209,6 +209,7 @@ struct ibmveth_rx_q_entry {
#define IBMVETH_RXQ_TOGGLE 0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT 31
#define IBMVETH_RXQ_VALID 0x40000000
+#define IBMVETH_RXQ_LRG_PKT 0x04000000
#define IBMVETH_RXQ_NO_CSUM 0x02000000
#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 23ec28f43f6d..afaa98d1d4e4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
s32 ret_val = 0;
u16 phy_id;
+ /* ensure PHY page selection to fix misconfigured i210 */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
+
ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
if (ret_val)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 3348e646db70..6eba58044456 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;
+ rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
+ rcu_read_unlock();
+
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
++cq->arm_sn;
cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
- spin_lock(&cq_table->lock);
-
+ rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
-
- spin_unlock(&cq_table->lock);
+ rcu_read_unlock();
if (!cq) {
- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
cq->event(cq, event_type);
-
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
if (err)
return err;
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
if (err)
goto err_icm;
@@ -347,9 +349,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
return 0;
err_radix:
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@@ -368,15 +370,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+ spin_lock(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock(&cq_table->lock);
+
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
- spin_lock_irq(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
-
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 28a4b34310b2..82bf1b539d87 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -439,8 +439,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
ring->stride = stride;
- if (ring->stride <= TXBB_SIZE)
+ if (ring->stride <= TXBB_SIZE) {
+ /* Stamp first unused send wqe */
+ __be32 *ptr = (__be32 *)ring->buf;
+ __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
+ *ptr = stamp;
+ /* Move pointer to start of rx section */
ring->buf += TXBB_SIZE;
+ }
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index d314d96dcb1c..d1fc7fa87b05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2955,6 +2955,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
+
+ /* Save param3 for dynamic changes from VST back to VGT */
+ qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@@ -3747,7 +3750,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
- __be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3789,7 +3791,6 @@ out:
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
- qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index cf0098596e85..e9408f5e2a1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -197,6 +197,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+ /* Subtract one since we already counted this as one
+ * "regular" packet in mlx5e_complete_rx_cqe()
+ */
+ rq->stats.packets += lro_num_seg - 1;
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ba115ec7aa92..1e611980cf99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -85,7 +85,7 @@ static struct mlx5_profile profile[] = {
[2] = {
.mask = MLX5_PROF_MASK_QP_SIZE |
MLX5_PROF_MASK_MR_CACHE,
- .log_max_qp = 17,
+ .log_max_qp = 18,
.mr_cache[0] = {
.size = 500,
.limit = 250
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index e8a09ff9e724..c8a7802d2953 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -197,65 +197,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi;
}
-union sub_key {
- u64 k;
- struct {
- u8 pad[3];
- u8 kb;
- u32 ka;
- };
-};
-
-/* Toeplitz hash function
- * data: network byte order
- * return: host byte order
- */
-static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
-{
- union sub_key subk;
- int k_next = 4;
- u8 dt;
- int i, j;
- u32 ret = 0;
-
- subk.k = 0;
- subk.ka = ntohl(*(u32 *)key);
-
- for (i = 0; i < dlen; i++) {
- subk.kb = key[k_next];
- k_next = (k_next + 1) % klen;
- dt = ((u8 *)data)[i];
- for (j = 0; j < 8; j++) {
- if (dt & 0x80)
- ret ^= subk.ka;
- dt <<= 1;
- subk.k <<= 1;
- }
- }
-
- return ret;
-}
-
-static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
-{
- struct flow_keys flow;
- int data_len;
-
- if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
- !(flow.basic.n_proto == htons(ETH_P_IP) ||
- flow.basic.n_proto == htons(ETH_P_IPV6)))
- return false;
-
- if (flow.basic.ip_proto == IPPROTO_TCP)
- data_len = 12;
- else
- data_len = 8;
-
- *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
-
- return true;
-}
-
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -268,11 +209,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
return 0;
- if (netvsc_set_hash(&hash, skb)) {
- q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
- ndev->real_num_tx_queues;
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
- }
+ hash = skb_get_hash(skb);
+ q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
+ ndev->real_num_tx_queues;
return q_idx;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 06c8bfeaccd6..40cd86614677 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1110,6 +1110,7 @@ static int macvlan_port_create(struct net_device *dev)
static void macvlan_port_destroy(struct net_device *dev)
{
struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+ struct sk_buff *skb;
dev->priv_flags &= ~IFF_MACVLAN_PORT;
netdev_rx_handler_unregister(dev);
@@ -1118,7 +1119,15 @@ static void macvlan_port_destroy(struct net_device *dev)
* but we need to cancel it and purge left skbs if any.
*/
cancel_work_sync(&port->bc_work);
- __skb_queue_purge(&port->bc_queue);
+
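+ /* Each queued broadcast frame holds a reference on its source device; drop it before freeing the skb */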
+ while ((skb = __skb_dequeue(&port->bc_queue))) {
+ const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+ if (src)
+ dev_put(src->dev);
+
+ kfree_skb(skb);
+ }
kfree_rcu(port, rcu);
}
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index e6cefd0e3262..84b9cca152eb 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1436,8 +1436,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
- } else {
- netif_rx_ni(skb);
}
return true;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index bba0ca786aaa..851c0e121807 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -538,7 +538,7 @@ void phy_stop_machine(struct phy_device *phydev)
cancel_delayed_work_sync(&phydev->state_queue);
mutex_lock(&phydev->lock);
- if (phydev->state > PHY_UP)
+ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
}
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
index 0184c96579e9..3a45cf805288 100644
--- a/drivers/net/ppp/pppolac.c
+++ b/drivers/net/ppp/pppolac.c
@@ -206,7 +206,9 @@ static void pppolac_xmit_core(struct work_struct *delivery_work)
while ((skb = skb_dequeue(&delivery_queue))) {
struct sock *sk_udp = skb->sk;
struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
- struct msghdr msg = { 0 };
+ struct msghdr msg = {
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
skb->len);
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
index d9e06039794e..cdb4fa1af734 100644
--- a/drivers/net/ppp/pppopns.c
+++ b/drivers/net/ppp/pppopns.c
@@ -189,7 +189,9 @@ static void pppopns_xmit_core(struct work_struct *delivery_work)
while ((skb = skb_dequeue(&delivery_queue))) {
struct sock *sk_raw = skb->sk;
struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
- struct msghdr msg = { 0 };
+ struct msghdr msg = {
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
skb->len);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 4e2b26a88b15..2aa1a1d29cb4 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -777,7 +777,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct net_device *netdev;
struct catc *catc;
u8 broadcast[ETH_ALEN];
- int i, pktsz;
+ int pktsz, ret;
if (usb_set_interface(usbdev,
intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -812,12 +812,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
dev_err(&intf->dev, "No free urbs available.\n");
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_free;
}
/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -845,15 +841,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc->irq_buf, 2, catc_irq_done, catc, 1);
if (!catc->is_f5u011) {
+ u32 *buf;
+ int i;
+
dev_dbg(dev, "Checking memory size\n");
- i = 0x12345678;
- catc_write_mem(catc, 0x7a80, &i, 4);
- i = 0x87654321;
- catc_write_mem(catc, 0xfa80, &i, 4);
- catc_read_mem(catc, 0x7a80, &i, 4);
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
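+ /* Probe the adapter memory size through a heap buffer rather than the on-stack variable used before */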
+ *buf = 0x12345678;
+ catc_write_mem(catc, 0x7a80, buf, 4);
+ *buf = 0x87654321;
+ catc_write_mem(catc, 0xfa80, buf, 4);
+ catc_read_mem(catc, 0x7a80, buf, 4);
- switch (i) {
+ switch (*buf) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
catc_set_reg(catc, RxBufCount, 32);
@@ -868,6 +873,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
dev_dbg(dev, "32k Memory\n");
break;
}
+
+ kfree(buf);
dev_dbg(dev, "Getting MAC from SEEROM.\n");
@@ -914,16 +921,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
- if (register_netdev(netdev) != 0) {
- usb_set_intfdata(intf, NULL);
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -EIO;
- }
+ ret = register_netdev(netdev);
+ if (ret)
+ goto fail_clear_intfdata;
+
return 0;
+
+fail_clear_intfdata:
+ usb_set_intfdata(intf, NULL);
+fail_free:
+ usb_free_urb(catc->ctrl_urb);
+ usb_free_urb(catc->tx_urb);
+ usb_free_urb(catc->rx_urb);
+ usb_free_urb(catc->irq_urb);
+ free_netdev(netdev);
+ return ret;
}
static void catc_disconnect(struct usb_interface *intf)
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f84080215915..17fac0121e56 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)
static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
{
+ u8 *buf;
int ret;
+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
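+ /* Read into the heap buffer, then copy back only the bytes the device actually returned */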
ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
- indx, data, size, 1000);
+ indx, buf, size, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ else if (ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
return ret;
}
-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
+ const void *data)
{
+ u8 *buf;
int ret;
+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
- indx, data, size, 100);
+ indx, buf, size, 100);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ kfree(buf);
return ret;
}
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
+ u8 *buf;
int ret;
+ buf = kmemdup(&data, 1, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
- indx, &data, 1, 1000);
+ indx, buf, 1, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ kfree(buf);
return ret;
}
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index d37b7dce2d40..39672984dde1 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150";
*/
static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
{
- return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
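+ /* Bounce the register read through a heap buffer and copy back at most the bytes transferred */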
+ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+ indx, 0, buf, size, 500);
+ if (ret > 0 && ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
+ return ret;
}
-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
{
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+ indx, 0, buf, size, 500);
+ kfree(buf);
+ return ret;
}
static void async_set_reg_cb(struct urb *urb)
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index d6b619667f1a..349aecbc210a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -345,6 +345,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ int len = skb->len;
netdev_tx_t ret = is_ip_tx_frame(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@@ -352,7 +353,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&dstats->syncp);
dstats->tx_pkts++;
- dstats->tx_bytes += skb->len;
+ dstats->tx_bytes += len;
u64_stats_update_end(&dstats->syncp);
} else {
this_cpu_inc(dev->dstats->tx_drps);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6fa8e165878e..590750ab6564 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2600,7 +2600,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
if (data[IFLA_VXLAN_ID]) {
__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
- if (id >= VXLAN_VID_MASK)
+ if (id >= VXLAN_N_VID)
return -ERANGE;
}
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 84a9b1a9577c..9bab797dcdbc 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -13,6 +13,7 @@
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-notifier.h>
#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/icnss.h>
#include <soc/qcom/service-locator.h>
#include "core.h"
#include "qmi.h"
@@ -448,6 +449,7 @@ int ath10k_snoc_qmi_wlan_enable(struct ath10k *ar,
int ret;
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg;
+ unsigned long time_left;
ath10k_dbg(ar, ATH10K_DBG_SNOC,
"Mode: %d, config: %p, host_version: %s\n",
@@ -461,10 +463,15 @@ int ath10k_snoc_qmi_wlan_enable(struct ath10k *ar,
return ret;
}
- wait_event_timeout(ath10k_fw_ready_wait_event,
+ time_left = wait_event_timeout(
+ ath10k_fw_ready_wait_event,
(atomic_read(&qmi_cfg->fw_ready) &&
atomic_read(&qmi_cfg->server_connected)),
msecs_to_jiffies(ATH10K_SNOC_WLAN_FW_READY_TIMEOUT));
+ if (time_left == 0) {
+ ath10k_err(ar, "Wait for FW ready and server connect timed out\n");
+ return -ETIMEDOUT;
+ }
req.host_version_valid = 1;
strlcpy(req.host_version, host_version,
@@ -854,9 +861,21 @@ int ath10k_snoc_start_qmi_service(struct ath10k *ar)
goto out_destroy_wq;
}
+ if (!icnss_is_fw_ready()) {
+ ath10k_err(ar, "failed to get fw ready indication\n");
+ ret = -EFAULT;
+ goto err_fw_ready;
+ }
+
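+ /* icnss already reports the firmware as ready, so record it for the wlan-enable wait */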
+ atomic_set(&qmi_cfg->fw_ready, 1);
ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI service started successfully\n");
return 0;
+err_fw_ready:
+ qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01,
+ &qmi_cfg->wlfw_clnt_nb);
out_destroy_wq:
destroy_workqueue(qmi_cfg->event_wq);
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 2cbc8ee9abf9..08618cedf775 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1329,6 +1329,8 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
if (!ar_snoc)
return -EINVAL;
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 removed\n", __func__);
+
ath10k_core_unregister(ar);
ath10k_snoc_pdr_unregister_notifier(ar);
ath10k_snoc_modem_ssr_unregister_notifier(ar);
@@ -1338,8 +1340,6 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_snoc_stop_qmi_service(ar);
ath10k_core_destroy(ar);
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 removed\n", __func__);
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index a8762711ad74..03945731eb65 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -528,6 +528,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
+ if (!spec_priv->rfs_chan_spec_scan)
+ return 1;
+
/* Output buffers are full, no need to process anything
* since there is no space to put the result anyway
*/
@@ -1072,7 +1075,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (config_enabled(CONFIG_ATH9K_DEBUGFS)) {
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL;
}
@@ -1086,6 +1089,9 @@ void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv,
debugfs_phy,
1024, 256, &rfs_spec_scan_cb,
NULL);
+ if (!spec_priv->rfs_chan_spec_scan)
+ return;
+
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
debugfs_phy, spec_priv,
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 8f0bde5825d5..0e66348e7513 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -44,7 +44,7 @@ config WIL6210_TRACING
config WIL6210_WRITE_IOCTL
bool "wil6210 write ioctl to the device"
depends on WIL6210
- default n
+ default y
---help---
Say Y here to allow write-access from user-space to
the device memory through ioctl. This is useful for
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 37898146f01d..d68d6c926029 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -26,6 +26,10 @@ bool disable_ap_sme;
module_param(disable_ap_sme, bool, 0444);
MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
+static bool ignore_reg_hints = true;
+module_param(ignore_reg_hints, bool, 0444);
+MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true)");
+
#define CHAN60G(_channel, _flags) { \
.band = IEEE80211_BAND_60GHZ, \
.center_freq = 56160 + (2160 * (_channel)), \
@@ -1650,12 +1654,6 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_ps_profile_type ps_profile;
- int rc;
-
- if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
- wil_err(wil, "set_power_mgmt not supported\n");
- return -EOPNOTSUPP;
- }
wil_dbg_misc(wil, "enabled=%d, timeout=%d\n",
enabled, timeout);
@@ -1665,11 +1663,7 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
else
ps_profile = WMI_PS_PROFILE_TYPE_PS_DISABLED;
- rc = wmi_ps_dev_profile_cfg(wil, ps_profile);
- if (rc)
- wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
-
- return rc;
+ return wil_ps_update(wil, ps_profile);
}
static struct cfg80211_ops wil_cfg80211_ops = {
@@ -1743,6 +1737,11 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->vendor_commands = wil_nl80211_vendor_commands;
wiphy->vendor_events = wil_nl80211_vendor_events;
wiphy->n_vendor_events = ARRAY_SIZE(wil_nl80211_vendor_events);
+
+ if (ignore_reg_hints) {
+ wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS;
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ }
}
struct wireless_dev *wil_cfg80211_init(struct device *dev)
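
The new ignore_reg_hints parameter simply ORs two regulatory flags into the wiphy at init time when left at its default of true. A hedged sketch of the same boolean-gated flag setup, with made-up flag values rather than the real cfg80211 constants:

#include <stdbool.h>
#include <stdio.h>

#define REG_DISABLE_BEACON_HINTS  (1u << 0)   /* illustrative values only */
#define REG_COUNTRY_IE_IGNORE     (1u << 1)

struct wiphy_cfg { unsigned int regulatory_flags; };

static bool ignore_reg_hints = true;          /* module-param stand-in */

static void wiphy_init(struct wiphy_cfg *w)
{
    if (ignore_reg_hints) {
        w->regulatory_flags |= REG_DISABLE_BEACON_HINTS;
        w->regulatory_flags |= REG_COUNTRY_IE_IGNORE;
    }
}

int main(void)
{
    struct wiphy_cfg w = { 0 };

    wiphy_init(&w);
    printf("regulatory_flags = 0x%x\n", w.regulatory_flags);
    return 0;
}
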
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index f4901587c005..e01acac88825 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -554,5 +554,7 @@ bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name)
rc = request_firmware(&fw, name, wil_to_dev(wil));
if (!rc)
release_firmware(fw);
- return rc != -ENOENT;
+ else
+ wil_dbg_fw(wil, "<%s> not available: %d\n", name, rc);
+ return !rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index fca8acffeed5..45a5fb6d23a0 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -576,6 +576,9 @@ int wil_priv_init(struct wil6210_priv *wil)
if (rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT)
rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT;
+
+ wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
+
return 0;
out_wmi_wq:
@@ -901,6 +904,24 @@ void wil_abort_scan(struct wil6210_priv *wil, bool sync)
}
}
+int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile)
+{
+ int rc;
+
+ if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
+ wil_err(wil, "set_power_mgmt not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rc = wmi_ps_dev_profile_cfg(wil, ps_profile);
+ if (rc)
+ wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
+ else
+ wil->ps_profile = ps_profile;
+
+ return rc;
+}
+
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
@@ -950,15 +971,15 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* Disable device led before reset*/
wmi_led_cfg(wil, false);
+ mutex_lock(&wil->p2p_wdev_mutex);
+ wil_abort_scan(wil, false);
+ mutex_unlock(&wil->p2p_wdev_mutex);
+
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
bitmap_zero(wil->status, wil_status_last);
mutex_unlock(&wil->wmi_mutex);
- mutex_lock(&wil->p2p_wdev_mutex);
- wil_abort_scan(wil, false);
- mutex_unlock(&wil->p2p_wdev_mutex);
-
wil_mask_irq(wil);
wmi_event_flush(wil);
@@ -1035,6 +1056,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
+ if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
+ wil_ps_update(wil, wil->ps_profile);
+
if (wil->tt_data_set)
wmi_set_tt_cfg(wil, &wil->tt_data);
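
wil_ps_update() now records the last successfully applied power-save profile so wil_reset() can restore anything other than the default once firmware is back up. A minimal sketch of that cache-and-replay idea, assuming a capability flag and a firmware call that may fail; the names are illustrative:

#include <stdbool.h>
#include <stdio.h>

enum ps_profile { PS_DEFAULT, PS_ENABLED, PS_DISABLED };

struct dev {
    bool ps_supported;
    enum ps_profile ps_profile;     /* last applied profile */
};

static int fw_cfg_ps(struct dev *d, enum ps_profile p)
{
    (void)d; (void)p;
    return 0;                       /* pretend the firmware accepted it */
}

static int ps_update(struct dev *d, enum ps_profile p)
{
    if (!d->ps_supported)
        return -1;                  /* -EOPNOTSUPP analogue */
    if (fw_cfg_ps(d, p))
        return -2;
    d->ps_profile = p;              /* remember only on success */
    return 0;
}

static void reset(struct dev *d)
{
    /* firmware reloaded, runtime settings lost */
    if (d->ps_profile != PS_DEFAULT)
        ps_update(d, d->ps_profile);    /* replay the user's choice */
}

int main(void)
{
    struct dev d = { .ps_supported = true, .ps_profile = PS_DEFAULT };

    ps_update(&d, PS_ENABLED);
    reset(&d);
    printf("profile after reset: %d\n", d.ps_profile);
    return 0;
}
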
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index b067fdf086d4..2e301b6b32a9 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -200,7 +200,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
release_pmc_skbs:
wil_err(wil, "exit on error: Releasing skbs...\n");
- for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) {
+ for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
dma_free_coherent(dev,
descriptor_size,
pmc->descriptors[i].va,
@@ -283,7 +283,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
int i;
for (i = 0;
- pmc->descriptors[i].va && i < pmc->num_descriptors; i++) {
+ i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
dma_free_coherent(dev,
pmc->descriptor_size,
pmc->descriptors[i].va,
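
Both pmc.c loops had the bound check after the element dereference, so the final iteration could read pmc->descriptors[num_descriptors]. The fix relies on C's short-circuit evaluation: test the index first, then the element. A standalone illustration of the corrected ordering:

#include <stdio.h>
#include <stdlib.h>

struct desc { void *va; };

static void free_descs(struct desc *d, int n)
{
    int i;

    /* i < n must come first: once it is false, d[i] is never read */
    for (i = 0; i < n && d[i].va; i++) {
        free(d[i].va);
        d[i].va = NULL;
    }
}

int main(void)
{
    struct desc d[3] = { { malloc(8) }, { malloc(8) }, { NULL } };

    free_descs(d, 3);
    printf("freed without reading past the array\n");
    return 0;
}
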
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 7404b6f39c6a..a43cffcf1bbf 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -343,8 +343,16 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
status = WLAN_STATUS_INVALID_QOS_PARAM;
}
- if (status == WLAN_STATUS_SUCCESS)
- agg_wsize = wil_agg_size(wil, req_agg_wsize);
+ if (status == WLAN_STATUS_SUCCESS) {
+ if (req_agg_wsize == 0) {
+ wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
+ WIL_MAX_AGG_WSIZE);
+ agg_wsize = WIL_MAX_AGG_WSIZE;
+ } else {
+ agg_wsize = min_t(u16,
+ WIL_MAX_AGG_WSIZE, req_agg_wsize);
+ }
+ }
rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
agg_amsdu, agg_wsize, agg_timeout);
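
The block-ack handler now treats a requested window of 0 as "no preference" and proposes the maximum, and otherwise clamps the peer's request to the driver limit. A sketch of that selection, with an assumed maximum of 64:

#include <stdint.h>
#include <stdio.h>

#define MAX_AGG_WSIZE 64u           /* illustrative driver limit */

static uint16_t pick_agg_wsize(uint16_t requested)
{
    if (requested == 0)             /* peer left it to us: offer the max */
        return MAX_AGG_WSIZE;
    return requested < MAX_AGG_WSIZE ? requested : MAX_AGG_WSIZE;
}

int main(void)
{
    printf("%u %u %u\n", pick_agg_wsize(0), pick_agg_wsize(16),
           pick_agg_wsize(512));    /* -> 64 16 64 */
    return 0;
}
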
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 8b5411e4dc34..35bbf3a91f3e 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -37,6 +37,10 @@ bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
+bool rx_large_buf;
+module_param(rx_large_buf, bool, 0444);
+MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
+
static inline uint wil_rx_snaplen(void)
{
return rx_align_2 ? 6 : 0;
@@ -255,7 +259,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
u32 i, int headroom)
{
struct device *dev = wil_to_dev(wil);
- unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
+ unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d = &vring->va[i].rx;
dma_addr_t pa;
@@ -419,7 +423,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
struct sk_buff *skb;
dma_addr_t pa;
unsigned int snaplen = wil_rx_snaplen();
- unsigned int sz = mtu_max + ETH_HLEN + snaplen;
+ unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
u16 dmalen;
u8 ftype;
int cid;
@@ -780,6 +784,20 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
wil_rx_refill(wil, v->size);
}
+static void wil_rx_buf_len_init(struct wil6210_priv *wil)
+{
+ wil->rx_buf_len = rx_large_buf ?
+ WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
+ if (mtu_max > wil->rx_buf_len) {
+ /* do not allow RX buffers to be smaller than mtu_max, for
+ * backward compatibility (mtu_max parameter was also used
+ * to support receiving large packets)
+ */
+ wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
+ wil->rx_buf_len = mtu_max;
+ }
+}
+
int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
struct vring *vring = &wil->vring_rx;
@@ -792,6 +810,8 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
return -EINVAL;
}
+ wil_rx_buf_len_init(wil);
+
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
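
wil_rx_buf_len_init() picks the RX buffer size from the new rx_large_buf parameter but never lets it drop below mtu_max, since older userspace used mtu_max to request large receive buffers. A hedged sketch of that selection with placeholder constants (the real sizes come from the driver headers):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ETH_MTU      7920u      /* illustrative "large buffer" payload */
#define BUF_LEN_DEFAULT  2048u      /* illustrative default allocation     */
#define MPDU_OVERHEAD     294u      /* illustrative per-packet overhead    */

static unsigned int rx_buf_len(bool large_buf, unsigned int mtu_max)
{
    unsigned int len = large_buf ? MAX_ETH_MTU
                                 : BUF_LEN_DEFAULT - MPDU_OVERHEAD;

    if (mtu_max > len)              /* keep the old mtu_max behaviour */
        len = mtu_max;
    return len;
}

int main(void)
{
    printf("%u %u\n", rx_buf_len(false, 1500), rx_buf_len(false, 4000));
    return 0;
}
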
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 0529d10a8268..eee528127e97 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -33,6 +33,7 @@ extern unsigned short rx_ring_overflow_thrsh;
extern int agg_wsize;
extern u32 vring_idle_trsh;
extern bool rx_align_2;
+extern bool rx_large_buf;
extern bool debug_fw;
extern bool disable_ap_sme;
@@ -670,6 +671,7 @@ struct wil6210_priv {
struct work_struct probe_client_worker;
/* DMA related */
struct vring vring_rx;
+ unsigned int rx_buf_len;
struct vring vring_tx[WIL6210_MAX_TX_RINGS];
struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
@@ -705,6 +707,8 @@ struct wil6210_priv {
/* High Access Latency Policy voting */
struct wil_halp halp;
+ enum wmi_ps_profile_type ps_profile;
+
struct wil_ftm_priv ftm;
bool tt_data_set;
struct wmi_tt_data tt_data;
@@ -824,6 +828,8 @@ int wil_if_add(struct wil6210_priv *wil);
void wil_if_remove(struct wil6210_priv *wil);
int wil_priv_init(struct wil6210_priv *wil);
void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_ps_update(struct wil6210_priv *wil,
+ enum wmi_ps_profile_type ps_profile);
int wil_reset(struct wil6210_priv *wil, bool no_fw);
void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_set_recovery_state(struct wil6210_priv *wil, int state);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 41afbdc34c18..97e22281314c 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1436,7 +1436,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
struct wmi_cfg_rx_chain_cmd cmd = {
.action = WMI_RX_CHAIN_ADD,
.rx_sw_ring = {
- .max_mpdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .max_mpdu_size = cpu_to_le16(
+ wil_mtu2macbuf(wil->rx_buf_len)),
.ring_mem_base = cpu_to_le64(vring->pa),
.ring_size = cpu_to_le16(vring->size),
},
diff --git a/drivers/net/wireless/cnss/Kconfig b/drivers/net/wireless/cnss/Kconfig
index 4558dc30fec1..8e69a2b469b9 100644
--- a/drivers/net/wireless/cnss/Kconfig
+++ b/drivers/net/wireless/cnss/Kconfig
@@ -9,6 +9,15 @@ config CNSS
This driver also adds support to integrate WLAN module to subsystem
restart framework.
+config CNSS_ASYNC
+ bool "Enable/disable cnss pci platform driver asynchronous probe"
+ depends on CNSS
+ ---help---
+	  If enabled, the CNSS PCI platform driver will probe asynchronously.
+	  Asynchronous probing lets the CNSS PCI platform driver probe in
+	  parallel with other device drivers, which helps reduce kernel
+	  boot time.
+
config CNSS_MAC_BUG
bool "Enable/disable 0-4K memory initialization for QCA6174"
depends on CNSS
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index f53ed2693879..48d358c4722a 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3067,6 +3067,9 @@ static struct platform_driver cnss_driver = {
.name = "cnss",
.owner = THIS_MODULE,
.of_match_table = cnss_dt_match,
+#ifdef CONFIG_CNSS_ASYNC
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+#endif
},
};
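
The CNSS change only sets .probe_type when CONFIG_CNSS_ASYNC is enabled, so the driver stays synchronous by default. A compile-time sketch of the same shape, using stand-in types rather than the real struct platform_driver:

#include <stdio.h>

/* #define CONFIG_CNSS_ASYNC 1  -- normally supplied by Kconfig/CFLAGS */

enum probe_type { PROBE_SYNC = 0, PROBE_PREFER_ASYNC = 1 };

struct drv_desc {
    const char *name;
    enum probe_type probe_type;
};

static struct drv_desc cnss_like_driver = {
    .name = "cnss",
#ifdef CONFIG_CNSS_ASYNC
    .probe_type = PROBE_PREFER_ASYNC,   /* probe in parallel with others */
#endif
};

int main(void)
{
    printf("%s probe_type=%d\n", cnss_like_driver.name,
           cnss_like_driver.probe_type);
    return 0;
}
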
diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
index 09c37c2383c6..af64b3dc4da8 100644
--- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
+++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
@@ -228,6 +228,7 @@ void wcnss_prealloc_check_memory_leak(void)
#else
void wcnss_prealloc_check_memory_leak(void) {}
#endif
+EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);
int wcnss_pre_alloc_reset(void)
{
@@ -243,6 +244,7 @@ int wcnss_pre_alloc_reset(void)
return n;
}
+EXPORT_SYMBOL(wcnss_pre_alloc_reset);
int prealloc_memory_stats_show(struct seq_file *fp, void *data)
{
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 6df3ee561d52..515aa3f993f3 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
spin_lock_bh(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, rid, 0);
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+ if (res)
+ goto unlock;
+
+ res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+ if (res)
+ goto unlock;
if (le16_to_cpu(rec.len) == 0) {
/* RID not available */
res = -ENODATA;
+ goto unlock;
}
rlen = (le16_to_cpu(rec.len) - 1) * 2;
- if (!res && exact_len && rlen != len) {
+ if (exact_len && rlen != len) {
printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
"rid=0x%04x, len=%d (expected %d)\n",
dev->name, rid, rlen, len);
res = -ENODATA;
}
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, buf, len);
+ res = hfa384x_from_bap(dev, BAP0, buf, len);
+unlock:
spin_unlock_bh(&local->baplock);
mutex_unlock(&local->rid_bap_mtx);
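
hfa384x_get_rid() used to thread every step through "if (!res)" checks while holding the BAP lock; the rewrite bails out to a single unlock label on the first failure. A small pthread-based sketch of the same early-exit-under-lock structure (the step() calls are placeholders):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int step(int fail) { return fail ? -1 : 0; }

static int get_record(int fail_at)
{
    int res;

    pthread_mutex_lock(&lock);

    res = step(fail_at == 1);
    if (res)
        goto unlock;

    res = step(fail_at == 2);
    if (res)
        goto unlock;

    res = step(fail_at == 3);       /* final copy-out step */
unlock:
    pthread_mutex_unlock(&lock);
    return res;
}

int main(void)
{
    printf("%d %d\n", get_record(0), get_record(2));   /* -> 0 -1 */
    return 0;
}
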
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 5f47356d6942..254b0ee37039 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -590,8 +590,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len);
if (rc < 0)
goto out_unlock;
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
+
+ vfree(buf);
+ return rc;
+
out_unlock:
nvdimm_bus_unlock(&nvdimm_bus->dev);
out:
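
__nd_ioctl() now drops the bus lock before copy_to_user(), which may fault and sleep, and frees the bounce buffer on that path itself. The ordering matters more than the specific calls; here is a userspace model using a mutex and a plain memcpy as a stand-in for the copy to user space:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_ioctl(char *user_buf, size_t len)
{
    char *buf = malloc(len);

    if (!buf)
        return -1;

    pthread_mutex_lock(&bus_lock);
    memset(buf, 0xab, len);         /* "hardware" fills the buffer        */
    pthread_mutex_unlock(&bus_lock);/* drop the lock before the slow copy */

    memcpy(user_buf, buf, len);     /* stands in for copy_to_user()       */
    free(buf);
    return 0;
}

int main(void)
{
    char out[16];

    printf("rc=%d first=0x%02x\n", do_ioctl(out, sizeof(out)),
           (unsigned char)out[0]);
    return 0;
}
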
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 5ce5ef211bdb..754f21fd9768 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -936,8 +936,10 @@ parport_register_dev_model(struct parport *port, const char *name,
* pardevice fields. -arca
*/
port->ops->init_state(par_dev, par_dev->state);
- port->proc_device = par_dev;
- parport_device_proc_register(par_dev);
+ if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+ port->proc_device = par_dev;
+ parport_device_proc_register(par_dev);
+ }
return par_dev;
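
parport_register_dev_model() now claims a per-port flag with test_and_set_bit() so only the first device registers the proc entry. The same "first caller wins" idiom in portable C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag proc_registered = ATOMIC_FLAG_INIT;

static bool register_proc_once(const char *name)
{
    /* returns true only for the first caller, like !test_and_set_bit() */
    if (atomic_flag_test_and_set(&proc_registered))
        return false;
    printf("proc entry registered for %s\n", name);
    return true;
}

int main(void)
{
    printf("%d %d\n", register_proc_once("dev0"), register_proc_once("dev1"));
    return 0;
}
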
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index f364882943e1..8bb759d10074 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -6449,7 +6449,6 @@ static int msm_pcie_probe(struct platform_device *pdev)
}
dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
- msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
msm_pcie_dev[rc_idx].pdev);
@@ -6500,6 +6499,8 @@ static int msm_pcie_probe(struct platform_device *pdev)
goto decrease_rc_num;
}
+ msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
+
msm_pcie_dev[rc_idx].drv_ready = true;
if (msm_pcie_dev[rc_idx].boot_option &
@@ -6754,12 +6755,12 @@ static int msm_pcie_pm_suspend(struct pci_dev *dev,
PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
pcie_dev->rc_idx);
- msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
-
if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
pinctrl_select_state(pcie_dev->pinctrl,
pcie_dev->pins_sleep);
+ msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
+
PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
return ret;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 31f31d460fc9..357527712539 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -303,13 +303,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return rc;
}
- pci_iov_set_numvfs(dev, nr_virtfn);
- iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
- pci_cfg_access_lock(dev);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
- msleep(100);
- pci_cfg_access_unlock(dev);
-
iov->initial_VFs = initial;
if (nr_virtfn < initial)
initial = nr_virtfn;
@@ -320,6 +313,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
goto err_pcibios;
}
+ pci_iov_set_numvfs(dev, nr_virtfn);
+ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+ pci_cfg_access_lock(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ msleep(100);
+ pci_cfg_access_unlock(dev);
+
for (i = 0; i < initial; i++) {
rc = virtfn_add(dev, i, 0);
if (rc)
@@ -555,21 +555,61 @@ void pci_iov_release(struct pci_dev *dev)
}
/**
- * pci_iov_resource_bar - get position of the SR-IOV BAR
+ * pci_iov_update_resource - update a VF BAR
* @dev: the PCI device
* @resno: the resource number
*
- * Returns position of the BAR encapsulated in the SR-IOV capability.
+ * Update a VF BAR in the SR-IOV capability of a PF.
*/
-int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
- if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
- return 0;
+ struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
+ struct resource *res = dev->resource + resno;
+ int vf_bar = resno - PCI_IOV_RESOURCES;
+ struct pci_bus_region region;
+ u16 cmd;
+ u32 new;
+ int reg;
+
+ /*
+ * The generic pci_restore_bars() path calls this for all devices,
+ * including VFs and non-SR-IOV devices. If this is not a PF, we
+ * have nothing to do.
+ */
+ if (!iov)
+ return;
+
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
+ if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
+ dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
+ vf_bar, res);
+ return;
+ }
+
+ /*
+ * Ignore unimplemented BARs, unused resource slots for 64-bit
+ * BARs, and non-movable resources, e.g., those described via
+ * Enhanced Allocation.
+ */
+ if (!res->flags)
+ return;
+
+ if (res->flags & IORESOURCE_UNSET)
+ return;
+
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return;
- BUG_ON(!dev->is_physfn);
+ pcibios_resource_to_bus(dev->bus, &region, res);
+ new = region.start;
+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
- return dev->sriov->pos + PCI_SRIOV_BAR +
- 4 * (resno - PCI_IOV_RESOURCES);
+ reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
+ pci_write_config_dword(dev, reg, new);
+ if (res->flags & IORESOURCE_MEM_64) {
+ new = region.start >> 16 >> 16;
+ pci_write_config_dword(dev, reg + 4, new);
+ }
}
resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
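
pci_iov_update_resource() writes the low dword of the VF BAR and, for 64-bit resources, the high dword at the following register, using the double 16-bit shift to stay warning-free on 32-bit builds. A sketch of that split write against an in-memory stand-in for config space:

#include <stdint.h>
#include <stdio.h>

static uint32_t cfg[8];                         /* fake config registers */

static void write_bar64(int reg, uint64_t addr, int is_64bit)
{
    cfg[reg] = (uint32_t)addr;                  /* low 32 bits           */
    if (is_64bit)
        cfg[reg + 1] = (uint32_t)(addr >> 16 >> 16);    /* high 32 bits  */
}

int main(void)
{
    write_bar64(2, 0x1234567890ULL, 1);
    printf("low=0x%08x high=0x%08x\n", cfg[2], cfg[3]);
    return 0;
}
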
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e311a9bf2c90..0e53488f8ec1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -519,10 +519,6 @@ static void pci_restore_bars(struct pci_dev *dev)
{
int i;
- /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
- if (dev->is_virtfn)
- return;
-
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
@@ -4472,36 +4468,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
}
EXPORT_SYMBOL(pci_select_bars);
-/**
- * pci_resource_bar - get position of the BAR associated with a resource
- * @dev: the PCI device
- * @resno: the resource number
- * @type: the BAR type to be filled in
- *
- * Returns BAR position in config space, or 0 if the BAR is invalid.
- */
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
-{
- int reg;
-
- if (resno < PCI_ROM_RESOURCE) {
- *type = pci_bar_unknown;
- return PCI_BASE_ADDRESS_0 + 4 * resno;
- } else if (resno == PCI_ROM_RESOURCE) {
- *type = pci_bar_mem32;
- return dev->rom_base_reg;
- } else if (resno < PCI_BRIDGE_RESOURCES) {
- /* device specific resource */
- *type = pci_bar_unknown;
- reg = pci_iov_resource_bar(dev, resno);
- if (reg)
- return reg;
- }
-
- dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
- return 0;
-}
-
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d390fc1475ec..c43e448873ca 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -232,7 +232,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
@@ -276,7 +275,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
-int pci_iov_resource_bar(struct pci_dev *dev, int resno);
+void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
@@ -290,10 +289,6 @@ static inline void pci_iov_release(struct pci_dev *dev)
{
}
-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
-{
- return 0;
-}
static inline void pci_restore_iov_state(struct pci_dev *dev)
{
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 71d9a6d1bd56..b83df942794f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -226,7 +226,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
}
} else {
- res->flags |= (l & IORESOURCE_ROM_ENABLE);
+ if (l & PCI_ROM_ADDRESS_ENABLE)
+ res->flags |= IORESOURCE_ROM_ENABLE;
l64 = l & PCI_ROM_ADDRESS_MASK;
sz64 = sz & PCI_ROM_ADDRESS_MASK;
mask64 = (u32)PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index eb0ad530dc43..3eea7fc5e1a2 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -31,6 +31,11 @@ int pci_enable_rom(struct pci_dev *pdev)
if (!res->flags)
return -1;
+ /*
+ * Ideally pci_update_resource() would update the ROM BAR address,
+ * and we would only set the enable bit here. But apparently some
+ * devices have buggy ROM BARs that read as zero when disabled.
+ */
pcibios_resource_to_bus(pdev->bus, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 604011e047d6..25062966cbfa 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -25,21 +25,18 @@
#include <linux/slab.h>
#include "pci.h"
-
-void pci_update_resource(struct pci_dev *dev, int resno)
+static void pci_std_update_resource(struct pci_dev *dev, int resno)
{
struct pci_bus_region region;
bool disable;
u16 cmd;
u32 new, check, mask;
int reg;
- enum pci_bar_type type;
struct resource *res = dev->resource + resno;
- if (dev->is_virtfn) {
- dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
return;
- }
/*
* Ignore resources for unimplemented BARs and unused resource slots
@@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno)
return;
pcibios_resource_to_bus(dev->bus, &region, res);
+ new = region.start;
- new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
- if (res->flags & IORESOURCE_IO)
+ if (res->flags & IORESOURCE_IO) {
mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
- else
+ new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
+ } else if (resno == PCI_ROM_RESOURCE) {
+ mask = (u32)PCI_ROM_ADDRESS_MASK;
+ } else {
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+ }
- reg = pci_resource_bar(dev, resno, &type);
- if (!reg)
- return;
- if (type != pci_bar_unknown) {
+ if (resno < PCI_ROM_RESOURCE) {
+ reg = PCI_BASE_ADDRESS_0 + 4 * resno;
+ } else if (resno == PCI_ROM_RESOURCE) {
+
+ /*
+ * Apparently some Matrox devices have ROM BARs that read
+ * as zero when disabled, so don't update ROM BARs unless
+ * they're enabled. See https://lkml.org/lkml/2005/8/30/138.
+ */
if (!(res->flags & IORESOURCE_ROM_ENABLE))
return;
+
+ reg = dev->rom_base_reg;
new |= PCI_ROM_ADDRESS_ENABLE;
- }
+ } else
+ return;
/*
* We can't update a 64-bit BAR atomically, so when possible,
@@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno)
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
+void pci_update_resource(struct pci_dev *dev, int resno)
+{
+ if (resno <= PCI_ROM_RESOURCE)
+ pci_std_update_resource(dev, resno);
+#ifdef CONFIG_PCI_IOV
+ else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ pci_iov_update_resource(dev, resno);
+#endif
+}
+
int pci_claim_resource(struct pci_dev *dev, int resource)
{
struct resource *res = &dev->resource[resource];
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index a009ae34c5ef..930f0f25c1ce 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1466,12 +1466,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
offset += range->npins;
}
- /* Mask and clear all interrupts */
- chv_writel(0, pctrl->regs + CHV_INTMASK);
+ /* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
goto fail;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index c1448955d3ed..c5a351e7bb4e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -602,10 +602,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->intr_status_reg);
- val &= ~BIT(g->intr_status_bit);
- writel(val, pctrl->regs + g->intr_status_reg);
-
val = readl(pctrl->regs + g->intr_cfg_reg);
val |= BIT(g->intr_enable_bit);
writel(val, pctrl->regs + g->intr_cfg_reg);
diff --git a/drivers/platform/msm/gpio-usbdetect.c b/drivers/platform/msm/gpio-usbdetect.c
index 80e16573e0aa..dc05d7108135 100644
--- a/drivers/platform/msm/gpio-usbdetect.c
+++ b/drivers/platform/msm/gpio-usbdetect.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,57 +20,101 @@
#include <linux/interrupt.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
-#include <linux/power_supply.h>
+#include <linux/extcon.h>
#include <linux/regulator/consumer.h>
struct gpio_usbdetect {
struct platform_device *pdev;
struct regulator *vin;
- struct power_supply *usb_psy;
int vbus_det_irq;
+ int id_det_irq;
int gpio;
+ struct extcon_dev *extcon_dev;
+ int vbus_state;
+ bool id_state;
+};
+
+static const unsigned int gpio_usb_extcon_table[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_USB_CC,
+ EXTCON_USB_SPEED,
+ EXTCON_NONE,
};
static irqreturn_t gpio_usbdetect_vbus_irq(int irq, void *data)
{
struct gpio_usbdetect *usb = data;
- int vbus;
- union power_supply_propval pval = {0,};
- vbus = gpio_get_value(usb->gpio);
- if (vbus)
- pval.intval = POWER_SUPPLY_TYPE_USB;
- else
- pval.intval = POWER_SUPPLY_TYPE_UNKNOWN;
+ usb->vbus_state = gpio_get_value(usb->gpio);
+ if (usb->vbus_state) {
+ dev_dbg(&usb->pdev->dev, "setting vbus notification\n");
+ extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 1);
+ } else {
+ dev_dbg(&usb->pdev->dev, "setting vbus removed notification\n");
+ extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gpio_usbdetect_id_irq(int irq, void *data)
+{
+ struct gpio_usbdetect *usb = data;
+ int ret;
+
+ ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_LINE_LEVEL,
+ &usb->id_state);
+ if (ret < 0) {
+ dev_err(&usb->pdev->dev, "unable to read ID IRQ LINE\n");
+ return IRQ_HANDLED;
+ }
- power_supply_set_property(usb->usb_psy,
- POWER_SUPPLY_PROP_TYPE, &pval);
+ return IRQ_WAKE_THREAD;
+}
- pval.intval = vbus;
- power_supply_set_property(usb->usb_psy, POWER_SUPPLY_PROP_PRESENT,
- &pval);
+static irqreturn_t gpio_usbdetect_id_irq_thread(int irq, void *data)
+{
+ struct gpio_usbdetect *usb = data;
+
+ if (usb->id_state) {
+ dev_dbg(&usb->pdev->dev, "stopping usb host\n");
+ extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 0);
+ enable_irq(usb->vbus_det_irq);
+ } else {
+ dev_dbg(&usb->pdev->dev, "starting usb HOST\n");
+ disable_irq(usb->vbus_det_irq);
+ extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 1);
+ }
return IRQ_HANDLED;
}
+static const u32 gpio_usb_extcon_exclusive[] = {0x3, 0};
+
static int gpio_usbdetect_probe(struct platform_device *pdev)
{
struct gpio_usbdetect *usb;
- struct power_supply *usb_psy;
int rc;
- unsigned long flags;
-
- usb_psy = power_supply_get_by_name("usb");
- if (!usb_psy) {
- dev_dbg(&pdev->dev, "USB power_supply not found, deferring probe\n");
- return -EPROBE_DEFER;
- }
usb = devm_kzalloc(&pdev->dev, sizeof(*usb), GFP_KERNEL);
if (!usb)
return -ENOMEM;
usb->pdev = pdev;
- usb->usb_psy = usb_psy;
+
+ usb->extcon_dev = devm_extcon_dev_allocate(&pdev->dev,
+ gpio_usb_extcon_table);
+ if (IS_ERR(usb->extcon_dev)) {
+ dev_err(&pdev->dev, "failed to allocate a extcon device\n");
+ return PTR_ERR(usb->extcon_dev);
+ }
+
+ usb->extcon_dev->mutually_exclusive = gpio_usb_extcon_exclusive;
+ rc = devm_extcon_dev_register(&pdev->dev, usb->extcon_dev);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to register extcon device\n");
+ return rc;
+ }
if (of_get_property(pdev->dev.of_node, "vin-supply", NULL)) {
usb->vin = devm_regulator_get(&pdev->dev, "vin");
@@ -94,43 +138,63 @@ static int gpio_usbdetect_probe(struct platform_device *pdev)
"qcom,vbus-det-gpio", 0);
if (usb->gpio < 0) {
dev_err(&pdev->dev, "Failed to get gpio: %d\n", usb->gpio);
- return usb->gpio;
+ rc = usb->gpio;
+ goto error;
}
rc = gpio_request(usb->gpio, "vbus-det-gpio");
if (rc < 0) {
dev_err(&pdev->dev, "Failed to request gpio: %d\n", rc);
- return rc;
+ goto error;
}
usb->vbus_det_irq = gpio_to_irq(usb->gpio);
if (usb->vbus_det_irq < 0) {
- if (usb->vin)
- regulator_disable(usb->vin);
- return usb->vbus_det_irq;
+ dev_err(&pdev->dev, "get vbus_det_irq failed\n");
+ rc = usb->vbus_det_irq;
+ goto error;
}
- rc = devm_request_irq(&pdev->dev, usb->vbus_det_irq,
- gpio_usbdetect_vbus_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "vbus_det_irq", usb);
+ rc = devm_request_threaded_irq(&pdev->dev, usb->vbus_det_irq,
+ NULL, gpio_usbdetect_vbus_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT, "vbus_det_irq", usb);
if (rc) {
dev_err(&pdev->dev, "request for vbus_det_irq failed: %d\n",
rc);
- if (usb->vin)
- regulator_disable(usb->vin);
- return rc;
+ goto error;
+ }
+
+ usb->id_det_irq = platform_get_irq_byname(pdev, "pmic_id_irq");
+ if (usb->id_det_irq < 0) {
+ dev_err(&pdev->dev, "get id_det_irq failed\n");
+ rc = usb->id_det_irq;
+ goto error;
+ }
+
+ rc = devm_request_threaded_irq(&pdev->dev, usb->id_det_irq,
+ gpio_usbdetect_id_irq,
+ gpio_usbdetect_id_irq_thread,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT, "id_det_irq", usb);
+ if (rc) {
+ dev_err(&pdev->dev, "request for id_det_irq failed: %d\n", rc);
+ goto error;
}
enable_irq_wake(usb->vbus_det_irq);
+ enable_irq_wake(usb->id_det_irq);
dev_set_drvdata(&pdev->dev, usb);
/* Read and report initial VBUS state */
- local_irq_save(flags);
gpio_usbdetect_vbus_irq(usb->vbus_det_irq, usb);
- local_irq_restore(flags);
return 0;
+
+error:
+ if (usb->vin)
+ regulator_disable(usb->vin);
+ return rc;
}
static int gpio_usbdetect_remove(struct platform_device *pdev)
@@ -139,6 +203,8 @@ static int gpio_usbdetect_remove(struct platform_device *pdev)
disable_irq_wake(usb->vbus_det_irq);
disable_irq(usb->vbus_det_irq);
+ disable_irq_wake(usb->id_det_irq);
+ disable_irq(usb->id_det_irq);
if (usb->vin)
regulator_disable(usb->vin);
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 717c891788f2..eaf50ca1cea5 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -29,6 +29,7 @@
static struct dentry *dent;
static char dbg_buff[4096];
+static void *gsi_ipc_logbuf_low;
static void gsi_wq_print_dp_stats(struct work_struct *work);
static DECLARE_DELAYED_WORK(gsi_print_dp_stats_work, gsi_wq_print_dp_stats);
@@ -764,22 +765,20 @@ static ssize_t gsi_enable_ipc_low(struct file *file,
if (kstrtos8(dbg_buff, 0, &option))
return -EFAULT;
+ mutex_lock(&gsi_ctx->mlock);
if (option) {
- if (!gsi_ctx->ipc_logbuf_low) {
- gsi_ctx->ipc_logbuf_low =
+ if (!gsi_ipc_logbuf_low) {
+ gsi_ipc_logbuf_low =
ipc_log_context_create(GSI_IPC_LOG_PAGES,
"gsi_low", 0);
+ if (gsi_ipc_logbuf_low == NULL)
+ TERR("failed to get ipc_logbuf_low\n");
}
-
- if (gsi_ctx->ipc_logbuf_low == NULL) {
- TERR("failed to get ipc_logbuf_low\n");
- return -EFAULT;
- }
+ gsi_ctx->ipc_logbuf_low = gsi_ipc_logbuf_low;
} else {
- if (gsi_ctx->ipc_logbuf_low)
- ipc_log_context_destroy(gsi_ctx->ipc_logbuf_low);
gsi_ctx->ipc_logbuf_low = NULL;
}
+ mutex_unlock(&gsi_ctx->mlock);
return count;
}
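
The debugfs handler now keeps the ipc log context in a file-static pointer, creates it at most once, and only swaps gsi_ctx->ipc_logbuf_low under the mutex; disabling clears the pointer without destroying the context, so a concurrent logger never dereferences freed memory. A pthread sketch of that enable/disable toggle (malloc stands in for ipc_log_context_create()):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t mlock = PTHREAD_MUTEX_INITIALIZER;
static void *logbuf_low;            /* created once, never destroyed */
static void *active_logbuf;         /* what the hot path reads       */

static void set_low_logging(int enable)
{
    pthread_mutex_lock(&mlock);
    if (enable) {
        if (!logbuf_low)
            logbuf_low = malloc(4096);
        active_logbuf = logbuf_low;
    } else {
        active_logbuf = NULL;       /* detach, but keep the buffer alive */
    }
    pthread_mutex_unlock(&mlock);
}

int main(void)
{
    set_low_logging(1);
    set_low_logging(0);
    set_low_logging(1);             /* reuses the same buffer */
    printf("active=%p cached=%p\n", active_logbuf, logbuf_low);
    return 0;
}
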
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index f01743d04e84..0629d6bca49a 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -3685,6 +3685,7 @@ void ipa_suspend_handler(enum ipa_irq_type interrupt,
* pipe will be unsuspended as part of
* enabling IPA clocks
*/
+ mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock);
if (!atomic_read(
&ipa_ctx->sps_pm.dec_clients)
) {
@@ -3697,6 +3698,7 @@ void ipa_suspend_handler(enum ipa_irq_type interrupt,
1);
ipa_sps_process_irq_schedule_rel();
}
+ mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock);
} else {
resource = ipa2_get_rm_resource_from_ep(i);
res = ipa_rm_request_resource_with_timer(
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 3ee0741c9f1d..cb95f6e98956 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -126,6 +126,7 @@ static struct dentry *dfile_ipa_poll_iteration;
static char dbg_buff[IPA_MAX_MSG_LEN];
static char *active_clients_buf;
static s8 ep_reg_idx;
+static void *ipa_ipc_low_buff;
int _ipa_read_gen_reg_v1_1(char *buff, int max_len)
{
@@ -1826,23 +1827,20 @@ static ssize_t ipa_enable_ipc_low(struct file *file,
if (kstrtos8(dbg_buff, 0, &option))
return -EFAULT;
+ mutex_lock(&ipa_ctx->lock);
if (option) {
- if (!ipa_ctx->logbuf_low) {
- ipa_ctx->logbuf_low =
+ if (!ipa_ipc_low_buff) {
+ ipa_ipc_low_buff =
ipc_log_context_create(IPA_IPC_LOG_PAGES,
"ipa_low", 0);
+ if (ipa_ipc_low_buff == NULL)
+ IPAERR("failed to get logbuf_low\n");
}
-
- if (ipa_ctx->logbuf_low == NULL) {
- IPAERR("failed to get logbuf_low\n");
- return -EFAULT;
- }
-
+ ipa_ctx->logbuf_low = ipa_ipc_low_buff;
} else {
- if (ipa_ctx->logbuf_low)
- ipc_log_context_destroy(ipa_ctx->logbuf_low);
- ipa_ctx->logbuf_low = NULL;
+ ipa_ctx->logbuf_low = NULL;
}
+ mutex_unlock(&ipa_ctx->lock);
return count;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 2fdb20d99ce2..23c8a5059c3b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -2046,11 +2046,13 @@ static void ipa_alloc_wlan_rx_common_cache(u32 size)
goto fail_dma_mapping;
}
+ spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
list_add_tail(&rx_pkt->link,
&ipa_ctx->wc_memb.wlan_comm_desc_list);
rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt;
ipa_ctx->wc_memb.wlan_comm_free_cnt++;
+ spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index a14d1fee9c35..bb11230c960a 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -51,6 +51,7 @@
#define IPA_UC_FINISH_MAX 6
#define IPA_UC_WAIT_MIN_SLEEP 1000
#define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_BAM_STOP_MAX_RETRY 10
#define IPA_MAX_STATUS_STAT_NUM 30
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 11c77934e04f..4b6bc5b61bfc 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1354,6 +1354,10 @@ int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
mutex_lock(&ipa_ctx->lock);
entry = __ipa_find_rt_tbl(lookup->ip, lookup->name);
if (entry && entry->cookie == IPA_COOKIE) {
+ if (entry->ref_cnt == U32_MAX) {
+ IPAERR("fail: ref count crossed limit\n");
+ goto ret;
+ }
entry->ref_cnt++;
lookup->hdl = entry->id;
@@ -1363,6 +1367,8 @@ int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
result = 0;
}
+
+ret:
mutex_unlock(&ipa_ctx->lock);
return result;
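
ipa2_get_rt_tbl() (and its IPA v3 counterpart later in this series) refuses to bump ref_cnt past U32_MAX so a misbehaving caller cannot wrap the counter back to zero and trigger a premature free. The guard in isolation:

#include <stdint.h>
#include <stdio.h>

struct tbl { uint32_t ref_cnt; };

static int tbl_get(struct tbl *t)
{
    if (t->ref_cnt == UINT32_MAX)   /* would wrap to 0 on ++ */
        return -1;
    t->ref_cnt++;
    return 0;
}

int main(void)
{
    struct tbl t = { .ref_cnt = UINT32_MAX - 1 };

    printf("%d %d ref=%u\n", tbl_get(&t), tbl_get(&t), t.ref_cnt);
    return 0;
}
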
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
index 3dd2eb093317..364cd4b7d38a 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
@@ -590,6 +590,7 @@ int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
{
int index;
union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+ int retries = 0;
mutex_lock(&ipa_ctx->uc_ctx.uc_lock);
@@ -599,6 +600,7 @@ int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
return -EBADF;
}
+send_cmd:
init_completion(&ipa_ctx->uc_ctx.uc_completion);
ipa_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd;
@@ -658,6 +660,19 @@ int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
}
if (ipa_ctx->uc_ctx.uc_status != expected_status) {
+ if (IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR ==
+ ipa_ctx->uc_ctx.uc_status) {
+ retries++;
+ if (retries == IPA_BAM_STOP_MAX_RETRY) {
+ IPAERR("Failed after %d tries\n", retries);
+ } else {
+ /* sleep for short period to flush IPA */
+ usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+ IPA_UC_WAII_MAX_SLEEP);
+ goto send_cmd;
+ }
+ }
+
IPAERR("Recevied status %u, Expected status %u\n",
ipa_ctx->uc_ctx.uc_status, expected_status);
ipa_ctx->uc_ctx.pending_cmd = -1;
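
ipa_uc_send_cmd() retries the whole command when the micro-controller reports the RX FSM transition error, sleeping briefly between attempts and giving up after IPA_BAM_STOP_MAX_RETRY tries. A generic bounded-retry sketch with an invented transient error code:

#include <stdio.h>
#include <unistd.h>

#define MAX_RETRY     10
#define ERR_TRANSIENT (-11)

static int attempts;

static int send_cmd(void)
{
    return ++attempts < 3 ? ERR_TRANSIENT : 0;  /* succeeds on try 3 */
}

int main(void)
{
    int retries = 0, rc;

    for (;;) {
        rc = send_cmd();
        if (rc != ERR_TRANSIENT || ++retries == MAX_RETRY)
            break;
        usleep(1000);               /* short sleep to let hardware flush */
    }
    printf("rc=%d after %d retries\n", rc, retries);
    return 0;
}
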
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
index 3bec471b4656..a98d60249c0e 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -441,7 +441,7 @@ enum ipa_hw_offload_channel_states {
/**
- * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent
+ * enum ipa_hw_2_cpu_offload_cmd_resp_status - Values that represent
* offload related command response status to be sent to CPU.
*/
enum ipa_hw_2_cpu_offload_cmd_resp_status {
@@ -478,6 +478,47 @@ enum ipa_hw_2_cpu_offload_cmd_resp_status {
};
/**
+ * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+ IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+ IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+ IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+ IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+ IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+ IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+ IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+ IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+ IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+ IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+ IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+ IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+ IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+ IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+ IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+ IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+ IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
* struct IpaHwSetUpCmd -
*
*
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index f60669132865..56b4bf1a2d1e 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -110,47 +110,6 @@ enum ipa_cpu_2_hw_wdi_commands {
};
/**
- * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent WDI related
- * command response status to be sent to CPU.
- */
-enum ipa_hw_2_cpu_cmd_resp_status {
- IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
- IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
- IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
- IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
- IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
- IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
- IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
- IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
- IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
- IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
- IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
- IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
- IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
- IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
- IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
- IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
- IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
-};
-
-/**
* enum ipa_hw_wdi_errors - WDI specific error types.
* @IPA_HW_WDI_ERROR_NONE : No error persists
* @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 5ee6e5d2d9e3..5a7a0e5000b9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -3614,6 +3614,8 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
* pipe will be unsuspended as part of
* enabling IPA clocks
*/
+ mutex_lock(&ipa3_ctx->transport_pm.
+ transport_pm_mutex);
if (!atomic_read(
&ipa3_ctx->transport_pm.dec_clients)
) {
@@ -3626,6 +3628,8 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
1);
ipa3_sps_process_irq_schedule_rel();
}
+ mutex_unlock(&ipa3_ctx->transport_pm.
+ transport_pm_mutex);
} else {
resource = ipa3_get_rm_resource_from_ep(i);
res =
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 81eae05d7ed9..e349ade46075 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -857,6 +857,8 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
struct gsi_xfer_elem xfer_elem;
int i;
int aggr_active_bitmap = 0;
+ bool pipe_suspended = false;
+ struct ipa_ep_cfg_ctrl ctrl;
IPADBG("Applying reset channel with open aggregation frame WA\n");
ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
@@ -883,6 +885,15 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
if (result)
return -EFAULT;
+ ipahal_read_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, &ctrl);
+ if (ctrl.ipa_ep_suspend) {
+ IPADBG("pipe is suspended, remove suspend\n");
+ pipe_suspended = true;
+ ctrl.ipa_ep_suspend = false;
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+ clnt_hdl, &ctrl);
+ }
+
/* Start channel and put 1 Byte descriptor on it */
gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
@@ -942,6 +953,13 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
*/
msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+ if (pipe_suspended) {
+ IPADBG("suspend the pipe again\n");
+ ctrl.ipa_ep_suspend = true;
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+ clnt_hdl, &ctrl);
+ }
+
/* Restore channels properties */
result = ipa3_restore_channel_properties(ep, &orig_chan_props,
&orig_chan_scratch);
@@ -956,6 +974,12 @@ queue_xfer_fail:
ipa3_stop_gsi_channel(clnt_hdl);
dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr);
start_chan_fail:
+ if (pipe_suspended) {
+ IPADBG("suspend the pipe again\n");
+ ctrl.ipa_ep_suspend = true;
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+ clnt_hdl, &ctrl);
+ }
ipa3_restore_channel_properties(ep, &orig_chan_props,
&orig_chan_scratch);
restore_props_fail:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index c43d4ee19ad1..fbf84ab7d2d4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -109,6 +109,7 @@ static char dbg_buff[IPA_MAX_MSG_LEN];
static char *active_clients_buf;
static s8 ep_reg_idx;
+static void *ipa_ipc_low_buff;
static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf,
@@ -1781,22 +1782,20 @@ static ssize_t ipa3_enable_ipc_low(struct file *file,
if (kstrtos8(dbg_buff, 0, &option))
return -EFAULT;
+ mutex_lock(&ipa3_ctx->lock);
if (option) {
- if (!ipa3_ctx->logbuf_low) {
- ipa3_ctx->logbuf_low =
+ if (!ipa_ipc_low_buff) {
+ ipa_ipc_low_buff =
ipc_log_context_create(IPA_IPC_LOG_PAGES,
"ipa_low", 0);
}
-
- if (ipa3_ctx->logbuf_low == NULL) {
- IPAERR("failed to get logbuf_low\n");
- return -EFAULT;
- }
+ if (ipa_ipc_low_buff == NULL)
+ IPAERR("failed to get logbuf_low\n");
+ ipa3_ctx->logbuf_low = ipa_ipc_low_buff;
} else {
- if (ipa3_ctx->logbuf_low)
- ipc_log_context_destroy(ipa3_ctx->logbuf_low);
ipa3_ctx->logbuf_low = NULL;
}
+ mutex_unlock(&ipa3_ctx->lock);
return count;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index c5b56f16788a..0492fa27c5b7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1515,6 +1515,7 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
struct ipa3_ep_context *ep;
int empty;
int result;
+ int i;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -1551,13 +1552,23 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
flush_workqueue(ep->sys->wq);
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- result = ipa3_stop_gsi_channel(clnt_hdl);
+ /* channel stop might fail on timeout if IPA is busy */
+ for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+ result = ipa3_stop_gsi_channel(clnt_hdl);
+ if (result == GSI_STATUS_SUCCESS)
+ break;
+
+ if (result != -GSI_STATUS_AGAIN &&
+ result != -GSI_STATUS_TIMED_OUT)
+ break;
+ }
+
if (result != GSI_STATUS_SUCCESS) {
IPAERR("GSI stop chan err: %d.\n", result);
BUG();
return result;
}
- result = gsi_reset_channel(ep->gsi_chan_hdl);
+ result = ipa3_reset_gsi_channel(clnt_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("Failed to reset chan: %d.\n", result);
BUG();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 335e5283cc29..0269bfba993f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -53,6 +53,8 @@ struct ipa3_qmi_context *ipa3_qmi_ctx;
static bool workqueues_stopped;
static bool ipa3_modem_init_cmplt;
static bool first_time_handshake;
+struct mutex ipa3_qmi_lock;
+
/* QMI A5 service */
static struct msg_desc ipa3_indication_reg_req_desc = {
@@ -610,12 +612,17 @@ int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
req->filter_spec_ex_list_len);
}
- /* cache the qmi_filter_request */
- memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
- req, sizeof(struct ipa_install_fltr_rule_req_msg_v01));
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
+ mutex_lock(&ipa3_qmi_lock);
+ if (ipa3_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+ req,
+ sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
+ }
+ mutex_unlock(&ipa3_qmi_lock);
req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
@@ -655,12 +662,17 @@ int ipa3_qmi_filter_request_ex_send(
req->filter_spec_ex_list_len);
}
- /* cache the qmi_filter_request */
- memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]),
- req, sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01));
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++;
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %= 10;
+ mutex_lock(&ipa3_qmi_lock);
+ if (ipa3_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]),
+ req,
+ sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01));
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++;
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %= 10;
+ }
+ mutex_unlock(&ipa3_qmi_lock);
req_desc.max_msg_len =
QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01;
@@ -795,12 +807,17 @@ int ipa3_qmi_filter_notify_send(
return -EINVAL;
}
- /* cache the qmi_filter_request */
- memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
- ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
- req, sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
- ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
- ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+ mutex_lock(&ipa3_qmi_lock);
+ if (ipa3_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+ ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+ req,
+ sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+ ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+ ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+ }
+ mutex_unlock(&ipa3_qmi_lock);
req_desc.max_msg_len =
QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
@@ -1338,3 +1355,13 @@ int ipa3_qmi_stop_data_qouta(void)
resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
}
+void ipa3_qmi_init(void)
+{
+ mutex_init(&ipa3_qmi_lock);
+}
+
+void ipa3_qmi_cleanup(void)
+{
+ mutex_destroy(&ipa3_qmi_lock);
+}
+
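
Each QMI send path now takes ipa3_qmi_lock and re-checks ipa3_qmi_ctx before caching the request, so an SSR teardown that frees the context cannot race the memcpy. A reduced model of that check-under-lock pattern; the names and the 10-entry ring are only illustrative:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct qmi_ctx { char cache[10][32]; unsigned int num; };

static pthread_mutex_t qmi_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qmi_ctx *qmi_ctx;     /* may be freed/NULLed on SSR */

static void cache_request(const char *req)
{
    pthread_mutex_lock(&qmi_lock);
    if (qmi_ctx != NULL) {          /* context may have gone away */
        strncpy(qmi_ctx->cache[qmi_ctx->num], req,
                sizeof(qmi_ctx->cache[0]) - 1);
        qmi_ctx->num = (qmi_ctx->num + 1) % 10;
    }
    pthread_mutex_unlock(&qmi_lock);
}

int main(void)
{
    struct qmi_ctx ctx = { .num = 0 };

    qmi_ctx = &ctx;
    cache_request("install_fltr_rule");
    qmi_ctx = NULL;                 /* SSR-style teardown */
    cache_request("dropped safely");
    printf("cached[0]=%s num=%u\n", ctx.cache[0], ctx.num);
    return 0;
}
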
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index eb38d08f01c7..d5d850309696 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -207,6 +207,10 @@ int ipa3_qmi_stop_data_qouta(void);
void ipa3_q6_handshake_complete(bool ssr_bootup);
+void ipa3_qmi_init(void);
+
+void ipa3_qmi_cleanup(void);
+
#else /* CONFIG_RMNET_IPA3 */
static inline int ipa3_qmi_service_init(uint32_t wan_platform_type)
@@ -319,6 +323,14 @@ static inline int ipa3_qmi_stop_data_qouta(void)
static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
+static inline void ipa3_qmi_init(void)
+{
+}
+
+static inline void ipa3_qmi_cleanup(void)
+{
+}
+
#endif /* CONFIG_RMNET_IPA3 */
#endif /* IPA_QMI_SERVICE_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index ac7e57f10062..7212ba2a165c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1438,6 +1438,10 @@ int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
mutex_lock(&ipa3_ctx->lock);
entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
if (entry && entry->cookie == IPA_COOKIE) {
+ if (entry->ref_cnt == U32_MAX) {
+ IPAERR("fail: ref count crossed limit\n");
+ goto ret;
+ }
entry->ref_cnt++;
lookup->hdl = entry->id;
@@ -1447,6 +1451,8 @@ int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
result = 0;
}
+
+ret:
mutex_unlock(&ipa3_ctx->lock);
return result;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 57fa2465c9ea..e49bdc8c7083 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1205,6 +1205,8 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
IPADBG("Skipping endpoint configuration.\n");
}
+ ipa3_enable_data_path(ipa_ep_idx);
+
out->clnt_hdl = ipa_ep_idx;
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index c66fd21a078a..d19de2a7bdb5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -3582,21 +3582,30 @@ int ipa3_stop_gsi_channel(u32 clnt_hdl)
memset(&mem, 0, sizeof(mem));
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ IPADBG("Calling gsi_stop_channel ch:%lu\n",
+ ep->gsi_chan_hdl);
+ res = gsi_stop_channel(ep->gsi_chan_hdl);
+ IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+ ep->gsi_chan_hdl, res);
+ goto end_sequence;
+ }
+
for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
- IPADBG("Calling gsi_stop_channel\n");
+ IPADBG("Calling gsi_stop_channel ch:%lu\n",
+ ep->gsi_chan_hdl);
res = gsi_stop_channel(ep->gsi_chan_hdl);
- IPADBG("gsi_stop_channel returned %d\n", res);
+ IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+ ep->gsi_chan_hdl, res);
if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
goto end_sequence;
- if (IPA_CLIENT_IS_CONS(ep->client)) {
- IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
- /* Send a 1B packet DMA_TASK to IPA and try again */
- res = ipa3_inject_dma_task_for_gsi();
- if (res) {
- IPAERR("Failed to inject DMA TASk for GSI\n");
- goto end_sequence;
- }
+ IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
+ /* Send a 1B packet DMA_TASK to IPA and try again */
+ res = ipa3_inject_dma_task_for_gsi();
+ if (res) {
+ IPAERR("Failed to inject DMA TASk for GSI\n");
+ goto end_sequence;
}
/* sleep for short period to flush IPA */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 3855e0d46ca9..585f9e6bd492 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -646,6 +646,21 @@ static void ipareg_construct_endp_init_ctrl_n(enum ipahal_reg_name reg,
IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
}
+static void ipareg_parse_endp_init_ctrl_n(enum ipahal_reg_name reg,
+ void *fields, u32 val)
+{
+ struct ipa_ep_cfg_ctrl *ep_ctrl =
+ (struct ipa_ep_cfg_ctrl *)fields;
+
+ ep_ctrl->ipa_ep_suspend =
+ ((val & IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK) >>
+ IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT);
+
+ ep_ctrl->ipa_ep_delay =
+ ((val & IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK) >>
+ IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT);
+}
+
static void ipareg_construct_endp_init_ctrl_scnd_n(enum ipahal_reg_name reg,
const void *fields, u32 *val)
{
@@ -1018,7 +1033,8 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
0x0000080C, 0x70},
[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = {
- ipareg_construct_endp_init_ctrl_n, ipareg_parse_dummy,
+ ipareg_construct_endp_init_ctrl_n,
+ ipareg_parse_endp_init_ctrl_n,
0x00000800, 0x70},
[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_SCND_n] = {
ipareg_construct_endp_init_ctrl_scnd_n, ipareg_parse_dummy,
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index d37630f74b48..1bd4f7fda1b7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -3303,6 +3303,9 @@ static int __init ipa3_wwan_init(void)
mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
+
+ ipa3_qmi_init();
+
/* Register for Modem SSR */
rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
SUBSYS_MODEM,
@@ -3316,6 +3319,7 @@ static int __init ipa3_wwan_init(void)
static void __exit ipa3_wwan_cleanup(void)
{
int ret;
+ ipa3_qmi_cleanup();
mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
ret = subsys_notif_unregister_notifier(
rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h
index 60e02fcb5e4b..b4f3df4ec3d2 100644
--- a/drivers/platform/msm/mhi/mhi.h
+++ b/drivers/platform/msm/mhi/mhi.h
@@ -338,9 +338,8 @@ struct db_mode {
u32 db_mode : 1;
enum MHI_BRSTMODE brstmode;
void (*process_db)(struct mhi_device_ctxt *mhi_dev_ctxt,
- void __iomem *io_addr,
- uintptr_t chan,
- u32 val);
+ void __iomem *io_addr, unsigned int chan,
+ dma_addr_t val);
};
struct mhi_ring {
@@ -728,15 +727,13 @@ int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
int index);
int start_chan_sync(struct mhi_client_handle *client_handle);
void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
- void __iomem *io_addr,
- uintptr_t chan,
- u32 val);
+ void __iomem *io_addr, unsigned int chan,
+ dma_addr_t val);
void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
- void __iomem *io_addr,
- uintptr_t chan,
- u32 val);
+ void __iomem *io_addr, unsigned int chan,
+ dma_addr_t val);
void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt, void __iomem *io_addr,
- uintptr_t io_offset, u32 val);
+ unsigned int chan, dma_addr_t val);
void mhi_reg_write_field(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
uintptr_t io_offset,
diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c
index 46baf7332900..78aa1beb870d 100644
--- a/drivers/platform/msm/mhi/mhi_main.c
+++ b/drivers/platform/msm/mhi/mhi_main.c
@@ -90,8 +90,9 @@ dma_pool_error:
}
static void mhi_write_db(struct mhi_device_ctxt *mhi_dev_ctxt,
- void __iomem *io_addr_lower,
- uintptr_t chan, u64 val)
+ void __iomem *io_addr_lower,
+ unsigned int chan,
+ dma_addr_t val)
{
uintptr_t io_offset = chan * sizeof(u64);
void __iomem *io_addr_upper =
@@ -1918,8 +1919,8 @@ EXPORT_SYMBOL(mhi_xfer_rddm);
void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
- uintptr_t chan,
- u32 val)
+ unsigned int chan,
+ dma_addr_t val)
{
struct mhi_ring *ring_ctxt =
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
@@ -1932,7 +1933,7 @@ void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_local_event_ctxt[chan];
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
- "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ "db.set addr: %p io_offset %u val:0x%llx\n",
io_addr, chan, val);
mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
@@ -1942,7 +1943,7 @@ void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
ring_ctxt->db_mode.db_mode = 0;
} else {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+ "Not ringing xfer db, chan %u, brstmode %d db_mode %d\n",
chan, ring_ctxt->db_mode.brstmode,
ring_ctxt->db_mode.db_mode);
}
@@ -1950,23 +1951,24 @@ void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
- uintptr_t chan,
- u32 val)
+ unsigned int chan,
+ dma_addr_t val)
{
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
- "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ "db.set addr: %p io_offset %u val:0x%llx\n",
io_addr, chan, val);
mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
}
void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
- void __iomem *io_addr,
- uintptr_t chan, u32 val)
+ void __iomem *io_addr,
+ unsigned int chan,
+ dma_addr_t val)
{
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
- "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ "db.set addr: %p io_offset %u val:0x%llx\n",
io_addr, chan, val);
mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
@@ -1981,7 +1983,7 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_ctxt->db_mode.db_mode = 0;
} else {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+ "Not ringing xfer db, chan %u, brstmode %d db_mode %d\n",
chan, chan_ctxt->db_mode.brstmode,
chan_ctxt->db_mode.db_mode);
}
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index 6f9a13040cd5..c7c1b1567bf3 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -654,6 +654,7 @@ int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
{
int ret = 0;
struct msm_ext_disp *ext_disp = NULL;
+ struct msm_ext_disp_list *node = NULL;
if (!pdev || !ops) {
pr_err("Invalid params\n");
@@ -671,17 +672,23 @@ int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
if ((ext_disp->current_disp != EXT_DISPLAY_TYPE_MAX)
&& ext_disp->ops) {
pr_err("Codec already registered\n");
- ret = -EINVAL;
- goto end;
+ mutex_unlock(&ext_disp->lock);
+ return -EINVAL;
}
ext_disp->ops = ops;
- pr_debug("audio codec registered\n");
-
-end:
mutex_unlock(&ext_disp->lock);
+ list_for_each_entry(node, &ext_disp->display_list, list) {
+ struct msm_ext_disp_init_data *data = node->data;
+
+ if (data->codec_ops.codec_ready)
+ data->codec_ops.codec_ready(data->pdev);
+ }
+
+ pr_debug("audio codec registered\n");
+
return ret;
}
diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c
index d0f21943cb0f..264db54f0d6e 100644
--- a/drivers/platform/msm/seemp_core/seemp_logk.c
+++ b/drivers/platform/msm/seemp_core/seemp_logk.c
@@ -287,7 +287,7 @@ static bool seemp_logk_get_bit_from_vector(__u8 *pVec, __u32 index)
unsigned int bit_num = index%8;
unsigned char byte;
- if (DIV_ROUND_UP(index, 8) > MASK_BUFFER_SIZE)
+ if (byte_num >= MASK_BUFFER_SIZE)
return false;
byte = pVec[byte_num];
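The replacement check above is the point of this hunk: rounding the bit index up to a byte count lets an index that lands exactly on the buffer boundary slip through, while comparing the byte index directly does not. A standalone sketch of the boundary case (illustrative only; it assumes MASK_BUFFER_SIZE is 8 rather than the driver's real value):

#include <stdio.h>

#define MASK_BUFFER_SIZE 8	/* assumed for brevity; not the driver's value */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int index = MASK_BUFFER_SIZE * 8;	/* first bit past the buffer */
	unsigned int byte_num = index / 8;		/* == MASK_BUFFER_SIZE */

	/* Old check: DIV_ROUND_UP(64, 8) == 8, and 8 > 8 is false, so the
	 * out-of-range read of pVec[8] would not be caught. */
	printf("old check rejects: %d\n", DIV_ROUND_UP(index, 8) > MASK_BUFFER_SIZE);

	/* New check: byte_num >= MASK_BUFFER_SIZE is true and rejects it. */
	printf("new check rejects: %d\n", byte_num >= MASK_BUFFER_SIZE);
	return 0;
}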
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 1062fa42ff26..b2cdc1a1ad4f 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1816,11 +1816,24 @@ static int __init acer_wmi_enable_lm(void)
return status;
}
+#define ACER_WMID_ACCEL_HID "BST0001"
+
static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
void *ctx, void **retval)
{
+ struct acpi_device *dev;
+
+ if (!strcmp(ctx, "SENR")) {
+ if (acpi_bus_get_device(ah, &dev))
+ return AE_OK;
+ if (!strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
+ return AE_OK;
+ } else
+ return AE_OK;
+
*(acpi_handle *)retval = ah;
- return AE_OK;
+
+ return AE_CTRL_TERMINATE;
}
static int __init acer_wmi_get_handle(const char *name, const char *prop,
@@ -1847,7 +1860,7 @@ static int __init acer_wmi_accel_setup(void)
{
int err;
- err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle);
+ err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle);
if (err)
return err;
@@ -2185,10 +2198,11 @@ static int __init acer_wmi_init(void)
err = acer_wmi_input_setup();
if (err)
return err;
+ err = acer_wmi_accel_setup();
+ if (err)
+ return err;
}
- acer_wmi_accel_setup();
-
err = platform_driver_register(&acer_platform_driver);
if (err) {
pr_err("Unable to register platform driver\n");
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 723b9eaf658a..8d7322a325de 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -114,7 +114,8 @@ static ssize_t power_supply_show_property(struct device *dev,
return sprintf(buf, "%s\n", technology_text[value.intval]);
else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
- else if (off == POWER_SUPPLY_PROP_TYPE)
+ else if (off == POWER_SUPPLY_PROP_TYPE ||
+ off == POWER_SUPPLY_PROP_REAL_TYPE)
return sprintf(buf, "%s\n", type_text[value.intval]);
else if (off == POWER_SUPPLY_PROP_SCOPE)
return sprintf(buf, "%s\n", scope_text[value.intval]);
@@ -294,6 +295,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(connector_health),
POWER_SUPPLY_ATTR(ctm_current_max),
POWER_SUPPLY_ATTR(hw_current_max),
+ POWER_SUPPLY_ATTR(real_type),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index e9e24df35f26..2579f025b90b 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -14,9 +14,12 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <soc/at91/at91sam9_ddrsdr.h>
+
#define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
#define AT91_SHDW_SHDW BIT(0) /* Shut Down command */
#define AT91_SHDW_KEY (0xa5 << 24) /* KEY Password */
@@ -50,6 +53,7 @@ static const char *shdwc_wakeup_modes[] = {
static void __iomem *at91_shdwc_base;
static struct clk *sclk;
+static void __iomem *mpddrc_base;
static void __init at91_wakeup_status(void)
{
@@ -73,6 +77,29 @@ static void at91_poweroff(void)
writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, at91_shdwc_base + AT91_SHDW_CR);
}
+static void at91_lpddr_poweroff(void)
+{
+ asm volatile(
+ /* Align to cache lines */
+ ".balign 32\n\t"
+
+ /* Ensure AT91_SHDW_CR is in the TLB by reading it */
+ " ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+ /* Power down SDRAM0 */
+ " str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+ /* Shutdown CPU */
+ " str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+ " b .\n\t"
+ :
+ : "r" (mpddrc_base),
+ "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
+ "r" (at91_shdwc_base),
+ "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
+ : "r0");
+}
+
static int at91_poweroff_get_wakeup_mode(struct device_node *np)
{
const char *pm;
@@ -124,6 +151,8 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
static int __init at91_poweroff_probe(struct platform_device *pdev)
{
struct resource *res;
+ struct device_node *np;
+ u32 ddr_type;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -150,12 +179,30 @@ static int __init at91_poweroff_probe(struct platform_device *pdev)
pm_power_off = at91_poweroff;
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
+ if (!np)
+ return 0;
+
+ mpddrc_base = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!mpddrc_base)
+ return 0;
+
+ ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
+ if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
+ (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
+ pm_power_off = at91_lpddr_poweroff;
+ else
+ iounmap(mpddrc_base);
+
return 0;
}
static int __exit at91_poweroff_remove(struct platform_device *pdev)
{
- if (pm_power_off == at91_poweroff)
+ if (pm_power_off == at91_poweroff ||
+ pm_power_off == at91_lpddr_poweroff)
pm_power_off = NULL;
clk_disable_unprepare(sclk);
@@ -163,6 +210,11 @@ static int __exit at91_poweroff_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id at91_ramc_of_match[] = {
+ { .compatible = "atmel,sama5d3-ddramc", },
+ { /* sentinel */ }
+};
+
static const struct of_device_id at91_poweroff_of_match[] = {
{ .compatible = "atmel,at91sam9260-shdwc", },
{ .compatible = "atmel,at91sam9rl-shdwc", },
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 8d038ba0770d..acea4f213484 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -497,13 +497,13 @@ static size_t store_dload_mode(struct kobject *kobj, struct attribute *attr,
if (sysfs_streq(buf, "full")) {
dload_type = SCM_DLOAD_FULLDUMP;
} else if (sysfs_streq(buf, "mini")) {
- if (!msm_minidump_enabled()) {
- pr_info("Minidump is not enabled\n");
+ if (!minidump_enabled) {
+ pr_err("Minidump is not enabled\n");
return -ENODEV;
}
dload_type = SCM_DLOAD_MINIDUMP;
} else {
- pr_info("Invalid value. Use 'full' or 'mini'\n");
+ pr_err("Invalid value. Use 'full' or 'mini'\n");
return -EINVAL;
}
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 539e757d3e99..98d75f586b67 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -410,19 +410,6 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
if (!chip->main_psy)
return 0;
- if (chip->batt_psy) {
- rc = power_supply_get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_CURRENT_QNOVO,
- &pval);
- if (rc < 0) {
- pr_err("Couldn't get qnovo fcc, rc=%d\n", rc);
- return rc;
- }
-
- if (pval.intval != -EINVAL)
- total_fcc_ua = pval.intval;
- }
-
if (chip->pl_mode == POWER_SUPPLY_PL_NONE
|| get_effective_result_locked(chip->pl_disable_votable)) {
pval.intval = total_fcc_ua;
@@ -473,7 +460,6 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
struct pl_data *chip = data;
union power_supply_propval pval = {0, };
int rc = 0;
- int effective_fv_uv = fv_uv;
if (fv_uv < 0)
return 0;
@@ -481,20 +467,7 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
if (!chip->main_psy)
return 0;
- if (chip->batt_psy) {
- rc = power_supply_get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
- &pval);
- if (rc < 0) {
- pr_err("Couldn't get qnovo fv, rc=%d\n", rc);
- return rc;
- }
-
- if (pval.intval != -EINVAL)
- effective_fv_uv = pval.intval;
- }
-
- pval.intval = effective_fv_uv;
+ pval.intval = fv_uv;
rc = power_supply_set_property(chip->main_psy,
POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
@@ -930,11 +903,17 @@ static int pl_determine_initial_status(struct pl_data *chip)
}
#define DEFAULT_RESTRICTED_CURRENT_UA 1000000
-static int pl_init(void)
+int qcom_batt_init(void)
{
struct pl_data *chip;
int rc = 0;
+ /* initialize just once */
+ if (the_chip) {
+ pr_err("was initialized earlier Failing now\n");
+ return -EINVAL;
+ }
+
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -1014,7 +993,9 @@ static int pl_init(void)
goto unreg_notifier;
}
- return rc;
+ the_chip = chip;
+
+ return 0;
unreg_notifier:
power_supply_unreg_notifier(&chip->nb);
@@ -1031,21 +1012,23 @@ cleanup:
return rc;
}
-static void pl_deinit(void)
+void qcom_batt_deinit(void)
{
struct pl_data *chip = the_chip;
+ if (chip == NULL)
+ return;
+
+ cancel_work_sync(&chip->status_change_work);
+ cancel_delayed_work_sync(&chip->pl_taper_work);
+ cancel_work_sync(&chip->pl_disable_forever_work);
+
power_supply_unreg_notifier(&chip->nb);
destroy_votable(chip->pl_awake_votable);
destroy_votable(chip->pl_disable_votable);
destroy_votable(chip->fv_votable);
destroy_votable(chip->fcc_votable);
wakeup_source_unregister(chip->pl_ws);
+ the_chip = NULL;
kfree(chip);
}
-
-module_init(pl_init);
-module_exit(pl_deinit)
-
-MODULE_DESCRIPTION("");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/qcom/battery.h b/drivers/power/supply/qcom/battery.h
new file mode 100644
index 000000000000..38626e733a09
--- /dev/null
+++ b/drivers/power/supply/qcom/battery.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BATTERY_H
+#define __BATTERY_H
+int qcom_batt_init(void);
+void qcom_batt_deinit(void);
+#endif /* __BATTERY_H */
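The new header turns the parallel-charging library into an explicitly initialized component instead of a standalone module; later in this series smb-lib.c calls qcom_batt_init() when a PARALLEL_MASTER charger comes up and qcom_batt_deinit() on teardown. A condensed sketch of that call pattern (simplified from the smb-lib.c hunks below, not a drop-in implementation):

#include "battery.h"

static int charger_lib_init(void)
{
	int rc;

	/* Creates the library's votables once; a second caller gets -EINVAL. */
	rc = qcom_batt_init();
	if (rc < 0)
		return rc;

	/* ... find_votable("FCC"), find_votable("FV"), etc. ... */
	return 0;
}

static void charger_lib_deinit(void)
{
	/* Cancels the library's work items, unregisters its notifier and
	 * frees its state; it returns early if init never ran. */
	qcom_batt_deinit();
}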
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 4368709118ac..d0f7a5e1e227 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -3366,6 +3366,16 @@ static int fg_hw_init(struct fg_chip *chip)
return rc;
}
+ if (is_debug_batt_id(chip)) {
+ val = ESR_NO_PULL_DOWN;
+ rc = fg_masked_write(chip, BATT_INFO_ESR_PULL_DN_CFG(chip),
+ ESR_PULL_DOWN_MODE_MASK, val);
+ if (rc < 0) {
+ pr_err("Error in writing esr_pull_down, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
return 0;
}
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 8855a1c74e0b..94b9e9c4d912 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -238,11 +238,9 @@ static struct smb_params pm660_params = {
#define STEP_CHARGING_MAX_STEPS 5
struct smb_dt_props {
- int fcc_ua;
int usb_icl_ua;
int dc_icl_ua;
int boost_threshold_ua;
- int fv_uv;
int wipower_max_uw;
int min_freq_khz;
int max_freq_khz;
@@ -310,14 +308,14 @@ static int smb2_parse_dt(struct smb2 *chip)
"qcom,external-vconn");
rc = of_property_read_u32(node,
- "qcom,fcc-max-ua", &chip->dt.fcc_ua);
+ "qcom,fcc-max-ua", &chg->batt_profile_fcc_ua);
if (rc < 0)
- chip->dt.fcc_ua = -EINVAL;
+ chg->batt_profile_fcc_ua = -EINVAL;
rc = of_property_read_u32(node,
- "qcom,fv-max-uv", &chip->dt.fv_uv);
+ "qcom,fv-max-uv", &chg->batt_profile_fv_uv);
if (rc < 0)
- chip->dt.fv_uv = -EINVAL;
+ chg->batt_profile_fv_uv = -EINVAL;
rc = of_property_read_u32(node,
"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
@@ -429,6 +427,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_PE_START,
POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+ POWER_SUPPLY_PROP_REAL_TYPE,
};
static int smb2_usb_get_prop(struct power_supply *psy,
@@ -448,6 +447,16 @@ static int smb2_usb_get_prop(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_ONLINE:
rc = smblib_get_prop_usb_online(chg, val);
+ if (!val->intval)
+ break;
+
+ rc = smblib_get_prop_typec_mode(chg, val);
+ if ((val->intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+ chg->micro_usb_mode) &&
+ chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+ val->intval = 0;
+ else
+ val->intval = 1;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
val->intval = chg->voltage_min_uv;
@@ -465,10 +474,13 @@ static int smb2_usb_get_prop(struct power_supply *psy,
rc = smblib_get_prop_usb_current_max(chg, val);
break;
case POWER_SUPPLY_PROP_TYPE:
+ val->intval = POWER_SUPPLY_TYPE_USB_PD;
+ break;
+ case POWER_SUPPLY_PROP_REAL_TYPE:
if (chip->bad_part)
- val->intval = POWER_SUPPLY_TYPE_USB;
+ val->intval = POWER_SUPPLY_TYPE_USB_PD;
else
- val->intval = chg->usb_psy_desc.type;
+ val->intval = chg->real_charger_type;
break;
case POWER_SUPPLY_PROP_TYPEC_MODE:
if (chg->micro_usb_mode)
@@ -610,7 +622,7 @@ static int smb2_init_usb_psy(struct smb2 *chip)
struct smb_charger *chg = &chip->chg;
chg->usb_psy_desc.name = "usb";
- chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
+ chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_USB_PD;
chg->usb_psy_desc.properties = smb2_usb_props;
chg->usb_psy_desc.num_properties = ARRAY_SIZE(smb2_usb_props);
chg->usb_psy_desc.get_property = smb2_usb_get_prop;
@@ -619,7 +631,7 @@ static int smb2_init_usb_psy(struct smb2 *chip)
usb_cfg.drv_data = chip;
usb_cfg.of_node = chg->dev->of_node;
- chg->usb_psy = devm_power_supply_register(chg->dev,
+ chg->usb_psy = power_supply_register(chg->dev,
&chg->usb_psy_desc,
&usb_cfg);
if (IS_ERR(chg->usb_psy)) {
@@ -630,6 +642,97 @@ static int smb2_init_usb_psy(struct smb2 *chip)
return 0;
}
+/********************************
+ * USB PC_PORT PSY REGISTRATION *
+ ********************************/
+static enum power_supply_property smb2_usb_port_props[] = {
+ POWER_SUPPLY_PROP_TYPE,
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int smb2_usb_port_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smb2 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_TYPE:
+ val->intval = POWER_SUPPLY_TYPE_USB;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ rc = smblib_get_prop_usb_online(chg, val);
+ if (!val->intval)
+ break;
+
+ rc = smblib_get_prop_typec_mode(chg, val);
+ if ((val->intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+ chg->micro_usb_mode) &&
+ chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ pr_err_ratelimited("Get prop %d is not supported in pc_port\n",
+ psp);
+ return -EINVAL;
+ }
+
+ if (rc < 0) {
+ pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+static int smb2_usb_port_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+
+ switch (psp) {
+ default:
+ pr_err_ratelimited("Set prop %d is not supported in pc_port\n",
+ psp);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static const struct power_supply_desc usb_port_psy_desc = {
+ .name = "pc_port",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = smb2_usb_port_props,
+ .num_properties = ARRAY_SIZE(smb2_usb_port_props),
+ .get_property = smb2_usb_port_get_prop,
+ .set_property = smb2_usb_port_set_prop,
+};
+
+static int smb2_init_usb_port_psy(struct smb2 *chip)
+{
+ struct power_supply_config usb_port_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+
+ usb_port_cfg.drv_data = chip;
+ usb_port_cfg.of_node = chg->dev->of_node;
+ chg->usb_port_psy = power_supply_register(chg->dev,
+ &usb_port_psy_desc,
+ &usb_port_cfg);
+ if (IS_ERR(chg->usb_port_psy)) {
+ pr_err("Couldn't register USB pc_port power supply\n");
+ return PTR_ERR(chg->usb_port_psy);
+ }
+
+ return 0;
+}
+
/*****************************
* USB MAIN PSY REGISTRATION *
*****************************/
@@ -677,7 +780,7 @@ static int smb2_usb_main_get_prop(struct power_supply *psy,
rc = smblib_get_prop_fcc_delta(chg, val);
break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
- val->intval = get_effective_result(chg->usb_icl_votable);
+ rc = smblib_get_icl_current(chg, &val->intval);
break;
default:
pr_debug("get prop %d is not supported in usb-main\n", psp);
@@ -734,7 +837,7 @@ static int smb2_init_usb_main_psy(struct smb2 *chip)
usb_main_cfg.drv_data = chip;
usb_main_cfg.of_node = chg->dev->of_node;
- chg->usb_main_psy = devm_power_supply_register(chg->dev,
+ chg->usb_main_psy = power_supply_register(chg->dev,
&usb_main_psy_desc,
&usb_main_cfg);
if (IS_ERR(chg->usb_main_psy)) {
@@ -836,7 +939,7 @@ static int smb2_init_dc_psy(struct smb2 *chip)
dc_cfg.drv_data = chip;
dc_cfg.of_node = chg->dev->of_node;
- chg->dc_psy = devm_power_supply_register(chg->dev,
+ chg->dc_psy = power_supply_register(chg->dev,
&dc_psy_desc,
&dc_cfg);
if (IS_ERR(chg->dc_psy)) {
@@ -942,13 +1045,15 @@ static int smb2_batt_get_prop(struct power_supply *psy,
rc = smblib_get_prop_charge_qnovo_enable(chg, val);
break;
case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
- val->intval = chg->qnovo_fv_uv;
+ val->intval = get_client_vote_locked(chg->fv_votable,
+ QNOVO_VOTER);
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
rc = smblib_get_prop_batt_current_now(chg, val);
break;
case POWER_SUPPLY_PROP_CURRENT_QNOVO:
- val->intval = chg->qnovo_fcc_ua;
+ val->intval = get_client_vote_locked(chg->fcc_votable,
+ QNOVO_VOTER);
break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
val->intval = get_client_vote(chg->fcc_votable,
@@ -1014,23 +1119,37 @@ static int smb2_batt_set_prop(struct power_supply *psy,
vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- vote(chg->fv_votable, DEFAULT_VOTER, true, val->intval);
+ chg->batt_profile_fv_uv = val->intval;
+ vote(chg->fv_votable, BATT_PROFILE_VOTER, true, val->intval);
break;
case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
rc = smblib_set_prop_charge_qnovo_enable(chg, val);
break;
case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
- chg->qnovo_fv_uv = val->intval;
- rc = rerun_election(chg->fv_votable);
+ if (val->intval == -EINVAL) {
+ vote(chg->fv_votable, BATT_PROFILE_VOTER,
+ true, chg->batt_profile_fv_uv);
+ vote(chg->fv_votable, QNOVO_VOTER, false, 0);
+ } else {
+ vote(chg->fv_votable, QNOVO_VOTER, true, val->intval);
+ vote(chg->fv_votable, BATT_PROFILE_VOTER, false, 0);
+ }
break;
case POWER_SUPPLY_PROP_CURRENT_QNOVO:
- chg->qnovo_fcc_ua = val->intval;
vote(chg->pl_disable_votable, PL_QNOVO_VOTER,
val->intval != -EINVAL && val->intval < 2000000, 0);
- rc = rerun_election(chg->fcc_votable);
+ if (val->intval == -EINVAL) {
+ vote(chg->fcc_votable, BATT_PROFILE_VOTER,
+ true, chg->batt_profile_fcc_ua);
+ vote(chg->fcc_votable, QNOVO_VOTER, false, 0);
+ } else {
+ vote(chg->fcc_votable, QNOVO_VOTER, true, val->intval);
+ vote(chg->fcc_votable, BATT_PROFILE_VOTER, false, 0);
+ }
break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
- vote(chg->fcc_votable, DEFAULT_VOTER, true, val->intval);
+ chg->batt_profile_fcc_ua = val->intval;
+ vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
break;
case POWER_SUPPLY_PROP_SET_SHIP_MODE:
/* Not in ship mode as long as the device is active */
@@ -1047,6 +1166,9 @@ static int smb2_batt_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_DP_DM:
rc = smblib_dp_dm(chg, val->intval);
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ rc = smblib_set_prop_input_current_limited(chg, val);
+ break;
default:
rc = -EINVAL;
}
@@ -1064,6 +1186,7 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
case POWER_SUPPLY_PROP_DP_DM:
case POWER_SUPPLY_PROP_RERUN_AICL:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
return 1;
default:
break;
@@ -1090,7 +1213,7 @@ static int smb2_init_batt_psy(struct smb2 *chip)
batt_cfg.drv_data = chg;
batt_cfg.of_node = chg->dev->of_node;
- chg->batt_psy = devm_power_supply_register(chg->dev,
+ chg->batt_psy = power_supply_register(chg->dev,
&batt_psy_desc,
&batt_cfg);
if (IS_ERR(chg->batt_psy)) {
@@ -1447,11 +1570,13 @@ static int smb2_init_hw(struct smb2 *chip)
if (chip->dt.no_battery)
chg->fake_capacity = 50;
- if (chip->dt.fcc_ua < 0)
- smblib_get_charge_param(chg, &chg->param.fcc, &chip->dt.fcc_ua);
+ if (chg->batt_profile_fcc_ua < 0)
+ smblib_get_charge_param(chg, &chg->param.fcc,
+ &chg->batt_profile_fcc_ua);
- if (chip->dt.fv_uv < 0)
- smblib_get_charge_param(chg, &chg->param.fv, &chip->dt.fv_uv);
+ if (chg->batt_profile_fv_uv < 0)
+ smblib_get_charge_param(chg, &chg->param.fv,
+ &chg->batt_profile_fv_uv);
smblib_get_charge_param(chg, &chg->param.usb_icl,
&chg->default_icl_ua);
@@ -1512,9 +1637,9 @@ static int smb2_init_hw(struct smb2 *chip)
vote(chg->dc_suspend_votable,
DEFAULT_VOTER, chip->dt.no_battery, 0);
vote(chg->fcc_votable,
- DEFAULT_VOTER, true, chip->dt.fcc_ua);
+ BATT_PROFILE_VOTER, true, chg->batt_profile_fcc_ua);
vote(chg->fv_votable,
- DEFAULT_VOTER, true, chip->dt.fv_uv);
+ BATT_PROFILE_VOTER, true, chg->batt_profile_fv_uv);
vote(chg->dc_icl_votable,
DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
@@ -2058,6 +2183,21 @@ static int smb2_request_interrupts(struct smb2 *chip)
return rc;
}
+static void smb2_free_interrupts(struct smb_charger *chg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(smb2_irqs); i++) {
+ if (smb2_irqs[i].irq > 0) {
+ if (smb2_irqs[i].wake)
+ disable_irq_wake(smb2_irqs[i].irq);
+
+ devm_free_irq(chg->dev, smb2_irqs[i].irq,
+ smb2_irqs[i].irq_data);
+ }
+ }
+}
+
static void smb2_disable_interrupts(struct smb_charger *chg)
{
int i;
@@ -2235,7 +2375,13 @@ static int smb2_probe(struct platform_device *pdev)
rc = smb2_init_usb_main_psy(chip);
if (rc < 0) {
- pr_err("Couldn't initialize usb psy rc=%d\n", rc);
+ pr_err("Couldn't initialize usb main psy rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb2_init_usb_port_psy(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize usb pc_port psy rc=%d\n", rc);
goto cleanup;
}
@@ -2293,20 +2439,29 @@ static int smb2_probe(struct platform_device *pdev)
device_init_wakeup(chg->dev, true);
pr_info("QPNP SMB2 probed successfully usb:present=%d type=%d batt:present = %d health = %d charge = %d\n",
- usb_present, chg->usb_psy_desc.type,
+ usb_present, chg->real_charger_type,
batt_present, batt_health, batt_charge_type);
return rc;
cleanup:
- smblib_deinit(chg);
- if (chg->usb_psy)
- power_supply_unregister(chg->usb_psy);
+ smb2_free_interrupts(chg);
if (chg->batt_psy)
power_supply_unregister(chg->batt_psy);
+ if (chg->usb_main_psy)
+ power_supply_unregister(chg->usb_main_psy);
+ if (chg->usb_psy)
+ power_supply_unregister(chg->usb_psy);
+ if (chg->usb_port_psy)
+ power_supply_unregister(chg->usb_port_psy);
+ if (chg->dc_psy)
+ power_supply_unregister(chg->dc_psy);
if (chg->vconn_vreg && chg->vconn_vreg->rdev)
- regulator_unregister(chg->vconn_vreg->rdev);
+ devm_regulator_unregister(chg->dev, chg->vconn_vreg->rdev);
if (chg->vbus_vreg && chg->vbus_vreg->rdev)
- regulator_unregister(chg->vbus_vreg->rdev);
+ devm_regulator_unregister(chg->dev, chg->vbus_vreg->rdev);
+
+ smblib_deinit(chg);
+
platform_set_drvdata(pdev, NULL);
return rc;
}
@@ -2318,6 +2473,7 @@ static int smb2_remove(struct platform_device *pdev)
power_supply_unregister(chg->batt_psy);
power_supply_unregister(chg->usb_psy);
+ power_supply_unregister(chg->usb_port_psy);
regulator_unregister(chg->vconn_vreg->rdev);
regulator_unregister(chg->vbus_vreg->rdev);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 1e417e8aa22d..a191391a3904 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -19,10 +19,11 @@
#include <linux/qpnp/qpnp-revid.h>
#include <linux/input/qpnp-power-on.h>
#include <linux/irq.h>
+#include <linux/pmic-voter.h>
#include "smb-lib.h"
#include "smb-reg.h"
+#include "battery.h"
#include "storm-watch.h"
-#include <linux/pmic-voter.h>
#define smblib_err(chg, fmt, ...) \
pr_err("%s: %s: " fmt, chg->name, \
@@ -549,9 +550,9 @@ static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
/* if PD is active, APSD is disabled so won't have a valid result */
if (chg->pd_active)
- chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_USB_PD;
+ chg->real_charger_type = POWER_SUPPLY_TYPE_USB_PD;
else
- chg->usb_psy_desc.type = apsd_result->pst;
+ chg->real_charger_type = apsd_result->pst;
smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
apsd_result->name, chg->pd_active);
@@ -813,6 +814,28 @@ static int set_sdp_current(struct smb_charger *chg, int icl_ua)
return rc;
}
+static int get_sdp_current(struct smb_charger *chg, int *icl_ua)
+{
+ int rc;
+ u8 icl_options;
+ bool usb3 = false;
+
+ rc = smblib_read(chg, USBIN_ICL_OPTIONS_REG, &icl_options);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get ICL options rc=%d\n", rc);
+ return rc;
+ }
+
+ usb3 = (icl_options & CFG_USB3P0_SEL_BIT);
+
+ if (icl_options & USB51_MODE_BIT)
+ *icl_ua = usb3 ? USBIN_900MA : USBIN_500MA;
+ else
+ *icl_ua = usb3 ? USBIN_150MA : USBIN_100MA;
+
+ return rc;
+}
+
int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
{
int rc = 0;
@@ -835,7 +858,7 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
/* configure current */
if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
- && (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
+ && (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)) {
rc = set_sdp_current(chg, icl_ua);
if (rc < 0) {
smblib_err(chg, "Couldn't set SDP ICL rc=%d\n", rc);
@@ -856,10 +879,10 @@ override_suspend_config:
/* remove override if no voters - hw defaults is desired */
override = false;
} else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
- if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
/* For std cable with type = SDP never override */
override = false;
- else if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_CDP
+ else if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_CDP
&& icl_ua == 1500000)
/*
* For std cable with type = CDP override only if
@@ -890,6 +913,48 @@ enable_icl_changed_interrupt:
return rc;
}
+int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua)
+{
+ int rc = 0;
+ u8 load_cfg;
+ bool override;
+ union power_supply_propval pval;
+
+ rc = smblib_get_prop_typec_mode(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get typeC mode rc = %d\n", rc);
+ return rc;
+ }
+
+ if ((pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+ || chg->micro_usb_mode)
+ && (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
+ rc = get_sdp_current(chg, icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get SDP ICL rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = smblib_read(chg, USBIN_LOAD_CFG_REG, &load_cfg);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get load cfg rc=%d\n", rc);
+ return rc;
+ }
+ override = load_cfg & ICL_OVERRIDE_AFTER_APSD_BIT;
+ if (!override)
+ return INT_MAX;
+
+ /* override is set */
+ rc = smblib_get_charge_param(chg, &chg->param.usb_icl, icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get HC ICL rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
/*********************
* VOTABLE CALLBACKS *
*********************/
@@ -1642,6 +1707,11 @@ int smblib_get_prop_input_current_limited(struct smb_charger *chg,
u8 stat;
int rc;
+ if (chg->fake_input_current_limited >= 0) {
+ val->intval = chg->fake_input_current_limited;
+ return 0;
+ }
+
rc = smblib_read(chg, AICL_STATUS_REG, &stat);
if (rc < 0) {
smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n", rc);
@@ -1833,6 +1903,13 @@ int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
return rc;
}
+int smblib_set_prop_input_current_limited(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ chg->fake_input_current_limited = val->intval;
+ return 0;
+}
+
int smblib_rerun_aicl(struct smb_charger *chg)
{
int rc, settled_icl_ua;
@@ -2816,7 +2893,7 @@ int smblib_get_prop_fcc_delta(struct smb_charger *chg,
int smblib_get_charge_current(struct smb_charger *chg,
int *total_current_ua)
{
- const struct apsd_result *apsd_result = smblib_update_usb_type(chg);
+ const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
union power_supply_propval val = {0, };
int rc = 0, typec_source_rd, current_ua;
bool non_compliant;
@@ -3088,12 +3165,28 @@ static void smblib_micro_usb_plugin(struct smb_charger *chg, bool vbus_rising)
}
}
-static void smblib_typec_usb_plugin(struct smb_charger *chg, bool vbus_rising)
+void smblib_usb_plugin_hard_reset_locked(struct smb_charger *chg)
{
+ int rc;
+ u8 stat;
+ bool vbus_rising;
+
+ rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+ return;
+ }
+
+ vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+
if (vbus_rising)
smblib_cc2_sink_removal_exit(chg);
else
smblib_cc2_sink_removal_enter(chg);
+
+ power_supply_changed(chg->usb_psy);
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n",
+ vbus_rising ? "attached" : "detached");
}
#define PL_DELAY_MS 30000
@@ -3152,8 +3245,6 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
if (chg->micro_usb_mode)
smblib_micro_usb_plugin(chg, vbus_rising);
- else
- smblib_typec_usb_plugin(chg, vbus_rising);
power_supply_changed(chg->usb_psy);
smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n",
@@ -3166,7 +3257,10 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
struct smb_charger *chg = irq_data->parent_data;
mutex_lock(&chg->lock);
- smblib_usb_plugin_locked(chg);
+ if (chg->pd_hard_reset)
+ smblib_usb_plugin_hard_reset_locked(chg);
+ else
+ smblib_usb_plugin_locked(chg);
mutex_unlock(&chg->lock);
return IRQ_HANDLED;
}
@@ -3232,7 +3326,7 @@ static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
int pulses;
power_supply_changed(chg->usb_main_psy);
- if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_HVDCP) {
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP) {
rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
if (rc < 0) {
smblib_err(chg,
@@ -3260,7 +3354,7 @@ static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
}
}
- if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat);
if (rc < 0) {
smblib_err(chg,
@@ -3330,7 +3424,7 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
bool rising, bool qc_charger)
{
- const struct apsd_result *apsd_result = smblib_update_usb_type(chg);
+ const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
/* Hold off PD only until hvdcp 2.0 detection timeout */
if (rising) {
@@ -4187,26 +4281,30 @@ static int smblib_create_votables(struct smb_charger *chg)
int rc = 0;
chg->fcc_votable = find_votable("FCC");
- if (!chg->fcc_votable) {
- rc = -EPROBE_DEFER;
+ if (chg->fcc_votable == NULL) {
+ rc = -EINVAL;
+ smblib_err(chg, "Couldn't find FCC votable rc=%d\n", rc);
return rc;
}
chg->fv_votable = find_votable("FV");
- if (!chg->fv_votable) {
- rc = -EPROBE_DEFER;
+ if (chg->fv_votable == NULL) {
+ rc = -EINVAL;
+ smblib_err(chg, "Couldn't find FV votable rc=%d\n", rc);
return rc;
}
chg->usb_icl_votable = find_votable("USB_ICL");
if (!chg->usb_icl_votable) {
- rc = -EPROBE_DEFER;
+ rc = -EINVAL;
+ smblib_err(chg, "Couldn't find USB_ICL votable rc=%d\n", rc);
return rc;
}
chg->pl_disable_votable = find_votable("PL_DISABLE");
- if (!chg->pl_disable_votable) {
- rc = -EPROBE_DEFER;
+ if (chg->pl_disable_votable == NULL) {
+ rc = -EINVAL;
+ smblib_err(chg, "Couldn't find votable PL_DISABLE rc=%d\n", rc);
return rc;
}
vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
@@ -4385,11 +4483,17 @@ int smblib_init(struct smb_charger *chg)
INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work);
chg->fake_capacity = -EINVAL;
+ chg->fake_input_current_limited = -EINVAL;
switch (chg->mode) {
case PARALLEL_MASTER:
- chg->qnovo_fcc_ua = -EINVAL;
- chg->qnovo_fv_uv = -EINVAL;
+ rc = qcom_batt_init();
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't init qcom_batt_init rc=%d\n",
+ rc);
+ return rc;
+ }
+
rc = smblib_create_votables(chg);
if (rc < 0) {
smblib_err(chg, "Couldn't create votables rc=%d\n",
@@ -4421,8 +4525,20 @@ int smblib_deinit(struct smb_charger *chg)
{
switch (chg->mode) {
case PARALLEL_MASTER:
+ cancel_work_sync(&chg->bms_update_work);
+ cancel_work_sync(&chg->rdstd_cc2_detach_work);
+ cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+ cancel_delayed_work_sync(&chg->step_soc_req_work);
+ cancel_delayed_work_sync(&chg->clear_hdc_work);
+ cancel_work_sync(&chg->otg_oc_work);
+ cancel_work_sync(&chg->vconn_oc_work);
+ cancel_delayed_work_sync(&chg->otg_ss_done_work);
+ cancel_delayed_work_sync(&chg->icl_change_work);
+ cancel_delayed_work_sync(&chg->pl_enable_work);
+ cancel_work_sync(&chg->legacy_detection_work);
power_supply_unreg_notifier(&chg->nb);
smblib_destroy_votables(chg);
+ qcom_batt_deinit();
break;
case PARALLEL_SLAVE:
break;
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index b0d84f014b0d..5dc60ecb9436 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -62,6 +62,8 @@ enum print_reason {
#define AICL_RERUN_VOTER "AICL_RERUN_VOTER"
#define LEGACY_UNKNOWN_VOTER "LEGACY_UNKNOWN_VOTER"
#define CC2_WA_VOTER "CC2_WA_VOTER"
+#define QNOVO_VOTER "QNOVO_VOTER"
+#define BATT_PROFILE_VOTER "BATT_PROFILE_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -242,6 +244,8 @@ struct smb_charger {
struct power_supply *bms_psy;
struct power_supply_desc usb_psy_desc;
struct power_supply *usb_main_psy;
+ struct power_supply *usb_port_psy;
+ enum power_supply_type real_charger_type;
/* notifiers */
struct notifier_block nb;
@@ -314,6 +318,7 @@ struct smb_charger {
bool typec_present;
u8 typec_status[5];
bool typec_legacy_valid;
+ int fake_input_current_limited;
/* workaround flag */
u32 wa_flags;
@@ -325,9 +330,11 @@ struct smb_charger {
/* extcon for VBUS / ID notification to USB for uUSB */
struct extcon_dev *extcon;
+ /* battery profile */
+ int batt_profile_fcc_ua;
+ int batt_profile_fv_uv;
+
/* qnovo */
- int qnovo_fcc_ua;
- int qnovo_fv_uv;
int usb_icl_delta_ua;
int pulse_cnt;
};
@@ -415,6 +422,8 @@ int smblib_set_prop_batt_capacity(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_system_temp_level(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_set_prop_input_current_limited(struct smb_charger *chg,
+ const union power_supply_propval *val);
int smblib_get_prop_dc_present(struct smb_charger *chg,
union power_supply_propval *val);
@@ -493,6 +502,7 @@ int smblib_icl_override(struct smb_charger *chg, bool override);
int smblib_dp_dm(struct smb_charger *chg, int val);
int smblib_rerun_aicl(struct smb_charger *chg);
int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
+int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua);
int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
int smblib_init(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index 3f260a407721..167666a8c548 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -1025,14 +1025,4 @@ enum {
/* CHGR FREQ Peripheral registers */
#define FREQ_CLK_DIV_REG (CHGR_FREQ_BASE + 0x50)
-/* SMB1355 specific registers */
-#define SMB1355_TEMP_COMP_STATUS_REG (MISC_BASE + 0x07)
-#define SKIN_TEMP_RST_HOT_BIT BIT(6)
-#define SKIN_TEMP_UB_HOT_BIT BIT(5)
-#define SKIN_TEMP_LB_HOT_BIT BIT(4)
-#define DIE_TEMP_TSD_HOT_BIT BIT(3)
-#define DIE_TEMP_RST_HOT_BIT BIT(2)
-#define DIE_TEMP_UB_HOT_BIT BIT(1)
-#define DIE_TEMP_LB_HOT_BIT BIT(0)
-
#endif /* __SMB2_CHARGER_REG_H */
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 4e710cae6b78..694591c3ec56 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -104,8 +104,6 @@ struct smb138x {
struct smb_dt_props dt;
struct power_supply *parallel_psy;
u32 wa_flags;
- struct pmic_revid_data *pmic_rev_id;
- char *name;
};
static int __debug_mask;
@@ -169,14 +167,6 @@ static int smb138x_parse_dt(struct smb138x *chip)
if (rc < 0)
chip->dt.pl_mode = POWER_SUPPLY_PL_USBMID_USBMID;
- /* check that smb1355 is configured to run in mid-mid mode */
- if (chip->pmic_rev_id->pmic_subtype == SMB1355_SUBTYPE
- && chip->dt.pl_mode != POWER_SUPPLY_PL_USBMID_USBMID) {
- pr_err("Smb1355 can only run in MID-MID mode, saw = %d mode\n",
- chip->dt.pl_mode);
- return -EINVAL;
- }
-
chip->dt.suspend_input = of_property_read_bool(node,
"qcom,suspend-input");
@@ -489,30 +479,6 @@ static int smb138x_init_batt_psy(struct smb138x *chip)
* PARALLEL PSY REGISTRATION *
*****************************/
-static int smb1355_get_prop_connector_health(struct smb138x *chip)
-{
- struct smb_charger *chg = &chip->chg;
- u8 temp;
- int rc;
-
- rc = smblib_read(chg, SMB1355_TEMP_COMP_STATUS_REG, &temp);
- if (rc < 0) {
- pr_err("Couldn't read comp stat reg rc = %d\n", rc);
- return POWER_SUPPLY_HEALTH_UNKNOWN;
- }
-
- if (temp & SKIN_TEMP_RST_HOT_BIT)
- return POWER_SUPPLY_HEALTH_OVERHEAT;
-
- if (temp & SKIN_TEMP_UB_HOT_BIT)
- return POWER_SUPPLY_HEALTH_HOT;
-
- if (temp & SKIN_TEMP_LB_HOT_BIT)
- return POWER_SUPPLY_HEALTH_WARM;
-
- return POWER_SUPPLY_HEALTH_COOL;
-}
-
static int smb138x_get_prop_connector_health(struct smb138x *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -570,32 +536,16 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_PIN_ENABLED,
POWER_SUPPLY_PROP_INPUT_SUSPEND,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
- POWER_SUPPLY_PROP_MODEL_NAME,
- POWER_SUPPLY_PROP_PARALLEL_MODE,
- POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
- POWER_SUPPLY_PROP_SET_SHIP_MODE,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_CURRENT_MAX,
-};
-
-static enum power_supply_property smb1355_parallel_props[] = {
- POWER_SUPPLY_PROP_CHARGE_TYPE,
- POWER_SUPPLY_PROP_CHARGING_ENABLED,
- POWER_SUPPLY_PROP_PIN_ENABLED,
- POWER_SUPPLY_PROP_INPUT_SUSPEND,
- POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
- POWER_SUPPLY_PROP_VOLTAGE_MAX,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PARALLEL_MODE,
POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
POWER_SUPPLY_PROP_SET_SHIP_MODE,
- POWER_SUPPLY_PROP_CHARGER_TEMP,
- POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
};
static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -633,6 +583,14 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
else
val->intval = 0;
break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+ rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
+ &val->intval);
+ else
+ val->intval = 0;
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
break;
@@ -640,46 +598,28 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
rc = smblib_get_charge_param(chg, &chg->param.fcc,
&val->intval);
break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ rc = smblib_get_prop_slave_current_now(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP:
+ rc = smb138x_get_prop_charger_temp(chip, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+ rc = smblib_get_prop_charger_temp_max(chg, val);
+ break;
case POWER_SUPPLY_PROP_MODEL_NAME:
- val->strval = chip->name;
+ val->strval = "smb138x";
break;
case POWER_SUPPLY_PROP_PARALLEL_MODE:
val->intval = chip->dt.pl_mode;
break;
case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
- if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
- val->intval = smb138x_get_prop_connector_health(chip);
- else
- val->intval = smb1355_get_prop_connector_health(chip);
+ val->intval = smb138x_get_prop_connector_health(chip);
break;
case POWER_SUPPLY_PROP_SET_SHIP_MODE:
/* Not in ship mode as long as device is active */
val->intval = 0;
break;
- case POWER_SUPPLY_PROP_CHARGER_TEMP:
- if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
- rc = smb138x_get_prop_charger_temp(chip, val);
- else
- rc = smblib_get_prop_charger_temp(chg, val);
- break;
- case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
- rc = smblib_get_prop_charger_temp_max(chg, val);
- break;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
- rc = smblib_get_prop_slave_current_now(chg, val);
- else
- rc = -ENODATA;
- break;
- case POWER_SUPPLY_PROP_CURRENT_MAX:
- if ((chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
- && ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
- || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)))
- rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
- &val->intval);
- else
- rc = -ENODATA;
- break;
default:
pr_err("parallel power supply get prop %d not supported\n",
prop);
@@ -763,7 +703,7 @@ static int smb138x_parallel_prop_is_writeable(struct power_supply *psy,
return 0;
}
-static struct power_supply_desc parallel_psy_desc = {
+static const struct power_supply_desc parallel_psy_desc = {
.name = "parallel",
.type = POWER_SUPPLY_TYPE_PARALLEL,
.properties = smb138x_parallel_props,
@@ -791,28 +731,6 @@ static int smb138x_init_parallel_psy(struct smb138x *chip)
return 0;
}
-static int smb1355_init_parallel_psy(struct smb138x *chip)
-{
- struct power_supply_config parallel_cfg = {};
- struct smb_charger *chg = &chip->chg;
-
- parallel_cfg.drv_data = chip;
- parallel_cfg.of_node = chg->dev->of_node;
-
- /* change to smb1355's property list */
- parallel_psy_desc.properties = smb1355_parallel_props;
- parallel_psy_desc.num_properties = ARRAY_SIZE(smb1355_parallel_props);
- chip->parallel_psy = devm_power_supply_register(chg->dev,
- &parallel_psy_desc,
- &parallel_cfg);
- if (IS_ERR(chip->parallel_psy)) {
- pr_err("Couldn't register parallel power supply\n");
- return PTR_ERR(chip->parallel_psy);
- }
-
- return 0;
-}
-
/******************************
* VBUS REGULATOR REGISTRATION *
******************************/
@@ -1132,6 +1050,7 @@ static int smb138x_init_hw(struct smb138x *chip)
static int smb138x_setup_wa_flags(struct smb138x *chip)
{
+ struct pmic_revid_data *pmic_rev_id;
struct device_node *revid_dev_node;
revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
@@ -1141,8 +1060,8 @@ static int smb138x_setup_wa_flags(struct smb138x *chip)
return -EINVAL;
}
- chip->pmic_rev_id = get_revid_data(revid_dev_node);
- if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR_OR_NULL(pmic_rev_id)) {
/*
* the revid peripheral must be registered, any failure
* here only indicates that the rev-id module has not
@@ -1151,14 +1070,14 @@ static int smb138x_setup_wa_flags(struct smb138x *chip)
return -EPROBE_DEFER;
}
- switch (chip->pmic_rev_id->pmic_subtype) {
+ switch (pmic_rev_id->pmic_subtype) {
case SMB1381_SUBTYPE:
- if (chip->pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
+ if (pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
chip->wa_flags |= OOB_COMP_WA_BIT;
break;
default:
pr_err("PMIC subtype %d not supported\n",
- chip->pmic_rev_id->pmic_subtype);
+ pmic_rev_id->pmic_subtype);
return -EINVAL;
}
@@ -1456,7 +1375,6 @@ static int smb138x_master_probe(struct smb138x *chip)
chg->param = v1_params;
- chip->name = "smb1381";
rc = smblib_init(chg);
if (rc < 0) {
pr_err("Couldn't initialize smblib rc=%d\n", rc);
@@ -1517,7 +1435,7 @@ static int smb138x_master_probe(struct smb138x *chip)
return rc;
}
-static int smb1355_slave_probe(struct smb138x *chip)
+static int smb138x_slave_probe(struct smb138x *chip)
{
struct smb_charger *chg = &chip->chg;
int rc = 0;
@@ -1530,55 +1448,6 @@ static int smb1355_slave_probe(struct smb138x *chip)
goto cleanup;
}
- rc = smb138x_parse_dt(chip);
- if (rc < 0) {
- pr_err("Couldn't parse device tree rc=%d\n", rc);
- goto cleanup;
- }
-
- rc = smb138x_init_slave_hw(chip);
- if (rc < 0) {
- pr_err("Couldn't initialize hardware rc=%d\n", rc);
- goto cleanup;
- }
-
- rc = smb1355_init_parallel_psy(chip);
- if (rc < 0) {
- pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
- goto cleanup;
- }
-
- rc = smb138x_determine_initial_slave_status(chip);
- if (rc < 0) {
- pr_err("Couldn't determine initial status rc=%d\n", rc);
- goto cleanup;
- }
-
- rc = smb138x_request_interrupts(chip);
- if (rc < 0) {
- pr_err("Couldn't request interrupts rc=%d\n", rc);
- goto cleanup;
- }
-
- return 0;
-
-cleanup:
- smblib_deinit(chg);
- return rc;
-}
-
-static int smb1381_slave_probe(struct smb138x *chip)
-{
- struct smb_charger *chg = &chip->chg;
- int rc = 0;
-
- chg->param = v1_params;
-
- rc = smblib_init(chg);
- if (rc < 0) {
- pr_err("Couldn't initialize smblib rc=%d\n", rc);
- goto cleanup;
- }
chg->iio.temp_max_chan = iio_channel_get(chg->dev, "charger_temp_max");
if (IS_ERR(chg->iio.temp_max_chan)) {
rc = PTR_ERR(chg->iio.temp_max_chan);
@@ -1646,71 +1515,25 @@ static int smb1381_slave_probe(struct smb138x *chip)
goto cleanup;
}
- return 0;
+ return rc;
cleanup:
smblib_deinit(chg);
+ if (chip->parallel_psy)
+ power_supply_unregister(chip->parallel_psy);
+ if (chg->vbus_vreg && chg->vbus_vreg->rdev)
+ regulator_unregister(chg->vbus_vreg->rdev);
return rc;
}
-static int slave_probe(struct smb138x *chip)
-{
- struct device_node *revid_dev_node;
- int rc = 0;
-
- revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
- "qcom,pmic-revid", 0);
- if (!revid_dev_node) {
- pr_err("Missing qcom,pmic-revid property\n");
- return -EINVAL;
- }
-
- chip->pmic_rev_id = get_revid_data(revid_dev_node);
- if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
- /*
- * the revid peripheral must be registered, any failure
- * here only indicates that the rev-id module has not
- * probed yet.
- */
- return -EPROBE_DEFER;
- }
-
- switch (chip->pmic_rev_id->pmic_subtype) {
- case SMB1355_SUBTYPE:
- chip->name = "smb1355";
- rc = smb1355_slave_probe(chip);
- break;
- case SMB1381_SUBTYPE:
- chip->name = "smb1381";
- rc = smb1381_slave_probe(chip);
- break;
- default:
- pr_err("Unsupported pmic subtype = 0x%02x\n",
- chip->pmic_rev_id->pmic_subtype);
- rc = -EINVAL;
- }
-
- if (rc < 0) {
- if (rc != -EPROBE_DEFER)
- pr_err("Couldn't probe SMB138X rc=%d\n", rc);
- return rc;
- }
-
- return 0;
-}
-
static const struct of_device_id match_table[] = {
{
- .compatible = "qcom,smb138x-charger",
- .data = (void *) PARALLEL_MASTER,
- },
- {
- .compatible = "qcom,smb138x-parallel-slave",
- .data = (void *) PARALLEL_SLAVE,
+ .compatible = "qcom,smb138x-charger",
+ .data = (void *) PARALLEL_MASTER
},
{
- .compatible = "qcom,smb1355-parallel-slave",
- .data = (void *) PARALLEL_SLAVE,
+ .compatible = "qcom,smb138x-parallel-slave",
+ .data = (void *) PARALLEL_SLAVE
},
{ },
};
@@ -1757,7 +1580,7 @@ static int smb138x_probe(struct platform_device *pdev)
rc = smb138x_master_probe(chip);
break;
case PARALLEL_SLAVE:
- rc = slave_probe(chip);
+ rc = smb138x_slave_probe(chip);
break;
default:
pr_err("Couldn't find a matching mode %d\n", chip->chg.mode);
@@ -1771,8 +1594,7 @@ static int smb138x_probe(struct platform_device *pdev)
goto cleanup;
}
- pr_info("%s probed successfully mode=%d pl_mode = %d\n",
- chip->name, chip->chg.mode, chip->dt.pl_mode);
+ pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode);
return rc;
cleanup:
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index e5ba63171eba..88956d3ba674 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -472,6 +472,15 @@ config REGULATOR_MT6397
This driver supports the control of different power rails of device
through regulator interface.
+config REGULATOR_ONSEMI_NCP6335D
+ tristate "OnSemi NCP6335D regulator support"
+ depends on I2C
+ help
+ This driver supports the OnSemi NCP6335D switching voltage regulator
+ (buck converter). The regulator is controlled using an I2C interface
+ and supports a programmable voltage range from 0.6V to 1.4V in steps
+ of 6.25mV.
+
config REGULATOR_PALMAS
tristate "TI Palmas PMIC Regulators"
depends on MFD_PALMAS
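For reference, the 0.6 V to 1.4 V range quoted in the help text follows from the constants the new driver defines further down (600000 uV minimum, 6250 uV step, 128 selector codes, with selector 0 mapping to the minimum): 600000 uV + 127 * 6250 uV = 1393750 uV, i.e. about 1.39 V, which the help text rounds to 1.4 V.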
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 20cf304a4714..e345f10f94af 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
+obj-$(CONFIG_REGULATOR_ONSEMI_NCP6335D) += onsemi-ncp6335d.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
diff --git a/drivers/regulator/onsemi-ncp6335d.c b/drivers/regulator/onsemi-ncp6335d.c
new file mode 100644
index 000000000000..3b4ca4f81527
--- /dev/null
+++ b/drivers/regulator/onsemi-ncp6335d.c
@@ -0,0 +1,775 @@
+/* Copyright (c) 2012-2014, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/log2.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/debugfs.h>
+#include <linux/regulator/onsemi-ncp6335d.h>
+#include <linux/string.h>
+
+/* registers */
+#define REG_NCP6335D_PID 0x03
+#define REG_NCP6335D_PROGVSEL1 0x10
+#define REG_NCP6335D_PROGVSEL0 0x11
+#define REG_NCP6335D_PGOOD 0x12
+#define REG_NCP6335D_TIMING 0x13
+#define REG_NCP6335D_COMMAND 0x14
+
+/* constraints */
+#define NCP6335D_MIN_VOLTAGE_UV 600000
+#define NCP6335D_STEP_VOLTAGE_UV 6250
+#define NCP6335D_VOLTAGE_STEPS 128
+#define NCP6335D_MIN_SLEW_NS 166
+#define NCP6335D_MAX_SLEW_NS 1333
+
+/* bits */
+#define NCP6335D_ENABLE BIT(7)
+#define NCP6335D_DVS_PWM_MODE BIT(5)
+#define NCP6335D_PWM_MODE1 BIT(6)
+#define NCP6335D_PWM_MODE0 BIT(7)
+#define NCP6335D_PGOOD_DISCHG BIT(4)
+#define NCP6335D_SLEEP_MODE BIT(4)
+
+#define NCP6335D_VOUT_SEL_MASK 0x7F
+#define NCP6335D_SLEW_MASK 0x18
+#define NCP6335D_SLEW_SHIFT 0x3
+
+struct ncp6335d_info {
+ struct regulator_dev *regulator;
+ struct regulator_init_data *init_data;
+ struct regmap *regmap;
+ struct device *dev;
+ unsigned int vsel_reg;
+ unsigned int vsel_backup_reg;
+ unsigned int mode_bit;
+ int curr_voltage;
+ int slew_rate;
+
+ unsigned int step_size;
+ unsigned int min_voltage;
+ unsigned int min_slew_ns;
+ unsigned int max_slew_ns;
+ unsigned int peek_poke_address;
+
+ struct dentry *debug_root;
+};
+
+static int delay_array[] = {10, 20, 30, 40, 50};
+
+static int ncp6335x_read(struct ncp6335d_info *dd, unsigned int reg,
+ unsigned int *val)
+{
+ int i = 0, rc = 0;
+
+ rc = regmap_read(dd->regmap, reg, val);
+ for (i = 0; rc && i < ARRAY_SIZE(delay_array); i++) {
+ pr_debug("Failed reading reg=%u - retry(%d)\n", reg, i);
+ msleep(delay_array[i]);
+ rc = regmap_read(dd->regmap, reg, val);
+ }
+
+ if (rc)
+ pr_err("Failed reading reg=%u rc=%d\n", reg, rc);
+
+ return rc;
+}
+
+static int ncp6335x_write(struct ncp6335d_info *dd, unsigned int reg,
+ unsigned int val)
+{
+ int i = 0, rc = 0;
+
+ rc = regmap_write(dd->regmap, reg, val);
+ for (i = 0; rc && i < ARRAY_SIZE(delay_array); i++) {
+ pr_debug("Failed writing reg=%u - retry(%d)\n", reg, i);
+ msleep(delay_array[i]);
+ rc = regmap_write(dd->regmap, reg, val);
+ }
+
+ if (rc)
+ pr_err("Failed writing reg=%u rc=%d\n", reg, rc);
+
+ return rc;
+}
+
+static int ncp6335x_update_bits(struct ncp6335d_info *dd, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int i = 0, rc = 0;
+
+ rc = regmap_update_bits(dd->regmap, reg, mask, val);
+ for (i = 0; rc && i < ARRAY_SIZE(delay_array); i++) {
+		pr_debug("Failed updating reg=%u - retry(%d)\n", reg, i);
+ msleep(delay_array[i]);
+ rc = regmap_update_bits(dd->regmap, reg, mask, val);
+ }
+
+ if (rc)
+ pr_err("Failed updating reg=%u rc=%d\n", reg, rc);
+
+ return rc;
+}
+
+static void dump_registers(struct ncp6335d_info *dd,
+ unsigned int reg, const char *func)
+{
+ unsigned int val = 0;
+
+ ncp6335x_read(dd, reg, &val);
+ dev_dbg(dd->dev, "%s: NCP6335D: Reg = %x, Val = %x\n", func, reg, val);
+}
+
+static void ncp633d_slew_delay(struct ncp6335d_info *dd,
+ int prev_uV, int new_uV)
+{
+ u8 val;
+ int delay;
+
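+	/*
+	 * slew_rate holds the configured ns-per-step, so the computed delay
+	 * is in microseconds (plus 1 us of margin for rounding).
+	 */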
+ val = abs(prev_uV - new_uV) / dd->step_size;
+ delay = ((val * dd->slew_rate) / 1000) + 1;
+
+ dev_dbg(dd->dev, "Slew Delay = %d\n", delay);
+
+ udelay(delay);
+}
+
+static int ncp6335d_is_enabled(struct regulator_dev *rdev)
+{
+ int rc, val = 0;
+ struct ncp6335d_info *dd = rdev_get_drvdata(rdev);
+
+ rc = ncp6335x_read(dd, dd->vsel_reg, &val);
+ if (rc)
+ dev_err(dd->dev, "Unable to read enable register rc(%d)", rc);
+
+ dump_registers(dd, dd->vsel_reg, __func__);
+
+ return ((val & NCP6335D_ENABLE) ? 1 : 0);
+}
+
+static int ncp6335d_enable(struct regulator_dev *rdev)
+{
+ int rc;
+ struct ncp6335d_info *dd = rdev_get_drvdata(rdev);
+
+ rc = ncp6335x_update_bits(dd, dd->vsel_reg,
+ NCP6335D_ENABLE, NCP6335D_ENABLE);
+ if (rc)
+		dev_err(dd->dev, "Unable to enable regulator rc(%d)", rc);
+
+ dump_registers(dd, dd->vsel_reg, __func__);
+
+ return rc;
+}
+
+static int ncp6335d_disable(struct regulator_dev *rdev)
+{
+ int rc;
+ struct ncp6335d_info *dd = rdev_get_drvdata(rdev);
+
+ rc = ncp6335x_update_bits(dd, dd->vsel_reg,
+ NCP6335D_ENABLE, 0);
+ if (rc)
+		dev_err(dd->dev, "Unable to disable regulator rc(%d)", rc);
+
+ dump_registers(dd, dd->vsel_reg, __func__);
+
+ return rc;
+}
+
+static int ncp6335d_get_voltage(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int rc;
+ struct ncp6335d_info *dd = rdev_get_drvdata(rdev);
+
+ rc = ncp6335x_read(dd, dd->vsel_reg, &val);
+ if (rc) {
+		dev_err(dd->dev, "Unable to get voltage rc(%d)", rc);
+ return rc;
+ }
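+	/*
+	 * VOUT = min_voltage + (selector * step_size); with the default
+	 * 600000 uV minimum and 6250 uV step, selector 0x20 maps to 800000 uV.
+	 */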
+ dd->curr_voltage = ((val & NCP6335D_VOUT_SEL_MASK) * dd->step_size) +
+ dd->min_voltage;
+
+ dump_registers(dd, dd->vsel_reg, __func__);
+
+ return dd->curr_voltage;
+}
+
+static int ncp6335d_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ int rc, set_val, new_uV;
+ struct ncp6335d_info *dd = rdev_get_drvdata(rdev);
+
+ set_val = DIV_ROUND_UP(min_uV - dd->min_voltage, dd->step_size);
+ new_uV = (set_val * dd->step_size) + dd->min_voltage;
+ if (new_uV > max_uV) {
+		dev_err(dd->dev, "Unable to set voltage (%d %d)\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ rc = ncp6335x_update_bits(dd, dd->vsel_reg,
+ NCP6335D_VOUT_SEL_MASK, (set_val & NCP6335D_VOUT_SEL_MASK));
+ if (rc) {
+		dev_err(dd->dev, "Unable to set voltage (%d %d)\n",
+ min_uV, max_uV);
+ } else {
+ ncp633d_slew_delay(dd, dd->curr_voltage, new_uV);
+ dd->curr_voltage = new_uV;
+ }
+
+ dump_registers(dd, dd->vsel_reg, __func__);
+
+ return rc;
+}
+
+static int ncp6335d_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct ncp6335d_info *dd = rdev_get_drvdata(rdev);
+
+ if (selector >= NCP6335D_VOLTAGE_STEPS)
+ return 0;
+
+ return selector * dd->step_size + dd->min_voltage;
+}
+
+static int ncp6335d_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ int rc;
+ struct ncp6335d_info *dd = rdev_get_drvdata(rdev);
+
+ /* only FAST and NORMAL mode types are supported */
+ if (mode != REGULATOR_MODE_FAST && mode != REGULATOR_MODE_NORMAL) {
+ dev_err(dd->dev, "Mode %d not supported\n", mode);
+ return -EINVAL;
+ }
+
+ rc = ncp6335x_update_bits(dd, REG_NCP6335D_COMMAND, dd->mode_bit,
+ (mode == REGULATOR_MODE_FAST) ? dd->mode_bit : 0);
+ if (rc) {
+ dev_err(dd->dev, "Unable to set operating mode rc(%d)", rc);
+ return rc;
+ }
+
+ rc = ncp6335x_update_bits(dd, REG_NCP6335D_COMMAND,
+ NCP6335D_DVS_PWM_MODE,
+ (mode == REGULATOR_MODE_FAST) ?
+ NCP6335D_DVS_PWM_MODE : 0);
+ if (rc)
+ dev_err(dd->dev, "Unable to set DVS trans. mode rc(%d)", rc);
+
+ dump_registers(dd, REG_NCP6335D_COMMAND, __func__);
+
+ return rc;
+}
+
+static unsigned int ncp6335d_get_mode(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int rc;
+ struct ncp6335d_info *dd = rdev_get_drvdata(rdev);
+
+ rc = ncp6335x_read(dd, REG_NCP6335D_COMMAND, &val);
+ if (rc) {
+ dev_err(dd->dev, "Unable to get regulator mode rc(%d)\n", rc);
+ return rc;
+ }
+
+ dump_registers(dd, REG_NCP6335D_COMMAND, __func__);
+
+ if (val & dd->mode_bit)
+ return REGULATOR_MODE_FAST;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static struct regulator_ops ncp6335d_ops = {
+ .set_voltage = ncp6335d_set_voltage,
+ .get_voltage = ncp6335d_get_voltage,
+ .list_voltage = ncp6335d_list_voltage,
+ .is_enabled = ncp6335d_is_enabled,
+ .enable = ncp6335d_enable,
+ .disable = ncp6335d_disable,
+ .set_mode = ncp6335d_set_mode,
+ .get_mode = ncp6335d_get_mode,
+};
+
+static struct regulator_desc rdesc = {
+ .name = "ncp6335d",
+ .supply_name = "vin",
+ .owner = THIS_MODULE,
+ .n_voltages = NCP6335D_VOLTAGE_STEPS,
+ .ops = &ncp6335d_ops,
+};
+
+static int ncp6335d_restore_working_reg(struct device_node *node,
+ struct ncp6335d_info *dd)
+{
+ int ret;
+ unsigned int val;
+
+ /* Restore register from back up register */
+ ret = ncp6335x_read(dd, dd->vsel_backup_reg, &val);
+ if (ret < 0) {
+ dev_err(dd->dev, "Failed to get backup data from reg %d, ret = %d\n",
+ dd->vsel_backup_reg, ret);
+ return ret;
+ }
+
+ ret = ncp6335x_update_bits(dd, dd->vsel_reg,
+ NCP6335D_VOUT_SEL_MASK, val);
+ if (ret < 0) {
+ dev_err(dd->dev, "Failed to update working reg %d, ret = %d\n",
+ dd->vsel_reg, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ncp6335d_parse_gpio(struct device_node *node,
+ struct ncp6335d_info *dd)
+{
+ int ret = 0, gpio;
+ enum of_gpio_flags flags;
+
+ if (!of_find_property(node, "onnn,vsel-gpio", NULL))
+ return ret;
+
+ /* Get GPIO connected to vsel and set its output */
+ gpio = of_get_named_gpio_flags(node,
+ "onnn,vsel-gpio", 0, &flags);
+ if (!gpio_is_valid(gpio)) {
+ if (gpio != -EPROBE_DEFER)
+ dev_err(dd->dev, "Could not get vsel, ret = %d\n",
+ gpio);
+ return gpio;
+ }
+
+ ret = devm_gpio_request(dd->dev, gpio, "ncp6335d_vsel");
+ if (ret) {
+ dev_err(dd->dev, "Failed to obtain gpio %d ret = %d\n",
+ gpio, ret);
+ return ret;
+ }
+
+ ret = gpio_direction_output(gpio, flags & OF_GPIO_ACTIVE_LOW ? 0 : 1);
+ if (ret) {
+ dev_err(dd->dev, "Failed to set GPIO %d to: %s, ret = %d",
+ gpio, flags & OF_GPIO_ACTIVE_LOW ?
+ "GPIO_LOW" : "GPIO_HIGH", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ncp6335d_init(struct i2c_client *client, struct ncp6335d_info *dd,
+ const struct ncp6335d_platform_data *pdata)
+{
+ int rc;
+ unsigned int val;
+
+ switch (pdata->default_vsel) {
+ case NCP6335D_VSEL0:
+ dd->vsel_reg = REG_NCP6335D_PROGVSEL0;
+ dd->vsel_backup_reg = REG_NCP6335D_PROGVSEL1;
+ dd->mode_bit = NCP6335D_PWM_MODE0;
+ break;
+ case NCP6335D_VSEL1:
+ dd->vsel_reg = REG_NCP6335D_PROGVSEL1;
+ dd->vsel_backup_reg = REG_NCP6335D_PROGVSEL0;
+ dd->mode_bit = NCP6335D_PWM_MODE1;
+ break;
+ default:
+ dev_err(dd->dev, "Invalid VSEL ID %d\n", pdata->default_vsel);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(client->dev.of_node, "onnn,restore-reg")) {
+ rc = ncp6335d_restore_working_reg(client->dev.of_node, dd);
+ if (rc)
+ return rc;
+ }
+
+ rc = ncp6335d_parse_gpio(client->dev.of_node, dd);
+ if (rc)
+ return rc;
+
+ /* get the current programmed voltage */
+ rc = ncp6335x_read(dd, dd->vsel_reg, &val);
+ if (rc) {
+		dev_err(dd->dev, "Unable to get voltage rc(%d)", rc);
+ return rc;
+ }
+ dd->curr_voltage = ((val & NCP6335D_VOUT_SEL_MASK) *
+ dd->step_size) + dd->min_voltage;
+
+ /* set discharge */
+ rc = ncp6335x_update_bits(dd, REG_NCP6335D_PGOOD,
+ NCP6335D_PGOOD_DISCHG,
+ (pdata->discharge_enable ?
+ NCP6335D_PGOOD_DISCHG : 0));
+ if (rc) {
+ dev_err(dd->dev, "Unable to set Active Discharge rc(%d)\n", rc);
+ return -EINVAL;
+ }
+
+ /* set slew rate */
+ if (pdata->slew_rate_ns < dd->min_slew_ns ||
+ pdata->slew_rate_ns > dd->max_slew_ns) {
+ dev_err(dd->dev, "Invalid slew rate %d\n", pdata->slew_rate_ns);
+ return -EINVAL;
+ }
+
+ dd->slew_rate = pdata->slew_rate_ns;
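+	/*
+	 * The TIMING register encodes the slew rate as ilog2 of the
+	 * (rounded-up) multiple of the minimum slew time, placed in the
+	 * two-bit SLEW field.
+	 */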
+ val = DIV_ROUND_UP(pdata->slew_rate_ns, dd->min_slew_ns);
+ val = ilog2(val);
+
+ rc = ncp6335x_update_bits(dd, REG_NCP6335D_TIMING,
+ NCP6335D_SLEW_MASK, val << NCP6335D_SLEW_SHIFT);
+ if (rc)
+ dev_err(dd->dev, "Unable to set slew rate rc(%d)\n", rc);
+
+ /* Set Sleep mode bit */
+ rc = ncp6335x_update_bits(dd, REG_NCP6335D_COMMAND,
+ NCP6335D_SLEEP_MODE, pdata->sleep_enable ?
+ NCP6335D_SLEEP_MODE : 0);
+ if (rc)
+ dev_err(dd->dev, "Unable to set sleep mode (%d)\n", rc);
+
+ dump_registers(dd, REG_NCP6335D_COMMAND, __func__);
+ dump_registers(dd, REG_NCP6335D_PROGVSEL0, __func__);
+ dump_registers(dd, REG_NCP6335D_TIMING, __func__);
+ dump_registers(dd, REG_NCP6335D_PGOOD, __func__);
+
+ return rc;
+}
+
+static struct regmap_config ncp6335d_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ncp6335d_parse_dt(struct i2c_client *client,
+ struct ncp6335d_info *dd)
+{
+ int rc;
+
+ rc = of_property_read_u32(client->dev.of_node,
+ "onnn,step-size", &dd->step_size);
+ if (rc < 0) {
+ dev_err(&client->dev, "step size missing: rc = %d.\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(client->dev.of_node,
+ "onnn,min-slew-ns", &dd->min_slew_ns);
+ if (rc < 0) {
+		dev_err(&client->dev, "min slew ns missing: rc = %d.\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(client->dev.of_node,
+ "onnn,max-slew-ns", &dd->max_slew_ns);
+ if (rc < 0) {
+		dev_err(&client->dev, "max slew ns missing: rc = %d.\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(client->dev.of_node,
+ "onnn,min-setpoint", &dd->min_voltage);
+ if (rc < 0) {
+ dev_err(&client->dev, "min set point missing: rc = %d.\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static struct ncp6335d_platform_data *
+ ncp6335d_get_of_platform_data(struct i2c_client *client)
+{
+ struct ncp6335d_platform_data *pdata = NULL;
+ struct regulator_init_data *init_data;
+ const char *mode_name;
+ int rc;
+
+ init_data = of_get_regulator_init_data(&client->dev,
+ client->dev.of_node, &rdesc);
+ if (!init_data) {
+ dev_err(&client->dev, "regulator init data is missing\n");
+ return pdata;
+ }
+
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct ncp6335d_platform_data), GFP_KERNEL);
+ if (!pdata)
+ return pdata;
+
+ rc = of_property_read_u32(client->dev.of_node,
+ "onnn,vsel", &pdata->default_vsel);
+ if (rc < 0) {
+ dev_err(&client->dev, "onnn,vsel property missing: rc = %d.\n",
+ rc);
+ return NULL;
+ }
+
+ rc = of_property_read_u32(client->dev.of_node,
+ "onnn,slew-ns", &pdata->slew_rate_ns);
+ if (rc < 0) {
+ dev_err(&client->dev, "onnn,slew-ns property missing: rc = %d.\n",
+ rc);
+ return NULL;
+ }
+
+ pdata->discharge_enable = of_property_read_bool(client->dev.of_node,
+ "onnn,discharge-enable");
+
+ pdata->sleep_enable = of_property_read_bool(client->dev.of_node,
+ "onnn,sleep-enable");
+
+ pdata->init_data = init_data;
+
+ init_data->constraints.input_uV = init_data->constraints.max_uV;
+ init_data->constraints.valid_ops_mask =
+ REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE;
+ init_data->constraints.valid_modes_mask =
+ REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_FAST;
+
+ rc = of_property_read_string(client->dev.of_node, "onnn,mode",
+ &mode_name);
+ if (!rc) {
+ if (strcmp("pwm", mode_name) == 0) {
+ init_data->constraints.initial_mode =
+ REGULATOR_MODE_FAST;
+ } else if (strcmp("auto", mode_name) == 0) {
+ init_data->constraints.initial_mode =
+ REGULATOR_MODE_NORMAL;
+ } else {
+ dev_err(&client->dev, "onnn,mode, unknown regulator mode: %s\n",
+ mode_name);
+ return NULL;
+ }
+ }
+
+ return pdata;
+}
+
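+/*
+ * debugfs peek/poke helpers: the "address" file selects a register and the
+ * "data" file (backed by these callbacks) reads or writes it via the regmap.
+ */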
+static int get_reg(void *data, u64 *val)
+{
+ struct ncp6335d_info *dd = data;
+ int rc;
+ unsigned int temp = 0;
+
+ rc = ncp6335x_read(dd, dd->peek_poke_address, &temp);
+ if (rc < 0)
+ dev_err(dd->dev, "Couldn't read reg %x rc = %d\n",
+ dd->peek_poke_address, rc);
+ else
+ *val = temp;
+
+ return rc;
+}
+
+static int set_reg(void *data, u64 val)
+{
+ struct ncp6335d_info *dd = data;
+ int rc;
+ unsigned int temp = 0;
+
+ temp = (unsigned int) val;
+ rc = ncp6335x_write(dd, dd->peek_poke_address, temp);
+ if (rc < 0)
+		dev_err(dd->dev, "Couldn't write 0x%02x to reg 0x%02x rc = %d\n",
+			temp, dd->peek_poke_address, rc);
+
+ return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(poke_poke_debug_ops, get_reg, set_reg, "0x%02llx\n");
+
+static int ncp6335d_regulator_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ unsigned int val = 0;
+ struct ncp6335d_info *dd;
+ const struct ncp6335d_platform_data *pdata;
+ struct regulator_config config = { };
+
+ if (client->dev.of_node)
+ pdata = ncp6335d_get_of_platform_data(client);
+ else
+ pdata = client->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&client->dev, "Platform data not specified\n");
+ return -EINVAL;
+ }
+
+ dd = devm_kzalloc(&client->dev, sizeof(*dd), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ if (client->dev.of_node) {
+ rc = ncp6335d_parse_dt(client, dd);
+ if (rc)
+ return rc;
+ } else {
+ dd->step_size = NCP6335D_STEP_VOLTAGE_UV;
+ dd->min_voltage = NCP6335D_MIN_VOLTAGE_UV;
+ dd->min_slew_ns = NCP6335D_MIN_SLEW_NS;
+ dd->max_slew_ns = NCP6335D_MAX_SLEW_NS;
+ }
+
+ dd->regmap = devm_regmap_init_i2c(client, &ncp6335d_regmap_config);
+ if (IS_ERR(dd->regmap)) {
+ dev_err(&client->dev, "Error allocating regmap\n");
+ return PTR_ERR(dd->regmap);
+ }
+
+ rc = ncp6335x_read(dd, REG_NCP6335D_PID, &val);
+ if (rc) {
+ dev_err(&client->dev, "Unable to identify NCP6335D, rc(%d)\n",
+ rc);
+ return rc;
+ }
+ dev_info(&client->dev, "Detected Regulator NCP6335D PID = %d\n", val);
+
+ dd->init_data = pdata->init_data;
+ dd->dev = &client->dev;
+ i2c_set_clientdata(client, dd);
+
+ rc = ncp6335d_init(client, dd, pdata);
+ if (rc) {
+ dev_err(&client->dev, "Unable to initialize the regulator\n");
+ return -EINVAL;
+ }
+
+ config.dev = &client->dev;
+ config.init_data = dd->init_data;
+ config.regmap = dd->regmap;
+ config.driver_data = dd;
+ config.of_node = client->dev.of_node;
+
+ dd->regulator = regulator_register(&rdesc, &config);
+
+ if (IS_ERR(dd->regulator)) {
+ dev_err(&client->dev, "Unable to register regulator rc(%ld)",
+ PTR_ERR(dd->regulator));
+
+ return PTR_ERR(dd->regulator);
+ }
+
+ dd->debug_root = debugfs_create_dir("ncp6335x", NULL);
+ if (!dd->debug_root)
+ dev_err(&client->dev, "Couldn't create debug dir\n");
+
+ if (dd->debug_root) {
+ struct dentry *ent;
+
+ ent = debugfs_create_x32("address", S_IFREG | S_IWUSR | S_IRUGO,
+ dd->debug_root,
+ &(dd->peek_poke_address));
+ if (!ent)
+			dev_err(&client->dev, "Couldn't create address debug file\n");
+
+ ent = debugfs_create_file("data", S_IFREG | S_IWUSR | S_IRUGO,
+ dd->debug_root, dd,
+ &poke_poke_debug_ops);
+ if (!ent)
+			dev_err(&client->dev, "Couldn't create data debug file\n");
+ }
+
+ return 0;
+}
+
+static int ncp6335d_regulator_remove(struct i2c_client *client)
+{
+ struct ncp6335d_info *dd = i2c_get_clientdata(client);
+
+ regulator_unregister(dd->regulator);
+
+ debugfs_remove_recursive(dd->debug_root);
+
+ return 0;
+}
+
+static const struct of_device_id ncp6335d_match_table[] = {
+ { .compatible = "onnn,ncp6335d-regulator", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ncp6335d_match_table);
+
+static const struct i2c_device_id ncp6335d_id[] = {
+ {"ncp6335d", -1},
+ { },
+};
+
+static struct i2c_driver ncp6335d_regulator_driver = {
+ .driver = {
+ .name = "ncp6335d-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = ncp6335d_match_table,
+ },
+ .probe = ncp6335d_regulator_probe,
+ .remove = ncp6335d_regulator_remove,
+ .id_table = ncp6335d_id,
+};
+
+/**
+ * ncp6335d_regulator_init() - initialize the ncp6335d regulator driver
+ * This function registers the ncp6335d regulator I2C driver.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init ncp6335d_regulator_init(void)
+{
+ static bool initialized;
+
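+	/*
+	 * This can run both via subsys_initcall() and via callers of the
+	 * exported ncp6335d_regulator_init(), so guard against registering
+	 * the I2C driver twice.
+	 */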
+ if (initialized)
+ return 0;
+
+ initialized = true;
+
+ return i2c_add_driver(&ncp6335d_regulator_driver);
+}
+EXPORT_SYMBOL(ncp6335d_regulator_init);
+subsys_initcall(ncp6335d_regulator_init);
+
+static void __exit ncp6335d_regulator_exit(void)
+{
+ i2c_del_driver(&ncp6335d_regulator_driver);
+}
+module_exit(ncp6335d_regulator_exit);
+MODULE_DESCRIPTION("OnSemi-NCP6335D regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index f40afdd0e5f5..00662dd28d66 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -15,6 +15,7 @@
#include <linux/bitrev.h>
#include <linux/bcd.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#define S35390A_CMD_STATUS1 0
#define S35390A_CMD_STATUS2 1
@@ -34,10 +35,14 @@
#define S35390A_ALRM_BYTE_HOURS 1
#define S35390A_ALRM_BYTE_MINS 2
+/* flags for STATUS1 */
#define S35390A_FLAG_POC 0x01
#define S35390A_FLAG_BLD 0x02
+#define S35390A_FLAG_INT2 0x04
#define S35390A_FLAG_24H 0x40
#define S35390A_FLAG_RESET 0x80
+
+/* flag for STATUS2 */
#define S35390A_FLAG_TEST 0x01
#define S35390A_INT2_MODE_MASK 0xF0
@@ -94,19 +99,63 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
return 0;
}
-static int s35390a_reset(struct s35390a *s35390a)
+/*
+ * Returns <0 on error, 0 if the RTC is set up fine and 1 if the chip was reset.
+ * To preserve the information about whether an IRQ is pending, the value read
+ * from STATUS1 is passed back to the caller.
+ */
+static int s35390a_reset(struct s35390a *s35390a, char *status1)
{
- char buf[1];
-
- if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0)
- return -EIO;
-
- if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD)))
+ char buf;
+ int ret;
+ unsigned initcount = 0;
+
+ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1);
+ if (ret < 0)
+ return ret;
+
+ if (*status1 & S35390A_FLAG_POC)
+ /*
+ * Do not communicate for 0.5 seconds since the power-on
+ * detection circuit is in operation.
+ */
+ msleep(500);
+ else if (!(*status1 & S35390A_FLAG_BLD))
+ /*
+ * If both POC and BLD are unset everything is fine.
+ */
return 0;
- buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H);
- buf[0] &= 0xf0;
- return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
+ /*
+	 * At least one of POC and BLD is set, so reinitialise the chip. Keeping
+ * this information in the hardware to know later that the time isn't
+ * valid is unfortunately not possible because POC and BLD are cleared
+ * on read. So the reset is best done now.
+ *
+ * The 24H bit is kept over reset, so set it already here.
+ */
+initialize:
+ *status1 = S35390A_FLAG_24H;
+ buf = S35390A_FLAG_RESET | S35390A_FLAG_24H;
+ ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
+
+ if (ret < 0)
+ return ret;
+
+ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
+ if (ret < 0)
+ return ret;
+
+ if (buf & (S35390A_FLAG_POC | S35390A_FLAG_BLD)) {
+ /* Try up to five times to reset the chip */
+ if (initcount < 5) {
+ ++initcount;
+ goto initialize;
+ } else
+ return -EIO;
+ }
+
+ return 1;
}
static int s35390a_disable_test_mode(struct s35390a *s35390a)
@@ -242,6 +291,8 @@ static int s35390a_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
if (alm->time.tm_wday != -1)
buf[S35390A_ALRM_BYTE_WDAY] = bin2bcd(alm->time.tm_wday) | 0x80;
+ else
+ buf[S35390A_ALRM_BYTE_WDAY] = 0;
buf[S35390A_ALRM_BYTE_HOURS] = s35390a_hr2reg(s35390a,
alm->time.tm_hour) | 0x80;
@@ -265,27 +316,61 @@ static int s35390a_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
char buf[3], sts;
int i, err;
+ /*
+ * initialize all members to -1 to signal the core that they are not
+ * defined by the hardware.
+ */
+ alm->time.tm_sec = -1;
+ alm->time.tm_min = -1;
+ alm->time.tm_hour = -1;
+ alm->time.tm_mday = -1;
+ alm->time.tm_mon = -1;
+ alm->time.tm_year = -1;
+ alm->time.tm_wday = -1;
+ alm->time.tm_yday = -1;
+ alm->time.tm_isdst = -1;
+
err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts));
if (err < 0)
return err;
- if (bitrev8(sts) != S35390A_INT2_MODE_ALARM)
- return -EINVAL;
+ if ((bitrev8(sts) & S35390A_INT2_MODE_MASK) != S35390A_INT2_MODE_ALARM) {
+ /*
+ * When the alarm isn't enabled, the register to configure
+ * the alarm time isn't accessible.
+ */
+ alm->enabled = 0;
+ return 0;
+ } else {
+ alm->enabled = 1;
+ }
err = s35390a_get_reg(s35390a, S35390A_CMD_INT2_REG1, buf, sizeof(buf));
if (err < 0)
return err;
/* This chip returns the bits of each byte in reverse order */
- for (i = 0; i < 3; ++i) {
+ for (i = 0; i < 3; ++i)
buf[i] = bitrev8(buf[i]);
- buf[i] &= ~0x80;
- }
- alm->time.tm_wday = bcd2bin(buf[S35390A_ALRM_BYTE_WDAY]);
- alm->time.tm_hour = s35390a_reg2hr(s35390a,
- buf[S35390A_ALRM_BYTE_HOURS]);
- alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS]);
+ /*
+ * B0 of the three matching registers is an enable flag. Iff it is set
+ * the configured value is used for matching.
+ */
+ if (buf[S35390A_ALRM_BYTE_WDAY] & 0x80)
+ alm->time.tm_wday =
+ bcd2bin(buf[S35390A_ALRM_BYTE_WDAY] & ~0x80);
+
+ if (buf[S35390A_ALRM_BYTE_HOURS] & 0x80)
+ alm->time.tm_hour =
+ s35390a_reg2hr(s35390a,
+ buf[S35390A_ALRM_BYTE_HOURS] & ~0x80);
+
+ if (buf[S35390A_ALRM_BYTE_MINS] & 0x80)
+ alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS] & ~0x80);
+
+ /* alarm triggers always at s=0 */
+ alm->time.tm_sec = 0;
dev_dbg(&client->dev, "%s: alm is mins=%d, hours=%d, wday=%d\n",
__func__, alm->time.tm_min, alm->time.tm_hour,
@@ -327,11 +412,11 @@ static struct i2c_driver s35390a_driver;
static int s35390a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int err;
+ int err, err_reset;
unsigned int i;
struct s35390a *s35390a;
struct rtc_time tm;
- char buf[1];
+ char buf, status1;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
err = -ENODEV;
@@ -360,29 +445,35 @@ static int s35390a_probe(struct i2c_client *client,
}
}
- err = s35390a_reset(s35390a);
- if (err < 0) {
+ err_reset = s35390a_reset(s35390a, &status1);
+ if (err_reset < 0) {
+ err = err_reset;
dev_err(&client->dev, "error resetting chip\n");
goto exit_dummy;
}
- err = s35390a_disable_test_mode(s35390a);
- if (err < 0) {
- dev_err(&client->dev, "error disabling test mode\n");
- goto exit_dummy;
- }
-
- err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
- if (err < 0) {
- dev_err(&client->dev, "error checking 12/24 hour mode\n");
- goto exit_dummy;
- }
- if (buf[0] & S35390A_FLAG_24H)
+ if (status1 & S35390A_FLAG_24H)
s35390a->twentyfourhour = 1;
else
s35390a->twentyfourhour = 0;
- if (s35390a_get_datetime(client, &tm) < 0)
+ if (status1 & S35390A_FLAG_INT2) {
+ /* disable alarm (and maybe test mode) */
+ buf = 0;
+ err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1);
+ if (err < 0) {
+ dev_err(&client->dev, "error disabling alarm");
+ goto exit_dummy;
+ }
+ } else {
+ err = s35390a_disable_test_mode(s35390a);
+ if (err < 0) {
+ dev_err(&client->dev, "error disabling test mode\n");
+ goto exit_dummy;
+ }
+ }
+
+ if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0)
dev_warn(&client->dev, "clock needs to be set\n");
device_set_wakeup_capable(&client->dev, 1);
@@ -395,6 +486,10 @@ static int s35390a_probe(struct i2c_client *client,
err = PTR_ERR(s35390a->rtc);
goto exit_dummy;
}
+
+ if (status1 & S35390A_FLAG_INT2)
+ rtc_update_irq(s35390a->rtc, 1, RTC_AF);
+
return 0;
exit_dummy:
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 60232bd366ef..71216aa68905 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -18,6 +18,7 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -59,6 +60,7 @@ struct tegra_rtc_info {
struct platform_device *pdev;
struct rtc_device *rtc_dev;
void __iomem *rtc_base; /* NULL if not initialized. */
+ struct clk *clk;
int tegra_rtc_irq; /* alarm and periodic irq */
spinlock_t tegra_rtc_lock;
};
@@ -332,6 +334,14 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
if (info->tegra_rtc_irq <= 0)
return -EBUSY;
+ info->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->clk))
+ return PTR_ERR(info->clk);
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret < 0)
+ return ret;
+
/* set context info. */
info->pdev = pdev;
spin_lock_init(&info->tegra_rtc_lock);
@@ -352,7 +362,7 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
ret = PTR_ERR(info->rtc_dev);
dev_err(&pdev->dev, "Unable to register device (err=%d).\n",
ret);
- return ret;
+ goto disable_clk;
}
ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
@@ -362,12 +372,25 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Unable to request interrupt for device (err=%d).\n",
ret);
- return ret;
+ goto disable_clk;
}
dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
return 0;
+
+disable_clk:
+ clk_disable_unprepare(info->clk);
+ return ret;
+}
+
+static int tegra_rtc_remove(struct platform_device *pdev)
+{
+ struct tegra_rtc_info *info = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(info->clk);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -419,6 +442,7 @@ static void tegra_rtc_shutdown(struct platform_device *pdev)
MODULE_ALIAS("platform:tegra_rtc");
static struct platform_driver tegra_rtc_driver = {
+ .remove = tegra_rtc_remove,
.shutdown = tegra_rtc_shutdown,
.driver = {
.name = "tegra_rtc",
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 24ec282e15d8..7c3b8d3516e3 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1651,6 +1651,9 @@ static void ap_scan_bus(struct work_struct *unused)
ap_dev->queue_depth = queue_depth;
ap_dev->raw_hwtype = device_type;
ap_dev->device_type = device_type;
+ /* CEX6 toleration: map to CEX5 */
+ if (device_type == AP_DEVICE_TYPE_CEX6)
+ ap_dev->device_type = AP_DEVICE_TYPE_CEX5;
ap_dev->functions = device_functions;
spin_lock_init(&ap_dev->lock);
INIT_LIST_HEAD(&ap_dev->pendingq);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6adcbdf225d1..cc741e948170 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -105,6 +105,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_DEVICE_TYPE_CEX3C 9
#define AP_DEVICE_TYPE_CEX4 10
#define AP_DEVICE_TYPE_CEX5 11
+#define AP_DEVICE_TYPE_CEX6 12
/*
* Known function facilities
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 5ada9268a450..a8ac4c0a1493 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -34,7 +34,6 @@ extern const struct file_operations cxlflash_cxl_fops;
sectors
*/
-#define NUM_RRQ_ENTRY 16 /* for master issued cmds */
#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
/* AFU command retry limit */
@@ -48,9 +47,12 @@ extern const struct file_operations cxlflash_cxl_fops;
index derivation
*/
-#define CXLFLASH_MAX_CMDS 16
+#define CXLFLASH_MAX_CMDS 256
#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
+/* RRQ for master issued cmds */
+#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS
+
static inline void check_sizes(void)
{
@@ -149,7 +151,7 @@ struct afu_cmd {
struct afu {
/* Stuff requiring alignment go first. */
- u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ */
+ u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
/*
* Command & data for AFU commands.
*/
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index c86847c68448..2882bcac918a 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -2305,7 +2305,7 @@ static struct scsi_host_template driver_template = {
.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
.change_queue_depth = cxlflash_change_queue_depth,
- .cmd_per_lun = 16,
+ .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
.can_queue = CXLFLASH_MAX_CMDS,
.this_id = -1,
.sg_tablesize = SG_NONE, /* No scatter gather support */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 6bffd91b973a..c1ccf1ee99ea 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;
- if (!list_empty(&task->running))
+ spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&task->running)) {
+		pr_debug_once("%s while task on list\n", __func__);
list_del_init(&task->running);
+ }
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->task == task)
conn->task = NULL;
@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (session->tt->xmit_task(task))
goto free_task;
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->mgmtqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
* this may be on the requeue list already if the xmit_task callout
* is handling the r2ts while we are adding new ones
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (list_empty(&task->running))
list_add_tail(&task->running, &conn->requeue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
* only have one nop-out as a ping from us and targets should not
* overflow us with nop-ins
*/
+ spin_lock_bh(&conn->taskqueuelock);
check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
conn->task = list_entry(conn->mgmtqueue.next,
struct iscsi_task, running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(conn->task);
spin_unlock_bh(&conn->session->back_lock);
conn->task = NULL;
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
}
/* process pending command queue */
@@ -1535,19 +1547,24 @@ check_mgmt:
conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
fail_scsi_task(conn->task, DID_IMM_RETRY);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM || rc == -EACCES) {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&conn->task->running,
&conn->cmdqueue);
conn->task = NULL;
+ spin_unlock_bh(&conn->taskqueuelock);
goto done;
} else
fail_scsi_task(conn->task, DID_ABORT);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
@@ -1558,6 +1575,7 @@ check_mgmt:
* we need to check the mgmt queue for nops that need to
* be sent to aviod starvation
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
@@ -1577,12 +1595,15 @@ check_mgmt:
conn->task = task;
list_del_init(&conn->task->running);
conn->task->state = ISCSI_TASK_RUNNING;
+ spin_unlock_bh(&conn->taskqueuelock);
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
+ spin_unlock_bh(&conn->taskqueuelock);
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto prepd_reject;
}
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->cmdqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -2900,6 +2923,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
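+	/* taskqueuelock protects the mgmtqueue, cmdqueue and requeue lists above */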
+ spin_lock_init(&conn->taskqueuelock);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 9c706d8c1441..6f5e2720ffad 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -218,7 +218,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->num_scatter = qc->n_elem;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
- xfer += sg->length;
+ xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c14ab6c3ae40..60c21093f865 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11387,6 +11387,7 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
+ .shutdown = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 92648a5ea2d2..63f5965acc89 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -390,6 +390,7 @@ struct MPT3SAS_TARGET {
* @eedp_enable: eedp support enable bit
* @eedp_type: 0(type_1), 1(type_2), 2(type_3)
* @eedp_block_length: block size
+ * @ata_command_pending: SATL passthrough outstanding for device
*/
struct MPT3SAS_DEVICE {
struct MPT3SAS_TARGET *sas_target;
@@ -398,6 +399,17 @@ struct MPT3SAS_DEVICE {
u8 configured_lun;
u8 block;
u8 tlr_snoop_check;
+ /*
+ * Bug workaround for SATL handling: the mpt2/3sas firmware
+ * doesn't return BUSY or TASK_SET_FULL for subsequent
+	 * commands while a SATL pass-through is in operation, as the
+	 * spec requires; it simply does nothing with them until the
+	 * pass-through completes, which can make them time out if the
+	 * pass-through is a long-running command (like format or
+	 * secure erase). This variable allows us to do the right
+ * thing while a SATL command is pending.
+ */
+ unsigned long ata_command_pending;
};
#define MPT3_CMD_NOT_USED 0x8000 /* free */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index f6a8e9958e75..8a5fbdb45cfd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3707,9 +3707,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
}
}
-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
- return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+ struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
+
+ if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
+ return 0;
+
+ if (pending)
+ return test_and_set_bit(0, &priv->ata_command_pending);
+
+ clear_bit(0, &priv->ata_command_pending);
+ return 0;
}
/**
@@ -3733,9 +3742,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
if (!scmd)
continue;
count++;
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_unblock(scmd->device,
- SDEV_RUNNING);
+ _scsih_set_satl_pending(scmd, false);
mpt3sas_base_free_smid(ioc, smid);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
@@ -3866,13 +3873,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (ioc->logging_level & MPT_DEBUG_SCSI)
scsi_print_command(scmd);
- /*
- * Lock the device for any subsequent command until command is
- * done.
- */
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_block(scmd->device);
-
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
@@ -3886,6 +3886,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
+ /*
+	 * Bug workaround for firmware SATL handling. The loop
+	 * is based on atomic operations and ensures consistency
+	 * since we're lockless at this point.
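+	 * (_scsih_set_satl_pending() returns non-zero when another ATA
+	 * pass-through has already claimed the bit, in which case we go
+	 * around again and, if the bit is still set, fail the command
+	 * with SAM_STAT_BUSY.)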
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4445,8 +4458,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
if (scmd == NULL)
return 1;
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+ _scsih_set_satl_pending(scmd, false);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5d81bcc1dc75..9b3af788376c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1995,6 +1995,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#define READ_CAPACITY_RETRIES_ON_RESET 10
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
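+	/*
+	 * e.g. for 4096-byte logical blocks the shift is ilog2(4096) - 9 = 3,
+	 * i.e. 8 sectors of 512 bytes per logical block.
+	 */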
+ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+ if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+ return false;
+
+ return true;
+}
+
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -2060,7 +2076,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
return -ENODEV;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2146,7 +2162,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
return sector_size;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2810,7 +2826,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
rw_max = q->limits.io_opt =
sdkp->opt_xfer_blocks * sdp->sector_size;
else
- rw_max = BLK_DEF_MAX_SECTORS;
+ rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+ (sector_t)BLK_DEF_MAX_SECTORS);
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 93a523b42d3d..29d8c74e85e3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1008,6 +1008,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
result = get_user(val, ip);
if (result)
return result;
+ if (val > SG_MAX_CDB_SIZE)
+ return -ENOMEM;
sfp->next_cmd_len = (val > 0) ? val : 0;
return 0;
case SG_GET_VERSION_NUM:
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 64c867405ad4..804586aeaffe 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -834,6 +834,7 @@ static void get_capabilities(struct scsi_cd *cd)
unsigned char *buffer;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
+ unsigned int ms_len = 128;
int rc, n;
static const char *loadmech[] =
@@ -860,10 +861,11 @@ static void get_capabilities(struct scsi_cd *cd)
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */
- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc)) {
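+	/*
+	 * Also reject replies whose reported lengths are inconsistent or
+	 * larger than the buffer we supplied.
+	 */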
+ if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index b3ed361eff26..fb2f2159c0e1 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -106,3 +106,15 @@ config SCSI_UFS_TEST
The UFS unit-tests register as a block device test utility to
the test-iosched and will be initiated when the test-iosched will
be chosen to be the active I/O scheduler.
+
+config SCSI_UFSHCD_CMD_LOGGING
+ bool "Universal Flash Storage host controller driver layer command logging support"
+ depends on SCSI_UFSHCD
+ help
+ This selects the UFS host controller driver layer command logging.
+	  UFS host controller driver layer command logging records all the
+	  command information sent from the UFS host controller, for
+	  debugging purposes.
+
+	  Select this if you want the above-mentioned debug information
+	  captured.
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c5393d517432..9e64e0c34f57 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -516,14 +516,145 @@ static inline void ufshcd_remove_non_printable(char *val)
*val = ' ';
}
+#define UFSHCD_MAX_CMD_LOGGING 100
+
#ifdef CONFIG_TRACEPOINTS
-static void ufshcd_add_command_trace(struct ufs_hba *hba,
- unsigned int tag, const char *str)
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+ struct ufshcd_cmd_log_entry *entry, u8 opcode)
+{
+ if (trace_ufshcd_command_enabled()) {
+ u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+
+ trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
+ entry->doorbell, entry->transfer_len, intr,
+ entry->lba, opcode);
+ }
+}
+#else
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+ struct ufshcd_cmd_log_entry *entry, u8 opcode)
+{
+}
+#endif
+
+#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+ /* Allocate log entries */
+ if (!hba->cmd_log.entries) {
+ hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
+ sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
+ if (!hba->cmd_log.entries)
+ return;
+ dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
+ __func__);
+ }
+}
+
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+ unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+ sector_t lba, int transfer_len, u8 opcode)
+{
+ struct ufshcd_cmd_log_entry *entry;
+
+ if (!hba->cmd_log.entries)
+ return;
+
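+	/*
+	 * The log is a ring of UFSHCD_MAX_CMD_LOGGING entries; once full,
+	 * the oldest record is overwritten.
+	 */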
+ entry = &hba->cmd_log.entries[hba->cmd_log.pos];
+ entry->lun = lun;
+ entry->str = str;
+ entry->cmd_type = cmd_type;
+ entry->cmd_id = cmd_id;
+ entry->lba = lba;
+ entry->transfer_len = transfer_len;
+ entry->idn = idn;
+ entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ entry->tag = tag;
+ entry->tstamp = ktime_get();
+ entry->outstanding_reqs = hba->outstanding_reqs;
+ entry->seq_num = hba->cmd_log.seq_num;
+ hba->cmd_log.seq_num++;
+ hba->cmd_log.pos =
+ (hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+ ufshcd_add_command_trace(hba, entry, opcode);
+}
+
+static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+ unsigned int tag, u8 cmd_id, u8 idn)
+{
+ __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
+ 0xff, (sector_t)-1, -1, -1);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+ ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
+}
+
+static void ufshcd_cmd_log_print(struct ufs_hba *hba)
+{
+ int i;
+ int pos;
+ struct ufshcd_cmd_log_entry *p;
+
+ if (!hba->cmd_log.entries)
+ return;
+
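+	/* Start at the current write position so the ring prints oldest first. */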
+ pos = hba->cmd_log.pos;
+ for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
+ p = &hba->cmd_log.entries[pos];
+ pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+ if (ktime_to_us(p->tstamp)) {
+ pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
+ p->cmd_type, p->str, p->seq_num,
+ p->lun, p->cmd_id, (unsigned long long)p->lba,
+ p->transfer_len, p->tag, p->doorbell,
+ p->outstanding_reqs, p->idn,
+ ktime_to_us(p->tstamp));
+ usleep_range(1000, 1100);
+ }
+ }
+}
+#else
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+}
+
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+ unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+ sector_t lba, int transfer_len, u8 opcode)
+{
+ struct ufshcd_cmd_log_entry entry;
+
+ entry.str = str;
+ entry.lba = lba;
+ entry.transfer_len = transfer_len;
+ entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ entry.tag = tag;
+
+ ufshcd_add_command_trace(hba, &entry, opcode);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+}
+
+static void ufshcd_cmd_log_print(struct ufs_hba *hba)
+{
+}
+#endif
+
+#ifdef CONFIG_TRACEPOINTS
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+ unsigned int tag, const char *str)
{
- sector_t lba = -1;
- u8 opcode = 0;
- u32 intr, doorbell;
struct ufshcd_lrb *lrbp;
+ char *cmd_type;
+ u8 opcode = 0;
+ u8 cmd_id, idn = 0;
+ sector_t lba = -1;
int transfer_len = -1;
lrbp = &hba->lrb[tag];
@@ -537,23 +668,28 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
*/
if (lrbp->cmd->request && lrbp->cmd->request->bio)
lba =
- lrbp->cmd->request->bio->bi_iter.bi_sector;
+ lrbp->cmd->request->bio->bi_iter.bi_sector;
transfer_len = be32_to_cpu(
lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
}
}
- intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- trace_ufshcd_command(dev_name(hba->dev), str, tag,
- doorbell, transfer_len, intr, lba, opcode);
-}
+ if (lrbp->command_type == UTP_CMD_TYPE_SCSI) {
+ cmd_type = "scsi";
+ cmd_id = (u8)(*lrbp->cmd->cmnd);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+ if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
+ cmd_type = "nop";
+ cmd_id = 0;
+ } else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
+ cmd_type = "query";
+ cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
+ idn = hba->dev_cmd.query.request.upiu_req.idn;
+ }
+ }
-static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
- unsigned int tag, const char *str)
-{
- if (trace_ufshcd_command_enabled())
- ufshcd_add_command_trace(hba, tag, str);
+ __ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
+ lrbp->lun, lba, transfer_len, opcode);
}
#else
static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
@@ -1370,6 +1506,7 @@ start:
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
+ hba->ufs_stats.clk_hold.ts = ktime_get();
return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -1474,6 +1611,7 @@ static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ hba->ufs_stats.clk_rel.ts = ktime_get();
hrtimer_start(&hba->clk_gating.gate_hrtimer,
ms_to_ktime(hba->clk_gating.delay_ms),
@@ -1920,8 +2058,10 @@ static void ufshcd_hibern8_exit_work(struct work_struct *work)
/* Exit from hibern8 */
if (ufshcd_is_link_hibern8(hba)) {
+ hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
ufshcd_hold(hba, false);
ret = ufshcd_uic_hibern8_exit(hba);
+ hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
ufshcd_release(hba, false);
if (!ret) {
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2263,6 +2403,7 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
hba->active_uic_cmd = uic_cmd;
+ ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
/* Write Args */
ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@ -2296,6 +2437,8 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
if (ret)
ufsdbg_set_err_state(hba);
+ ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
+
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -2344,6 +2487,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
int ret;
unsigned long flags;
+ hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
ufshcd_hold_all(hba);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -2357,6 +2501,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
ufshcd_save_tstamp_of_last_dme_cmd(hba);
mutex_unlock(&hba->uic_cmd_mutex);
ufshcd_release_all(hba);
+ hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
ufsdbg_error_inject_dispatcher(hba,
ERR_INJECT_UIC, 0, &ret);
@@ -2834,6 +2979,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
+ hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
err = ufshcd_hold(hba, true);
if (err) {
err = SCSI_MLQUEUE_HOST_BUSY;
@@ -2847,6 +2993,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (err) {
clear_bit_unlock(tag, &hba->lrb_in_use);
err = SCSI_MLQUEUE_HOST_BUSY;
+ hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
ufshcd_release(hba, true);
goto out;
}
@@ -4113,6 +4260,8 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
cmd->command, status);
ret = (status != PWR_OK) ? status : -1;
}
+ ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
+
out:
if (ret) {
ufsdbg_set_err_state(hba);
@@ -4216,8 +4365,10 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
uic_cmd.command = UIC_CMD_DME_SET;
uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
uic_cmd.argument3 = mode;
+ hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
ufshcd_hold_all(hba);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
ufshcd_release_all(hba);
out:
return ret;
@@ -5385,6 +5536,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
update_req_stats(hba, lrbp);
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
+ hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
__ufshcd_release(hba, false);
__ufshcd_hibern8_release(hba, false);
if (cmd->request) {
@@ -5421,7 +5573,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
if (hba->dev_cmd.complete) {
ufshcd_cond_add_cmd_trace(hba, index,
- "dev_complete");
+ "dcmp");
complete(hba->dev_cmd.complete);
}
}
@@ -5907,6 +6059,7 @@ static void ufshcd_err_handler(struct work_struct *work)
if (unlikely((hba->clk_gating.state != CLKS_ON) &&
ufshcd_is_auto_hibern8_supported(hba))) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
ufshcd_hold(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
clks_enabled = true;
@@ -5943,6 +6096,7 @@ static void ufshcd_err_handler(struct work_struct *work)
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+ ufshcd_cmd_log_print(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
}
}
@@ -6049,8 +6203,10 @@ skip_err_handling:
hba->silence_err_logs = false;
- if (clks_enabled)
+ if (clks_enabled) {
__ufshcd_release(hba, false);
+ hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
+ }
out:
ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -6286,7 +6442,8 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-
+ hba->ufs_stats.last_intr_status = intr_status;
+ hba->ufs_stats.last_intr_ts = ktime_get();
/*
* There could be max of hba->nutrs reqs in flight and in worst case
* if the reqs get finished 1 by 1 after the interrupt status is
@@ -6365,6 +6522,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+ hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
ufshcd_hold_all(hba);
spin_lock_irqsave(host->host_lock, flags);
@@ -6422,6 +6580,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
clear_bit(free_slot, &hba->tm_condition);
ufshcd_put_tm_slot(hba, free_slot);
wake_up(&hba->tm_tag_wq);
+ hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
ufshcd_release_all(hba);
return err;
@@ -6449,6 +6608,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
hba = shost_priv(host);
tag = cmd->request->tag;
+ ufshcd_cmd_log_print(hba);
lrbp = &hba->lrb[tag];
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
@@ -9315,6 +9475,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
int ret = 0;
/* let's not get into low power until clock scaling is completed */
+ hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
ufshcd_hold_all(hba);
ret = ufshcd_clock_scaling_prepare(hba);
@@ -9378,6 +9539,7 @@ scale_up_gear:
clk_scaling_unprepare:
ufshcd_clock_scaling_unprepare(hba);
out:
+ hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
ufshcd_release_all(hba);
return ret;
}
@@ -9809,6 +9971,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
ufshcd_set_ufs_dev_active(hba);
+ ufshcd_cmd_log_init(hba);
+
async_schedule(ufshcd_async_scan, hba);
ufsdbg_add_debugfs(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index c34a998aac17..d66205ff9f5d 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -3,7 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.h
* Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -519,7 +519,7 @@ struct ufs_init_prefetch {
u32 icc_level;
};
-#define UIC_ERR_REG_HIST_LENGTH 8
+#define UIC_ERR_REG_HIST_LENGTH 20
/**
* struct ufs_uic_err_reg_hist - keeps history of uic errors
* @pos: index to indicate cyclic buffer position
@@ -588,6 +588,22 @@ struct ufshcd_req_stat {
};
#endif
+enum ufshcd_ctx {
+ QUEUE_CMD,
+ ERR_HNDLR_WORK,
+ H8_EXIT_WORK,
+ UIC_CMD_SEND,
+ PWRCTL_CMD_SEND,
+ TM_CMD_SEND,
+ XFR_REQ_COMPL,
+ CLK_SCALE_WORK,
+};
+
+struct ufshcd_clk_ctx {
+ ktime_t ts;
+ enum ufshcd_ctx ctx;
+};
+
/**
* struct ufs_stats - keeps usage/err statistics
* @enabled: enable tag stats for debugfs
@@ -616,6 +632,10 @@ struct ufs_stats {
int query_stats_arr[UPIU_QUERY_OPCODE_MAX][MAX_QUERY_IDN];
#endif
+ u32 last_intr_status;
+ ktime_t last_intr_ts;
+ struct ufshcd_clk_ctx clk_hold;
+ struct ufshcd_clk_ctx clk_rel;
u32 hibern8_exit_cnt;
ktime_t last_hibern8_exit_tstamp;
struct ufs_uic_err_reg_hist pa_err;
@@ -641,6 +661,27 @@ struct ufs_stats {
UFSHCD_DBG_PRINT_TMRS_EN | UFSHCD_DBG_PRINT_PWR_EN | \
UFSHCD_DBG_PRINT_HOST_STATE_EN)
+struct ufshcd_cmd_log_entry {
+ char *str; /* context like "send", "complete" */
+ char *cmd_type; /* "scsi", "query", "nop", "dme" */
+ u8 lun;
+ u8 cmd_id;
+ sector_t lba;
+ int transfer_len;
+ u8 idn; /* used only for query idn */
+ u32 doorbell;
+ u32 outstanding_reqs;
+ u32 seq_num;
+ unsigned int tag;
+ ktime_t tstamp;
+};
+
+struct ufshcd_cmd_log {
+ struct ufshcd_cmd_log_entry *entries;
+ int pos;
+ u32 seq_num;
+};
+
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -856,6 +897,7 @@ struct ufs_hba {
struct ufs_clk_gating clk_gating;
struct ufs_hibern8_on_idle hibern8_on_idle;
+ struct ufshcd_cmd_log cmd_log;
/* Control to enable/disable host capabilities */
u32 caps;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 34b0adb108eb..ea008ffbc856 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -367,6 +367,14 @@ config MSM_SPM
driver allows configuring SPM to allow different low power modes for
both core and L2.
+config MSM_L2_SPM
+ bool "SPM support for L2 cache"
+ help
+ Enable SPM driver support for L2 cache. Some MSM chipsets allow
+	  control of L2 cache low power modes with a Subsystem Power Manager.
+	  Enabling this driver allows configuring the L2 SPM for low power modes
+	  on supported chipsets.
+
config QCOM_SCM
bool "Secure Channel Manager (SCM) support"
default n
@@ -573,6 +581,16 @@ config MSM_BOOT_STATS
This figures are reported in mpm sleep clock cycles and have a
resolution of 31 bits as 1 bit is used as an overflow check.
+config MSM_BOOT_TIME_MARKER
+ bool "Use MSM boot time marker reporting"
+ depends on MSM_BOOT_STATS
+ help
+ Instrumentation for measuring MSM boot time KPIs. To create an entry
+ from kernel code, call the "place_marker" function. From userspace,
+ write the marker name to "/sys/kernel/debug/bootkpi/kpi_values".
+ If unsure, say N.
+
config QCOM_CPUSS_DUMP
bool "CPU Subsystem Dumping support"
help
@@ -900,4 +918,28 @@ config QCOM_CX_IPEAK
clients are going to cross their thresholds then Cx ipeak hw module will raise
an interrupt to cDSP block to throttle cDSP fmax.
+config MSM_CACHE_M4M_ERP64
+ bool "Cache and M4M error report"
+ depends on ARCH_MSM8996
+ help
+ Say 'Y' here to enable reporting of cache and M4M errors to the kernel
+ log. The report includes the collected error syndrome and address
+ registers. These register dumps are useful for diagnosing possible
+ hardware problems.
+
+config MSM_CACHE_M4M_ERP64_PANIC_ON_CE
+ bool "Panic on correctable cache/M4M errors"
+ help
+ Say 'Y' here to cause kernel panic when correctable cache/M4M errors
+ are detected. Enabling this is useful when you want to dump memory
+ and system state close to the time when the error occurred.
+
+ If unsure, say N.
+
+config MSM_CACHE_M4M_ERP64_PANIC_ON_UE
+ bool "Panic on uncorrectable cache/M4M errors"
+ help
+ Say 'Y' here to cause kernel panic when uncorrectable cache/M4M errors
+ are detected.
+
source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 87698b75d3b8..5eeede23333d 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_MSM_CORE_HANG_DETECT) += core_hang_detect.o
obj-$(CONFIG_MSM_GLADIATOR_HANG_DETECT) += gladiator_hang_detect.o
obj-$(CONFIG_MSM_RUN_QUEUE_STATS) += msm_rq_stats.o
obj-$(CONFIG_MSM_BOOT_STATS) += boot_stats.o
+obj-$(CONFIG_MSM_BOOT_TIME_MARKER) += boot_marker.o
obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
ifdef CONFIG_ARCH_MSM8996
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_kryo.o
@@ -104,3 +105,4 @@ obj-$(CONFIG_WCD_DSP_GLINK) += wcd-dsp-glink.o
obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
obj-$(CONFIG_QCOM_EARLY_RANDOM) += early_random.o
obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o
+obj-$(CONFIG_MSM_CACHE_M4M_ERP64) += cache_m4m_erp64.o
diff --git a/drivers/soc/qcom/boot_marker.c b/drivers/soc/qcom/boot_marker.c
new file mode 100644
index 000000000000..b3a6c9f8d054
--- /dev/null
+++ b/drivers/soc/qcom/boot_marker.c
@@ -0,0 +1,183 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <soc/qcom/boot_stats.h>
+
+#define MAX_STRING_LEN 256
+#define BOOT_MARKER_MAX_LEN 40
+static struct dentry *dent_bkpi, *dent_bkpi_status;
+static struct boot_marker boot_marker_list;
+
+struct boot_marker {
+ char marker_name[BOOT_MARKER_MAX_LEN];
+ unsigned long long int timer_value;
+ struct list_head list;
+ struct mutex lock;
+};
+
+static void _create_boot_marker(const char *name,
+ unsigned long long int timer_value)
+{
+ struct boot_marker *new_boot_marker;
+
+ pr_debug("%-41s:%llu.%03llu seconds\n", name,
+ timer_value/TIMER_KHZ,
+ ((timer_value % TIMER_KHZ)
+ * 1000) / TIMER_KHZ);
+
+ new_boot_marker = kmalloc(sizeof(*new_boot_marker), GFP_KERNEL);
+ if (!new_boot_marker)
+ return;
+
+ strlcpy(new_boot_marker->marker_name, name,
+ sizeof(new_boot_marker->marker_name));
+ new_boot_marker->timer_value = timer_value;
+
+ mutex_lock(&boot_marker_list.lock);
+ list_add_tail(&(new_boot_marker->list), &(boot_marker_list.list));
+ mutex_unlock(&boot_marker_list.lock);
+}
+
+static void set_bootloader_stats(void)
+{
+ _create_boot_marker("M - APPSBL Start - ",
+ readl_relaxed(&boot_stats->bootloader_start));
+ _create_boot_marker("M - APPSBL Display Init - ",
+ readl_relaxed(&boot_stats->bootloader_display));
+ _create_boot_marker("M - APPSBL Early-Domain Start - ",
+ readl_relaxed(&boot_stats->bootloader_early_domain_start));
+ _create_boot_marker("D - APPSBL Kernel Load Time - ",
+ readl_relaxed(&boot_stats->bootloader_load_kernel));
+ _create_boot_marker("D - APPSBL Kernel Auth Time - ",
+ readl_relaxed(&boot_stats->bootloader_checksum));
+ _create_boot_marker("M - APPSBL End - ",
+ readl_relaxed(&boot_stats->bootloader_end));
+}
+
+void place_marker(const char *name)
+{
+ _create_boot_marker((char *) name, msm_timer_get_sclk_ticks());
+}
+EXPORT_SYMBOL(place_marker);
+
+static ssize_t bootkpi_reader(struct file *fp, char __user *user_buffer,
+ size_t count, loff_t *position)
+{
+ int rc = 0;
+ char *buf;
+ int temp = 0;
+ struct boot_marker *marker;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&boot_marker_list.lock);
+ list_for_each_entry(marker, &boot_marker_list.list, list) {
+ temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+ "%-41s:%llu.%03llu seconds\n",
+ marker->marker_name,
+ marker->timer_value/TIMER_KHZ,
+ (((marker->timer_value % TIMER_KHZ)
+ * 1000) / TIMER_KHZ));
+ }
+ mutex_unlock(&boot_marker_list.lock);
+ rc = simple_read_from_buffer(user_buffer, count, position, buf, temp);
+ kfree(buf);
+ return rc;
+}
+
+static ssize_t bootkpi_writer(struct file *fp, const char __user *user_buffer,
+ size_t count, loff_t *position)
+{
+ int rc = 0;
+ char buf[MAX_STRING_LEN];
+
+ if (count > MAX_STRING_LEN)
+ return -EINVAL;
+ rc = simple_write_to_buffer(buf,
+ sizeof(buf) - 1, position, user_buffer, count);
+ if (rc < 0)
+ return rc;
+ buf[rc] = '\0';
+ place_marker(buf);
+ return rc;
+}
+
+static int bootkpi_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations fops_bkpi = {
+ .owner = THIS_MODULE,
+ .open = bootkpi_open,
+ .read = bootkpi_reader,
+ .write = bootkpi_writer,
+};
+
+static int __init init_bootkpi(void)
+{
+ dent_bkpi = debugfs_create_dir("bootkpi", NULL);
+ if (IS_ERR_OR_NULL(dent_bkpi))
+ return -ENODEV;
+
+ dent_bkpi_status = debugfs_create_file("kpi_values",
+ (S_IRUGO|S_IWUGO), dent_bkpi, 0, &fops_bkpi);
+ if (IS_ERR_OR_NULL(dent_bkpi_status)) {
+ debugfs_remove(dent_bkpi);
+ dent_bkpi = NULL;
+ pr_err("boot_marker: Could not create 'kpi_values' debugfs file\n");
+ return -ENODEV;
+ }
+
+ INIT_LIST_HEAD(&boot_marker_list.list);
+ mutex_init(&boot_marker_list.lock);
+ set_bootloader_stats();
+ return 0;
+}
+subsys_initcall(init_bootkpi);
+
+static void __exit exit_bootkpi(void)
+{
+ struct boot_marker *marker;
+ struct boot_marker *temp_addr;
+
+ debugfs_remove_recursive(dent_bkpi);
+ mutex_lock(&boot_marker_list.lock);
+ list_for_each_entry_safe(marker, temp_addr, &boot_marker_list.list,
+ list) {
+ list_del(&marker->list);
+ kfree(marker);
+ }
+ mutex_unlock(&boot_marker_list.lock);
+ boot_stats_exit();
+}
+module_exit(exit_bootkpi);
+
+MODULE_DESCRIPTION("MSM boot key performance indicators");
+MODULE_LICENSE("GPL v2");
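As noted in the MSM_BOOT_TIME_MARKER help text, a marker can be placed either from kernel code through place_marker() (exported above) or from userspace by writing a name into /sys/kernel/debug/bootkpi/kpi_values. A minimal kernel-side sketch follows; the driver init function and the marker string are made up for illustration.

/* Illustrative sketch only: placing a boot KPI marker from a driver.
 * The marker text follows the "M - ..." convention used by
 * set_bootloader_stats() above; the exact string is arbitrary.
 */
#include <linux/init.h>
#include <soc/qcom/boot_stats.h>

static int __init example_marker_init(void)
{
	place_marker("M - Example driver ready");
	return 0;
}
late_initcall(example_marker_init);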
diff --git a/drivers/soc/qcom/boot_stats.c b/drivers/soc/qcom/boot_stats.c
index 2fc9cbf55d4b..eb5357e892eb 100644
--- a/drivers/soc/qcom/boot_stats.c
+++ b/drivers/soc/qcom/boot_stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014,2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -22,17 +23,13 @@
#include <linux/sched.h>
#include <linux/of.h>
#include <linux/of_address.h>
-
-struct boot_stats {
- uint32_t bootloader_start;
- uint32_t bootloader_end;
- uint32_t bootloader_display;
- uint32_t bootloader_load_kernel;
-};
+#include <linux/export.h>
+#include <linux/types.h>
+#include <soc/qcom/boot_stats.h>
static void __iomem *mpm_counter_base;
static uint32_t mpm_counter_freq;
-static struct boot_stats __iomem *boot_stats;
+struct boot_stats __iomem *boot_stats;
static int mpm_parse_dt(void)
{
@@ -88,6 +85,42 @@ static void print_boot_stats(void)
mpm_counter_freq);
}
+unsigned long long int msm_timer_get_sclk_ticks(void)
+{
+ unsigned long long int t1, t2;
+ int loop_count = 10;
+ int loop_zero_count = 3;
+ int tmp = USEC_PER_SEC;
+ void __iomem *sclk_tick;
+
+ do_div(tmp, TIMER_KHZ);
+ tmp /= (loop_zero_count-1);
+ sclk_tick = mpm_counter_base;
+ if (!sclk_tick)
+ return -EINVAL;
+ while (loop_zero_count--) {
+ t1 = __raw_readl_no_log(sclk_tick);
+ do {
+ udelay(1);
+ t2 = t1;
+ t1 = __raw_readl_no_log(sclk_tick);
+ } while ((t2 != t1) && --loop_count);
+ if (!loop_count) {
+ pr_err("boot_stats: SCLK did not stabilize\n");
+ return 0;
+ }
+ if (t1)
+ break;
+
+ udelay(tmp);
+ }
+ if (!loop_zero_count) {
+ pr_err("boot_stats: SCLK reads zero\n");
+ return 0;
+ }
+ return t1;
+}
+
int boot_stats_init(void)
{
int ret;
@@ -98,9 +131,14 @@ int boot_stats_init(void)
print_boot_stats();
+ if (!(boot_marker_enabled()))
+ boot_stats_exit();
+ return 0;
+}
+
+int boot_stats_exit(void)
+{
iounmap(boot_stats);
iounmap(mpm_counter_base);
-
return 0;
}
-
diff --git a/drivers/soc/qcom/cache_m4m_erp64.c b/drivers/soc/qcom/cache_m4m_erp64.c
new file mode 100644
index 000000000000..758e9d03e07b
--- /dev/null
+++ b/drivers/soc/qcom/cache_m4m_erp64.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "msm_cache_erp64: " fmt
+
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/cpu.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/cpu_pm.h>
+#include <linux/smp.h>
+
+#include <soc/qcom/kryo-l2-accessors.h>
+
+/* Instruction cache */
+#define ICECR_EL1 S3_1_c11_c1_0
+#define ICECR_IRQ_EN (BIT(1) | BIT(3) | BIT(5) | BIT(7))
+#define ICESR_EL1 S3_1_c11_c1_1
+#define ICESR_BIT_L1DPE BIT(3)
+#define ICESR_BIT_L1TPE BIT(2)
+#define ICESR_BIT_L0DPE BIT(1)
+#define ICESR_BIT_L0TPE BIT(0)
+#define ICESYNR0_EL1 S3_1_c11_c1_3
+#define ICESYNR1_EL1 S3_1_c11_c1_4
+#define ICEAR0_EL1 S3_1_c11_c1_5
+#define ICEAR1_EL1 S3_1_c11_c1_6
+#define ICESRS_EL1 S3_1_c11_c1_2
+
+/* Data cache */
+#define DCECR_EL1 S3_1_c11_c5_0
+#define DCECR_IRQ_EN (BIT(1) | BIT(3) | BIT(5) | BIT(7) | \
+ BIT(9))
+#define DCESR_EL1 S3_1_c11_c5_1
+#define DCESR_BIT_S1FTLBDPE BIT(4)
+#define DCESR_BIT_S1FTLBTPE BIT(3)
+#define DCESR_BIT_L1DPE BIT(2)
+#define DCESR_BIT_L1PTPE BIT(1)
+#define DCESR_BIT_L1VTPE BIT(0)
+#define DCESYNR0_EL1 S3_1_c11_c5_3
+#define DCESYNR1_EL1 S3_1_c11_c5_4
+#define DCESRS_EL1 S3_1_c11_c5_2
+#define DCEAR0_EL1 S3_1_c11_c5_5
+#define DCEAR1_EL1 S3_1_c11_c5_6
+
+/* L2 cache */
+#define L2CPUSRSELR_EL1I S3_3_c15_c0_6
+#define L2CPUSRDR_EL1 S3_3_c15_c0_7
+#define L2ECR0_IA 0x200
+#define L2ECR0_IRQ_EN (BIT(1) | BIT(3) | BIT(6) | BIT(9) | \
+ BIT(11) | BIT(13) | BIT(16) | \
+ BIT(19) | BIT(21) | BIT(23) | \
+ BIT(26) | BIT(29))
+
+#define L2ECR1_IA 0x201
+#define L2ECR1_IRQ_EN (BIT(1) | BIT(3) | BIT(6) | BIT(9) | \
+ BIT(11) | BIT(13) | BIT(16) | \
+ BIT(19) | BIT(21) | BIT(23) | BIT(29))
+#define L2ECR2_IA 0x202
+#define L2ECR2_IRQ_EN_MASK 0x3FFFFFF
+#define L2ECR2_IRQ_EN (BIT(1) | BIT(3) | BIT(6) | BIT(9) | \
+ BIT(12) | BIT(15) | BIT(17) | \
+ BIT(19) | BIT(22) | BIT(25))
+#define L2ESR0_IA 0x204
+#define L2ESR0_MASK 0x00FFFFFF
+#define L2ESR0_CE ((BIT(0) | BIT(1) | BIT(2) | BIT(3) | \
+ BIT(4) | BIT(5) | BIT(12) | BIT(13) | \
+ BIT(14) | BIT(15) | BIT(16) | BIT(17)) \
+ & L2ESR0_MASK)
+#define L2ESR0_UE (~L2ESR0_CE & L2ESR0_MASK)
+#define L2ESRS0_IA 0x205
+#define L2ESR1_IA 0x206
+#define L2ESR1_MASK 0x80FFFBFF
+#define L2ESRS1_IA 0x207
+#define L2ESYNR0_IA 0x208
+#define L2ESYNR1_IA 0x209
+#define L2ESYNR2_IA 0x20A
+#define L2ESYNR3_IA 0x20B
+#define L2ESYNR4_IA 0x20C
+#define L2EAR0_IA 0x20E
+#define L2EAR1_IA 0x20F
+
+#define L3_QLL_HML3_FIRA 0x3000
+#define L3_QLL_HML3_FIRA_CE (BIT(1) | BIT(3) | BIT(5))
+#define L3_QLL_HML3_FIRA_UE (BIT(2) | BIT(4) | BIT(6))
+#define L3_QLL_HML3_FIRAC 0x3008
+#define L3_QLL_HML3_FIRAS 0x3010
+#define L3_QLL_HML3_FIRAT0C 0x3020
+#define L3_QLL_HML3_FIRAT0C_IRQ_EN 0xFFFFFFFF
+#define L3_QLL_HML3_FIRAT1C 0x3024
+#define L3_QLL_HML3_FIRAT1S 0x302C
+#define L3_QLL_HML3_FIRAT1S_IRQ_EN 0x01EFC8FE
+#define L3_QLL_HML3_FIRSYNA 0x3100
+#define L3_QLL_HML3_FIRSYNB 0x3104
+#define L3_QLL_HML3_FIRSYNC 0x3108
+#define L3_QLL_HML3_FIRSYND 0x310C
+
+#define M4M_ERR_STATUS 0x10000
+#define M4M_ERR_STATUS_MASK 0x1FF
+#define M4M_ERR_Q22SIB_RET_DEC_ERR (BIT(7))
+#define M4M_ERR_Q22SIB_RET_SLV_ERR (BIT(6))
+#define M4M_ERR_CLR 0x10008
+#define M4M_INT_CTRL 0x10010
+#define M4M_INT_CTRL_IRQ_EN 0x1FF
+#define M4M_ERR_CTRL 0x10018
+#define M4M_ERR_INJ 0x10020
+#define M4M_ERR_CAP_0 0x10030
+#define M4M_ERR_CAP_1 0x10038
+#define M4M_ERR_CAP_2 0x10040
+#define M4M_ERR_CAP_3 0x10048
+
+#define AFFINITY_LEVEL_L3 3
+
+#ifdef CONFIG_MSM_CACHE_M4M_ERP64_PANIC_ON_CE
+static bool __read_mostly panic_on_ce = true;
+#else
+static bool __read_mostly panic_on_ce;
+#endif
+
+#ifdef CONFIG_MSM_CACHE_M4M_ERP64_PANIC_ON_UE
+static bool __read_mostly panic_on_ue = true;
+#else
+static bool __read_mostly panic_on_ue;
+#endif
+
+module_param(panic_on_ce, bool, false);
+module_param(panic_on_ue, bool, false);
+
+static void __iomem *hml3_base;
+static void __iomem *m4m_base;
+
+enum erp_irq_index { IRQ_L1, IRQ_L2_INFO0, IRQ_L2_INFO1, IRQ_L2_ERR0,
+ IRQ_L2_ERR1, IRQ_L3, IRQ_M4M, IRQ_MAX };
+static const char * const erp_irq_names[] = {
+ "l1_irq", "l2_irq_info_0", "l2_irq_info_1", "l2_irq_err_0",
+ "l2_irq_err_1", "l3_irq", "m4m_irq"
+};
+static int erp_irqs[IRQ_MAX];
+
+struct msm_l1_err_stats {
+ /* nothing */
+};
+
+static DEFINE_PER_CPU(struct msm_l1_err_stats, msm_l1_erp_stats);
+static DEFINE_PER_CPU(struct call_single_data, handler_csd);
+
+#define erp_mrs(reg) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(reg) : "=r" (__val)); \
+ __val; \
+})
+
+#define erp_msr(reg, val) { \
+ asm volatile("msr " __stringify(reg) ", %0" : : "r" (val)); \
+}
+
+static void msm_erp_show_icache_error(void)
+{
+ u64 icesr;
+ int cpu = raw_smp_processor_id();
+
+ icesr = erp_mrs(ICESR_EL1);
+ if (!(icesr & (ICESR_BIT_L0TPE | ICESR_BIT_L0DPE | ICESR_BIT_L1TPE |
+ ICESR_BIT_L1DPE))) {
+ pr_debug("CPU%d: No I-cache error detected ICESR 0x%llx\n",
+ cpu, icesr);
+ goto clear_out;
+ }
+
+ pr_alert("CPU%d: I-cache error\n", cpu);
+ pr_alert("CPU%d: ICESR_EL1 0x%llx ICESYNR0 0x%llx ICESYNR1 0x%llx ICEAR0 0x%llx IECAR1 0x%llx\n",
+ cpu, icesr, erp_mrs(ICESYNR0_EL1), erp_mrs(ICESYNR1_EL1),
+ erp_mrs(ICEAR0_EL1), erp_mrs(ICEAR1_EL1));
+
+ /*
+ * all detectable I-cache errors are recoverable as
+ * corrupted lines are refetched
+ */
+ if (panic_on_ce)
+ BUG_ON(1);
+ else
+ WARN_ON(1);
+
+clear_out:
+ erp_msr(ICESR_EL1, icesr);
+}
+
+static void msm_erp_show_dcache_error(void)
+{
+ u64 dcesr;
+ int cpu = raw_smp_processor_id();
+
+ dcesr = erp_mrs(DCESR_EL1);
+ if (!(dcesr & (DCESR_BIT_L1VTPE | DCESR_BIT_L1PTPE | DCESR_BIT_L1DPE |
+ DCESR_BIT_S1FTLBTPE | DCESR_BIT_S1FTLBDPE))) {
+ pr_debug("CPU%d: No D-cache error detected DCESR 0x%llx\n",
+ cpu, dcesr);
+ goto clear_out;
+ }
+
+ pr_alert("CPU%d: D-cache error detected\n", cpu);
+ pr_alert("CPU%d: L1 DCESR 0x%llx, DCESYNR0 0x%llx, DCESYNR1 0x%llx, DCEAR0 0x%llx, DCEAR1 0x%llx\n",
+ cpu, dcesr, erp_mrs(DCESYNR0_EL1), erp_mrs(DCESYNR1_EL1),
+ erp_mrs(DCEAR0_EL1), erp_mrs(DCEAR1_EL1));
+
+ /* all D-cache errors are correctable */
+ if (panic_on_ce)
+ BUG_ON(1);
+ else
+ WARN_ON(1);
+
+clear_out:
+ erp_msr(DCESR_EL1, dcesr);
+}
+
+static irqreturn_t msm_l1_erp_irq(int irq, void *dev_id)
+{
+ msm_erp_show_icache_error();
+ msm_erp_show_dcache_error();
+ return IRQ_HANDLED;
+}
+
+static DEFINE_SPINLOCK(local_handler_lock);
+static void msm_l2_erp_local_handler(void *force)
+{
+ unsigned long flags;
+ u64 esr0, esr1;
+ bool parity_ue, parity_ce, misc_ue;
+ int cpu;
+
+ spin_lock_irqsave(&local_handler_lock, flags);
+
+ esr0 = get_l2_indirect_reg(L2ESR0_IA);
+ esr1 = get_l2_indirect_reg(L2ESR1_IA);
+ parity_ue = esr0 & L2ESR0_UE;
+ parity_ce = esr0 & L2ESR0_CE;
+ misc_ue = esr1;
+ cpu = raw_smp_processor_id();
+
+ if (force || parity_ue || parity_ce || misc_ue) {
+ if (parity_ue)
+ pr_alert("CPU%d: L2 uncorrectable parity error\n", cpu);
+ if (parity_ce)
+ pr_alert("CPU%d: L2 correctable parity error\n", cpu);
+ if (misc_ue)
+ pr_alert("CPU%d: L2 (non-parity) error\n", cpu);
+ pr_alert("CPU%d: L2ESR0 0x%llx, L2ESR1 0x%llx\n",
+ cpu, esr0, esr1);
+ pr_alert("CPU%d: L2ESYNR0 0x%llx, L2ESYNR1 0x%llx, L2ESYNR2 0x%llx\n",
+ cpu, get_l2_indirect_reg(L2ESYNR0_IA),
+ get_l2_indirect_reg(L2ESYNR1_IA),
+ get_l2_indirect_reg(L2ESYNR2_IA));
+ pr_alert("CPU%d: L2EAR0 0x%llx, L2EAR1 0x%llx\n", cpu,
+ get_l2_indirect_reg(L2EAR0_IA),
+ get_l2_indirect_reg(L2EAR1_IA));
+ } else {
+ pr_info("CPU%d: No L2 error detected in L2ESR0 0x%llx, L2ESR1 0x%llx)\n",
+ cpu, esr0, esr1);
+ }
+
+ /* clear */
+ set_l2_indirect_reg(L2ESR0_IA, esr0);
+ set_l2_indirect_reg(L2ESR1_IA, esr1);
+
+ if (panic_on_ue)
+ BUG_ON(parity_ue || misc_ue);
+ else
+ WARN_ON(parity_ue || misc_ue);
+
+ if (panic_on_ce)
+ BUG_ON(parity_ce);
+ else
+ WARN_ON(parity_ce);
+
+ spin_unlock_irqrestore(&local_handler_lock, flags);
+}
+
+static irqreturn_t msm_l2_erp_irq(int irq, void *dev_id)
+{
+ int cpu;
+ struct call_single_data *csd;
+
+ for_each_online_cpu(cpu) {
+ csd = &per_cpu(handler_csd, cpu);
+ csd->func = msm_l2_erp_local_handler;
+ smp_call_function_single_async(cpu, csd);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t msm_l3_erp_irq(int irq, void *dev_id)
+{
+ u32 hml3_fira;
+ bool parity_ue, parity_ce, misc_ue;
+
+ hml3_fira = readl_relaxed(hml3_base + L3_QLL_HML3_FIRA);
+ parity_ue = (hml3_fira & L3_QLL_HML3_FIRAT1S_IRQ_EN) &
+ L3_QLL_HML3_FIRA_UE;
+ parity_ce = (hml3_fira & L3_QLL_HML3_FIRAT1S_IRQ_EN) &
+ L3_QLL_HML3_FIRA_CE;
+ misc_ue = (hml3_fira & L3_QLL_HML3_FIRAT1S_IRQ_EN) &
+ ~(L3_QLL_HML3_FIRA_UE | L3_QLL_HML3_FIRA_CE);
+ if (parity_ue)
+ pr_alert("L3 uncorrectable parity error\n");
+ if (parity_ce)
+ pr_alert("L3 correctable parity error\n");
+ if (misc_ue)
+ pr_alert("L3 (non-parity) error\n");
+
+ pr_alert("HML3_FIRA 0x%0x\n", hml3_fira);
+ pr_alert("HML3_FIRSYNA 0x%0x, HML3_FIRSYNB 0x%0x\n",
+ readl_relaxed(hml3_base + L3_QLL_HML3_FIRSYNA),
+ readl_relaxed(hml3_base + L3_QLL_HML3_FIRSYNB));
+ pr_alert("HML3_FIRSYNC 0x%0x, HML3_FIRSYND 0x%0x\n",
+ readl_relaxed(hml3_base + L3_QLL_HML3_FIRSYNC),
+ readl_relaxed(hml3_base + L3_QLL_HML3_FIRSYND));
+
+ if (panic_on_ue)
+ BUG_ON(parity_ue || misc_ue);
+ else
+ WARN_ON(parity_ue || misc_ue);
+
+ if (panic_on_ce)
+ BUG_ON(parity_ce);
+ else
+ WARN_ON(parity_ce);
+
+ writel_relaxed(hml3_fira, hml3_base + L3_QLL_HML3_FIRAC);
+ /* ensure the irq clear is complete */
+ wmb();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t msm_m4m_erp_irq(int irq, void *dev_id)
+{
+ u32 m4m_status;
+
+ pr_alert("CPU%d: M4M error detected\n", raw_smp_processor_id());
+ m4m_status = readl_relaxed(m4m_base + M4M_ERR_STATUS);
+ pr_alert("M4M_ERR_STATUS 0x%0x\n", m4m_status);
+ if ((m4m_status & M4M_ERR_STATUS_MASK) &
+ ~(M4M_ERR_Q22SIB_RET_DEC_ERR | M4M_ERR_Q22SIB_RET_SLV_ERR)) {
+ pr_alert("M4M_ERR_CAP_0 0x%0x, M4M_ERR_CAP_1 0x%x\n",
+ readl_relaxed(m4m_base + M4M_ERR_CAP_0),
+ readl_relaxed(m4m_base + M4M_ERR_CAP_1));
+ pr_alert("M4M_ERR_CAP_2 0x%0x, M4M_ERR_CAP_3 0x%x\n",
+ readl_relaxed(m4m_base + M4M_ERR_CAP_2),
+ readl_relaxed(m4m_base + M4M_ERR_CAP_3));
+ } else {
+ /*
+ * M4M error-capture registers are not valid when the error was
+ * detected due to DEC_ERR or SLV_ERR. L2E registers are still valid.
+ */
+ pr_alert("Omit dumping M4M_ERR_CAP\n");
+ }
+
+ /*
+ * On QSB errors, the L2 captures the bad address and syndrome in
+ * L2E error registers. Therefore, always dump the L2E registers
+ * whenever an M4M error is detected.
+ */
+ on_each_cpu(msm_l2_erp_local_handler, (void *)1, 1);
+ writel_relaxed(1, m4m_base + M4M_ERR_CLR);
+ /* ensure the irq clear is complete */
+ wmb();
+
+ if (panic_on_ue)
+ BUG_ON(1);
+ else
+ WARN_ON(1);
+
+ return IRQ_HANDLED;
+}
+
+static void enable_erp_irq_callback(void *info)
+{
+ enable_percpu_irq(erp_irqs[IRQ_L1], IRQ_TYPE_NONE);
+}
+
+static void disable_erp_irq_callback(void *info)
+{
+ disable_percpu_irq(erp_irqs[IRQ_L1]);
+}
+
+static void msm_cache_erp_irq_init(void *param)
+{
+ u64 v;
+ /* Enable L0/L1 I/D cache error reporting. */
+ erp_msr(ICECR_EL1, ICECR_IRQ_EN);
+ erp_msr(DCECR_EL1, DCECR_IRQ_EN);
+ /*
+ * Enable L2 data, tag, QSB and poison error reporting.
+ */
+ set_l2_indirect_reg(L2ECR0_IA, L2ECR0_IRQ_EN);
+ set_l2_indirect_reg(L2ECR1_IA, L2ECR1_IRQ_EN);
+ v = (get_l2_indirect_reg(L2ECR2_IA) & ~L2ECR2_IRQ_EN_MASK)
+ | L2ECR2_IRQ_EN;
+ set_l2_indirect_reg(L2ECR2_IA, v);
+}
+
+static void msm_cache_erp_l3_init(void)
+{
+ writel_relaxed(L3_QLL_HML3_FIRAT0C_IRQ_EN,
+ hml3_base + L3_QLL_HML3_FIRAT0C);
+ writel_relaxed(L3_QLL_HML3_FIRAT1S_IRQ_EN,
+ hml3_base + L3_QLL_HML3_FIRAT1S);
+}
+
+static int cache_erp_cpu_pm_callback(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ unsigned long aff_level = (unsigned long) v;
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_EXIT:
+ msm_cache_erp_irq_init(NULL);
+
+ if (aff_level >= AFFINITY_LEVEL_L3)
+ msm_cache_erp_l3_init();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cache_erp_cpu_pm_notifier = {
+ .notifier_call = cache_erp_cpu_pm_callback,
+};
+
+static int cache_erp_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ msm_cache_erp_irq_init(NULL);
+ enable_erp_irq_callback(NULL);
+ break;
+ case CPU_DYING:
+ disable_erp_irq_callback(NULL);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cache_erp_cpu_notifier = {
+ .notifier_call = cache_erp_cpu_callback,
+};
+
+static int msm_cache_erp_probe(struct platform_device *pdev)
+{
+ int i, ret = 0;
+ struct resource *r;
+
+ dev_dbg(&pdev->dev, "enter\n");
+
+ /* L3 */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hml3_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(hml3_base)) {
+ dev_err(&pdev->dev, "failed to ioremap (0x%pK)\n", hml3_base);
+ return PTR_ERR(hml3_base);
+ }
+
+ for (i = 0; i <= IRQ_L3; i++) {
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ erp_irq_names[i]);
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get %s\n",
+ erp_irq_names[i]);
+ return -ENODEV;
+ }
+ erp_irqs[i] = r->start;
+ }
+
+ msm_cache_erp_l3_init();
+
+ /* L0/L1 erp irq per cpu */
+ dev_info(&pdev->dev, "Registering for L1 error interrupts\n");
+ ret = request_percpu_irq(erp_irqs[IRQ_L1], msm_l1_erp_irq,
+ erp_irq_names[IRQ_L1], &msm_l1_erp_stats);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request L0/L1 ERP irq %s (%d)\n",
+ erp_irq_names[IRQ_L1], ret);
+ return ret;
+ } else {
+ dev_dbg(&pdev->dev, "requested L0/L1 ERP irq %s\n",
+ erp_irq_names[IRQ_L1]);
+ }
+
+ get_online_cpus();
+ register_hotcpu_notifier(&cache_erp_cpu_notifier);
+ cpu_pm_register_notifier(&cache_erp_cpu_pm_notifier);
+
+ /* Perform L1/L2 cache error detection init on online cpus */
+ on_each_cpu(msm_cache_erp_irq_init, NULL, 1);
+ /* Enable irqs */
+ on_each_cpu(enable_erp_irq_callback, NULL, 1);
+ put_online_cpus();
+
+ /* L2 erp irq per cluster */
+ dev_info(&pdev->dev, "Registering for L2 error interrupts\n");
+ for (i = IRQ_L2_INFO0; i <= IRQ_L2_ERR1; i++) {
+ ret = devm_request_irq(&pdev->dev, erp_irqs[i],
+ msm_l2_erp_irq,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_HIGH,
+ erp_irq_names[i], NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq %s (%d)\n",
+ erp_irq_names[i], ret);
+ goto cleanup;
+ }
+ }
+
+ /* L3 erp irq */
+ dev_info(&pdev->dev, "Registering for L3 error interrupts\n");
+ ret = devm_request_irq(&pdev->dev, erp_irqs[IRQ_L3], msm_l3_erp_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ erp_irq_names[IRQ_L3], NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request L3 irq %s (%d)\n",
+ erp_irq_names[IRQ_L3], ret);
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ free_percpu_irq(erp_irqs[IRQ_L1], NULL);
+ return ret;
+}
+
+static void msm_m4m_erp_irq_init(void)
+{
+ writel_relaxed(M4M_INT_CTRL_IRQ_EN, m4m_base + M4M_INT_CTRL);
+ writel_relaxed(0, m4m_base + M4M_ERR_CTRL);
+}
+
+static int msm_m4m_erp_m4m_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *r;
+
+ dev_dbg(&pdev->dev, "enter\n");
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ m4m_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(m4m_base)) {
+ dev_err(&pdev->dev, "failed to ioremap (0x%pK)\n", m4m_base);
+ return PTR_ERR(m4m_base);
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ erp_irq_names[IRQ_M4M]);
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get %s\n",
+ erp_irq_names[IRQ_M4M]);
+ ret = -ENODEV;
+ goto exit;
+ }
+ erp_irqs[IRQ_M4M] = r->start;
+
+ dev_info(&pdev->dev, "Registering for M4M error interrupts\n");
+ ret = devm_request_irq(&pdev->dev, erp_irqs[IRQ_M4M],
+ msm_m4m_erp_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ erp_irq_names[IRQ_M4M], NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq %s (%d)\n",
+ erp_irq_names[IRQ_M4M], ret);
+ goto exit;
+ }
+
+ msm_m4m_erp_irq_init();
+
+exit:
+ return ret;
+}
+
+static struct of_device_id cache_erp_dt_ids[] = {
+ { .compatible = "qcom,kryo_cache_erp64", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cache_erp_dt_ids);
+
+static struct platform_driver msm_cache_erp_driver = {
+ .probe = msm_cache_erp_probe,
+ .driver = {
+ .name = "msm_cache_erp64",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cache_erp_dt_ids),
+ },
+};
+
+static struct of_device_id m4m_erp_dt_ids[] = {
+ { .compatible = "qcom,m4m_erp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, m4m_erp_dt_ids);
+static struct platform_driver msm_m4m_erp_driver = {
+ .probe = msm_m4m_erp_m4m_probe,
+ .driver = {
+ .name = "msm_m4m_erp",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(m4m_erp_dt_ids),
+ },
+};
+
+static int __init msm_cache_erp_init(void)
+{
+ int r;
+
+ r = platform_driver_register(&msm_cache_erp_driver);
+ if (!r)
+ r = platform_driver_register(&msm_m4m_erp_driver);
+ if (r)
+ pr_err("failed to register driver %d\n", r);
+ return r;
+}
+
+arch_initcall(msm_cache_erp_init);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index ab46eb70651c..8c242bc7a702 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -48,11 +48,6 @@
#include <soc/qcom/socinfo.h>
#include <soc/qcom/ramdump.h>
-#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
-#include <net/cnss_prealloc.h>
-#endif
-
-
#include "wlan_firmware_service_v01.h"
#ifdef CONFIG_ICNSS_DEBUG
@@ -202,6 +197,7 @@ enum icnss_driver_state {
ICNSS_MSA0_ASSIGNED,
ICNSS_WLFW_EXISTS,
ICNSS_WDOG_BITE,
+ ICNSS_SHUTDOWN_DONE,
};
struct ce_irq_list {
@@ -695,6 +691,8 @@ static int icnss_qmi_pin_connect_result_ind(void *msg, unsigned int msg_len)
goto out;
}
+ memset(&ind_msg, 0, sizeof(ind_msg));
+
ind_desc.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01;
ind_desc.max_msg_len = WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN;
ind_desc.ei_array = wlfw_pin_connect_result_ind_msg_v01_ei;
@@ -1968,8 +1966,6 @@ static int icnss_call_driver_probe(struct icnss_priv *priv)
if (ret < 0) {
icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
ret, priv->state);
- wcnss_prealloc_check_memory_leak();
- wcnss_pre_alloc_reset();
goto out;
}
@@ -1990,9 +1986,13 @@ static int icnss_call_driver_shutdown(struct icnss_priv *priv)
if (!priv->ops || !priv->ops->shutdown)
goto out;
+ if (test_bit(ICNSS_SHUTDOWN_DONE, &penv->state))
+ goto out;
+
icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
priv->ops->shutdown(&priv->pdev->dev);
+ set_bit(ICNSS_SHUTDOWN_DONE, &penv->state);
out:
return 0;
@@ -2030,6 +2030,7 @@ static int icnss_pd_restart_complete(struct icnss_priv *priv)
}
out:
+ clear_bit(ICNSS_SHUTDOWN_DONE, &penv->state);
return 0;
call_probe:
@@ -2099,8 +2100,6 @@ static int icnss_driver_event_register_driver(void *data)
if (ret) {
icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
ret, penv->state);
- wcnss_prealloc_check_memory_leak();
- wcnss_pre_alloc_reset();
goto power_off;
}
@@ -2125,8 +2124,6 @@ static int icnss_driver_event_unregister_driver(void *data)
penv->ops->remove(&penv->pdev->dev);
clear_bit(ICNSS_DRIVER_PROBED, &penv->state);
- wcnss_prealloc_check_memory_leak();
- wcnss_pre_alloc_reset();
penv->ops = NULL;
@@ -2151,8 +2148,6 @@ static int icnss_call_driver_remove(struct icnss_priv *priv)
penv->ops->remove(&priv->pdev->dev);
clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
- wcnss_prealloc_check_memory_leak();
- wcnss_pre_alloc_reset();
icnss_hw_power_off(penv);
@@ -3667,6 +3662,9 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
case ICNSS_WDOG_BITE:
seq_puts(s, "MODEM WDOG BITE");
continue;
+ case ICNSS_SHUTDOWN_DONE:
+ seq_puts(s, "SHUTDOWN DONE");
+ continue;
}
seq_printf(s, "UNKNOWN-%d", i);
diff --git a/drivers/soc/qcom/ipc_router_mhi_xprt.c b/drivers/soc/qcom/ipc_router_mhi_xprt.c
index f9d967fd0af6..e5f6104bd7de 100644
--- a/drivers/soc/qcom/ipc_router_mhi_xprt.c
+++ b/drivers/soc/qcom/ipc_router_mhi_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,7 +22,7 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/types.h>
-
+#include <linux/spinlock.h>
static int ipc_router_mhi_xprt_debug_mask;
module_param_named(debug_mask, ipc_router_mhi_xprt_debug_mask,
@@ -123,9 +123,9 @@ struct ipc_router_mhi_xprt {
struct completion sft_close_complete;
unsigned xprt_version;
unsigned xprt_option;
- struct mutex tx_addr_map_list_lock;
+ spinlock_t tx_addr_map_list_lock;
struct list_head tx_addr_map_list;
- struct mutex rx_addr_map_list_lock;
+ spinlock_t rx_addr_map_list_lock;
struct list_head rx_addr_map_list;
};
@@ -179,16 +179,16 @@ void ipc_router_mhi_release_pkt(struct kref *ref)
* Return: The mapped virtual Address if found, NULL otherwise.
*/
void *ipc_router_mhi_xprt_find_addr_map(struct list_head *addr_map_list,
- struct mutex *addr_map_list_lock,
- void *addr)
+ spinlock_t *addr_map_list_lock, void *addr)
{
struct ipc_router_mhi_addr_map *addr_mapping;
struct ipc_router_mhi_addr_map *tmp_addr_mapping;
+ unsigned long flags;
void *virt_addr;
if (!addr_map_list || !addr_map_list_lock)
return NULL;
- mutex_lock(addr_map_list_lock);
+ spin_lock_irqsave(addr_map_list_lock, flags);
list_for_each_entry_safe(addr_mapping, tmp_addr_mapping,
addr_map_list, list_node) {
if (addr_mapping->virt_addr == addr) {
@@ -198,11 +198,11 @@ void *ipc_router_mhi_xprt_find_addr_map(struct list_head *addr_map_list,
kref_put(&addr_mapping->pkt->ref,
ipc_router_mhi_release_pkt);
kfree(addr_mapping);
- mutex_unlock(addr_map_list_lock);
+ spin_unlock_irqrestore(addr_map_list_lock, flags);
return virt_addr;
}
}
- mutex_unlock(addr_map_list_lock);
+ spin_unlock_irqrestore(addr_map_list_lock, flags);
IPC_RTR_ERR(
"%s: Virtual address mapping [%p] not found\n",
__func__, (void *)addr);
@@ -219,10 +219,11 @@ void *ipc_router_mhi_xprt_find_addr_map(struct list_head *addr_map_list,
* Return: 0 on success, standard Linux error code otherwise.
*/
int ipc_router_mhi_xprt_add_addr_map(struct list_head *addr_map_list,
- struct mutex *addr_map_list_lock,
+ spinlock_t *addr_map_list_lock,
struct rr_packet *pkt, void *virt_addr)
{
struct ipc_router_mhi_addr_map *addr_mapping;
+ unsigned long flags;
if (!addr_map_list || !addr_map_list_lock)
return -EINVAL;
@@ -231,11 +232,11 @@ int ipc_router_mhi_xprt_add_addr_map(struct list_head *addr_map_list,
return -ENOMEM;
addr_mapping->virt_addr = virt_addr;
addr_mapping->pkt = pkt;
- mutex_lock(addr_map_list_lock);
+ spin_lock_irqsave(addr_map_list_lock, flags);
if (addr_mapping->pkt)
kref_get(&addr_mapping->pkt->ref);
list_add_tail(&addr_mapping->list_node, addr_map_list);
- mutex_unlock(addr_map_list_lock);
+ spin_unlock_irqrestore(addr_map_list_lock, flags);
return 0;
}
@@ -719,12 +720,11 @@ static void mhi_xprt_xfer_event(struct mhi_cb_info *cb_info)
mhi_xprtp = (struct ipc_router_mhi_xprt *)(cb_info->result->user_data);
if (cb_info->chan == mhi_xprtp->ch_hndl.out_chan_id) {
out_addr = cb_info->result->buf_addr;
- mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
- ipc_router_mhi_xprt_find_addr_map(&mhi_xprtp->tx_addr_map_list,
+ ipc_router_mhi_xprt_find_addr_map(
+ &mhi_xprtp->tx_addr_map_list,
&mhi_xprtp->tx_addr_map_list_lock,
out_addr);
wake_up(&mhi_xprtp->write_wait_q);
- mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
} else if (cb_info->chan == mhi_xprtp->ch_hndl.in_chan_id) {
queue_work(mhi_xprtp->wq, &mhi_xprtp->read_work);
} else {
@@ -875,9 +875,9 @@ static int ipc_router_mhi_config_init(
mhi_xprtp->ch_hndl.num_trbs = IPC_ROUTER_MHI_XPRT_NUM_TRBS;
mhi_xprtp->ch_hndl.mhi_xprtp = mhi_xprtp;
INIT_LIST_HEAD(&mhi_xprtp->tx_addr_map_list);
- mutex_init(&mhi_xprtp->tx_addr_map_list_lock);
+ spin_lock_init(&mhi_xprtp->tx_addr_map_list_lock);
INIT_LIST_HEAD(&mhi_xprtp->rx_addr_map_list);
- mutex_init(&mhi_xprtp->rx_addr_map_list_lock);
+ spin_lock_init(&mhi_xprtp->rx_addr_map_list_lock);
rc = ipc_router_mhi_driver_register(mhi_xprtp);
return rc;
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index 092b1c1af44b..924c826208dd 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -95,7 +95,7 @@ int msm_dump_data_add_minidump(struct msm_dump_entry *entry)
data = (struct msm_dump_data *)(phys_to_virt(entry->addr));
if (!strcmp(data->name, "")) {
- pr_info("Entry name is NULL, Use ID %d for minidump\n",
+ pr_debug("Entry name is NULL, Use ID %d for minidump\n",
entry->id);
snprintf(md_entry.name, sizeof(md_entry.name), "KMDT0x%X",
entry->id);
@@ -133,7 +133,7 @@ int msm_dump_data_register(enum msm_dump_table_ids id,
dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table));
if (msm_dump_data_add_minidump(entry))
- pr_info("Failed to add entry in Minidump table\n");
+ pr_err("Failed to add entry in Minidump table\n");
return 0;
}
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
index b8417513ca55..c11114528d2a 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.c
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -498,6 +498,7 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h)
struct mem_alloc_generic_resp_msg_v01 *alloc_resp;
int rc, resp = 0;
int client_id;
+ uint32_t size = 0;
alloc_req = (struct mem_alloc_generic_req_msg_v01 *)req;
pr_debug("memshare: alloc request client id: %d proc _id: %d\n",
@@ -528,7 +529,11 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h)
__func__, memblock[client_id].client_id,
memblock[client_id].free_memory);
if (!memblock[client_id].alloted) {
- rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes,
+ if (alloc_req->client_id == 1 && alloc_req->num_bytes > 0)
+ size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES;
+ else
+ size = alloc_req->num_bytes;
+ rc = memshare_alloc(memsh_drv->dev, size,
&memblock[client_id]);
if (rc) {
pr_err("In %s,Unable to allocate memory for requested client\n",
@@ -963,8 +968,10 @@ static int memshare_child_probe(struct platform_device *pdev)
* Memshare allocation for guaranteed clients
*/
if (memblock[num_clients].guarantee) {
+ if (client_id == 1 && size > 0)
+ size += MEMSHARE_GUARD_BYTES;
rc = memshare_alloc(memsh_child->dev,
- memblock[num_clients].size,
+ size,
&memblock[num_clients]);
if (rc) {
pr_err("In %s, Unable to allocate memory for guaranteed clients, rc: %d\n",
diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h
index 398907532977..c7123fb1314b 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.h
+++ b/drivers/soc/qcom/memshare/msm_memshare.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
#define GPS 0
#define CHECK 0
#define FREE 1
+#define MEMSHARE_GUARD_BYTES (4*1024)
struct mem_blocks {
/* Client Id information */
diff --git a/drivers/soc/qcom/msm_minidump.c b/drivers/soc/qcom/msm_minidump.c
index 1cb36bf98555..300233085161 100644
--- a/drivers/soc/qcom/msm_minidump.c
+++ b/drivers/soc/qcom/msm_minidump.c
@@ -62,24 +62,31 @@ struct md_table {
struct md_region entry[MAX_NUM_ENTRIES];
};
+/*
+ * md_elfhdr: Minidump table elf header
+ * @md_ehdr: elf main header
+ * @shdr: Section header
+ * @phdr: Program header
+ * @elf_offset: section offset in elf
+ * @strtable_idx: string table current index position
+ */
+struct md_elfhdr {
+ struct elfhdr *md_ehdr;
+ struct elf_shdr *shdr;
+ struct elf_phdr *phdr;
+ u64 elf_offset;
+ u64 strtable_idx;
+};
+
/* Protect elfheader and smem table from deferred calls contention */
static DEFINE_SPINLOCK(mdt_lock);
-static bool minidump_enabled;
-static struct md_table minidump_table;
+static struct md_table minidump_table;
+static struct md_elfhdr minidump_elfheader;
+
+bool minidump_enabled;
static unsigned int pendings;
static unsigned int region_idx = 1; /* First entry is ELF header*/
-/* ELF Header */
-static struct elfhdr *md_ehdr;
-/* ELF Program header */
-static struct elf_phdr *phdr;
-/* ELF Section header */
-static struct elf_shdr *shdr;
-/* Section offset in elf image */
-static u64 elf_offset;
-/* String table index, first byte must be '\0' */
-static unsigned int stringtable_idx = 1;
-
static inline struct elf_shdr *elf_sheader(struct elfhdr *hdr)
{
return (struct elf_shdr *)((size_t)hdr + (size_t)hdr->e_shoff);
@@ -90,6 +97,16 @@ static inline struct elf_shdr *elf_section(struct elfhdr *hdr, int idx)
return &elf_sheader(hdr)[idx];
}
+static inline struct elf_phdr *elf_pheader(struct elfhdr *hdr)
+{
+ return (struct elf_phdr *)((size_t)hdr + (size_t)hdr->e_phoff);
+}
+
+static inline struct elf_phdr *elf_program(struct elfhdr *hdr, int idx)
+{
+ return &elf_pheader(hdr)[idx];
+}
+
static inline char *elf_str_table(struct elfhdr *hdr)
{
if (hdr->e_shstrndx == SHN_UNDEF)
@@ -101,23 +118,24 @@ static inline char *elf_lookup_string(struct elfhdr *hdr, int offset)
{
char *strtab = elf_str_table(hdr);
- if ((strtab == NULL) | (stringtable_idx < offset))
+ if ((strtab == NULL) || (minidump_elfheader.strtable_idx < offset))
return NULL;
return strtab + offset;
}
static inline unsigned int set_section_name(const char *name)
{
- char *strtab = elf_str_table(md_ehdr);
+ char *strtab = elf_str_table(minidump_elfheader.md_ehdr);
+ int idx = minidump_elfheader.strtable_idx;
int ret = 0;
- if ((strtab == NULL) | (name == NULL))
+ if ((strtab == NULL) || (name == NULL))
return 0;
- ret = stringtable_idx;
- stringtable_idx += strlcpy((strtab + stringtable_idx),
- name, MAX_NAME_LENGTH);
- stringtable_idx += 1;
+ ret = idx;
+ idx += strlcpy((strtab + idx), name, MAX_NAME_LENGTH);
+ minidump_elfheader.strtable_idx = idx + 1;
+
return ret;
}
@@ -137,11 +155,9 @@ static inline bool md_check_name(const char *name)
static int md_update_smem_table(const struct md_region *entry)
{
struct md_smem_region *mdr;
-
- if (!minidump_enabled) {
- pr_info("Table in smem is not setup\n");
- return -ENODEV;
- }
+ struct elfhdr *hdr = minidump_elfheader.md_ehdr;
+ struct elf_shdr *shdr = elf_section(hdr, hdr->e_shnum++);
+ struct elf_phdr *phdr = elf_program(hdr, hdr->e_phnum++);
mdr = &minidump_table.region[region_idx++];
@@ -155,36 +171,21 @@ static int md_update_smem_table(const struct md_region *entry)
shdr->sh_addr = (elf_addr_t)entry->virt_addr;
shdr->sh_size = mdr->size;
shdr->sh_flags = SHF_WRITE;
- shdr->sh_offset = elf_offset;
+ shdr->sh_offset = minidump_elfheader.elf_offset;
shdr->sh_entsize = 0;
phdr->p_type = PT_LOAD;
- phdr->p_offset = elf_offset;
+ phdr->p_offset = minidump_elfheader.elf_offset;
phdr->p_vaddr = entry->virt_addr;
phdr->p_paddr = entry->phys_addr;
phdr->p_filesz = phdr->p_memsz = mdr->size;
phdr->p_flags = PF_R | PF_W;
- md_ehdr->e_shnum += 1;
- md_ehdr->e_phnum += 1;
- elf_offset += shdr->sh_size;
- shdr++;
- phdr++;
+ minidump_elfheader.elf_offset += shdr->sh_size;
return 0;
}
-bool msm_minidump_enabled(void)
-{
- bool ret;
-
- spin_lock(&mdt_lock);
- ret = minidump_enabled;
- spin_unlock(&mdt_lock);
- return ret;
-}
-EXPORT_SYMBOL(msm_minidump_enabled);
-
int msm_minidump_add_region(const struct md_region *entry)
{
u32 entries;
@@ -196,19 +197,19 @@ int msm_minidump_add_region(const struct md_region *entry)
if (((strlen(entry->name) > MAX_NAME_LENGTH) ||
md_check_name(entry->name)) && !entry->virt_addr) {
- pr_info("Invalid entry details\n");
+ pr_err("Invalid entry details\n");
return -EINVAL;
}
if (!IS_ALIGNED(entry->size, 4)) {
- pr_info("size should be 4 byte aligned\n");
+ pr_err("size should be 4 byte aligned\n");
return -EINVAL;
}
spin_lock(&mdt_lock);
entries = minidump_table.num_regions;
if (entries >= MAX_NUM_ENTRIES) {
- pr_info("Maximum entries reached.\n");
+ pr_err("Maximum entries reached.\n");
spin_unlock(&mdt_lock);
return -ENOMEM;
}
@@ -238,23 +239,32 @@ EXPORT_SYMBOL(msm_minidump_add_region);
static int msm_minidump_add_header(void)
{
struct md_smem_region *mdreg = &minidump_table.region[0];
- char *banner;
+ struct elfhdr *md_ehdr;
+ struct elf_shdr *shdr;
+ struct elf_phdr *phdr;
unsigned int strtbl_off, elfh_size, phdr_off;
+ char *banner;
+ /* The header buffer contains:
+ * the ELF header, (MAX_NUM_ENTRIES + 1) section and program headers,
+ * the string table section and the Linux banner.
+ */
elfh_size = sizeof(*md_ehdr) + MAX_STRTBL_SIZE + MAX_MEM_LENGTH +
((sizeof(*shdr) + sizeof(*phdr)) * (MAX_NUM_ENTRIES + 1));
- md_ehdr = kzalloc(elfh_size, GFP_KERNEL);
- if (!md_ehdr)
+ minidump_elfheader.md_ehdr = kzalloc(elfh_size, GFP_KERNEL);
+ if (!minidump_elfheader.md_ehdr)
return -ENOMEM;
strlcpy(mdreg->name, "KELF_HEADER", sizeof(mdreg->name));
- mdreg->address = virt_to_phys(md_ehdr);
+ mdreg->address = virt_to_phys(minidump_elfheader.md_ehdr);
mdreg->size = elfh_size;
- /* Section headers*/
- shdr = (struct elf_shdr *)(md_ehdr + 1);
- phdr = (struct elf_phdr *)(shdr + MAX_NUM_ENTRIES);
+ md_ehdr = minidump_elfheader.md_ehdr;
+ /* Assign section/program headers offset */
+ minidump_elfheader.shdr = shdr = (struct elf_shdr *)(md_ehdr + 1);
+ minidump_elfheader.phdr = phdr =
+ (struct elf_phdr *)(shdr + MAX_NUM_ENTRIES);
phdr_off = sizeof(*md_ehdr) + (sizeof(*shdr) * MAX_NUM_ENTRIES);
memcpy(md_ehdr->e_ident, ELFMAG, SELFMAG);
@@ -268,18 +278,19 @@ static int msm_minidump_add_header(void)
md_ehdr->e_ehsize = sizeof(*md_ehdr);
md_ehdr->e_phoff = phdr_off;
md_ehdr->e_phentsize = sizeof(*phdr);
- md_ehdr->e_phnum = 1;
md_ehdr->e_shoff = sizeof(*md_ehdr);
md_ehdr->e_shentsize = sizeof(*shdr);
- md_ehdr->e_shnum = 3; /* NULL, STR TABLE, Linux banner */
md_ehdr->e_shstrndx = 1;
- elf_offset = elfh_size;
+ minidump_elfheader.elf_offset = elfh_size;
+
+ /*
+ * First section header should be NULL,
+ * 2nd section is string table.
+ */
+ minidump_elfheader.strtable_idx = 1;
strtbl_off = sizeof(*md_ehdr) +
((sizeof(*phdr) + sizeof(*shdr)) * MAX_NUM_ENTRIES);
- /* First section header should be NULL
- * 2nd entry for string table
- */
shdr++;
shdr->sh_type = SHT_STRTAB;
shdr->sh_offset = (elf_addr_t)strtbl_off;
@@ -289,7 +300,15 @@ static int msm_minidump_add_header(void)
shdr->sh_name = set_section_name("STR_TBL");
shdr++;
- /* 3rd entry for linux banner */
+ /* 3rd section is for minidump_table VA, used by parsers */
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_entsize = 0;
+ shdr->sh_flags = 0;
+ shdr->sh_addr = (elf_addr_t)&minidump_table;
+ shdr->sh_name = set_section_name("minidump_table");
+ shdr++;
+
+ /* 4th section is linux banner */
banner = (char *)md_ehdr + strtbl_off + MAX_STRTBL_SIZE;
strlcpy(banner, linux_banner, MAX_MEM_LENGTH);
@@ -300,7 +319,6 @@ static int msm_minidump_add_header(void)
shdr->sh_entsize = 0;
shdr->sh_flags = SHF_WRITE;
shdr->sh_name = set_section_name("linux_banner");
- shdr++;
phdr->p_type = PT_LOAD;
phdr->p_offset = (elf_addr_t)(strtbl_off + MAX_STRTBL_SIZE);
@@ -309,8 +327,9 @@ static int msm_minidump_add_header(void)
phdr->p_filesz = phdr->p_memsz = strlen(linux_banner) + 1;
phdr->p_flags = PF_R | PF_W;
- md_ehdr->e_phnum += 1;
- phdr++;
+ /* Update header counts */
+ md_ehdr->e_phnum = 1;
+ md_ehdr->e_shnum = 4;
return 0;
}
@@ -325,13 +344,13 @@ static int __init msm_minidump_init(void)
smem_table = smem_get_entry(SMEM_MINIDUMP_TABLE_ID, &size, 0,
SMEM_ANY_HOST_FLAG);
if (IS_ERR_OR_NULL(smem_table)) {
- pr_info("SMEM is not initialized.\n");
+ pr_err("SMEM is not initialized.\n");
return -ENODEV;
}
if ((smem_table->next_avail_offset + MAX_MEM_LENGTH) >
smem_table->smem_length) {
- pr_info("SMEM memory not available.\n");
+ pr_err("SMEM memory not available.\n");
return -ENOMEM;
}
@@ -353,10 +372,10 @@ static int __init msm_minidump_init(void)
for (i = 0; i < pendings; i++) {
mdr = &minidump_table.entry[i];
if (md_update_smem_table(mdr)) {
- pr_info("Unable to add entry %s to smem table\n",
+ pr_err("Unable to add entry %s to smem table\n",
mdr->name);
spin_unlock(&mdt_lock);
- return -ENODEV;
+ return -ENOENT;
}
}
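For reference, clients register a dump region through msm_minidump_add_region() by filling the md_region fields used above (name, virt_addr, phys_addr and a 4-byte-aligned size). A minimal sketch follows; the buffer, region name and the assumption that struct md_region is provided by soc/qcom/minidump.h with a fixed-size name array are illustrative, not taken from this patch.

/* Illustrative sketch only: registering a static buffer with the
 * minidump driver. "EXMPL_BUF" and example_buf are hypothetical.
 */
#include <linux/sizes.h>
#include <linux/string.h>
#include <soc/qcom/minidump.h>	/* assumed header for struct md_region */

static char example_buf[SZ_4K];

static int example_minidump_register(void)
{
	struct md_region md_entry;

	strlcpy(md_entry.name, "EXMPL_BUF", sizeof(md_entry.name));
	md_entry.virt_addr = (u64)(uintptr_t)example_buf;
	md_entry.phys_addr = virt_to_phys(example_buf);
	md_entry.size = sizeof(example_buf);	/* 4-byte aligned */

	return msm_minidump_add_region(&md_entry);
}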
diff --git a/drivers/soc/qcom/perf_event_kryo.c b/drivers/soc/qcom/perf_event_kryo.c
index c61a86850777..519961440742 100644
--- a/drivers/soc/qcom/perf_event_kryo.c
+++ b/drivers/soc/qcom/perf_event_kryo.c
@@ -118,12 +118,7 @@ static void kryo_write_pmresr(int reg, int l_h, u32 val)
static u32 kryo_read_pmresr(int reg, int l_h)
{
- u32 val;
-
- if (reg > KRYO_MAX_L1_REG) {
- pr_err("Invalid read of RESR reg %d\n", reg);
- return 0;
- }
+ u32 val = 0;
if (l_h == RESR_L) {
switch (reg) {
@@ -136,6 +131,9 @@ static u32 kryo_read_pmresr(int reg, int l_h)
case 2:
asm volatile("mrs %0, " pmresr2l_el0 : "=r" (val));
break;
+ default:
+ WARN_ONCE(1, "Invalid read of RESR reg %d\n", reg);
+ break;
}
} else {
switch (reg) {
@@ -148,6 +146,9 @@ static u32 kryo_read_pmresr(int reg, int l_h)
case 2:
asm volatile("mrs %0," pmresr2h_el0 : "=r" (val));
break;
+ default:
+ WARN_ONCE(1, "Invalid read of RESR reg %d\n", reg);
+ break;
}
}
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 6e5ddc4a3a7d..3415338a1294 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -917,13 +917,13 @@ out:
priv->region_start),
VMID_HLOS);
}
+ if (desc->clear_fw_region && priv->region_start)
+ pil_clear_segment(desc);
dma_free_attrs(desc->dev, priv->region_size,
priv->region, priv->region_start,
&desc->attrs);
priv->region = NULL;
}
- if (desc->clear_fw_region && priv->region_start)
- pil_clear_segment(desc);
pil_release_mmap(desc);
}
return ret;
diff --git a/drivers/soc/qcom/qbt1000.c b/drivers/soc/qcom/qbt1000.c
index 4ba92436bd06..6e7d34ac9163 100644
--- a/drivers/soc/qcom/qbt1000.c
+++ b/drivers/soc/qcom/qbt1000.c
@@ -377,6 +377,12 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg)
drvdata = file->private_data;
+ if (IS_ERR(priv_arg)) {
+ dev_err(drvdata->dev, "%s: invalid user space pointer %lu\n",
+ __func__, arg);
+ return -EINVAL;
+ }
+
mutex_lock(&drvdata->mutex);
pr_debug("qbt1000_ioctl %d\n", cmd);
@@ -401,6 +407,13 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg)
goto end;
}
+ if (strcmp(app.name, FP_APP_NAME)) {
+ dev_err(drvdata->dev, "%s: Invalid app name\n",
+ __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
if (drvdata->app_handle) {
dev_err(drvdata->dev, "%s: LOAD app already loaded, unloading first\n",
__func__);
@@ -414,6 +427,7 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg)
}
pr_debug("app %s load before\n", app.name);
+ app.name[MAX_NAME_SIZE - 1] = '\0';
/* start the TZ app */
rc = qseecom_start_app(
@@ -427,7 +441,8 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg)
pr_err("App %s failed to set bw\n", app.name);
}
} else {
- pr_err("app %s failed to load\n", app.name);
+ dev_err(drvdata->dev, "%s: Fingerprint Trusted App failed to load\n",
+ __func__);
goto end;
}
@@ -447,9 +462,7 @@ static long qbt1000_ioctl(struct file *file, unsigned cmd, unsigned long arg)
pr_debug("app %s load after\n", app.name);
- if (!strcmp(app.name, FP_APP_NAME))
- drvdata->fp_app_handle = drvdata->app_handle;
-
+ drvdata->fp_app_handle = drvdata->app_handle;
break;
}
case QBT1000_UNLOAD_APP:
diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c
index 128ea434dcc8..a275537d4e08 100644
--- a/drivers/soc/qcom/qdsp6v2/apr.c
+++ b/drivers/soc/qcom/qdsp6v2/apr.c
@@ -514,19 +514,19 @@ struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
mutex_unlock(&svc->m_lock);
return NULL;
}
- if (!svc->port_cnt && !svc->svc_cnt)
+ if (!svc->svc_cnt)
clnt->svc_cnt++;
svc->port_cnt++;
svc->port_fn[temp_port] = svc_fn;
svc->port_priv[temp_port] = priv;
+ svc->svc_cnt++;
} else {
if (!svc->fn) {
- if (!svc->port_cnt && !svc->svc_cnt)
+ if (!svc->svc_cnt)
clnt->svc_cnt++;
svc->fn = svc_fn;
- if (svc->port_cnt)
- svc->svc_cnt++;
svc->priv = priv;
+ svc->svc_cnt++;
}
}
@@ -745,29 +745,28 @@ int apr_deregister(void *handle)
if (!handle)
return -EINVAL;
+ if (!svc->svc_cnt) {
+ pr_err("%s: svc already deregistered. svc = %pK\n",
+ __func__, svc);
+ return -EINVAL;
+ }
+
mutex_lock(&svc->m_lock);
dest_id = svc->dest_id;
client_id = svc->client_id;
clnt = &client[dest_id][client_id];
- if (svc->port_cnt > 0 || svc->svc_cnt > 0) {
+ if (svc->svc_cnt > 0) {
if (svc->port_cnt)
svc->port_cnt--;
- else if (svc->svc_cnt)
- svc->svc_cnt--;
- if (!svc->port_cnt && !svc->svc_cnt) {
+ svc->svc_cnt--;
+ if (!svc->svc_cnt) {
client[dest_id][client_id].svc_cnt--;
- svc->need_reset = 0x0;
- }
- } else if (client[dest_id][client_id].svc_cnt > 0) {
- client[dest_id][client_id].svc_cnt--;
- if (!client[dest_id][client_id].svc_cnt) {
- svc->need_reset = 0x0;
pr_debug("%s: service is reset %pK\n", __func__, svc);
}
}
- if (!svc->port_cnt && !svc->svc_cnt) {
+ if (!svc->svc_cnt) {
svc->priv = NULL;
svc->id = 0;
svc->fn = NULL;
diff --git a/drivers/soc/qcom/scm-boot.c b/drivers/soc/qcom/scm-boot.c
index 369fb27ff447..f3e96f9afa12 100644
--- a/drivers/soc/qcom/scm-boot.c
+++ b/drivers/soc/qcom/scm-boot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010, 2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,11 +24,20 @@ int scm_set_boot_addr(phys_addr_t addr, unsigned int flags)
u32 flags;
u32 addr;
} cmd;
+ struct scm_desc desc = {0};
+
+ if (!is_scm_armv8()) {
+ cmd.addr = addr;
+ cmd.flags = flags;
+ return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR,
+ &cmd, sizeof(cmd), NULL, 0);
+ }
+
+ desc.args[0] = addr;
+ desc.args[1] = flags;
+ desc.arginfo = SCM_ARGS(2);
- cmd.addr = addr;
- cmd.flags = flags;
- return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR,
- &cmd, sizeof(cmd), NULL, 0);
+ return scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_BOOT_ADDR), &desc);
}
EXPORT_SYMBOL(scm_set_boot_addr);
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 68592feccb33..b5681a5c6817 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -376,13 +376,6 @@ static void root_service_service_arrive(struct work_struct *work)
mutex_unlock(&qmi_client_release_lock);
pr_info("Connection established between QMI handle and %d service\n",
data->instance_id);
- /* Register for indication messages about service */
- rc = qmi_register_ind_cb(data->clnt_handle, root_service_service_ind_cb,
- (void *)data);
- if (rc < 0)
- pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
- data->instance_id, rc);
-
mutex_lock(&notif_add_lock);
mutex_lock(&service_list_lock);
list_for_each_entry(service_notif, &service_list, list) {
@@ -405,6 +398,12 @@ static void root_service_service_arrive(struct work_struct *work)
}
mutex_unlock(&service_list_lock);
mutex_unlock(&notif_add_lock);
+ /* Register for indication messages about service */
+ rc = qmi_register_ind_cb(data->clnt_handle,
+ root_service_service_ind_cb, (void *)data);
+ if (rc < 0)
+ pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
+ data->instance_id, rc);
}
static void root_service_service_exit(struct qmi_client_info *data,
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index c1d8748a5d08..b9903fe86f60 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -65,6 +65,7 @@ enum {
HW_PLATFORM_RCM = 21,
HW_PLATFORM_STP = 23,
HW_PLATFORM_SBC = 24,
+ HW_PLATFORM_ADP = 25,
HW_PLATFORM_INVALID
};
@@ -85,6 +86,7 @@ const char *hw_platform[] = {
[HW_PLATFORM_DTV] = "DTV",
[HW_PLATFORM_STP] = "STP",
[HW_PLATFORM_SBC] = "SBC",
+ [HW_PLATFORM_ADP] = "ADP",
};
enum {
@@ -111,6 +113,22 @@ const char *qrd_hw_platform_subtype[] = {
};
enum {
+ PLATFORM_SUBTYPE_MOJAVE_V1 = 0x0,
+ PLATFORM_SUBTYPE_MMX = 0x1,
+ PLATFORM_SUBTYPE_MOJAVE_FULL_V2 = 0x2,
+ PLATFORM_SUBTYPE_MOJAVE_BARE_V2 = 0x3,
+ PLATFORM_SUBTYPE_ADP_INVALID,
+};
+
+const char *adp_hw_platform_subtype[] = {
+ [PLATFORM_SUBTYPE_MOJAVE_V1] = "MOJAVE_V1",
+ [PLATFORM_SUBTYPE_MMX] = "MMX",
+ [PLATFORM_SUBTYPE_MOJAVE_FULL_V2] = "_MOJAVE_V2_FULL",
+ [PLATFORM_SUBTYPE_MOJAVE_BARE_V2] = "_MOJAVE_V2_BARE",
+ [PLATFORM_SUBTYPE_ADP_INVALID] = "INVALID",
+};
+
+enum {
PLATFORM_SUBTYPE_UNKNOWN = 0x0,
PLATFORM_SUBTYPE_CHARM = 0x1,
PLATFORM_SUBTYPE_STRANGE = 0x2,
@@ -514,11 +532,13 @@ static struct msm_soc_info cpu_of_id[] = {
/* 8996 IDs */
[246] = {MSM_CPU_8996, "MSM8996"},
- [310] = {MSM_CPU_8996, "MSM8996"},
- [311] = {MSM_CPU_8996, "APQ8096"},
[291] = {MSM_CPU_8996, "APQ8096"},
[305] = {MSM_CPU_8996, "MSM8996pro"},
+ [310] = {MSM_CPU_8996, "MSM8996"},
+ [311] = {MSM_CPU_8996, "APQ8096"},
[312] = {MSM_CPU_8996, "APQ8096pro"},
+ [315] = {MSM_CPU_8996, "MSM8996pro"},
+ [316] = {MSM_CPU_8996, "APQ8096pro"},
/* 8976 ID */
[266] = {MSM_CPU_8976, "MSM8976"},
@@ -804,6 +824,14 @@ msm_get_platform_subtype(struct device *dev,
}
return snprintf(buf, PAGE_SIZE, "%-.32s\n",
qrd_hw_platform_subtype[hw_subtype]);
+ }
+ if (socinfo_get_platform_type() == HW_PLATFORM_ADP) {
+ if (hw_subtype >= PLATFORM_SUBTYPE_ADP_INVALID) {
+ pr_err("Invalid hardware platform sub type for adp found\n");
+ hw_subtype = PLATFORM_SUBTYPE_ADP_INVALID;
+ }
+ return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+ adp_hw_platform_subtype[hw_subtype]);
} else {
if (hw_subtype >= PLATFORM_SUBTYPE_INVALID) {
pr_err("Invalid hardware platform subtype\n");
@@ -1225,10 +1253,6 @@ static void * __init setup_dummy_socinfo(void)
dummy_socinfo.id = 246;
strlcpy(dummy_socinfo.build_id, "msm8996 - ",
sizeof(dummy_socinfo.build_id));
- } else if (early_machine_is_msm8996_auto()) {
- dummy_socinfo.id = 310;
- strlcpy(dummy_socinfo.build_id, "msm8996-auto - ",
- sizeof(dummy_socinfo.build_id));
} else if (early_machine_is_msm8929()) {
dummy_socinfo.id = 268;
strlcpy(dummy_socinfo.build_id, "msm8929 - ",
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index d3d0b8594c9f..51f4ec79db10 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -480,15 +480,19 @@ static void send_sysmon_notif(struct subsys_device *dev)
mutex_unlock(&subsys_list_lock);
}
-static void for_each_subsys_device(struct subsys_device **list, unsigned count,
- void *data, void (*fn)(struct subsys_device *, void *))
+static int for_each_subsys_device(struct subsys_device **list, unsigned count,
+ void *data, int (*fn)(struct subsys_device *, void *))
{
+ int ret;
while (count--) {
struct subsys_device *dev = *list++;
if (!dev)
continue;
- fn(dev, data);
+ ret = fn(dev, data);
+ if (ret)
+ return ret;
}
+ return 0;
}
static void notify_each_subsys_device(struct subsys_device **list,
@@ -590,21 +594,31 @@ static int wait_for_err_ready(struct subsys_device *subsys)
return 0;
}
-static void subsystem_shutdown(struct subsys_device *dev, void *data)
+static int subsystem_shutdown(struct subsys_device *dev, void *data)
{
const char *name = dev->desc->name;
+ int ret;
pr_info("[%s:%d]: Shutting down %s\n",
current->comm, current->pid, name);
- if (dev->desc->shutdown(dev->desc, true) < 0)
- panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
- current->comm, current->pid, name);
+ ret = dev->desc->shutdown(dev->desc, true);
+ if (ret < 0) {
+ if (!dev->desc->ignore_ssr_failure) {
+ panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
+ current->comm, current->pid, name);
+ } else {
+ pr_err("Shutdown failure on %s\n", name);
+ return ret;
+ }
+ }
dev->crash_count++;
subsys_set_state(dev, SUBSYS_OFFLINE);
disable_all_irqs(dev);
+
+ return 0;
}
-static void subsystem_ramdump(struct subsys_device *dev, void *data)
+static int subsystem_ramdump(struct subsys_device *dev, void *data)
{
const char *name = dev->desc->name;
@@ -613,15 +627,17 @@ static void subsystem_ramdump(struct subsys_device *dev, void *data)
pr_warn("%s[%s:%d]: Ramdump failed.\n",
name, current->comm, current->pid);
dev->do_ramdump_on_put = false;
+ return 0;
}
-static void subsystem_free_memory(struct subsys_device *dev, void *data)
+static int subsystem_free_memory(struct subsys_device *dev, void *data)
{
if (dev->desc->free_memory)
dev->desc->free_memory(dev->desc);
+ return 0;
}
-static void subsystem_powerup(struct subsys_device *dev, void *data)
+static int subsystem_powerup(struct subsys_device *dev, void *data)
{
const char *name = dev->desc->name;
int ret;
@@ -629,11 +645,17 @@ static void subsystem_powerup(struct subsys_device *dev, void *data)
pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name);
init_completion(&dev->err_ready);
- if (dev->desc->powerup(dev->desc) < 0) {
+ ret = dev->desc->powerup(dev->desc);
+ if (ret < 0) {
notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
NULL);
- panic("[%s:%d]: Powerup error: %s!",
- current->comm, current->pid, name);
+ if (!dev->desc->ignore_ssr_failure) {
+ panic("[%s:%d]: Powerup error: %s!",
+ current->comm, current->pid, name);
+ } else {
+ pr_err("Powerup failure on %s\n", name);
+ return ret;
+ }
}
enable_all_irqs(dev);
@@ -641,11 +663,16 @@ static void subsystem_powerup(struct subsys_device *dev, void *data)
if (ret) {
notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
NULL);
- panic("[%s:%d]: Timed out waiting for error ready: %s!",
- current->comm, current->pid, name);
+ if (!dev->desc->ignore_ssr_failure)
+ panic("[%s:%d]: Timed out waiting for error ready: %s!",
+ current->comm, current->pid, name);
+ else
+ return ret;
}
subsys_set_state(dev, SUBSYS_ONLINE);
subsys_set_crash_status(dev, CRASH_STATUS_NO_CRASH);
+
+ return 0;
}
static int __find_subsys(struct device *dev, void *data)
@@ -907,6 +934,7 @@ static void subsystem_restart_wq_func(struct work_struct *work)
struct subsys_tracking *track;
unsigned count;
unsigned long flags;
+ int ret;
/*
* It's OK to not take the registration lock at this point.
@@ -954,7 +982,9 @@ static void subsystem_restart_wq_func(struct work_struct *work)
pr_debug("[%s:%d]: Starting restart sequence for %s\n",
current->comm, current->pid, desc->name);
notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
- for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+ ret = for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+ if (ret)
+ goto err;
notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION,
@@ -970,12 +1000,19 @@ static void subsystem_restart_wq_func(struct work_struct *work)
for_each_subsys_device(list, count, NULL, subsystem_free_memory);
notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL);
- for_each_subsys_device(list, count, NULL, subsystem_powerup);
+ ret = for_each_subsys_device(list, count, NULL, subsystem_powerup);
+ if (ret)
+ goto err;
notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
pr_info("[%s:%d]: Restart sequence for %s completed.\n",
current->comm, current->pid, desc->name);
+err:
+ /* Reset subsys count */
+ if (ret)
+ dev->count = 0;
+
mutex_unlock(&soc_order_reg_lock);
mutex_unlock(&track->lock);
@@ -1466,6 +1503,9 @@ static int subsys_parse_devicetree(struct subsys_desc *desc)
desc->generic_irq = ret;
}
+ desc->ignore_ssr_failure = of_property_read_bool(pdev->dev.of_node,
+ "qcom,ignore-ssr-failure");
+
order = ssr_parse_restart_orders(desc);
if (IS_ERR(order)) {
pr_err("Could not initialize SSR restart order, err = %ld\n",
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 3f1133230a1a..28c9afe538ca 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -392,6 +392,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
ret = PTR_ERR(vmfile);
goto out;
}
+ vmfile->f_mode |= FMODE_LSEEK;
asma->file = vmfile;
}
get_file(asma->file);
@@ -752,10 +753,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case ASHMEM_SET_SIZE:
ret = -EINVAL;
+ mutex_lock(&ashmem_mutex);
if (!asma->file) {
ret = 0;
asma->size = (size_t)arg;
}
+ mutex_unlock(&ashmem_mutex);
break;
case ASHMEM_GET_SIZE:
ret = asma->size;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 58bf3d2f52bd..bfb7dd2d920d 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -3,7 +3,7 @@
* drivers/staging/android/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -122,8 +122,6 @@ struct ion_handle {
int id;
};
-static struct ion_device *ion_dev;
-
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
return (buffer->flags & ION_FLAG_CACHED) &&
@@ -844,32 +842,45 @@ void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
}
EXPORT_SYMBOL(ion_unmap_kernel);
-static int ion_debug_client_show(struct seq_file *s, void *unused)
+static struct mutex debugfs_mutex;
+static struct rb_root *ion_root_client;
+static int is_client_alive(struct ion_client *client)
{
- struct ion_client *client = s->private;
- struct rb_node *n, *cnode;
- bool found = false;
-
- down_write(&ion_dev->lock);
+ struct rb_node *node;
+ struct ion_client *tmp;
+ struct ion_device *dev;
- if (!client || (client->dev != ion_dev)) {
- up_write(&ion_dev->lock);
- return -EINVAL;
- }
+ node = ion_root_client->rb_node;
+ dev = container_of(ion_root_client, struct ion_device, clients);
- cnode = rb_first(&ion_dev->clients);
- for ( ; cnode; cnode = rb_next(cnode)) {
- struct ion_client *c = rb_entry(cnode,
- struct ion_client, node);
- if (client == c) {
- found = true;
- break;
+ down_read(&dev->lock);
+ while (node) {
+ tmp = rb_entry(node, struct ion_client, node);
+ if (client < tmp) {
+ node = node->rb_left;
+ } else if (client > tmp) {
+ node = node->rb_right;
+ } else {
+ up_read(&dev->lock);
+ return 1;
}
}
- if (!found) {
- up_write(&ion_dev->lock);
- return -EINVAL;
+ up_read(&dev->lock);
+ return 0;
+}
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+ struct ion_client *client = s->private;
+ struct rb_node *n;
+
+ mutex_lock(&debugfs_mutex);
+ if (!is_client_alive(client)) {
+ seq_printf(s, "ion_client 0x%pK dead, can't dump its buffers\n",
+ client);
+ mutex_unlock(&debugfs_mutex);
+ return 0;
}
seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
@@ -890,7 +901,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
seq_printf(s, "\n");
}
mutex_unlock(&client->lock);
- up_write(&ion_dev->lock);
+ mutex_unlock(&debugfs_mutex);
return 0;
}
@@ -1021,7 +1032,7 @@ void ion_client_destroy(struct ion_client *client)
struct rb_node *n;
pr_debug("%s: %d\n", __func__, __LINE__);
- mutex_lock(&client->lock);
+ mutex_lock(&debugfs_mutex);
while ((n = rb_first(&client->handles))) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
@@ -1029,7 +1040,6 @@ void ion_client_destroy(struct ion_client *client)
}
idr_destroy(&client->idr);
- mutex_unlock(&client->lock);
down_write(&dev->lock);
if (client->task)
@@ -1042,6 +1052,7 @@ void ion_client_destroy(struct ion_client *client)
kfree(client->display_name);
kfree(client->name);
kfree(client);
+ mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
@@ -1838,7 +1849,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
seq_puts(s, "----------------------------------------------------\n");
- down_read(&dev->lock);
+ mutex_lock(&debugfs_mutex);
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
@@ -1857,7 +1868,8 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
client->pid, size);
}
}
- up_read(&dev->lock);
+ mutex_unlock(&debugfs_mutex);
+
seq_puts(s, "----------------------------------------------------\n");
seq_puts(s, "orphaned allocations (info is from last known client):\n");
mutex_lock(&dev->buffer_lock);
@@ -2095,7 +2107,8 @@ debugfs_done:
init_rwsem(&idev->lock);
plist_head_init(&idev->heaps);
idev->clients = RB_ROOT;
- ion_dev = idev;
+ ion_root_client = &idev->clients;
+ mutex_init(&debugfs_mutex);
return idev;
}
EXPORT_SYMBOL(ion_device_create);
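The is_client_alive() helper above validates a possibly stale debugfs client pointer by searching the device's client tree by raw pointer value, comparing addresses with < and > rather than dereferencing the candidate. The sketch below shows the same idea with a toy binary tree keyed by node address (plain C, not the kernel's rb-tree API):

#include <stdint.h>
#include <stdio.h>

struct node {
	struct node *l, *r;	/* tree ordered by the node's own address */
};

static struct node *insert(struct node *root, struct node *n)
{
	if (!root)
		return n;
	if ((uintptr_t)n < (uintptr_t)root)
		root->l = insert(root->l, n);
	else
		root->r = insert(root->r, n);
	return root;
}

/* Search by pointer value only; the candidate is never dereferenced. */
static int is_alive(struct node *root, struct node *candidate)
{
	while (root) {
		if ((uintptr_t)candidate < (uintptr_t)root)
			root = root->l;
		else if ((uintptr_t)candidate > (uintptr_t)root)
			root = root->r;
		else
			return 1;
	}
	return 0;
}

int main(void)
{
	struct node a = { 0 }, b = { 0 }, stale = { 0 };
	struct node *root = NULL;

	root = insert(root, &a);
	root = insert(root, &b);
	printf("a alive: %d, stale alive: %d\n",
	       is_alive(root, &a), is_alive(root, &stale));
	return 0;
}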
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 2cbea2af7cd0..6d1b0acbc5b3 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -781,22 +781,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
/*
- * The GlobalSAN iSCSI Initiator for MacOSX does
- * not respond to MaxBurstLength, FirstBurstLength,
- * DefaultTime2Wait or DefaultTime2Retain parameter keys.
- * So, we set them to 'reply optional' here, and assume the
- * the defaults from iscsi_parameters.h if the initiator
- * is not RFC compliant and the keys are not negotiated.
- */
- if (!strcmp(param->name, MAXBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, FIRSTBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2WAIT))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2RETAIN))
- SET_PSTATE_REPLY_OPTIONAL(param);
- /*
* Required for gPXE iSCSI boot client
*/
if (!strcmp(param->name, MAXCONNECTIONS))
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 428b0d9e3dba..93590521ae33 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -731,21 +731,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
struct se_cmd *se_cmd = NULL;
int rc;
+ bool op_scsi = false;
/*
* Determine if a struct se_cmd is associated with
* this struct iscsi_cmd.
*/
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
- se_cmd = &cmd->se_cmd;
- __iscsit_free_cmd(cmd, true, shutdown);
+ op_scsi = true;
/*
* Fallthrough
*/
case ISCSI_OP_SCSI_TMFUNC:
- rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
- if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
- __iscsit_free_cmd(cmd, true, shutdown);
+ se_cmd = &cmd->se_cmd;
+ __iscsit_free_cmd(cmd, op_scsi, shutdown);
+ rc = transport_generic_free_cmd(se_cmd, shutdown);
+ if (!rc && shutdown && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, op_scsi, shutdown);
target_put_sess_cmd(se_cmd);
}
break;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index de18790eb21c..d72a4058fd08 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
buf = kzalloc(12, GFP_KERNEL);
if (!buf)
- return;
+ goto out_free;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
* If MODE_SENSE still returns zero, set the default value to 1024.
*/
sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
-out_free:
+
kfree(buf);
}
@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
sd->lun, sd->queue_depth);
}
- dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_block_size =
+ min_not_zero((int)sd->sector_size, 512);
dev->dev_attrib.hw_max_sectors =
- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
- if (sd->type == TYPE_TAPE)
+ if (sd->type == TYPE_TAPE) {
pscsi_tape_read_blocksize(dev, sd);
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ }
return 0;
}
@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
return 0;
}
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
- struct scsi_device *sd)
- __releases(sh->host_lock)
-{
- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
- struct Scsi_Host *sh = sd->host;
- int ret;
-
- spin_unlock_irq(sh->host_lock);
- ret = pscsi_add_device_to_list(dev, sd);
- if (ret)
- return ret;
-
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
- sd->channel, sd->id, sd->lun);
- return 0;
-}
-
static int pscsi_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
case TYPE_DISK:
ret = pscsi_create_type_disk(dev, sd);
break;
- case TYPE_ROM:
- ret = pscsi_create_type_rom(dev, sd);
- break;
default:
- ret = pscsi_create_type_other(dev, sd);
+ ret = pscsi_create_type_nondisk(dev, sd);
break;
}
@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
else if (pdv->pdv_lld_host)
scsi_host_put(pdv->pdv_lld_host);
- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
- scsi_device_put(sd);
+ scsi_device_put(sd);
pdv->pdv_sd = NULL;
}
@@ -1088,7 +1066,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
- dump_stack();
return 0;
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 2e27b1034ede..90c5dffc9fa4 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1096,9 +1096,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
break;
case VERIFY:
+ case VERIFY_16:
size = 0;
- sectors = transport_get_sectors_10(cdb);
- cmd->t_task_lba = transport_lba_32(cdb);
+ if (cdb[0] == VERIFY) {
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ }
cmd->execute_cmd = sbc_emulate_noop;
goto check_lba;
case REZERO_UNIT:
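The sbc_parse_cdb() hunk above extends the VERIFY case to also accept VERIFY(16), pulling the LBA and sector count from the wider CDB layout. A standalone sketch of the two layouts using the standard SCSI byte offsets (the opcode constants and byte helpers are defined locally here, not the target core's transport_* helpers):

#include <stdint.h>
#include <stdio.h>

#define VERIFY_10 0x2f
#define VERIFY_16 0x8f

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static uint64_t get_be64(const uint8_t *p)
{
	return ((uint64_t)get_be32(p) << 32) | get_be32(p + 4);
}

/* Decode LBA and sector count from a VERIFY(10) or VERIFY(16) CDB. */
static int decode_verify(const uint8_t *cdb, uint64_t *lba, uint32_t *sectors)
{
	switch (cdb[0]) {
	case VERIFY_10:
		*lba = get_be32(&cdb[2]);			/* 32-bit LBA, bytes 2-5 */
		*sectors = ((uint32_t)cdb[7] << 8) | cdb[8];	/* length, bytes 7-8 */
		return 0;
	case VERIFY_16:
		*lba = get_be64(&cdb[2]);			/* 64-bit LBA, bytes 2-9 */
		*sectors = get_be32(&cdb[10]);			/* length, bytes 10-13 */
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	uint8_t cdb[16] = { VERIFY_16, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0, 0, 0, 8 };
	uint64_t lba;
	uint32_t sectors;

	if (!decode_verify(cdb, &lba, &sectors))
		printf("lba=%llu sectors=%u\n", (unsigned long long)lba, sectors);
	return 0;
}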
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 80f9de907563..5cc80b80c82b 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -823,7 +823,7 @@ static int receive_data(enum port_type index, struct nozomi *dc)
struct tty_struct *tty = tty_port_tty_get(&port->port);
int i, ret;
- read_mem32((u32 *) &size, addr, 4);
+ size = __le32_to_cpu(readl(addr));
/* DBG1( "%d bytes port: %d", size, index); */
if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 5b24ffd93649..83ff1724ec79 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -57,6 +57,7 @@ struct serial_private {
unsigned int nr;
void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES];
struct pci_serial_quirk *quirk;
+ const struct pciserial_board *board;
int line[0];
};
@@ -4058,6 +4059,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
}
}
priv->nr = i;
+ priv->board = board;
return priv;
err_deinit:
@@ -4068,7 +4070,7 @@ err_out:
}
EXPORT_SYMBOL_GPL(pciserial_init_ports);
-void pciserial_remove_ports(struct serial_private *priv)
+void pciserial_detach_ports(struct serial_private *priv)
{
struct pci_serial_quirk *quirk;
int i;
@@ -4088,7 +4090,11 @@ void pciserial_remove_ports(struct serial_private *priv)
quirk = find_quirk(priv->dev);
if (quirk->exit)
quirk->exit(priv->dev);
+}
+void pciserial_remove_ports(struct serial_private *priv)
+{
+ pciserial_detach_ports(priv);
kfree(priv);
}
EXPORT_SYMBOL_GPL(pciserial_remove_ports);
@@ -5819,7 +5825,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
return PCI_ERS_RESULT_DISCONNECT;
if (priv)
- pciserial_suspend_ports(priv);
+ pciserial_detach_ports(priv);
pci_disable_device(dev);
@@ -5844,9 +5850,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
static void serial8250_io_resume(struct pci_dev *dev)
{
struct serial_private *priv = pci_get_drvdata(dev);
+ const struct pciserial_board *board;
- if (priv)
- pciserial_resume_ports(priv);
+ if (!priv)
+ return;
+
+ board = priv->board;
+ kfree(priv);
+ priv = pciserial_init_ports(dev, board);
+
+ if (!IS_ERR(priv)) {
+ pci_set_drvdata(dev, priv);
+ }
}
static const struct pci_error_handlers serial8250_err_handler = {
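The 8250_pci changes split pciserial_remove_ports() into a detach step that tears the ports down but keeps the private data, and serial8250_io_resume() now rebuilds everything from the board description saved at init time rather than resuming stale state. A toy sketch of that save-config-then-rebuild recovery pattern (made-up struct names, not the 8250 code):

#include <stdio.h>
#include <stdlib.h>

struct board { int nr_ports; };
struct priv  { const struct board *board; int live_ports; };

/* Remember the board description so recovery can rebuild from scratch. */
static struct priv *init_ports(const struct board *b)
{
	struct priv *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->board = b;
	p->live_ports = b->nr_ports;
	return p;
}

/* After an I/O error the old state is discarded and rebuilt from the board. */
static struct priv *recover(struct priv *old)
{
	const struct board *b = old->board;

	free(old);
	return init_ports(b);
}

int main(void)
{
	struct board b = { 4 };
	struct priv *p = init_ports(&b);

	p = recover(p);
	printf("live ports after recovery: %d\n", p ? p->live_ports : 0);
	free(p);
	return 0;
}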
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index a0f911641b04..53e4d5056db7 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -810,6 +810,11 @@ static void atmel_complete_tx_dma(void *arg)
*/
if (!uart_circ_empty(xmit))
tasklet_schedule(&atmel_port->tasklet);
+ else if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+ /* DMA done, stop TX, start RX for RS485 */
+ atmel_start_rx(port);
+ }
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -912,12 +917,6 @@ static void atmel_tx_dma(struct uart_port *port)
desc->callback = atmel_complete_tx_dma;
desc->callback_param = atmel_port;
atmel_port->cookie_tx = dmaengine_submit(desc);
-
- } else {
- if (port->rs485.flags & SER_RS485_ENABLED) {
- /* DMA done, stop TX, start RX for RS485 */
- atmel_start_rx(port);
- }
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1987,6 +1986,11 @@ static void atmel_flush_buffer(struct uart_port *port)
atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
atmel_port->pdc_tx.ofs = 0;
}
+ /*
+ * in uart_flush_buffer(), the xmit circular buffer has just
+ * been cleared, so we have to reset tx_len accordingly.
+ */
+ atmel_port->tx_len = 0;
}
/*
@@ -2499,6 +2503,9 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
+ /* Make sure that tx path is actually able to send characters */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
+
uart_console_write(port, s, count, atmel_console_putchar);
/*
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index db329230c7ca..efaac5eb6592 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -303,15 +303,17 @@ static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
struct device *dev = msm_port->uart.dev;
struct dma_slave_config conf;
struct msm_dma *dma;
+ struct dma_chan *dma_chan;
u32 crci = 0;
int ret;
dma = &msm_port->tx_dma;
/* allocate DMA resources, if available */
- dma->chan = dma_request_slave_channel_reason(dev, "tx");
- if (IS_ERR(dma->chan))
+ dma_chan = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(dma_chan))
goto no_tx;
+ dma->chan = dma_chan;
of_property_read_u32(dev->of_node, "qcom,tx-crci", &crci);
@@ -346,15 +348,17 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
struct device *dev = msm_port->uart.dev;
struct dma_slave_config conf;
struct msm_dma *dma;
+ struct dma_chan *dma_chan;
u32 crci = 0;
int ret;
dma = &msm_port->rx_dma;
/* allocate DMA resources, if available */
- dma->chan = dma_request_slave_channel_reason(dev, "rx");
- if (IS_ERR(dma->chan))
+ dma_chan = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR(dma_chan))
goto no_rx;
+ dma->chan = dma_chan;
of_property_read_u32(dev->of_node, "qcom,rx-crci", &crci);
@@ -1166,26 +1170,17 @@ static int msm_startup(struct uart_port *port)
snprintf(msm_port->name, sizeof(msm_port->name),
"msm_serial%d", port->line);
- ret = request_irq(port->irq, msm_uart_irq, IRQF_TRIGGER_HIGH,
- msm_port->name, port);
- if (unlikely(ret))
- return ret;
-
/*
* UART clk must be kept enabled to
* avoid losing received character
*/
ret = clk_prepare_enable(msm_port->clk);
- if (ret) {
- goto err_clk;
+ if (ret)
return ret;
- }
ret = clk_prepare_enable(msm_port->pclk);
- if (ret) {
+ if (ret)
goto err_pclk;
- return ret;
- }
msm_serial_set_mnd_regs(port);
@@ -1213,12 +1208,21 @@ static int msm_startup(struct uart_port *port)
msm_request_rx_dma(msm_port, msm_port->uart.mapbase);
}
+ ret = request_irq(port->irq, msm_uart_irq, IRQF_TRIGGER_HIGH,
+ msm_port->name, port);
+ if (unlikely(ret))
+ goto err_irq;
+
return 0;
+err_irq:
+ if (msm_port->is_uartdm)
+ msm_release_dma(msm_port);
+
+ clk_disable_unprepare(msm_port->pclk);
+
err_pclk:
clk_disable_unprepare(msm_port->clk);
-err_clk:
- free_irq(port->irq, port);
return ret;
}
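The msm_startup() rework above requests the IRQ only after the clocks and DMA are set up, and unwinds in reverse order through the new err_irq/err_pclk labels when something fails. A standalone sketch of that acquire-in-order, release-in-reverse goto pattern (stub resources, not the driver's):

#include <stdio.h>

static int  enable_clk(void)       { printf("clk on\n");      return 0; }
static void disable_clk(void)      { printf("clk off\n"); }
static int  setup_dma(void)        { printf("dma ready\n");   return 0; }
static void release_dma(void)      { printf("dma freed\n"); }
static int  request_irq_stub(void) { printf("irq request\n"); return -1; /* simulate failure */ }

/* Acquire resources in order; on failure, release what was taken in reverse. */
static int startup(void)
{
	int ret;

	ret = enable_clk();
	if (ret)
		return ret;

	ret = setup_dma();
	if (ret)
		goto err_dma;

	ret = request_irq_stub();
	if (ret)
		goto err_irq;

	return 0;

err_irq:
	release_dma();
err_dma:
	disable_clk();
	return ret;
}

int main(void)
{
	return startup() ? 1 : 0;
}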
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index deaddb950c20..24337ac3323f 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1105,7 +1105,7 @@ static int usbtmc_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "%s called\n", __func__);
- data = kmalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1163,6 +1163,12 @@ static int usbtmc_probe(struct usb_interface *intf,
}
}
+ if (!data->bulk_out || !data->bulk_in) {
+ dev_err(&intf->dev, "bulk endpoints not found\n");
+ retcode = -ENODEV;
+ goto err_put;
+ }
+
retcode = get_capabilities(data);
if (retcode)
dev_err(&intf->dev, "can't read capabilities\n");
@@ -1186,6 +1192,7 @@ static int usbtmc_probe(struct usb_interface *intf,
error_register:
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
+err_put:
kref_put(&data->kref, usbtmc_delete);
return retcode;
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index ac30a051ad71..325cbc9c35d8 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -246,6 +246,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
/*
* Adjust bInterval for quirked devices.
+ */
+ /*
+ * This quirk fixes bIntervals reported in ms.
+ */
+ if (to_usb_device(ddev)->quirks &
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
+ n = clamp(fls(d->bInterval) + 3, i, j);
+ i = j = n;
+ }
+ /*
* This quirk fixes bIntervals reported in
* linear microframes.
*/
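The new USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL handling above converts a bInterval that a quirky device reports in milliseconds into the exponent form high- and super-speed endpoints actually use (period = 2^(bInterval-1) microframes), which works out to fls(ms) + 3. A standalone sketch of that arithmetic, without the clamp to the per-endpoint-type bounds the kernel code applies:

#include <stdio.h>

/* fls(): 1-based index of the highest set bit; fls(0) == 0. */
static int fls_u32(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/*
 * High-/super-speed interrupt endpoints encode their period as
 * 2^(bInterval-1) microframes of 125 us.  A period given in milliseconds
 * is ms * 8 microframes, so the exponent is fls(ms * 8) == fls(ms) + 3.
 */
static int ms_to_binterval(unsigned int ms)
{
	return fls_u32(ms) + 3;
}

int main(void)
{
	unsigned int ms;

	for (ms = 1; ms <= 16; ms *= 2)
		printf("%2u ms -> bInterval %d (2^%d microframes)\n",
		       ms, ms_to_binterval(ms), ms_to_binterval(ms) - 1);
	return 0;
}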
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 9a3bf5e2977f..aa00bb51940b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -499,8 +499,10 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
*/
tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
tbuf = kzalloc(tbuf_size, GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
+ if (!tbuf) {
+ status = -ENOMEM;
+ goto err_alloc;
+ }
bufp = tbuf;
@@ -705,6 +707,7 @@ error:
}
kfree(tbuf);
+ err_alloc:
/* any errors get returned through the urb completion */
spin_lock_irq(&hcd_root_hub_lock);
@@ -966,7 +969,7 @@ static void usb_bus_init (struct usb_bus *bus)
bus->bandwidth_allocated = 0;
bus->bandwidth_int_reqs = 0;
bus->bandwidth_isoc_reqs = 0;
- mutex_init(&bus->usb_address0_mutex);
+ mutex_init(&bus->devnum_next_mutex);
INIT_LIST_HEAD (&bus->bus_list);
}
@@ -2555,6 +2558,14 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
return NULL;
}
if (primary_hcd == NULL) {
+ hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex),
+ GFP_KERNEL);
+ if (!hcd->address0_mutex) {
+ kfree(hcd);
+ dev_dbg(dev, "hcd address0 mutex alloc failed\n");
+ return NULL;
+ }
+ mutex_init(hcd->address0_mutex);
hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
GFP_KERNEL);
if (!hcd->bandwidth_mutex) {
@@ -2566,6 +2577,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
dev_set_drvdata(dev, hcd);
} else {
mutex_lock(&usb_port_peer_mutex);
+ hcd->address0_mutex = primary_hcd->address0_mutex;
hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex;
hcd->primary_hcd = primary_hcd;
primary_hcd->primary_hcd = primary_hcd;
@@ -2622,24 +2634,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
* Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
* deallocated.
*
- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
- * freed. When hcd_release() is called for either hcd in a peer set
- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
- * block new peering attempts
+ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
+ * freed. When hcd_release() is called for either hcd in a peer set,
+ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
*/
static void hcd_release(struct kref *kref)
{
struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
mutex_lock(&usb_port_peer_mutex);
- if (hcd->primary_hcd == hcd)
- kfree(hcd->bandwidth_mutex);
if (hcd->shared_hcd) {
struct usb_hcd *peer = hcd->shared_hcd;
peer->shared_hcd = NULL;
- if (peer->primary_hcd == hcd)
- peer->primary_hcd = NULL;
+ peer->primary_hcd = NULL;
+ } else {
+ kfree(hcd->address0_mutex);
+ kfree(hcd->bandwidth_mutex);
}
mutex_unlock(&usb_port_peer_mutex);
kfree(hcd);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f84ef04284f5..87912ead87b7 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1991,7 +1991,7 @@ static void choose_devnum(struct usb_device *udev)
struct usb_bus *bus = udev->bus;
/* be safe when more hub events are processed in parallel */
- mutex_lock(&bus->usb_address0_mutex);
+ mutex_lock(&bus->devnum_next_mutex);
if (udev->wusb) {
devnum = udev->portnum + 1;
BUG_ON(test_bit(devnum, bus->devmap.devicemap));
@@ -2009,7 +2009,7 @@ static void choose_devnum(struct usb_device *udev)
set_bit(devnum, bus->devmap.devicemap);
udev->devnum = devnum;
}
- mutex_unlock(&bus->usb_address0_mutex);
+ mutex_unlock(&bus->devnum_next_mutex);
}
static void release_devnum(struct usb_device *udev)
@@ -2613,8 +2613,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (ret < 0)
return ret;
- /* The port state is unknown until the reset completes. */
- if (!(portstatus & USB_PORT_STAT_RESET))
+ /*
+ * The port state is unknown until the reset completes.
+ *
+ * On top of that, some chips may require additional time
+ * to re-establish a connection after the reset is complete,
+ * so also wait for the connection to be re-established.
+ */
+ if (!(portstatus & USB_PORT_STAT_RESET) &&
+ (portstatus & USB_PORT_STAT_CONNECTION))
break;
/* switch to the long delay after two short delay failures */
@@ -4212,7 +4219,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
- if (!udev->usb2_hw_lpm_capable)
+ if (!udev->usb2_hw_lpm_capable || !udev->bos)
return;
if (hub)
@@ -4275,7 +4282,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
if (oldspeed == USB_SPEED_LOW)
delay = HUB_LONG_RESET_TIME;
- mutex_lock(&hdev->bus->usb_address0_mutex);
+ mutex_lock(hcd->address0_mutex);
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
@@ -4561,7 +4568,7 @@ fail:
hub_port_disable(hub, port1, 0);
update_devnum(udev, devnum); /* for disconnect processing */
}
- mutex_unlock(&hdev->bus->usb_address0_mutex);
+ mutex_unlock(hcd->address0_mutex);
return retval;
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 24f9f98968a5..96b21b0dac1e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -170,6 +170,14 @@ static const struct usb_device_id usb_quirk_list[] = {
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Baum Vario Ultra */
+ { USB_DEVICE(0x0904, 0x6101), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ { USB_DEVICE(0x0904, 0x6102), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ { USB_DEVICE(0x0904, 0x6103), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index a80fb34cdce8..ad9d6cc4e23f 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -764,6 +764,7 @@ static int dwc3_msm_ep_queue(struct usb_ep *ep,
return 0;
err:
+ list_del(&req_complete->list_item);
spin_unlock_irqrestore(&dwc->lock, flags);
kfree(req_complete);
return ret;
@@ -3596,7 +3597,8 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
}
}
- power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
+ power_supply_get_property(mdwc->usb_psy,
+ POWER_SUPPLY_PROP_REAL_TYPE, &pval);
if (pval.intval != POWER_SUPPLY_TYPE_USB)
return 0;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9608a79cbe40..658fcca485d8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -261,6 +261,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status)
{
struct dwc3 *dwc = dep->dwc;
+ unsigned int unmap_after_complete = false;
int i;
if (req->queued) {
@@ -285,11 +286,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- if (dwc->ep0_bounced && dep->number <= 1)
+ /*
+ * NOTICE we don't want to unmap before calling ->complete() if we're
+ * dealing with a bounced ep0 request. If we unmap it here, we would end
+ * up overwriting the contents of req->buf and this could confuse the
+ * gadget driver.
+ */
+ if (dwc->ep0_bounced && dep->number <= 1) {
dwc->ep0_bounced = false;
-
- usb_gadget_unmap_request(&dwc->gadget, &req->request,
- req->direction);
+ unmap_after_complete = true;
+ } else {
+ usb_gadget_unmap_request(&dwc->gadget,
+ &req->request, req->direction);
+ }
dev_dbg(dwc->dev, "request %pK from %s completed %d/%d ===> %d\n",
req, dep->name, req->request.actual,
@@ -300,6 +309,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
spin_unlock(&dwc->lock);
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
+
+ if (unmap_after_complete)
+ usb_gadget_unmap_request(&dwc->gadget,
+ &req->request, req->direction);
}
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index cd096fb9078f..1ef3442cf618 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -77,9 +77,13 @@ struct acc_dev {
struct usb_ep *ep_in;
struct usb_ep *ep_out;
- /* set to 1 when we connect */
+ /* online indicates state of function_set_alt & function_unbind
+ * set to 1 when we connect
+ */
int online:1;
- /* Set to 1 when we disconnect.
+
+ /* disconnected indicates state of open & release
+ * Set to 1 when we disconnect.
* Not cleared until our file is closed.
*/
int disconnected:1;
@@ -307,7 +311,6 @@ static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
static void acc_set_disconnected(struct acc_dev *dev)
{
- dev->online = 0;
dev->disconnected = 1;
}
@@ -721,9 +724,10 @@ static ssize_t acc_write(struct file *fp, const char __user *buf,
req->zero = 0;
} else {
xfer = count;
- /* If the data length is a multple of the
+ /*
+ * If the data length is a multiple of the
* maxpacket size then send a zero length packet(ZLP).
- */
+ */
req->zero = ((xfer % dev->ep_in->maxpacket) == 0);
}
if (copy_from_user(req->buf, buf, xfer)) {
@@ -808,7 +812,10 @@ static int acc_release(struct inode *ip, struct file *fp)
printk(KERN_INFO "acc_release\n");
WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
- _acc_dev->disconnected = 0;
+ /* indicate that we are disconnected
+ * still could be online so don't touch online flag
+ */
+ _acc_dev->disconnected = 1;
return 0;
}
@@ -868,11 +875,11 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev,
unsigned long flags;
/*
- printk(KERN_INFO "acc_ctrlrequest "
- "%02x.%02x v%04x i%04x l%u\n",
- b_requestType, b_request,
- w_value, w_index, w_length);
-*/
+ * printk(KERN_INFO "acc_ctrlrequest "
+ * "%02x.%02x v%04x i%04x l%u\n",
+ * b_requestType, b_request,
+ * w_value, w_index, w_length);
+ */
if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
if (b_request == ACCESSORY_START) {
@@ -1069,6 +1076,10 @@ acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
struct usb_request *req;
int i;
+ dev->online = 0; /* clear online flag */
+ wake_up(&dev->read_wq); /* unblock reads on closure */
+ wake_up(&dev->write_wq); /* likewise for writes */
+
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
@@ -1200,6 +1211,7 @@ static int acc_function_set_alt(struct usb_function *f,
}
dev->online = 1;
+ dev->disconnected = 0; /* if online then not disconnected */
/* readers may be blocked waiting for us to go online */
wake_up(&dev->read_wq);
@@ -1212,7 +1224,8 @@ static void acc_function_disable(struct usb_function *f)
struct usb_composite_dev *cdev = dev->cdev;
DBG(cdev, "acc_function_disable\n");
- acc_set_disconnected(dev);
+ acc_set_disconnected(dev); /* this now only sets disconnected */
+ dev->online = 0; /* so now need to clear online flag here too */
usb_ep_disable(dev->ep_in);
usb_ep_disable(dev->ep_out);
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 651e4afe0520..1bcfe819fad3 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm)
{
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
int status;
+ __le16 serial_state;
spin_lock(&acm->lock);
if (acm->notify_req) {
dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n",
acm->port_num, acm->serial_state);
+ serial_state = cpu_to_le16(acm->serial_state);
status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
- 0, &acm->serial_state, sizeof(acm->serial_state));
+ 0, &serial_state, sizeof(acm->serial_state));
} else {
acm->pending = true;
status = 0;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 5bcff5d2cd8d..79f554f1fb23 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -167,6 +167,15 @@ static struct usb_endpoint_descriptor bulk_in_desc = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+ .bLength = sizeof(ss_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
/* B.6.2 Class-specific MS Bulk IN Endpoint Descriptor */
static struct usb_ms_endpoint_descriptor_16 ms_in_desc = {
/* .bLength = DYNAMIC */
@@ -361,7 +370,9 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* allocate a bunch of read buffers and queue them all at once. */
for (i = 0; i < midi->qlen && err == 0; i++) {
struct usb_request *req =
- midi_alloc_ep_req(midi->out_ep, midi->buflen);
+ midi_alloc_ep_req(midi->out_ep,
+ max_t(unsigned, midi->buflen,
+ bulk_out_desc.wMaxPacketSize));
if (req == NULL)
return -ENOMEM;
@@ -720,6 +731,7 @@ fail:
static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_descriptor_header **midi_function;
+ struct usb_descriptor_header **midi_ss_function;
struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
@@ -727,7 +739,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_composite_dev *cdev = c->cdev;
struct f_midi *midi = func_to_midi(f);
struct usb_string *us;
- int status, n, jack = 1, i = 0;
+ int status, n, jack = 1, i = 0, j = 0;
midi->gadget = cdev->gadget;
tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
@@ -767,11 +779,20 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
if (!midi->out_ep)
goto fail;
+ /* allocate temporary function list for ss */
+ midi_ss_function = kcalloc((MAX_PORTS * 4) + 11,
+ sizeof(*midi_ss_function), GFP_KERNEL);
+ if (!midi_ss_function) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
/* allocate temporary function list */
midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(*midi_function),
GFP_KERNEL);
if (!midi_function) {
status = -ENOMEM;
+ kfree(midi_ss_function);
goto fail;
}
@@ -785,6 +806,12 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
midi_function[i++] = (struct usb_descriptor_header *) &ac_interface_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ac_header_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ms_interface_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ac_interface_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ac_header_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ms_interface_desc;
/* calculate the header's wTotalLength */
n = USB_DT_MS_HEADER_SIZE
@@ -793,6 +820,8 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
ms_header_desc.wTotalLength = cpu_to_le16(n);
midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ms_header_desc;
/* configure the external IN jacks, each linked to an embedded OUT jack */
for (n = 0; n < midi->in_ports; n++) {
@@ -806,6 +835,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
in_ext->bJackID = jack++;
in_ext->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) in_ext;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) in_ext;
out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1);
out_emb->bDescriptorType = USB_DT_CS_INTERFACE;
@@ -817,6 +847,8 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
out_emb->pins[0].baSourceID = in_ext->bJackID;
out_emb->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) out_emb;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) out_emb;
/* link it to the endpoint */
ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
@@ -834,6 +866,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
in_emb->bJackID = jack++;
in_emb->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) in_emb;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) in_emb;
out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
out_ext->bDescriptorType = USB_DT_CS_INTERFACE;
@@ -845,6 +878,8 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
out_ext->pins[0].baSourceID = in_emb->bJackID;
out_ext->pins[0].baSourcePin = 1;
midi_function[i++] = (struct usb_descriptor_header *) out_ext;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) out_ext;
/* link it to the endpoint */
ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
@@ -864,6 +899,16 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
midi_function[i++] = (struct usb_descriptor_header *) &ms_in_desc;
midi_function[i++] = NULL;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &bulk_out_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ss_bulk_comp_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &ms_out_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &bulk_in_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ss_bulk_comp_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &ms_in_desc;
+ midi_ss_function[j++] = NULL;
+
/*
* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -882,13 +927,23 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
goto fail_f_midi;
}
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(1024);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(1024);
+ f->ss_descriptors = usb_copy_descriptors(midi_ss_function);
+ if (!f->ss_descriptors)
+ goto fail_f_midi;
+ }
+
kfree(midi_function);
+ kfree(midi_ss_function);
return 0;
fail_f_midi:
kfree(midi_function);
usb_free_descriptors(f->hs_descriptors);
+ kfree(midi_ss_function);
fail:
f_midi_unregister_card(midi);
fail_register:
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 4a0b3a0aa65e..6ee21d039415 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -1490,6 +1490,7 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
mtp_fullspeed_out_desc.bEndpointAddress;
}
+ fi_mtp->func_inst.f = &dev->function;
DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
gadget_is_superspeed(c->cdev->gadget) ? "super" :
(gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
@@ -1501,9 +1502,10 @@ static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct mtp_dev *dev = func_to_mtp(f);
+ struct mtp_instance *fi_mtp;
struct usb_request *req;
int i;
-
+ fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
mutex_lock(&dev->read_mutex);
while ((req = mtp_req_get(dev, &dev->tx_idle)))
@@ -1517,6 +1519,7 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
dev->is_ptp = false;
kfree(f->os_desc_table);
f->os_desc_n = 0;
+ fi_mtp->func_inst.f = NULL;
}
static int mtp_function_set_alt(struct usb_function *f,
@@ -1854,6 +1857,8 @@ struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
config_group_init_type_name(&fi_mtp->func_inst.group,
"", &mtp_func_type);
+ mutex_init(&fi_mtp->dev->read_mutex);
+
return &fi_mtp->func_inst;
}
EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
@@ -1914,9 +1919,7 @@ struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
dev->function.setup = mtp_ctrlreq_configfs;
dev->function.free_func = mtp_free;
dev->is_ptp = !mtp_config;
- fi->f = &dev->function;
- mutex_init(&dev->read_mutex);
return &dev->function;
}
EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 29b41b5dee04..c7689d05356c 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -625,7 +625,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
uvc_ss_streaming_comp.wBytesPerInterval =
cpu_to_le16(max_packet_size * max_packet_mult *
- opts->streaming_maxburst);
+ (opts->streaming_maxburst + 1));
/* Allocate endpoints. */
ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 43e054666b68..9123f1635843 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -66,7 +66,7 @@ struct eth_dev {
spinlock_t req_lock; /* guard {rx,tx}_reqs */
struct list_head tx_reqs, rx_reqs;
- unsigned tx_qlen;
+ atomic_t tx_qlen;
/* Minimum number of TX USB request queued to UDC */
#define TX_REQ_THRESHOLD 5
int no_tx_req_used;
@@ -568,6 +568,7 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
dev_kfree_skb_any(skb);
}
+ atomic_dec(&dev->tx_qlen);
if (netif_carrier_ok(dev->net))
netif_wake_queue(dev->net);
}
@@ -741,20 +742,13 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
req->length = length;
- /* throttle highspeed IRQ rate back slightly */
- if (gadget_is_dualspeed(dev->gadget) &&
- (dev->gadget->speed == USB_SPEED_HIGH) &&
- !list_empty(&dev->tx_reqs)) {
- dev->tx_qlen++;
- if (dev->tx_qlen == (dev->qmult/2)) {
- req->no_interrupt = 0;
- dev->tx_qlen = 0;
- } else {
- req->no_interrupt = 1;
- }
- } else {
- req->no_interrupt = 0;
- }
+ /* throttle high/super speed IRQ rate back slightly */
+ if (gadget_is_dualspeed(dev->gadget))
+ req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
+ dev->gadget->speed == USB_SPEED_SUPER)) &&
+ !list_empty(&dev->tx_reqs))
+ ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
+ : 0;
retval = usb_ep_queue(in, req, GFP_ATOMIC);
switch (retval) {
@@ -763,6 +757,7 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
break;
case 0:
net->trans_start = jiffies;
+ atomic_inc(&dev->tx_qlen);
}
if (retval) {
@@ -791,7 +786,7 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
rx_fill(dev, gfp_flags);
/* and open the tx floodgates */
- dev->tx_qlen = 0;
+ atomic_set(&dev->tx_qlen, 0);
netif_wake_queue(dev->net);
}
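In the u_ether.c hunk above, tx_qlen becomes an atomic counter and the high/super-speed TX path now asks for a completion interrupt only on every qmult-th queued request, setting no_interrupt on the rest. A tiny standalone sketch of that modulo-based interrupt moderation (simplified, ignoring the empty-tx_reqs check the driver also makes):

#include <stdio.h>

/*
 * Interrupt moderation: only every qmult-th queued TX request asks for a
 * completion interrupt; the rest set no_interrupt and are reaped later.
 */
static int no_interrupt(unsigned int queued_so_far, unsigned int qmult)
{
	return (queued_so_far % qmult) != 0;
}

int main(void)
{
	unsigned int qmult = 5, i;

	for (i = 0; i < 12; i++)
		printf("req %2u: no_interrupt=%d\n", i, no_interrupt(i, qmult));
	return 0;
}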
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index f7a7fc21be8a..e8f9172880c4 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -268,3 +268,13 @@ config USB_CHAOSKEY
To compile this driver as a module, choose M here: the
module will be called chaoskey.
+
+config USB_QTI_KS_BRIDGE
+ tristate "USB QTI kick start bridge"
+ depends on USB
+ help
+ Say Y here if you have a QTI modem device connected via USB that
+ will be bridged in kernel space. This driver works as a bridge to pass
+ boot images, ram-dumps and efs sync.
+ To compile this driver as a module, choose M here: the module
+ will be called ks_bridge. If unsure, choose N.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 45fd4ac39d3e..616902bce450 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -29,3 +29,5 @@ obj-$(CONFIG_USB_CHAOSKEY) += chaoskey.o
obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
obj-$(CONFIG_USB_LINK_LAYER_TEST) += lvstest.o
+
+obj-$(CONFIG_USB_QTI_KS_BRIDGE) += ks_bridge.o
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 4e38683c653c..6d4e75785710 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -346,6 +346,9 @@ static int idmouse_probe(struct usb_interface *interface,
if (iface_desc->desc.bInterfaceClass != 0x0A)
return -ENODEV;
+ if (iface_desc->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL)
diff --git a/drivers/usb/misc/ks_bridge.c b/drivers/usb/misc/ks_bridge.c
new file mode 100644
index 000000000000..35f652c281bb
--- /dev/null
+++ b/drivers/usb/misc/ks_bridge.c
@@ -0,0 +1,1105 @@
+/*
+ * Copyright (c) 2012-2014, 2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* add additional information to our printk's */
+#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+
+#define DRIVER_DESC "USB host ks bridge driver"
+
+enum bus_id {
+ BUS_HSIC,
+ BUS_USB,
+ BUS_UNDEF,
+};
+
+#define BUSNAME_LEN 20
+
+static enum bus_id str_to_busid(const char *name)
+{
+ if (!strncasecmp("msm_hsic_host", name, BUSNAME_LEN))
+ return BUS_HSIC;
+ if (!strncasecmp("msm_ehci_host.0", name, BUSNAME_LEN))
+ return BUS_USB;
+ if (!strncasecmp("xhci-hcd.0.auto", name, BUSNAME_LEN))
+ return BUS_USB;
+
+ return BUS_UNDEF;
+}
+
+struct data_pkt {
+ int n_read;
+ char *buf;
+ size_t len;
+ struct list_head list;
+ void *ctxt;
+};
+
+#define FILE_OPENED BIT(0)
+#define USB_DEV_CONNECTED BIT(1)
+#define NO_RX_REQS 10
+#define NO_BRIDGE_INSTANCES 4
+#define EFS_HSIC_BRIDGE_INDEX 2
+#define EFS_USB_BRIDGE_INDEX 3
+#define MAX_DATA_PKT_SIZE 16384
+#define PENDING_URB_TIMEOUT 10
+
+struct ksb_dev_info {
+ const char *name;
+};
+
+struct ks_bridge {
+ char *name;
+ spinlock_t lock;
+ struct workqueue_struct *wq;
+ struct work_struct to_mdm_work;
+ struct work_struct start_rx_work;
+ struct list_head to_mdm_list;
+ struct list_head to_ks_list;
+ wait_queue_head_t ks_wait_q;
+ wait_queue_head_t pending_urb_wait;
+ atomic_t tx_pending_cnt;
+ atomic_t rx_pending_cnt;
+
+ struct ksb_dev_info id_info;
+
+ /* cdev interface */
+ dev_t cdev_start_no;
+ struct cdev cdev;
+ struct class *class;
+ struct device *device;
+
+ /* usb specific */
+ struct usb_device *udev;
+ struct usb_interface *ifc;
+ __u8 in_epAddr;
+ __u8 out_epAddr;
+ unsigned int in_pipe;
+ unsigned int out_pipe;
+ struct usb_anchor submitted;
+
+ unsigned long flags;
+
+ /* to handle INT IN ep */
+ unsigned int period;
+
+#define DBG_MSG_LEN 40
+#define DBG_MAX_MSG 500
+ unsigned int dbg_idx;
+ rwlock_t dbg_lock;
+
+ char (dbgbuf[DBG_MAX_MSG])[DBG_MSG_LEN]; /* buffer */
+};
+
+struct ks_bridge *__ksb[NO_BRIDGE_INSTANCES];
+
+/* by default debugging is enabled */
+static unsigned int enable_dbg = 1;
+module_param(enable_dbg, uint, S_IRUGO | S_IWUSR);
+
+static void
+dbg_log_event(struct ks_bridge *ksb, char *event, int d1, int d2)
+{
+ unsigned long flags;
+ unsigned long long t;
+ unsigned long nanosec;
+
+ if (!enable_dbg)
+ return;
+
+ write_lock_irqsave(&ksb->dbg_lock, flags);
+ t = cpu_clock(smp_processor_id());
+ nanosec = do_div(t, 1000000000)/1000;
+ scnprintf(ksb->dbgbuf[ksb->dbg_idx], DBG_MSG_LEN, "%5lu.%06lu:%s:%x:%x",
+ (unsigned long)t, nanosec, event, d1, d2);
+
+ ksb->dbg_idx++;
+ ksb->dbg_idx = ksb->dbg_idx % DBG_MAX_MSG;
+ write_unlock_irqrestore(&ksb->dbg_lock, flags);
+}
+
+static
+struct data_pkt *ksb_alloc_data_pkt(size_t count, gfp_t flags, void *ctxt)
+{
+ struct data_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct data_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(count, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pkt->len = count;
+ INIT_LIST_HEAD(&pkt->list);
+ pkt->ctxt = ctxt;
+
+ return pkt;
+}
+
+static void ksb_free_data_pkt(struct data_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+
+
+static void
+submit_one_urb(struct ks_bridge *ksb, gfp_t flags, struct data_pkt *pkt);
+static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int ret;
+ unsigned long flags;
+ struct ks_bridge *ksb = fp->private_data;
+ struct data_pkt *pkt = NULL;
+ size_t space, copied;
+
+read_start:
+ if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ if (list_empty(&ksb->to_ks_list)) {
+ spin_unlock_irqrestore(&ksb->lock, flags);
+ ret = wait_event_interruptible(ksb->ks_wait_q,
+ !list_empty(&ksb->to_ks_list) ||
+ !test_bit(USB_DEV_CONNECTED, &ksb->flags));
+ if (ret < 0)
+ return ret;
+
+ goto read_start;
+ }
+
+ space = count;
+ copied = 0;
+ while (!list_empty(&ksb->to_ks_list) && space &&
+ test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
+ size_t len;
+
+ pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
+ list_del_init(&pkt->list);
+ len = min_t(size_t, space, pkt->len - pkt->n_read);
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
+ if (ret) {
+ dev_err(ksb->device,
+ "copy_to_user failed err:%d\n", ret);
+ ksb_free_data_pkt(pkt);
+ return -EFAULT;
+ }
+
+ pkt->n_read += len;
+ space -= len;
+ copied += len;
+
+ if (pkt->n_read == pkt->len) {
+ /*
+ * re-init the packet and queue it
+ * for more data.
+ */
+ pkt->n_read = 0;
+ pkt->len = MAX_DATA_PKT_SIZE;
+ submit_one_urb(ksb, GFP_KERNEL, pkt);
+ pkt = NULL;
+ }
+ spin_lock_irqsave(&ksb->lock, flags);
+ }
+
+ /* put the partial packet back in the list */
+ if (!space && pkt && pkt->n_read != pkt->len) {
+ if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ list_add(&pkt->list, &ksb->to_ks_list);
+ else
+ ksb_free_data_pkt(pkt);
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ dbg_log_event(ksb, "KS_READ", copied, 0);
+
+ dev_dbg(ksb->device, "count:%zu space:%zu copied:%zu", count,
+ space, copied);
+
+ return copied;
+}
+
+static void ksb_tx_cb(struct urb *urb)
+{
+ struct data_pkt *pkt = urb->context;
+ struct ks_bridge *ksb = pkt->ctxt;
+
+ dbg_log_event(ksb, "C TX_URB", urb->status, 0);
+ dev_dbg(&ksb->udev->dev, "status:%d", urb->status);
+
+ if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ usb_autopm_put_interface_async(ksb->ifc);
+
+ if (urb->status < 0)
+ pr_err_ratelimited("%s: urb failed with err:%d",
+ ksb->id_info.name, urb->status);
+
+ ksb_free_data_pkt(pkt);
+
+ atomic_dec(&ksb->tx_pending_cnt);
+ wake_up(&ksb->pending_urb_wait);
+}
+
+static void ksb_tomdm_work(struct work_struct *w)
+{
+ struct ks_bridge *ksb = container_of(w, struct ks_bridge, to_mdm_work);
+ struct data_pkt *pkt;
+ unsigned long flags;
+ struct urb *urb;
+ int ret;
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ while (!list_empty(&ksb->to_mdm_list)
+ && test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
+ pkt = list_first_entry(&ksb->to_mdm_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ dbg_log_event(ksb, "TX_URB_MEM_FAIL", -ENOMEM, 0);
+ pr_err_ratelimited("%s: unable to allocate urb",
+ ksb->id_info.name);
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+
+ ret = usb_autopm_get_interface(ksb->ifc);
+ if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
+ dbg_log_event(ksb, "TX_URB_AUTOPM_FAIL", ret, 0);
+ pr_err_ratelimited("%s: autopm_get failed:%d",
+ ksb->id_info.name, ret);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+ usb_fill_bulk_urb(urb, ksb->udev, ksb->out_pipe,
+ pkt->buf, pkt->len, ksb_tx_cb, pkt);
+ usb_anchor_urb(urb, &ksb->submitted);
+
+ dbg_log_event(ksb, "S TX_URB", pkt->len, 0);
+
+ atomic_inc(&ksb->tx_pending_cnt);
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(&ksb->udev->dev, "out urb submission failed");
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ usb_autopm_put_interface(ksb->ifc);
+ atomic_dec(&ksb->tx_pending_cnt);
+ wake_up(&ksb->pending_urb_wait);
+ return;
+ }
+
+ usb_free_urb(urb);
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+}
+
+static ssize_t ksb_fs_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int ret;
+ struct data_pkt *pkt;
+ unsigned long flags;
+ struct ks_bridge *ksb = fp->private_data;
+
+ if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ return -ENODEV;
+
+ if (count > MAX_DATA_PKT_SIZE)
+ count = MAX_DATA_PKT_SIZE;
+
+ pkt = ksb_alloc_data_pkt(count, GFP_KERNEL, ksb);
+ if (IS_ERR(pkt)) {
+ dev_err(ksb->device,
+ "unable to allocate data packet");
+ return PTR_ERR(pkt);
+ }
+
+ ret = copy_from_user(pkt->buf, buf, count);
+ if (ret) {
+ dev_err(ksb->device,
+ "copy_from_user failed: %d bytes not copied", ret);
+ ksb_free_data_pkt(pkt);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ list_add_tail(&pkt->list, &ksb->to_mdm_list);
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ queue_work(ksb->wq, &ksb->to_mdm_work);
+
+ dbg_log_event(ksb, "KS_WRITE", count, 0);
+
+ return count;
+}
+
+static int ksb_fs_open(struct inode *ip, struct file *fp)
+{
+ struct ks_bridge *ksb =
+ container_of(ip->i_cdev, struct ks_bridge, cdev);
+
+ if (IS_ERR(ksb)) {
+ pr_err("ksb device not found");
+ return -ENODEV;
+ }
+
+ dev_dbg(ksb->device, ":%s", ksb->id_info.name);
+ dbg_log_event(ksb, "FS-OPEN", 0, 0);
+
+ fp->private_data = ksb;
+ set_bit(FILE_OPENED, &ksb->flags);
+
+ if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ queue_work(ksb->wq, &ksb->start_rx_work);
+
+ return 0;
+}
+
+static unsigned int ksb_fs_poll(struct file *file, poll_table *wait)
+{
+ struct ks_bridge *ksb = file->private_data;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ return POLLERR;
+
+ poll_wait(file, &ksb->ks_wait_q, wait);
+ if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ return POLLERR;
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ if (!list_empty(&ksb->to_ks_list))
+ ret = POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ return ret;
+}
+
+static int ksb_fs_release(struct inode *ip, struct file *fp)
+{
+ struct ks_bridge *ksb = fp->private_data;
+
+ if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ dev_dbg(ksb->device, ":%s", ksb->id_info.name);
+ dbg_log_event(ksb, "FS-RELEASE", 0, 0);
+
+ clear_bit(FILE_OPENED, &ksb->flags);
+ fp->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations ksb_fops = {
+ .owner = THIS_MODULE,
+ .read = ksb_fs_read,
+ .write = ksb_fs_write,
+ .open = ksb_fs_open,
+ .release = ksb_fs_release,
+ .poll = ksb_fs_poll,
+};
+
+static struct ksb_dev_info ksb_fboot_dev[] = {
+ {
+ .name = "ks_hsic_bridge",
+ },
+ {
+ .name = "ks_usb_bridge",
+ },
+};
+
+static struct ksb_dev_info ksb_efs_hsic_dev = {
+ .name = "efs_hsic_bridge",
+};
+
+static struct ksb_dev_info ksb_efs_usb_dev = {
+ .name = "efs_usb_bridge",
+};
+
+static const struct usb_device_id ksb_usb_ids[] = {
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9008, 0),
+ .driver_info = (unsigned long)&ksb_fboot_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9025, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9091, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x901D, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x900E, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9048, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x904C, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9075, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9079, 2),
+ .driver_info = (unsigned long)&ksb_efs_usb_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x908A, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x908E, 3),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909C, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909D, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909E, 3),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909F, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x90A0, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x90A4, 3),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+
+ {} /* terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, ksb_usb_ids);
+
+static void ksb_rx_cb(struct urb *urb);
+static void
+submit_one_urb(struct ks_bridge *ksb, gfp_t flags, struct data_pkt *pkt)
+{
+ struct urb *urb;
+ int ret;
+
+ urb = usb_alloc_urb(0, flags);
+ if (!urb) {
+ dev_err(&ksb->udev->dev, "unable to allocate urb");
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+
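+ /* a non-zero period means the IN endpoint is interrupt rather than bulk */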
+ if (ksb->period)
+ usb_fill_int_urb(urb, ksb->udev, ksb->in_pipe,
+ pkt->buf, pkt->len,
+ ksb_rx_cb, pkt, ksb->period);
+ else
+ usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
+ pkt->buf, pkt->len,
+ ksb_rx_cb, pkt);
+
+ usb_anchor_urb(urb, &ksb->submitted);
+
+ if (!test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+
+ atomic_inc(&ksb->rx_pending_cnt);
+ ret = usb_submit_urb(urb, flags);
+ if (ret) {
+ dev_err(&ksb->udev->dev, "in urb submission failed");
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ atomic_dec(&ksb->rx_pending_cnt);
+ wake_up(&ksb->pending_urb_wait);
+ return;
+ }
+
+ dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
+ usb_free_urb(urb);
+}
+
+static void ksb_rx_cb(struct urb *urb)
+{
+ struct data_pkt *pkt = urb->context;
+ struct ks_bridge *ksb = pkt->ctxt;
+ bool wakeup = true;
+
+ dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);
+
+ dev_dbg(&ksb->udev->dev, "status:%d actual:%d", urb->status,
+ urb->actual_length);
+
+ /* non-zero length data received while unlinking the urb */
+ if (urb->status == -ENOENT && (urb->actual_length > 0)) {
+ /*
+ * If we wakeup the reader process now, it may
+ * queue the URB before its reject flag gets
+ * cleared.
+ */
+ wakeup = false;
+ goto add_to_list;
+ }
+
+ if (urb->status < 0) {
+ if (urb->status != -ESHUTDOWN && urb->status != -ENOENT
+ && urb->status != -EPROTO)
+ pr_err_ratelimited("%s: urb failed with err:%d",
+ ksb->id_info.name, urb->status);
+
+ if (!urb->actual_length) {
+ ksb_free_data_pkt(pkt);
+ goto done;
+ }
+ }
+
+ usb_mark_last_busy(ksb->udev);
+
+ if (urb->actual_length == 0) {
+ submit_one_urb(ksb, GFP_ATOMIC, pkt);
+ goto done;
+ }
+
+add_to_list:
+ spin_lock(&ksb->lock);
+ pkt->len = urb->actual_length;
+ list_add_tail(&pkt->list, &ksb->to_ks_list);
+ spin_unlock(&ksb->lock);
+ /* wake up read thread */
+ if (wakeup)
+ wake_up(&ksb->ks_wait_q);
+done:
+ atomic_dec(&ksb->rx_pending_cnt);
+ wake_up(&ksb->pending_urb_wait);
+}
+
+static void ksb_start_rx_work(struct work_struct *w)
+{
+ struct ks_bridge *ksb =
+ container_of(w, struct ks_bridge, start_rx_work);
+ struct data_pkt *pkt;
+ struct urb *urb;
+ int i = 0;
+ int ret;
+ bool put = true;
+
+ ret = usb_autopm_get_interface(ksb->ifc);
+ if (ret < 0) {
+ if (ret != -EAGAIN && ret != -EACCES) {
+ pr_err_ratelimited("%s: autopm_get failed:%d",
+ ksb->id_info.name, ret);
+ return;
+ }
+ put = false;
+ }
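+
+ /* prime the IN pipe with NO_RX_REQS receive urbs */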
+ for (i = 0; i < NO_RX_REQS; i++) {
+
+ if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ break;
+
+ pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
+ if (IS_ERR(pkt)) {
+ dev_err(&ksb->udev->dev, "unable to allocate data pkt");
+ break;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ dev_err(&ksb->udev->dev, "unable to allocate urb");
+ ksb_free_data_pkt(pkt);
+ break;
+ }
+
+ if (ksb->period)
+ usb_fill_int_urb(urb, ksb->udev, ksb->in_pipe,
+ pkt->buf, pkt->len,
+ ksb_rx_cb, pkt, ksb->period);
+ else
+ usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
+ pkt->buf, pkt->len,
+ ksb_rx_cb, pkt);
+
+ usb_anchor_urb(urb, &ksb->submitted);
+
+ dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
+ atomic_inc(&ksb->rx_pending_cnt);
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(&ksb->udev->dev, "in urb submission failed");
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ atomic_dec(&ksb->rx_pending_cnt);
+ wake_up(&ksb->pending_urb_wait);
+ break;
+ }
+
+ usb_free_urb(urb);
+ }
+ if (put)
+ usb_autopm_put_interface_async(ksb->ifc);
+}
+
+static int
+ksb_usb_probe(struct usb_interface *ifc, const struct usb_device_id *id)
+{
+ __u8 ifc_num, ifc_count, ksb_port_num;
+ struct usb_host_interface *ifc_desc;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i;
+ struct ks_bridge *ksb;
+ unsigned long flags;
+ struct data_pkt *pkt;
+ struct ksb_dev_info *mdev, *fbdev;
+ struct usb_device *udev;
+ unsigned int bus_id;
+ int ret;
+ bool free_mdev = false;
+
+ ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;
+
+ udev = interface_to_usbdev(ifc);
+ ifc_count = udev->actconfig->desc.bNumInterfaces;
+ fbdev = mdev = (struct ksb_dev_info *)id->driver_info;
+
+ bus_id = str_to_busid(udev->bus->bus_name);
+ if (bus_id == BUS_UNDEF) {
+ dev_err(&udev->dev, "unknown usb bus %s, probe failed\n",
+ udev->bus->bus_name);
+ return -ENODEV;
+ }
+
+ switch (id->idProduct) {
+ case 0x900E:
+ case 0x9025:
+ case 0x9091:
+ case 0x901D:
+ /* 1-1 mapping between ksb and udev port which starts with 1 */
+ ksb_port_num = udev->portnum - 1;
+ dev_dbg(&udev->dev, "ifc_count: %u, port_num:%u\n", ifc_count,
+ ksb_port_num);
+ if (ifc_count > 1)
+ return -ENODEV;
+ if (ksb_port_num >= NO_BRIDGE_INSTANCES) {
+ dev_err(&udev->dev, "port-num:%u invalid. Try first\n",
+ ksb_port_num);
+ ksb_port_num = 0;
+ }
+ ksb = __ksb[ksb_port_num];
+ if (ksb->ifc) {
+ dev_err(&udev->dev, "port already in use\n");
+ return -ENODEV;
+ }
+ mdev = kzalloc(sizeof(struct ksb_dev_info), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+ free_mdev = true;
+ mdev->name = ksb->name;
+ break;
+ case 0x9008:
+ ksb = __ksb[bus_id];
+ mdev = &fbdev[bus_id];
+ break;
+ case 0x9048:
+ case 0x904C:
+ case 0x9075:
+ case 0x908A:
+ case 0x908E:
+ case 0x90A0:
+ case 0x909C:
+ case 0x909D:
+ case 0x909E:
+ case 0x909F:
+ case 0x90A4:
+ ksb = __ksb[EFS_HSIC_BRIDGE_INDEX];
+ break;
+ case 0x9079:
+ if (ifc_num != 2)
+ return -ENODEV;
+ ksb = __ksb[EFS_USB_BRIDGE_INDEX];
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (!ksb) {
+ pr_err("ksb is not initialized");
+ return -ENODEV;
+ }
+
+ ksb->udev = usb_get_dev(interface_to_usbdev(ifc));
+ ksb->ifc = ifc;
+ ifc_desc = ifc->cur_altsetting;
+ ksb->id_info = *mdev;
+
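+ /* pick the first bulk/interrupt IN and bulk OUT endpoints */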
+ for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &ifc_desc->endpoint[i].desc;
+
+ if (!ksb->in_epAddr && (usb_endpoint_is_bulk_in(ep_desc))) {
+ ksb->in_epAddr = ep_desc->bEndpointAddress;
+ ksb->period = 0;
+ }
+
+ if (!ksb->in_epAddr && (usb_endpoint_is_int_in(ep_desc))) {
+ ksb->in_epAddr = ep_desc->bEndpointAddress;
+ ksb->period = ep_desc->bInterval;
+ }
+
+ if (!ksb->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
+ ksb->out_epAddr = ep_desc->bEndpointAddress;
+ }
+
+ if (!(ksb->in_epAddr && ksb->out_epAddr)) {
+ dev_err(&udev->dev,
+ "could not find bulk in and bulk out endpoints");
+ usb_put_dev(ksb->udev);
+ ksb->ifc = NULL;
+ if (free_mdev)
+ kfree(mdev);
+ return -ENODEV;
+ }
+
+ ksb->in_pipe = ksb->period ?
+ usb_rcvintpipe(ksb->udev, ksb->in_epAddr) :
+ usb_rcvbulkpipe(ksb->udev, ksb->in_epAddr);
+
+ ksb->out_pipe = usb_sndbulkpipe(ksb->udev, ksb->out_epAddr);
+
+ usb_set_intfdata(ifc, ksb);
+ set_bit(USB_DEV_CONNECTED, &ksb->flags);
+ atomic_set(&ksb->tx_pending_cnt, 0);
+ atomic_set(&ksb->rx_pending_cnt, 0);
+
+ dbg_log_event(ksb, "PID-ATT", id->idProduct, 0);
+
+ /* free up stale buffers, if any, from a previous disconnect */
+ spin_lock_irqsave(&ksb->lock, flags);
+ while (!list_empty(&ksb->to_ks_list)) {
+ pkt = list_first_entry(&ksb->to_ks_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ }
+ while (!list_empty(&ksb->to_mdm_list)) {
+ pkt = list_first_entry(&ksb->to_mdm_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ ret = alloc_chrdev_region(&ksb->cdev_start_no, 0, 1, mdev->name);
+ if (ret < 0) {
+ dbg_log_event(ksb, "chr reg failed", ret, 0);
+ goto fail_chrdev_region;
+ }
+
+ ksb->class = class_create(THIS_MODULE, mdev->name);
+ if (IS_ERR(ksb->class)) {
+ dbg_log_event(ksb, "clscr failed", PTR_ERR(ksb->class), 0);
+ goto fail_class_create;
+ }
+
+ cdev_init(&ksb->cdev, &ksb_fops);
+ ksb->cdev.owner = THIS_MODULE;
+
+ ret = cdev_add(&ksb->cdev, ksb->cdev_start_no, 1);
+ if (ret < 0) {
+ dbg_log_event(ksb, "cdev_add failed", ret, 0);
+ goto fail_cdev_add;
+ }
+
+ ksb->device = device_create(ksb->class, &udev->dev, ksb->cdev_start_no,
+ NULL, mdev->name);
+ if (IS_ERR(ksb->device)) {
+ dbg_log_event(ksb, "devcrfailed", PTR_ERR(ksb->device), 0);
+ goto fail_device_create;
+ }
+
+ if (device_can_wakeup(&ksb->udev->dev))
+ ifc->needs_remote_wakeup = 1;
+
+ if (free_mdev)
+ kfree(mdev);
+ dev_dbg(&udev->dev, "usb dev connected");
+
+ return 0;
+
+fail_device_create:
+ cdev_del(&ksb->cdev);
+fail_cdev_add:
+ class_destroy(ksb->class);
+fail_class_create:
+ unregister_chrdev_region(ksb->cdev_start_no, 1);
+fail_chrdev_region:
+ usb_set_intfdata(ifc, NULL);
+ clear_bit(USB_DEV_CONNECTED, &ksb->flags);
+ usb_put_dev(ksb->udev);
+ ksb->ifc = NULL;
+
+ if (free_mdev)
+ kfree(mdev);
+
+ return -ENODEV;
+
+}
+
+static int ksb_usb_suspend(struct usb_interface *ifc, pm_message_t message)
+{
+ struct ks_bridge *ksb = usb_get_intfdata(ifc);
+ unsigned long flags;
+
+ dbg_log_event(ksb, "SUSPEND", 0, 0);
+
+ if (pm_runtime_autosuspend_expiration(&ksb->udev->dev)) {
+ dbg_log_event(ksb, "SUSP ABORT-TimeCheck", 0, 0);
+ return -EBUSY;
+ }
+
+ usb_kill_anchored_urbs(&ksb->submitted);
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ if (!list_empty(&ksb->to_ks_list)) {
+ spin_unlock_irqrestore(&ksb->lock, flags);
+ dbg_log_event(ksb, "SUSPEND ABORT", 0, 0);
+ /*
+ * Now wakeup the reader process and queue
+ * Rx URBs for more data.
+ */
+ wake_up(&ksb->ks_wait_q);
+ queue_work(ksb->wq, &ksb->start_rx_work);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ return 0;
+}
+
+static int ksb_usb_resume(struct usb_interface *ifc)
+{
+ struct ks_bridge *ksb = usb_get_intfdata(ifc);
+
+ dbg_log_event(ksb, "RESUME", 0, 0);
+
+ if (test_bit(FILE_OPENED, &ksb->flags))
+ queue_work(ksb->wq, &ksb->start_rx_work);
+
+ return 0;
+}
+
+static void ksb_usb_disconnect(struct usb_interface *ifc)
+{
+ struct ks_bridge *ksb = usb_get_intfdata(ifc);
+ unsigned long flags;
+ struct data_pkt *pkt;
+
+ dbg_log_event(ksb, "PID-DETACH", 0, 0);
+
+ clear_bit(USB_DEV_CONNECTED, &ksb->flags);
+ wake_up(&ksb->ks_wait_q);
+ cancel_work_sync(&ksb->to_mdm_work);
+ cancel_work_sync(&ksb->start_rx_work);
+
+ device_destroy(ksb->class, ksb->cdev_start_no);
+ cdev_del(&ksb->cdev);
+ class_destroy(ksb->class);
+ unregister_chrdev_region(ksb->cdev_start_no, 1);
+
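+ /* cancel in-flight urbs and wait for their completion handlers to finish */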
+ usb_kill_anchored_urbs(&ksb->submitted);
+
+ wait_event_interruptible_timeout(
+ ksb->pending_urb_wait,
+ !atomic_read(&ksb->tx_pending_cnt) &&
+ !atomic_read(&ksb->rx_pending_cnt),
+ msecs_to_jiffies(PENDING_URB_TIMEOUT));
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ while (!list_empty(&ksb->to_ks_list)) {
+ pkt = list_first_entry(&ksb->to_ks_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ }
+ while (!list_empty(&ksb->to_mdm_list)) {
+ pkt = list_first_entry(&ksb->to_mdm_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ ifc->needs_remote_wakeup = 0;
+ usb_put_dev(ksb->udev);
+ ksb->ifc = NULL;
+ usb_set_intfdata(ifc, NULL);
+}
+
+static struct usb_driver ksb_usb_driver = {
+ .name = "ks_bridge",
+ .probe = ksb_usb_probe,
+ .disconnect = ksb_usb_disconnect,
+ .suspend = ksb_usb_suspend,
+ .resume = ksb_usb_resume,
+ .reset_resume = ksb_usb_resume,
+ .id_table = ksb_usb_ids,
+ .supports_autosuspend = 1,
+};
+
+static int ksb_debug_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ struct ks_bridge *ksb = s->private;
+ int i;
+
+ read_lock_irqsave(&ksb->dbg_lock, flags);
+ for (i = 0; i < DBG_MAX_MSG; i++) {
+ if (i == (ksb->dbg_idx - 1))
+ seq_printf(s, "-->%s\n", ksb->dbgbuf[i]);
+ else
+ seq_printf(s, "%s\n", ksb->dbgbuf[i]);
+ }
+ read_unlock_irqrestore(&ksb->dbg_lock, flags);
+
+ return 0;
+}
+
+static int ksb_debug_open(struct inode *ip, struct file *fp)
+{
+ return single_open(fp, ksb_debug_show, ip->i_private);
+}
+
+static const struct file_operations dbg_fops = {
+ .open = ksb_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *dbg_dir;
+
+static int __init ksb_init(void)
+{
+ struct ks_bridge *ksb;
+ int num_instances = 0;
+ int ret = 0;
+ int i;
+
+ dbg_dir = debugfs_create_dir("ks_bridge", NULL);
+ if (IS_ERR(dbg_dir))
+ pr_err("unable to create debug dir");
+
+ for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
+ ksb = kzalloc(sizeof(struct ks_bridge), GFP_KERNEL);
+ if (!ksb) {
+ pr_err("unable to allocat mem for ks_bridge");
+ ret = -ENOMEM;
+ goto dev_free;
+ }
+ __ksb[i] = ksb;
+
+ ksb->name = kasprintf(GFP_KERNEL, "ks_usb_bridge.%i", i);
+ if (!ksb->name) {
+ pr_info("unable to allocate name");
+ kfree(ksb);
+ ret = -ENOMEM;
+ goto dev_free;
+ }
+
+ spin_lock_init(&ksb->lock);
+ INIT_LIST_HEAD(&ksb->to_mdm_list);
+ INIT_LIST_HEAD(&ksb->to_ks_list);
+ init_waitqueue_head(&ksb->ks_wait_q);
+ init_waitqueue_head(&ksb->pending_urb_wait);
+ ksb->wq = create_singlethread_workqueue(ksb->name);
+ if (!ksb->wq) {
+ pr_err("unable to allocate workqueue");
+ kfree(ksb->name);
+ kfree(ksb);
+ ret = -ENOMEM;
+ goto dev_free;
+ }
+
+ INIT_WORK(&ksb->to_mdm_work, ksb_tomdm_work);
+ INIT_WORK(&ksb->start_rx_work, ksb_start_rx_work);
+ init_usb_anchor(&ksb->submitted);
+
+ ksb->dbg_idx = 0;
+ ksb->dbg_lock = __RW_LOCK_UNLOCKED(lck);
+
+ if (!IS_ERR(dbg_dir))
+ debugfs_create_file(ksb->name, S_IRUGO, dbg_dir,
+ ksb, &dbg_fops);
+
+ num_instances++;
+ }
+
+ ret = usb_register(&ksb_usb_driver);
+ if (ret) {
+ pr_err("unable to register ks bridge driver");
+ goto dev_free;
+ }
+
+ pr_info("init done");
+
+ return 0;
+
+dev_free:
+ if (!IS_ERR(dbg_dir))
+ debugfs_remove_recursive(dbg_dir);
+
+ for (i = 0; i < num_instances; i++) {
+ ksb = __ksb[i];
+
+ destroy_workqueue(ksb->wq);
+ kfree(ksb->name);
+ kfree(ksb);
+ }
+
+ return ret;
+
+}
+
+static void __exit ksb_exit(void)
+{
+ struct ks_bridge *ksb;
+ int i;
+
+ if (!IS_ERR(dbg_dir))
+ debugfs_remove_recursive(dbg_dir);
+
+ usb_deregister(&ksb_usb_driver);
+
+ for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
+ ksb = __ksb[i];
+
+ destroy_workqueue(ksb->wq);
+ kfree(ksb->name);
+ kfree(ksb);
+ }
+}
+
+module_init(ksb_init);
+module_exit(ksb_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index 86b4e4b2ab9a..383fa007348f 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -370,6 +370,10 @@ static int lvs_rh_probe(struct usb_interface *intf,
hdev = interface_to_usbdev(intf);
desc = intf->cur_altsetting;
+
+ if (desc->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &desc->endpoint[0].desc;
/* valid only for SS root hub */
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index bbd029c9c725..442b6631162e 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -711,6 +711,11 @@ static int uss720_probe(struct usb_interface *intf,
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 3) {
+ usb_put_dev(usbdev);
+ return -ENODEV;
+ }
+
/*
* Allocate parport interface
*/
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index e499b862a946..88f26ac2a185 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -250,8 +250,27 @@ static void cppi41_dma_callback(void *private_data)
transferred < cppi41_channel->packet_sz)
cppi41_channel->prog_len = 0;
- if (cppi41_channel->is_tx)
- empty = musb_is_tx_fifo_empty(hw_ep);
+ if (cppi41_channel->is_tx) {
+ u8 type;
+
+ if (is_host_active(musb))
+ type = hw_ep->out_qh->type;
+ else
+ type = hw_ep->ep_in.type;
+
+ if (type == USB_ENDPOINT_XFER_ISOC)
+ /*
+ * Don't use the early-TX-interrupt workaround below
+			 * for Isoch transfer. Since Isoch transfers are
+			 * periodic, by the time the next transfer is
+			 * scheduled, the current one should be done already.
+ *
+ * This avoids audio playback underrun issue.
+ */
+ empty = true;
+ else
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ }
if (!cppi41_channel->is_tx || empty) {
cppi41_trans_done(cppi41_channel);
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index c76ca5a94557..055c6203577a 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -2365,7 +2365,7 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
pd->vbus_present = val.intval;
ret = power_supply_get_property(pd->usb_psy,
- POWER_SUPPLY_PROP_TYPE, &val);
+ POWER_SUPPLY_PROP_REAL_TYPE, &val);
if (ret) {
usbpd_err(&pd->dev, "Unable to read USB TYPE: %d\n", ret);
return ret;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42cc72e54c05..af67a0de6b5d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,14 @@ static void option_instat_callback(struct urb *urb);
#define BANDRICH_PRODUCT_1012 0x1012
#define QUALCOMM_VENDOR_ID 0x05C6
+/* These Quectel products use Qualcomm's vendor ID */
+#define QUECTEL_PRODUCT_UC20 0x9003
+#define QUECTEL_PRODUCT_UC15 0x9090
+
+#define QUECTEL_VENDOR_ID 0x2c7c
+/* These Quectel products use Quectel's vendor ID */
+#define QUECTEL_PRODUCT_EC21 0x0121
+#define QUECTEL_PRODUCT_EC25 0x0125
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1161,7 +1169,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+ /* Quectel products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ /* Quectel products using Quectel vendor ID */
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 696458db7e3c..38b3f0d8cd58 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -169,6 +169,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 252c7bd9218a..d01496fd27fe 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -39,6 +39,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface,
int result;
struct device *dev = &iface->dev;
+ if (iface->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
result = wa_rpipes_create(wa);
if (result < 0)
goto error_rpipes_create;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 0257f35cfb9d..e75bbe5a10cd 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -825,6 +825,9 @@ static int hwarc_probe(struct usb_interface *iface,
struct hwarc *hwarc;
struct device *dev = &iface->dev;
+ if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
if (uwb_rc == NULL) {
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 2bfc846ac071..6345e85822a4 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
result);
}
+ if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
result = -ENOMEM;
i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
if (i1480_usb == NULL) {
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 9982cb176ce8..830e2fd47642 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -562,8 +562,9 @@ static long vfio_pci_ioctl(void *device_data,
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
+ size_t size;
u8 *data = NULL;
- int ret = 0;
+ int max, ret = 0;
minsz = offsetofend(struct vfio_irq_set, count);
@@ -571,23 +572,31 @@ static long vfio_pci_ioctl(void *device_data,
return -EFAULT;
if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
+ hdr.count >= (U32_MAX - hdr.start) ||
hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
VFIO_IRQ_SET_ACTION_TYPE_MASK))
return -EINVAL;
- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
- size_t size;
- int max = vfio_pci_get_irq_count(vdev, hdr.index);
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
+ if (hdr.start >= max || hdr.start + hdr.count > max)
+ return -EINVAL;
- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
- size = sizeof(uint8_t);
- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
- size = sizeof(int32_t);
- else
- return -EINVAL;
+ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+ case VFIO_IRQ_SET_DATA_NONE:
+ size = 0;
+ break;
+ case VFIO_IRQ_SET_DATA_BOOL:
+ size = sizeof(uint8_t);
+ break;
+ case VFIO_IRQ_SET_DATA_EVENTFD:
+ size = sizeof(int32_t);
+ break;
+ default:
+ return -EINVAL;
+ }
- if (hdr.argsz - minsz < hdr.count * size ||
- hdr.start >= max || hdr.start + hdr.count > max)
+ if (size) {
+ if (hdr.argsz - minsz < hdr.count * size)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 20e9a86d2dcf..5c8f767b6368 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -255,7 +255,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
if (!is_irq_none(vdev))
return -EINVAL;
- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
return -ENOMEM;
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 0582b72ef377..1a9f18b40be6 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -511,6 +511,12 @@ static long tce_iommu_build_v2(struct tce_container *container,
unsigned long hpa;
enum dma_data_direction dirtmp;
+ if (!tbl->it_userspace) {
+ ret = tce_iommu_userspace_view_alloc(tbl);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < pages; ++i) {
struct mm_iommu_table_group_mem_t *mem = NULL;
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
@@ -584,15 +590,6 @@ static long tce_iommu_create_table(struct tce_container *container,
WARN_ON(!ret && !(*ptbl)->it_ops->free);
WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
- if (!ret && container->v2) {
- ret = tce_iommu_userspace_view_alloc(*ptbl);
- if (ret)
- (*ptbl)->it_ops->free(*ptbl);
- }
-
- if (ret)
- decrement_locked_vm(table_size >> PAGE_SHIFT);
-
return ret;
}
@@ -1064,10 +1061,7 @@ static int tce_iommu_take_ownership(struct tce_container *container,
if (!tbl || !tbl->it_map)
continue;
- rc = tce_iommu_userspace_view_alloc(tbl);
- if (!rc)
- rc = iommu_take_ownership(tbl);
-
+ rc = iommu_take_ownership(tbl);
if (rc) {
for (j = 0; j < i; ++j)
iommu_release_ownership(
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 6e92917ba77a..4e3c78d88832 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1168,6 +1168,8 @@ static void fbcon_free_font(struct display *p, bool freefont)
p->userfont = 0;
}
+static void set_vc_hi_font(struct vc_data *vc, bool set);
+
static void fbcon_deinit(struct vc_data *vc)
{
struct display *p = &fb_display[vc->vc_num];
@@ -1203,6 +1205,9 @@ finished:
if (free_font)
vc->vc_font.data = NULL;
+ if (vc->vc_hi_font_mask)
+ set_vc_hi_font(vc, false);
+
if (!con_is_bound(&fb_con))
fbcon_exit();
@@ -2439,32 +2444,10 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
return 0;
}
-static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
- const u8 * data, int userfont)
+/* set/clear vc_hi_font_mask and update vc attrs accordingly */
+static void set_vc_hi_font(struct vc_data *vc, bool set)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
- int resize;
- int cnt;
- char *old_data = NULL;
-
- if (CON_IS_VISIBLE(vc) && softback_lines)
- fbcon_set_origin(vc);
-
- resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
- if (p->userfont)
- old_data = vc->vc_font.data;
- if (userfont)
- cnt = FNTCHARCNT(data);
- else
- cnt = 256;
- vc->vc_font.data = (void *)(p->fontdata = data);
- if ((p->userfont = userfont))
- REFCOUNT(data)++;
- vc->vc_font.width = w;
- vc->vc_font.height = h;
- if (vc->vc_hi_font_mask && cnt == 256) {
+ if (!set) {
vc->vc_hi_font_mask = 0;
if (vc->vc_can_do_color) {
vc->vc_complement_mask >>= 1;
@@ -2487,7 +2470,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
((c & 0xfe00) >> 1) | (c & 0xff);
vc->vc_attr >>= 1;
}
- } else if (!vc->vc_hi_font_mask && cnt == 512) {
+ } else {
vc->vc_hi_font_mask = 0x100;
if (vc->vc_can_do_color) {
vc->vc_complement_mask <<= 1;
@@ -2519,8 +2502,38 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
} else
vc->vc_video_erase_char = c & ~0x100;
}
-
}
+}
+
+static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ const u8 * data, int userfont)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct display *p = &fb_display[vc->vc_num];
+ int resize;
+ int cnt;
+ char *old_data = NULL;
+
+ if (CON_IS_VISIBLE(vc) && softback_lines)
+ fbcon_set_origin(vc);
+
+ resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+ if (p->userfont)
+ old_data = vc->vc_font.data;
+ if (userfont)
+ cnt = FNTCHARCNT(data);
+ else
+ cnt = 256;
+ vc->vc_font.data = (void *)(p->fontdata = data);
+ if ((p->userfont = userfont))
+ REFCOUNT(data)++;
+ vc->vc_font.width = w;
+ vc->vc_font.height = h;
+ if (vc->vc_hi_font_mask && cnt == 256)
+ set_vc_hi_font(vc, false);
+ else if (!vc->vc_hi_font_mask && cnt == 512)
+ set_vc_hi_font(vc, true);
if (resize) {
int cols, rows;
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index f6f65ccce8e7..238e851c0705 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -126,7 +126,11 @@ obj-y += omap2/
obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
-obj-$(CONFIG_FB_MSM) += msm/
+ifeq ($(CONFIG_FB_MSM),y)
+obj-y += msm/
+else
+obj-$(CONFIG_MSM_DBA) += msm/msm_dba/
+endif
obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
diff --git a/drivers/video/fbdev/msm/dsi_status_6g.c b/drivers/video/fbdev/msm/dsi_status_6g.c
index 869ff1d9df37..d24b19ea77ad 100644
--- a/drivers/video/fbdev/msm/dsi_status_6g.c
+++ b/drivers/video/fbdev/msm/dsi_status_6g.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
#include "mdss_dsi.h"
#include "mdss_mdp.h"
+#include "mdss_debug.h"
/*
* mdss_check_te_status() - Check the status of panel for TE based ESD.
@@ -157,6 +158,7 @@ void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval)
ctl->ops.wait_pingpong(ctl, NULL);
pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__);
+ MDSS_XLOG(mipi->mode);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
ret = ctrl_pdata->check_status(ctrl_pdata);
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 796246a856b4..1c984d02755e 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -45,6 +45,7 @@ enum mdss_mdp_clk_type {
MDSS_CLK_MDP_LUT,
MDSS_CLK_MDP_VSYNC,
MDSS_CLK_MNOC_AHB,
+ MDSS_CLK_THROTTLE_AXI,
MDSS_MAX_CLK
};
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c
index e9ba77501b38..2b9c71441d68 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.c
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.c
@@ -119,6 +119,9 @@ static unsigned int __do_compat_ioctl_nr(unsigned int cmd32)
static void __copy_atomic_commit_struct(struct mdp_layer_commit *commit,
struct mdp_layer_commit32 *commit32)
{
+ unsigned int destSize = sizeof(commit->commit_v1.reserved);
+ unsigned int srcSize = sizeof(commit32->commit_v1.reserved);
+ unsigned int count = (destSize <= srcSize ? destSize : srcSize);
commit->version = commit32->version;
commit->commit_v1.flags = commit32->commit_v1.flags;
commit->commit_v1.input_layer_cnt =
@@ -127,7 +130,7 @@ static void __copy_atomic_commit_struct(struct mdp_layer_commit *commit,
commit->commit_v1.right_roi = commit32->commit_v1.right_roi;
commit->commit_v1.bl_level = commit32->commit_v1.bl_level;
memcpy(&commit->commit_v1.reserved, &commit32->commit_v1.reserved,
- sizeof(commit32->commit_v1.reserved));
+ count);
}
static struct mdp_input_layer32 *__create_layer_list32(
@@ -220,6 +223,7 @@ static struct mdp_input_layer *__create_layer_list(
layer->flags = layer32->flags;
layer->pipe_ndx = layer32->pipe_ndx;
+ layer->rect_num = layer32->rect_num;
layer->horz_deci = layer32->horz_deci;
layer->vert_deci = layer32->vert_deci;
layer->z_order = layer32->z_order;
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.h b/drivers/video/fbdev/msm/mdss_compat_utils.h
index 4f44cd1c9471..b7fa401f52d2 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.h
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.h
@@ -515,7 +515,8 @@ struct mdp_input_layer32 {
struct mdp_layer_buffer buffer;
compat_caddr_t pp_info;
int error_code;
- uint32_t reserved[6];
+ uint32_t rect_num;
+ uint32_t reserved[5];
};
struct mdp_output_layer32 {
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
index ddb7a4c31f68..8cb6c7157230 100644
--- a/drivers/video/fbdev/msm/mdss_debug.c
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -1077,7 +1077,7 @@ static ssize_t mdss_debug_perf_bw_limit_read(struct file *file,
struct mdss_data_type *mdata = file->private_data;
struct mdss_max_bw_settings *temp_settings;
int len = 0, i;
- char buf[256];
+ char buf[256] = {'\0'};
if (!mdata)
return -ENODEV;
diff --git a/drivers/video/fbdev/msm/mdss_debug_xlog.c b/drivers/video/fbdev/msm/mdss_debug_xlog.c
index bf4117650e3c..aeefc81657b0 100644
--- a/drivers/video/fbdev/msm/mdss_debug_xlog.c
+++ b/drivers/video/fbdev/msm/mdss_debug_xlog.c
@@ -93,6 +93,48 @@ static inline bool mdss_xlog_is_enabled(u32 flag)
(flag == MDSS_XLOG_ALL && mdss_dbg_xlog.xlog_enable);
}
+static void __halt_vbif_xin(void)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ pr_err("Halting VBIF-XIN\n");
+ MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0, 0xFFFFFFFF, false);
+}
+
+static void __halt_vbif_axi(void)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ pr_err("Halting VBIF-AXI\n");
+ MDSS_VBIF_WRITE(mdata, MMSS_VBIF_AXI_HALT_CTRL0, 0xFFFFFFFF, false);
+}
+
+static void __dump_vbif_state(void)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ unsigned int reg_vbif_src_err, reg_vbif_err_info,
+ reg_vbif_xin_halt_ctrl0, reg_vbif_xin_halt_ctrl1,
+ reg_vbif_axi_halt_ctrl0, reg_vbif_axi_halt_ctrl1;
+
+ reg_vbif_src_err = MDSS_VBIF_READ(mdata,
+ MMSS_VBIF_SRC_ERR, false);
+ reg_vbif_err_info = MDSS_VBIF_READ(mdata,
+ MMSS_VBIF_ERR_INFO, false);
+ reg_vbif_xin_halt_ctrl0 = MDSS_VBIF_READ(mdata,
+ MMSS_VBIF_XIN_HALT_CTRL0, false);
+ reg_vbif_xin_halt_ctrl1 = MDSS_VBIF_READ(mdata,
+ MMSS_VBIF_XIN_HALT_CTRL1, false);
+ reg_vbif_axi_halt_ctrl0 = MDSS_VBIF_READ(mdata,
+ MMSS_VBIF_AXI_HALT_CTRL0, false);
+ reg_vbif_axi_halt_ctrl1 = MDSS_VBIF_READ(mdata,
+ MMSS_VBIF_AXI_HALT_CTRL1, false);
+ pr_err("VBIF SRC_ERR=%x, ERR_INFO=%x\n",
+ reg_vbif_src_err, reg_vbif_err_info);
+ pr_err("VBIF XIN_HALT_CTRL0=%x, XIN_HALT_CTRL1=%x, AXI_HALT_CTRL0=%x, AXI_HALT_CTRL1=%x\n"
+ , reg_vbif_xin_halt_ctrl0, reg_vbif_xin_halt_ctrl1,
+ reg_vbif_axi_halt_ctrl0, reg_vbif_axi_halt_ctrl1);
+}
+
void mdss_xlog(const char *name, int line, int flag, ...)
{
unsigned long flags;
@@ -611,8 +653,17 @@ static void mdss_xlog_dump_array(struct mdss_debug_base *blk_arr[],
mdss_dump_dsi_debug_bus(mdss_dbg_xlog.enable_dsi_dbgbus_dump,
&mdss_dbg_xlog.dsi_dbgbus_dump);
- if (dead && mdss_dbg_xlog.panic_on_err)
+ if (dead && mdss_dbg_xlog.panic_on_err) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ __dump_vbif_state();
+ __halt_vbif_xin();
+ usleep_range(10000, 10010);
+ __halt_vbif_axi();
+ usleep_range(10000, 10010);
+ __dump_vbif_state();
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
panic(name);
+ }
}
static void xlog_debug_work(struct work_struct *work)
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 889fbe5c4aff..c7cac996e5c0 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -662,7 +662,11 @@ static int mdss_dsi_get_dt_vreg_data(struct device *dev,
mp->vreg_config[i].post_off_sleep = tmp;
}
- pr_debug("%s: %s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+ mp->vreg_config[i].lp_disable_allowed =
+ of_property_read_bool(supply_node,
+ "qcom,supply-lp-mode-disable-allowed");
+
+ pr_debug("%s: %s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d lp_disable_allowed=%d\n",
__func__,
mp->vreg_config[i].vreg_name,
mp->vreg_config[i].min_voltage,
@@ -672,8 +676,8 @@ static int mdss_dsi_get_dt_vreg_data(struct device *dev,
mp->vreg_config[i].pre_on_sleep,
mp->vreg_config[i].post_on_sleep,
mp->vreg_config[i].pre_off_sleep,
- mp->vreg_config[i].post_off_sleep
- );
+ mp->vreg_config[i].post_off_sleep,
+ mp->vreg_config[i].lp_disable_allowed);
++i;
}
@@ -4360,6 +4364,9 @@ int dsi_panel_device_register(struct platform_device *ctrl_pdev,
return rc;
}
+ /* default state of gpio is false */
+ ctrl_pdata->bklt_en_gpio_state = false;
+
pinfo->panel_max_fps = mdss_panel_get_framerate(pinfo);
pinfo->panel_max_vtotal = mdss_panel_get_vtotal(pinfo);
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
index 335037860ffe..62d88f0af652 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.h
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -454,6 +454,7 @@ struct mdss_dsi_ctrl_pdata {
int disp_en_gpio;
int bklt_en_gpio;
bool bklt_en_gpio_invert;
+ bool bklt_en_gpio_state;
int lcd_mode_sel_gpio;
int bklt_ctrl; /* backlight ctrl */
bool pwm_pmi;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index 9faa1531c256..60012c71449c 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -260,16 +260,6 @@ static int mdss_dsi_request_gpios(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
rc);
goto rst_gpio_err;
}
- if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) {
- rc = gpio_request(ctrl_pdata->bklt_en_gpio,
- "bklt_enable");
- if (rc) {
- pr_err("request bklt gpio failed, rc=%d\n",
- rc);
- goto bklt_en_gpio_err;
- }
- }
-
if (gpio_is_valid(ctrl_pdata->lcd_mode_sel_gpio)) {
rc = gpio_request(ctrl_pdata->lcd_mode_sel_gpio, "mode_sel");
if (rc) {
@@ -282,9 +272,6 @@ static int mdss_dsi_request_gpios(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
return rc;
lcd_mode_sel_gpio_err:
- if (gpio_is_valid(ctrl_pdata->bklt_en_gpio))
- gpio_free(ctrl_pdata->bklt_en_gpio);
-bklt_en_gpio_err:
gpio_free(ctrl_pdata->rst_gpio);
rst_gpio_err:
if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
@@ -293,6 +280,81 @@ disp_en_gpio_err:
return rc;
}
+int mdss_dsi_bl_gpio_ctrl(struct mdss_panel_data *pdata, int enable)
+{
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ int rc = 0, val = 0;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+ if (ctrl_pdata == NULL) {
+ pr_err("%s: Invalid ctrl data\n", __func__);
+ return -EINVAL;
+ }
+
+ /* if gpio is not valid */
+ if (!gpio_is_valid(ctrl_pdata->bklt_en_gpio))
+ return rc;
+
+ pr_debug("%s: enable = %d\n", __func__, enable);
+
+ /*
+ * if gpio state is false and enable (bl level) is
+ * non zero then toggle the gpio
+ */
+ if (!ctrl_pdata->bklt_en_gpio_state && enable) {
+ rc = gpio_request(ctrl_pdata->bklt_en_gpio, "bklt_enable");
+ if (rc) {
+ pr_err("request bklt gpio failed, rc=%d\n", rc);
+ goto free;
+ }
+
+ if (ctrl_pdata->bklt_en_gpio_invert)
+ val = 0;
+ else
+ val = 1;
+
+ rc = gpio_direction_output(ctrl_pdata->bklt_en_gpio, val);
+ if (rc) {
+ pr_err("%s: unable to set dir for bklt gpio val %d\n",
+ __func__, val);
+ goto free;
+ }
+ ctrl_pdata->bklt_en_gpio_state = true;
+ goto ret;
+ } else if (ctrl_pdata->bklt_en_gpio_state && !enable) {
+ /*
+ * if gpio state is true and enable (bl level) is
+ * zero then toggle the gpio
+ */
+ if (ctrl_pdata->bklt_en_gpio_invert)
+ val = 1;
+ else
+ val = 0;
+
+ rc = gpio_direction_output(ctrl_pdata->bklt_en_gpio, val);
+ if (rc)
+ pr_err("%s: unable to set dir for bklt gpio val %d\n",
+ __func__, val);
+ goto free;
+ }
+
+ /* gpio state is true and bl level is non zero */
+ goto ret;
+
+free:
+ pr_debug("%s: free bklt gpio\n", __func__);
+ ctrl_pdata->bklt_en_gpio_state = false;
+ gpio_free(ctrl_pdata->bklt_en_gpio);
+ret:
+ return rc;
+}
+
int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
{
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
@@ -362,26 +424,6 @@ int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
if (pdata->panel_info.rst_seq[++i])
usleep_range(pinfo->rst_seq[i] * 1000, pinfo->rst_seq[i] * 1000);
}
-
- if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) {
-
- if (ctrl_pdata->bklt_en_gpio_invert) {
- rc = gpio_direction_output(
- ctrl_pdata->bklt_en_gpio, 0);
- gpio_set_value(
- (ctrl_pdata->bklt_en_gpio), 0);
- } else {
- rc = gpio_direction_output(
- ctrl_pdata->bklt_en_gpio, 1);
- gpio_set_value(
- (ctrl_pdata->bklt_en_gpio), 1);
- }
- if (rc) {
- pr_err("%s: unable to set dir for bklt gpio\n",
- __func__);
- goto exit;
- }
- }
}
if (gpio_is_valid(ctrl_pdata->lcd_mode_sel_gpio)) {
@@ -410,15 +452,6 @@ int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
pr_debug("%s: Reset panel done\n", __func__);
}
} else {
- if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) {
-
- if (ctrl_pdata->bklt_en_gpio_invert)
- gpio_set_value((ctrl_pdata->bklt_en_gpio), 1);
- else
- gpio_set_value((ctrl_pdata->bklt_en_gpio), 0);
-
- gpio_free(ctrl_pdata->bklt_en_gpio);
- }
if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
gpio_set_value((ctrl_pdata->disp_en_gpio), 0);
gpio_free(ctrl_pdata->disp_en_gpio);
@@ -801,6 +834,9 @@ static void mdss_dsi_panel_bl_ctrl(struct mdss_panel_data *pdata,
if ((bl_level < pdata->panel_info.bl_min) && (bl_level != 0))
bl_level = pdata->panel_info.bl_min;
+ /* enable the backlight gpio if present */
+ mdss_dsi_bl_gpio_ctrl(pdata, bl_level);
+
switch (ctrl_pdata->bklt_ctrl) {
case BL_WLED:
led_trigger_event(bl_led_trigger, bl_level);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy.h b/drivers/video/fbdev/msm/mdss_dsi_phy.h
index 5fff3123b63f..03df17d81f69 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_phy.h
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -115,4 +115,13 @@ int mdss_dsi_phy_v3_wait_for_lanes_stop_state(struct mdss_dsi_ctrl_pdata *ctrl,
* assumes that the link and core clocks are already on.
*/
int mdss_dsi_phy_v3_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl, bool enable);
+
+/**
+ * mdss_dsi_phy_v3_idle_pc_exit() - Called after Idle Power Collapse exit
+ * @ctrl: pointer to DSI controller structure
+ *
+ * This function is called after Idle Power Collapse, so driver
+ * can perform any sequence required after the Idle PC exit.
+ */
+void mdss_dsi_phy_v3_idle_pc_exit(struct mdss_dsi_ctrl_pdata *ctrl);
#endif /* MDSS_DSI_PHY_H */
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c b/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c
index 992fd51606ca..bb9dbfa32e4e 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy_v3.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -327,6 +327,7 @@ int mdss_dsi_phy_v3_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl, bool enable)
*/
DSI_PHY_W32(ctrl->phy_io.base, CMN_DSI_LANE_CTRL3,
active_lanes);
+ usleep_range(5, 15);
DSI_PHY_W32(ctrl->phy_io.base, CMN_DSI_LANE_CTRL3, 0);
@@ -340,6 +341,20 @@ error:
return rc;
}
+void mdss_dsi_phy_v3_idle_pc_exit(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ u32 val = BIT(5);
+ u32 data;
+
+ /* Reset phy pll after idle pc exit */
+ data = DSI_PHY_R32(ctrl->phy_io.base, CMN_CTRL_1);
+ DSI_PHY_W32(ctrl->phy_io.base, CMN_CTRL_1, data | val);
+ usleep_range(10, 15);
+
+ data = DSI_PHY_R32(ctrl->phy_io.base, CMN_CTRL_1);
+ data &= ~(BIT(5));
+ DSI_PHY_W32(ctrl->phy_io.base, CMN_CTRL_1, data);
+}
int mdss_dsi_phy_v3_shutdown(struct mdss_dsi_ctrl_pdata *ctrl)
{
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 698c5633cf6a..4eca9cb39223 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -2121,6 +2121,7 @@ static int mdss_fb_blank(int blank_mode, struct fb_info *info)
mdss_mdp_enable_panel_disable_mode(mfd, false);
ret = mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
+ MDSS_XLOG(blank_mode);
end:
mutex_unlock(&mfd->mdss_sysfs_lock);
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index 599f6cb44c63..102c22cba7dd 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -1283,6 +1283,7 @@ static void hdmi_edid_extract_speaker_allocation_data(
static void hdmi_edid_extract_sink_caps(struct hdmi_edid_ctrl *edid_ctrl,
const u8 *in_buf)
{
+ u8 len;
const u8 *vsd = NULL;
if (!edid_ctrl) {
@@ -1297,13 +1298,29 @@ static void hdmi_edid_extract_sink_caps(struct hdmi_edid_ctrl *edid_ctrl,
edid_ctrl->basic_audio_supp = false;
pr_debug("%s: basic audio supported: %s\n", __func__,
edid_ctrl->basic_audio_supp ? "true" : "false");
+ vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+ VENDOR_SPECIFIC_DATA_BLOCK, &len);
+
+ if (vsd == NULL || len == 0 || len > MAX_DATA_BLOCK_SIZE)
+ return;
+
+ /* Max TMDS clock is in multiples of 5Mhz. */
+ edid_ctrl->sink_caps.max_pclk_in_hz = vsd[7] * 5000000;
vsd = hdmi_edid_find_hfvsdb(in_buf);
if (vsd) {
- /* Max pixel clock is in multiples of 5Mhz. */
- edid_ctrl->sink_caps.max_pclk_in_hz =
- vsd[5]*5000000;
+ /*
+ * HF-VSDB defines a larger TMDS clock than VSDB. If the sink
+ * supports TMDS Character Rates > 340M, it shall set
+ * Max_TMDS_Character_Rates appropriately (non-zero).
+ * If the sink does not support TMDS Character Rates > 340M,
+ * it shall set this field to 0. The max supported TMDS
+ * clock rate = Max_TMDS_Character_Rates * 5 MHz.
+ */
+ if (vsd[5] != 0)
+ edid_ctrl->sink_caps.max_pclk_in_hz =
+ vsd[5] * 5000000;
edid_ctrl->sink_caps.scdc_present =
(vsd[6] & 0x80) ? true : false;
edid_ctrl->sink_caps.scramble_support =
@@ -2425,6 +2442,25 @@ bool hdmi_edid_is_dvi_mode(void *input)
}
/**
+ * hdmi_edid_get_sink_caps_max_tmds_clk() - get max tmds clock supported.
+ * Sink side's limitation should be concerned as well.
+ * @input: edid parser data
+ *
+ * Return: max tmds clock
+ */
+u32 hdmi_edid_get_sink_caps_max_tmds_clk(void *input)
+{
+ struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return 0;
+ }
+
+ return edid_ctrl->sink_caps.max_pclk_in_hz;
+}
+
+/**
* hdmi_edid_get_deep_color() - get deep color info supported by sink
* @input: edid parser data
*
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.h b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
index 557e9326a81d..af802bb45f89 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
@@ -81,5 +81,6 @@ void hdmi_edid_config_override(void *input, bool enable,
struct hdmi_edid_override_data *data);
void hdmi_edid_set_max_pclk_rate(void *input, u32 max_pclk_khz);
bool hdmi_edid_is_audio_supported(void *input);
+u32 hdmi_edid_get_sink_caps_max_tmds_clk(void *input);
#endif /* __HDMI_EDID_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 42845f9ff192..8fa229aaa174 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -2224,6 +2224,14 @@ static int hdmi_tx_read_sink_info(struct hdmi_tx_ctrl *hdmi_ctrl)
status = hdmi_edid_parser(data);
if (status)
DEV_ERR("%s: edid parse failed\n", __func__);
+ else
+ /*
+ * Update the HDMI max supported TMDS clock, considering
+ * both sink and source capacity.
+ */
+ hdmi_edid_set_max_pclk_rate(data,
+ min(hdmi_edid_get_sink_caps_max_tmds_clk(data) / 1000,
+ hdmi_ctrl->max_pclk_khz));
}
bail:
if (hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, false))
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index a645a3495593..d88d87bd2092 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1373,7 +1373,9 @@ static inline void __mdss_mdp_reg_access_clk_enable(
mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
+ mdss_mdp_clk_update(MDSS_CLK_THROTTLE_AXI, 1);
} else {
+ mdss_mdp_clk_update(MDSS_CLK_THROTTLE_AXI, 0);
mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
@@ -1415,6 +1417,7 @@ static void __mdss_mdp_clk_control(struct mdss_data_type *mdata, bool enable)
mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, 1);
+ mdss_mdp_clk_update(MDSS_CLK_THROTTLE_AXI, 1);
if (mdata->vsync_ena)
mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, 1);
} else {
@@ -1430,6 +1433,7 @@ static void __mdss_mdp_clk_control(struct mdss_data_type *mdata, bool enable)
mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 0);
+ mdss_mdp_clk_update(MDSS_CLK_THROTTLE_AXI, 0);
/* release iommu control */
mdss_iommu_ctrl(0);
@@ -1915,8 +1919,7 @@ static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
if (mdss_mdp_irq_clk_register(mdata, "bus_clk", MDSS_CLK_AXI) ||
mdss_mdp_irq_clk_register(mdata, "iface_clk", MDSS_CLK_AHB) ||
- mdss_mdp_irq_clk_register(mdata, "core_clk",
- MDSS_CLK_MDP_CORE))
+ mdss_mdp_irq_clk_register(mdata, "core_clk", MDSS_CLK_MDP_CORE))
return -EINVAL;
/* lut_clk is not present on all MDSS revisions */
@@ -1928,6 +1931,10 @@ static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
/* this clk is not present on all MDSS revisions */
mdss_mdp_irq_clk_register(mdata, "mnoc_clk", MDSS_CLK_MNOC_AHB);
+ /* this clk is not present on all MDSS revisions */
+ mdss_mdp_irq_clk_register(mdata, "throttle_bus_clk",
+ MDSS_CLK_THROTTLE_AXI);
+
/* Setting the default clock rate to the max supported.*/
mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate, false);
pr_debug("mdp clk rate=%ld\n",
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index a2139f495f52..fd2c2cdb3820 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -2023,6 +2023,8 @@ void mdss_mdp_set_supported_formats(struct mdss_data_type *mdata);
int mdss_mdp_dest_scaler_setup_locked(struct mdss_mdp_mixer *mixer);
void *mdss_mdp_intf_get_ctx_base(struct mdss_mdp_ctl *ctl, int intf_num);
+int mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer);
+
#ifdef CONFIG_FB_MSM_MDP_NONE
struct mdss_data_type *mdss_mdp_get_mdata(void)
{
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 2968d883c8cb..c062de3c1e59 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -4802,7 +4802,7 @@ static void __mdss_mdp_mixer_get_offsets(u32 mixer_num,
offsets[2] = MDSS_MDP_REG_CTL_LAYER_EXTN2(mixer_num);
}
-static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
+int mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -4858,7 +4858,7 @@ static void __mdss_mdp_mixer_write_cfg(struct mdss_mdp_mixer *mixer,
if (!mixer)
return;
- mixer_num = __mdss_mdp_mixer_get_hw_num(mixer);
+ mixer_num = mdss_mdp_mixer_get_hw_num(mixer);
if (cfg) {
for (i = 0; i < NUM_MIXERCFG_REGS; i++)
@@ -4905,7 +4905,7 @@ bool mdss_mdp_mixer_reg_has_pipe(struct mdss_mdp_mixer *mixer,
memset(&mixercfg, 0, sizeof(mixercfg));
- mixer_num = __mdss_mdp_mixer_get_hw_num(mixer);
+ mixer_num = mdss_mdp_mixer_get_hw_num(mixer);
__mdss_mdp_mixer_get_offsets(mixer_num, offs, NUM_MIXERCFG_REGS);
for (i = 0; i < NUM_MIXERCFG_REGS; i++)
@@ -5130,7 +5130,7 @@ static void mdss_mdp_mixer_setup(struct mdss_mdp_ctl *master_ctl,
mixercfg.cursor_enabled = true;
update_mixer:
- mixer_num = __mdss_mdp_mixer_get_hw_num(mixer_hw);
+ mixer_num = mdss_mdp_mixer_get_hw_num(mixer_hw);
ctl_hw->flush_bits |= BIT(mixer_num < 5 ? 6 + mixer_num : 20);
/* Read GC enable/disable status on LM */
@@ -5775,6 +5775,7 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
if (ctl->ops.avr_ctrl_fnc) {
+ /* avr_ctrl_fnc will configure both master & slave */
ret = ctl->ops.avr_ctrl_fnc(ctl, true);
if (ret) {
pr_err("error configuring avr ctrl registers ctl=%d err=%d\n",
@@ -5784,16 +5785,6 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
}
}
- if (sctl && sctl->ops.avr_ctrl_fnc) {
- ret = sctl->ops.avr_ctrl_fnc(sctl, true);
- if (ret) {
- pr_err("error configuring avr ctrl registers sctl=%d err=%d\n",
- sctl->num, ret);
- mutex_unlock(&ctl->lock);
- return ret;
- }
- }
-
mutex_lock(&ctl->flush_lock);
/*
@@ -6046,6 +6037,10 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
ctl_flush_bits |= ctl->flush_bits;
ATRACE_BEGIN("flush_kickoff");
+
+ MDSS_XLOG(ctl->intf_num, ctl_flush_bits, sctl_flush_bits,
+ mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH), split_lm_valid);
+
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush_bits);
if (sctl) {
if (sctl_flush_bits) {
@@ -6057,8 +6052,6 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
}
ctl->commit_in_progress = false;
- MDSS_XLOG(ctl->intf_num, ctl_flush_bits, sctl_flush_bits,
- split_lm_valid);
wmb();
ctl->flush_reg_data = ctl_flush_bits;
ctl->flush_bits = 0;
@@ -6085,8 +6078,9 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
/* update backlight in commit */
if (mdss_mdp_handle_backlight_extn(ctl)) {
- if (!IS_CALIB_MODE_BL(ctl->mfd) && (!ctl->mfd->ext_bl_ctrl ||
- !ctl->mfd->bl_level)) {
+ if (ctl->mfd && !IS_CALIB_MODE_BL(ctl->mfd) &&
+ (!ctl->mfd->ext_bl_ctrl ||
+ !ctl->mfd->bl_level)) {
mutex_lock(&ctl->mfd->bl_lock);
mdss_fb_set_backlight(ctl->mfd,
ctl->mfd->bl_extn_level);
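The ctl.c hunks above rename __mdss_mdp_mixer_get_hw_num() to the exported mdss_mdp_mixer_get_hw_num() and keep the existing CTL flush rule BIT(mixer_num < 5 ? 6 + mixer_num : 20). Below is a minimal standalone sketch of that mapping, assuming the usual layout where LM0-LM4 occupy CTL_FLUSH bits 6-10 and any higher mixer shares bit 20; the helper name is illustrative, not the driver's own.

/*
 * Standalone model of the layer-mixer flush-bit rule used in
 * mdss_mdp_mixer_setup() and mdss_mdp_dest_scaler_setup_locked():
 * LM0..LM4 map to CTL_FLUSH bits 6..10, higher mixers share bit 20.
 */
#include <stdio.h>

static unsigned int lm_flush_bit(int mixer_num)
{
	return 1u << (mixer_num < 5 ? 6 + mixer_num : 20);
}

int main(void)
{
	int lm;

	for (lm = 0; lm < 7; lm++)
		printf("LM%d -> CTL_FLUSH mask 0x%08x\n", lm, lm_flush_bit(lm));
	return 0;
}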
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_debug.c
index 1ad6810a6bb6..1035d23fe9ce 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_debug.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1757,6 +1757,8 @@ void mdss_mdp_hw_rev_debug_caps_init(struct mdss_data_type *mdata)
break;
case MDSS_MDP_HW_REV_300:
case MDSS_MDP_HW_REV_301:
+ case MDSS_MDP_HW_REV_320:
+ case MDSS_MDP_HW_REV_330:
mdata->dbg_bus = dbg_bus_msm8998;
mdata->dbg_bus_size = ARRAY_SIZE(dbg_bus_msm8998);
mdata->vbif_dbg_bus = vbif_dbg_bus_msm8998;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index d9e2b042bfc3..78bfab7d8ce8 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -829,6 +829,8 @@ enum mdss_mdp_pingpong_index {
#define MMSS_VBIF_WR_LIM_CONF 0x0C0
#define MDSS_VBIF_WRITE_GATHER_EN 0x0AC
+#define MMSS_VBIF_SRC_ERR 0x194
+#define MMSS_VBIF_ERR_INFO 0x1A0
#define MMSS_VBIF_XIN_HALT_CTRL0 0x200
#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
#define MMSS_VBIF_AXI_HALT_CTRL0 0x208
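The header hunk only defines the MMSS_VBIF_SRC_ERR and MMSS_VBIF_ERR_INFO offsets; the code that consumes them is not part of this diff. The sketch below is a hypothetical debug helper built on readl_relaxed(); the VBIF base pointer and the dump path are assumptions, only the offsets come from the hunk.

/*
 * Hypothetical debug helper: dump the two VBIF error registers defined
 * above. Only the offsets come from the hunk; the base pointer and the
 * dump path are assumptions.
 */
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

#define MMSS_VBIF_SRC_ERR	0x194
#define MMSS_VBIF_ERR_INFO	0x1A0

static void vbif_dump_err_regs(void __iomem *vbif_base)
{
	u32 src_err  = readl_relaxed(vbif_base + MMSS_VBIF_SRC_ERR);
	u32 err_info = readl_relaxed(vbif_base + MMSS_VBIF_ERR_INFO);

	pr_err("VBIF SRC_ERR=0x%08x ERR_INFO=0x%08x\n", src_err, err_info);
}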
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index 13c70822e266..587150bbc9fa 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -72,6 +72,7 @@ struct mdss_mdp_video_ctx {
u8 ref_cnt;
u8 timegen_en;
+ bool timegen_flush_pending;
bool polling_en;
u32 poll_cnt;
struct completion vsync_comp;
@@ -451,13 +452,32 @@ static int mdss_mdp_video_intf_recovery(void *data, int event)
}
}
+static int mdss_mdp_video_wait_one_frame(struct mdss_mdp_ctl *ctl)
+{
+ u32 frame_time, frame_rate;
+ int ret = 0;
+ struct mdss_panel_data *pdata = ctl->panel_data;
+
+ if (pdata == NULL) {
+ frame_rate = DEFAULT_FRAME_RATE;
+ } else {
+ frame_rate = mdss_panel_get_framerate(&pdata->panel_info);
+ if (!(frame_rate >= 24 && frame_rate <= 240))
+ frame_rate = 24;
+ }
+
+ frame_time = ((1000/frame_rate) + 1);
+
+ msleep(frame_time);
+
+ return ret;
+}
+
static void mdss_mdp_video_avr_vtotal_setup(struct mdss_mdp_ctl *ctl,
struct intf_timing_params *p,
struct mdss_mdp_video_ctx *ctx)
{
struct mdss_data_type *mdata = ctl->mdata;
- struct mdss_mdp_ctl *sctl = NULL;
- struct mdss_mdp_video_ctx *sctx = NULL;
if (test_bit(MDSS_CAPS_AVR_SUPPORTED, mdata->mdss_caps_map)) {
struct mdss_panel_data *pdata = ctl->panel_data;
@@ -484,14 +504,11 @@ static void mdss_mdp_video_avr_vtotal_setup(struct mdss_mdp_ctl *ctl,
/*
* Make sure config goes through
+ * and queue timegen flush
*/
wmb();
- sctl = mdss_mdp_get_split_ctl(ctl);
- if (sctl)
- sctx = (struct mdss_mdp_video_ctx *)
- sctl->intf_ctx[MASTER_CTX];
- mdss_mdp_video_timegen_flush(ctl, sctx);
+ ctx->timegen_flush_pending = true;
MDSS_XLOG(pinfo->min_fps, pinfo->default_fps, avr_vtotal);
}
@@ -687,8 +704,10 @@ static void mdss_mdp_video_timegen_flush(struct mdss_mdp_ctl *ctl,
ctl_flush |= (BIT(31) >>
(sctx->intf_num - MDSS_MDP_INTF0));
}
+ MDSS_XLOG(ctl->intf_num, sctx ? sctx->intf_num : 0xf00, ctl_flush,
+ mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH));
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush);
- MDSS_XLOG(ctl->intf_num, sctx?sctx->intf_num:0xf00, ctl_flush);
+
}
static inline void video_vsync_irq_enable(struct mdss_mdp_ctl *ctl, bool clear)
@@ -2464,25 +2483,41 @@ static int mdss_mdp_video_early_wake_up(struct mdss_mdp_ctl *ctl)
static int mdss_mdp_video_avr_ctrl(struct mdss_mdp_ctl *ctl, bool enable)
{
struct mdss_mdp_video_ctx *ctx = NULL, *sctx = NULL;
+ struct mdss_mdp_ctl *sctl;
ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
if (!ctx || !ctx->ref_cnt) {
pr_err("invalid master ctx\n");
return -EINVAL;
}
- mdss_mdp_video_avr_ctrl_setup(ctx, ctl, ctl->is_master,
- enable);
- if (is_pingpong_split(ctl->mfd)) {
+ sctl = mdss_mdp_get_split_ctl(ctl);
+ if (sctl) {
+ sctx = (struct mdss_mdp_video_ctx *) sctl->intf_ctx[MASTER_CTX];
+ } else if (is_pingpong_split(ctl->mfd)) {
sctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[SLAVE_CTX];
if (!sctx || !sctx->ref_cnt) {
pr_err("invalid slave ctx\n");
return -EINVAL;
}
- mdss_mdp_video_avr_ctrl_setup(sctx, ctl, false,
- enable);
}
+ if (ctx->timegen_flush_pending) {
+ mdss_mdp_video_timegen_flush(ctl, sctx);
+
+ /* wait one frame for the flush to complete */
+ mdss_mdp_video_wait_one_frame(ctl);
+
+ ctx->timegen_flush_pending = false;
+ if (sctx)
+ sctx->timegen_flush_pending = false;
+ }
+
+ mdss_mdp_video_avr_ctrl_setup(ctx, ctl, ctl->is_master, enable);
+
+ if (sctx)
+ mdss_mdp_video_avr_ctrl_setup(sctx, ctl, false, enable);
+
return 0;
}
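mdss_mdp_video_wait_one_frame() above derives its wait time from the panel frame rate: rates outside 24-240 fps fall back to 24, a missing panel falls back to DEFAULT_FRAME_RATE, and one extra millisecond covers msleep() granularity. Below is a userspace model of that computation, assuming DEFAULT_FRAME_RATE is 60.

/*
 * Userspace model of mdss_mdp_video_wait_one_frame(): the panel frame
 * rate is clamped to a sane range and the wait is one frame period plus
 * 1 ms of margin for msleep() granularity.
 */
#include <stdio.h>

#define DEFAULT_FRAME_RATE 60	/* assumed default when no panel data */

static unsigned int one_frame_wait_ms(int have_panel_data, unsigned int fps)
{
	if (!have_panel_data)
		fps = DEFAULT_FRAME_RATE;
	else if (fps < 24 || fps > 240)
		fps = 24;

	return (1000 / fps) + 1;
}

int main(void)
{
	printf("60 fps   -> %u ms\n", one_frame_wait_ms(1, 60));
	printf("1000 fps -> %u ms\n", one_frame_wait_ms(1, 1000));
	printf("no panel -> %u ms\n", one_frame_wait_ms(0, 0));
	return 0;
}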
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 09a34223c2a5..472f1e8e8e3b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -2160,115 +2160,157 @@ static int __multirect_validate_mode(struct msm_fb_data_type *mfd,
return 0;
}
-static int __update_multirect_info(struct msm_fb_data_type *mfd,
- struct mdss_mdp_validate_info_t *validate_info_list,
- struct mdp_input_layer *layer_list, int ndx, int layer_cnt)
+/*
+ * Linear search for a layer with the given source pipe and rectangle number.
+ * If the rectangle number is invalid, it is dropped from the search criteria.
+ */
+static int find_layer(enum mdss_mdp_sspp_index pnum,
+ int rect_num,
+ struct mdp_input_layer *layer_list,
+ size_t layer_cnt, int start_index)
{
- struct mdss_data_type *mdata = mdss_mdp_get_mdata();
- struct mdss_mdp_validate_info_t *vinfo[MDSS_MDP_PIPE_MAX_RECTS];
- int i, ptype, max_rects, mode;
- int cnt = 1;
+ int i;
- mode = __multirect_layer_flags_to_mode(layer_list[ndx].flags);
- if (IS_ERR_VALUE(mode))
- return mode;
+ if (start_index < 0)
+ start_index = 0;
- pr_debug("layer #%d pipe_ndx=%d multirect mode=%d\n",
- ndx, layer_list[ndx].pipe_ndx, mode);
+ if (start_index >= layer_cnt)
+ return -EINVAL;
- vinfo[0] = &validate_info_list[ndx];
- vinfo[0]->layer = &layer_list[ndx];
- vinfo[0]->multirect.mode = mode;
- vinfo[0]->multirect.num = MDSS_MDP_PIPE_RECT0;
- vinfo[0]->multirect.next = NULL;
+ for (i = start_index; i < layer_cnt; i++) {
+ if (get_pipe_num_from_ndx(layer_list[i].pipe_ndx) == pnum &&
+ (rect_num < MDSS_MDP_PIPE_RECT0 ||
+ rect_num >= MDSS_MDP_PIPE_MAX_RECTS ||
+ layer_list[i].rect_num == rect_num))
+ return i;
+ }
- /* nothing to be done if multirect is disabled */
- if (mode == MDSS_MDP_PIPE_MULTIRECT_NONE)
- return cnt;
+ return -ENOENT; /* no match found */
+}
- ptype = get_pipe_type_from_ndx(layer_list[ndx].pipe_ndx);
- if (ptype == MDSS_MDP_PIPE_TYPE_INVALID) {
- pr_err("invalid pipe ndx %d\n", layer_list[ndx].pipe_ndx);
- return -EINVAL;
- }
+static int __validate_multirect_param(struct msm_fb_data_type *mfd,
+ struct mdss_mdp_validate_info_t *validate_info_list,
+ struct mdp_input_layer *layer_list,
+ int ndx, size_t layer_count)
+{
+ int multirect_mode;
+ int pnum;
+ int rect_num;
+
+ /* populate validate_info_list[ndx] with default values */
+ validate_info_list[ndx].layer = &layer_list[ndx];
+ validate_info_list[ndx].multirect.max_rects = MDSS_MDP_PIPE_MAX_RECTS;
+ validate_info_list[ndx].multirect.next = NULL;
+ validate_info_list[ndx].multirect.num = MDSS_MDP_PIPE_RECT0;
+ validate_info_list[ndx].multirect.mode = MDSS_MDP_PIPE_MULTIRECT_NONE;
+
+ multirect_mode = __multirect_layer_flags_to_mode(
+ layer_list[ndx].flags);
+ if (IS_ERR_VALUE(multirect_mode))
+ return multirect_mode;
- max_rects = mdata->rects_per_sspp[ptype] ? : 1;
+ /* nothing to be done if multirect is disabled */
+ if (multirect_mode == MDSS_MDP_PIPE_MULTIRECT_NONE)
+ return 0;
- for (i = ndx + 1; i < layer_cnt; i++) {
- if (layer_list[ndx].pipe_ndx == layer_list[i].pipe_ndx) {
- if (cnt >= max_rects) {
- pr_err("more than %d layers of type %d with same pipe_ndx=%d indexes=%d %d\n",
- max_rects, ptype,
- layer_list[ndx].pipe_ndx, ndx, i);
- return -EINVAL;
- }
+ validate_info_list[ndx].multirect.mode = multirect_mode;
- mode = __multirect_layer_flags_to_mode(
- layer_list[i].flags);
- if (IS_ERR_VALUE(mode))
- return mode;
+ pnum = get_pipe_num_from_ndx(layer_list[ndx].pipe_ndx);
+ if (get_pipe_type_from_num(pnum) != MDSS_MDP_PIPE_TYPE_DMA) {
+ pr_err("Multirect not supported on pipe ndx 0x%x\n",
+ layer_list[ndx].pipe_ndx);
+ return -EINVAL;
+ }
- if (mode != vinfo[0]->multirect.mode) {
- pr_err("unable to set different multirect modes for pipe_ndx=%d (%d %d)\n",
- layer_list[ndx].pipe_ndx, ndx, i);
- return -EINVAL;
- }
+ rect_num = layer_list[ndx].rect_num;
+ if (rect_num >= MDSS_MDP_PIPE_MAX_RECTS)
+ return -EINVAL;
+ validate_info_list[ndx].multirect.num = rect_num;
- pr_debug("found matching pair for pipe_ndx=%d (%d %d)\n",
- layer_list[i].pipe_ndx, ndx, i);
+ return 0;
+}
- vinfo[cnt] = &validate_info_list[i];
- vinfo[cnt]->multirect.num = cnt;
- vinfo[cnt]->multirect.next = vinfo[0]->layer;
- vinfo[cnt]->multirect.mode = mode;
- vinfo[cnt]->layer = &layer_list[i];
+static int __update_multirect_info(struct msm_fb_data_type *mfd,
+ struct mdss_mdp_validate_info_t *validate_info_list,
+ struct mdp_input_layer *layer_list,
+ int ndx, size_t layer_cnt, int is_rect_num_valid)
+{
+ int ret;
+ int pair_rect_num = -1;
+ int pair_index;
+
+ if (!is_rect_num_valid)
+ layer_list[ndx].rect_num = MDSS_MDP_PIPE_RECT0;
+
+ ret = __validate_multirect_param(mfd, validate_info_list,
+ layer_list, ndx, layer_cnt);
+ /* return if we hit an error or multirect mode is disabled. */
+ if (IS_ERR_VALUE(ret) ||
+ (!ret && validate_info_list[ndx].multirect.mode ==
+ MDSS_MDP_PIPE_MULTIRECT_NONE))
+ return ret;
- vinfo[cnt - 1]->multirect.next = vinfo[cnt]->layer;
- cnt++;
- }
- }
+ if (is_rect_num_valid)
+ pair_rect_num = (validate_info_list[ndx].multirect.num ==
+ MDSS_MDP_PIPE_RECT0) ? MDSS_MDP_PIPE_RECT1 :
+ MDSS_MDP_PIPE_RECT0;
- if (cnt == 1) {
- pr_err("multirect mode enabled but unable to find extra rects for pipe_ndx=%x\n",
+ pair_index = find_layer(get_pipe_num_from_ndx(
+ layer_list[ndx].pipe_ndx), pair_rect_num,
+ layer_list, layer_cnt, ndx + 1);
+ if (IS_ERR_VALUE(pair_index)) {
+ pr_err("Multirect pair not found for pipe ndx 0x%x\n",
layer_list[ndx].pipe_ndx);
return -EINVAL;
}
- return cnt;
+ if (!is_rect_num_valid)
+ layer_list[pair_index].rect_num = MDSS_MDP_PIPE_RECT1;
+
+ ret = __validate_multirect_param(mfd, validate_info_list,
+ layer_list, pair_index, layer_cnt);
+ if (IS_ERR_VALUE(ret) ||
+ (validate_info_list[ndx].multirect.mode !=
+ validate_info_list[pair_index].multirect.mode))
+ return -EINVAL;
+
+ validate_info_list[ndx].multirect.next = &layer_list[pair_index];
+ validate_info_list[pair_index].multirect.next = &layer_list[ndx];
+
+ return 0;
}
static int __validate_multirect(struct msm_fb_data_type *mfd,
- struct mdss_mdp_validate_info_t *validate_info_list,
- struct mdp_input_layer *layer_list, int ndx, int layer_cnt)
+ struct mdss_mdp_validate_info_t *validate_info_list,
+ struct mdp_input_layer *layer_list,
+ int ndx, size_t layer_cnt, int is_rect_num_valid)
{
- struct mdp_input_layer *layers[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
- int i, cnt, rc;
-
- cnt = __update_multirect_info(mfd, validate_info_list,
- layer_list, ndx, layer_cnt);
- if (IS_ERR_VALUE(cnt))
- return cnt;
-
- if (cnt <= 1) {
- /* nothing to validate in single rect mode */
- return 0;
- } else if (cnt > 2) {
- pr_err("unsupported multirect configuration, multirect cnt=%d\n",
- cnt);
- return -EINVAL;
- }
+ int ret;
+ int i;
+ struct mdp_input_layer *layers[MDSS_MDP_PIPE_MAX_RECTS];
+ struct mdp_input_layer *pair_layer;
+
+ ret = __update_multirect_info(mfd, validate_info_list,
+ layer_list, ndx, layer_cnt, is_rect_num_valid);
+ /* return if we hit an error or multirect mode is disabled. */
+ if (IS_ERR_VALUE(ret) ||
+ (!ret && validate_info_list[ndx].multirect.mode ==
+ MDSS_MDP_PIPE_MULTIRECT_NONE))
+ return ret;
- layers[0] = validate_info_list[ndx].layer;
- layers[1] = validate_info_list[ndx].multirect.next;
+ layers[validate_info_list[ndx].multirect.num] = &layer_list[ndx];
+ pair_layer = validate_info_list[ndx].multirect.next;
+ layers[pair_layer->rect_num] = pair_layer;
+ /* check against smart DMA v1.0 restrictions */
for (i = 0; i < ARRAY_SIZE(__multirect_validators); i++) {
- if (!__multirect_validators[i](layers, cnt))
+ if (!__multirect_validators[i](layers,
+ MDSS_MDP_PIPE_MAX_RECTS))
return -EINVAL;
}
-
- rc = __multirect_validate_mode(mfd, layers, cnt);
- if (IS_ERR_VALUE(rc))
- return rc;
+ ret = __multirect_validate_mode(mfd, layers, MDSS_MDP_PIPE_MAX_RECTS);
+ if (IS_ERR_VALUE(ret))
+ return ret;
return 0;
}
@@ -2416,14 +2458,14 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
if (!validate_info_list[i].layer) {
ret = __validate_multirect(mfd, validate_info_list,
- layer_list, i, layer_count);
+ layer_list, i, layer_count,
+ !!(commit->flags & MDP_COMMIT_RECT_NUM));
if (ret) {
pr_err("error validating multirect config. ret=%d i=%d\n",
ret, i);
goto end;
}
}
-
rect_num = validate_info_list[i].multirect.num;
BUG_ON(rect_num >= MDSS_MDP_PIPE_MAX_RECTS);
@@ -2782,7 +2824,8 @@ int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
for (i = 0; i < layer_count; i++) {
if (!validate_info_list[i].layer) {
ret = __update_multirect_info(mfd, validate_info_list,
- layer_list, i, layer_count);
+ layer_list, i, layer_count,
+ !!(commit->flags & MDP_COMMIT_RECT_NUM));
if (IS_ERR_VALUE(ret)) {
pr_err("error updating multirect config. ret=%d i=%d\n",
ret, i);
@@ -3035,6 +3078,12 @@ int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
wfd = mdp5_data->wfd;
output_layer = commit->output_layer;
+ if (output_layer->buffer.plane_count > MAX_PLANES) {
+ pr_err("Output buffer plane_count exceeds MAX_PLANES limit:%d\n",
+ output_layer->buffer.plane_count);
+ return -EINVAL;
+ }
+
data = mdss_mdp_wfd_add_data(wfd, output_layer);
if (IS_ERR_OR_NULL(data))
return PTR_ERR(data);
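The new find_layer() search above pairs multirect layers by source pipe and, when the caller passed a valid rect_num, by rectangle number as well. Below is a simplified standalone model of that search; the struct and the return codes are stand-ins for struct mdp_input_layer and the driver's -EINVAL/-ENOENT.

/*
 * Simplified model of find_layer(): scan the layer list from start_index
 * for an entry on the given source pipe; if rect_num is a valid rectangle
 * index it must match too, otherwise it is ignored.
 */
#include <stdio.h>
#include <stddef.h>

enum { RECT0, RECT1, MAX_RECTS };

struct layer {		/* stand-in for struct mdp_input_layer */
	int pipe_num;
	int rect_num;
};

static int find_layer(int pnum, int rect_num, const struct layer *list,
		      size_t cnt, int start_index)
{
	size_t i;

	if (start_index < 0)
		start_index = 0;
	if ((size_t)start_index >= cnt)
		return -1;	/* driver returns -EINVAL here */

	for (i = (size_t)start_index; i < cnt; i++) {
		if (list[i].pipe_num == pnum &&
		    (rect_num < RECT0 || rect_num >= MAX_RECTS ||
		     list[i].rect_num == rect_num))
			return (int)i;
	}
	return -2;		/* driver returns -ENOENT here */
}

int main(void)
{
	struct layer layers[] = {
		{ .pipe_num = 8, .rect_num = RECT0 },
		{ .pipe_num = 9, .rect_num = RECT0 },
		{ .pipe_num = 8, .rect_num = RECT1 },
	};

	/* look for pipe 8's RECT1 pair, starting past the first layer */
	printf("pair index = %d\n", find_layer(8, RECT1, layers, 3, 1));
	return 0;
}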
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 8c612e2b83fb..87fff44af389 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -3053,6 +3053,13 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
goto pipe_release;
}
+ if (l_pipe_allocated &&
+ (l_pipe->multirect.num == MDSS_MDP_PIPE_RECT1)) {
+ pr_err("Invalid: L_Pipe-%d is assigned for RECT-%d\n",
+ l_pipe->num, l_pipe->multirect.num);
+ goto pipe_release;
+ }
+
if (mdss_mdp_pipe_map(l_pipe)) {
pr_err("unable to map base pipe\n");
goto pipe_release;
@@ -3100,6 +3107,16 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
goto iommu_disable;
}
+ if (l_pipe_allocated && r_pipe_allocated &&
+ (l_pipe->num != r_pipe->num) &&
+ (r_pipe->multirect.num ==
+ MDSS_MDP_PIPE_RECT1)) {
+ pr_err("Invalid: L_Pipe-%d,RECT-%d R_Pipe-%d,RECT-%d\n",
+ l_pipe->num, l_pipe->multirect.num,
+ r_pipe->num, r_pipe->multirect.num);
+ goto iommu_disable;
+ }
+
if (mdss_mdp_pipe_map(r_pipe)) {
pr_err("unable to map right base pipe\n");
goto iommu_disable;
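The pan-display hunks above reject a freshly allocated base pipe (left or right) that ended up on RECT1, since the base layer is expected to own RECT0. Below is a small sketch of that rule; the helper and types are illustrative only.

/*
 * Sketch of the pan-display rule enforced above: a freshly allocated
 * base pipe must sit on RECT0. Helper and types are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

enum { RECT0, RECT1 };

struct base_pipe {
	int num;
	int rect_num;
	bool newly_allocated;
};

static bool base_pipe_rect_ok(const struct base_pipe *p)
{
	return !p->newly_allocated || p->rect_num == RECT0;
}

int main(void)
{
	struct base_pipe ok  = { .num = 3, .rect_num = RECT0, .newly_allocated = true };
	struct base_pipe bad = { .num = 3, .rect_num = RECT1, .newly_allocated = true };

	printf("RECT0 base pipe accepted: %d\n", base_pipe_rect_ok(&ok));
	printf("RECT1 base pipe accepted: %d\n", base_pipe_rect_ok(&bad));
	return 0;
}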
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index f128f82fab04..2e85072c4cf7 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -2599,6 +2599,7 @@ int mdss_mdp_dest_scaler_setup_locked(struct mdss_mdp_mixer *mixer)
u32 op_mode;
u32 mask;
char *ds_offset;
+ int mixer_num = 0;
if (!mixer || !mixer->ctl || !mixer->ctl->mdata)
return -EINVAL;
@@ -2658,6 +2659,14 @@ int mdss_mdp_dest_scaler_setup_locked(struct mdss_mdp_mixer *mixer)
pr_err("Failed setup destination scaler\n");
return ret;
}
+ /* Set LM Flush in order to update DS registers */
+ if (ds->flags & DS_SCALE_UPDATE) {
+ mutex_lock(&ctl->flush_lock);
+ mixer_num = mdss_mdp_mixer_get_hw_num(mixer);
+ ctl->flush_bits |=
+ BIT(mixer_num < 5 ? 6 + mixer_num : 20);
+ mutex_unlock(&ctl->flush_lock);
+ }
/*
* Clearing the flag because we don't need to program the block
* for each commit if there is no change.
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
index d0bf61679f61..22656175edf8 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_util.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -1095,7 +1095,7 @@ static int mdss_mdp_get_img(struct msmfb_data *img,
return ret;
}
}
- if (!*start) {
+ if (start && !*start) {
pr_err("start address is zero!\n");
mdss_mdp_put_img(data, rotator, dir);
return -ENOMEM;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_wfd.c b/drivers/video/fbdev/msm/mdss_mdp_wfd.c
index 71a07f6b7d39..7868dc0f1999 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_wfd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_wfd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -322,6 +322,12 @@ int mdss_mdp_wb_import_data(struct device *device,
if (wfd_data->layer.flags & MDP_LAYER_SECURE_SESSION)
flags = MDP_SECURE_OVERLAY_SESSION;
+ if (buffer->plane_count > MAX_PLANES) {
+ pr_err("buffer plane_count exceeds MAX_PLANES limit:%d",
+ buffer->plane_count);
+ return -EINVAL;
+ }
+
memset(planes, 0, sizeof(planes));
for (i = 0; i < buffer->plane_count; i++) {
diff --git a/drivers/video/fbdev/msm/mdss_rotator.c b/drivers/video/fbdev/msm/mdss_rotator.c
index fdd1c0153ce0..399a12e3dcc8 100644
--- a/drivers/video/fbdev/msm/mdss_rotator.c
+++ b/drivers/video/fbdev/msm/mdss_rotator.c
@@ -501,6 +501,12 @@ static int mdss_rotator_import_buffer(struct mdp_layer_buffer *buffer,
memset(planes, 0, sizeof(planes));
+ if (buffer->plane_count > MAX_PLANES) {
+ pr_err("buffer plane_count exceeds MAX_PLANES limit:%d\n",
+ buffer->plane_count);
+ return -EINVAL;
+ }
+
for (i = 0; i < buffer->plane_count; i++) {
planes[i].memory_id = buffer->planes[i].fd;
planes[i].offset = buffer->planes[i].offset;
@@ -2104,6 +2110,20 @@ struct mdss_rot_entry_container *mdss_rotator_req_init(
struct mdss_rot_entry_container *req;
int size, i;
+ /*
+ * Check that the input and output plane_count of each given
+ * item are within the MAX_PLANES limit
+ */
+ for (i = 0; i < count; i++) {
+ if ((items[i].input.plane_count > MAX_PLANES) ||
+ (items[i].output.plane_count > MAX_PLANES)) {
+ pr_err("Input/Output plane_count exceeds MAX_PLANES limit, input:%d, output:%d\n",
+ items[i].input.plane_count,
+ items[i].output.plane_count);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
size = sizeof(struct mdss_rot_entry_container);
size += sizeof(struct mdss_rot_entry) * count;
req = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
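The WFD, layer and rotator hunks all add the same guard: a user-supplied plane_count is validated against MAX_PLANES before it is used to walk a fixed-size planes[] array. Below is a userspace model of that check, assuming MAX_PLANES is 4 as in the MDP UAPI.

/*
 * Userspace model of the MAX_PLANES guards: reject an untrusted
 * plane_count before it is used to walk a fixed-size planes[] array.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_PLANES 4	/* assumed, as in the MDP UAPI */

struct buffer_desc {
	uint32_t plane_count;
	int fds[MAX_PLANES];
};

static int import_buffer(const struct buffer_desc *buf)
{
	uint32_t i;

	if (buf->plane_count > MAX_PLANES) {
		fprintf(stderr, "plane_count %u exceeds MAX_PLANES %d\n",
			buf->plane_count, MAX_PLANES);
		return -1;
	}

	for (i = 0; i < buf->plane_count; i++)
		printf("importing plane %u (fd %d)\n", i, buf->fds[i]);
	return 0;
}

int main(void)
{
	struct buffer_desc ok    = { .plane_count = 2, .fds = { 10, 11 } };
	struct buffer_desc bogus = { .plane_count = 64 };

	import_buffer(&ok);
	import_buffer(&bogus);
	return 0;
}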
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index f56158446c0d..1b4765837c61 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -700,13 +700,13 @@ int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev)
}
static struct mdss_smmu_domain mdss_mdp_unsec = {
- "mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_128K, (SZ_4G - SZ_128K)};
+ "mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_128K, (SZ_4G - SZ_128M)};
static struct mdss_smmu_domain mdss_rot_unsec = {
- NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128K, (SZ_4G - SZ_128K)};
+ NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128K, (SZ_4G - SZ_128M)};
static struct mdss_smmu_domain mdss_mdp_sec = {
- "mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_128K, (SZ_4G - SZ_128K)};
+ "mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_128K, (SZ_4G - SZ_128M)};
static struct mdss_smmu_domain mdss_rot_sec = {
- NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_128K, (SZ_4G - SZ_128K)};
+ NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_128K, (SZ_4G - SZ_128M)};
static const struct of_device_id mdss_smmu_dt_match[] = {
{ .compatible = "qcom,smmu_mdp_unsec", .data = &mdss_mdp_unsec},
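The SMMU hunk keeps each domain's start at SZ_128K but shrinks the size from (SZ_4G - SZ_128K) to (SZ_4G - SZ_128M), pulling the top of the IOVA window back by roughly 128 MB. A quick standalone check of the resulting ranges follows.

/*
 * Arithmetic check of the SMMU domain geometry above: start stays at
 * SZ_128K, size shrinks from (SZ_4G - SZ_128K) to (SZ_4G - SZ_128M).
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_128K (128ULL << 10)
#define SZ_128M (128ULL << 20)
#define SZ_4G   (4ULL << 30)

int main(void)
{
	uint64_t start = SZ_128K;
	uint64_t old_size = SZ_4G - SZ_128K;
	uint64_t new_size = SZ_4G - SZ_128M;

	printf("old IOVA window: [0x%llx, 0x%llx)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + old_size));
	printf("new IOVA window: [0x%llx, 0x%llx)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + new_size));
	printf("window shrinks by %llu KB\n",
	       (unsigned long long)((old_size - new_size) >> 10));
	return 0;
}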
diff --git a/drivers/video/fbdev/msm/msm_dba/adv7533.c b/drivers/video/fbdev/msm/msm_dba/adv7533.c
index 8503d84e0de4..63e178d76403 100644
--- a/drivers/video/fbdev/msm/msm_dba/adv7533.c
+++ b/drivers/video/fbdev/msm/msm_dba/adv7533.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,7 +31,7 @@
#define ADV7533_REG_CHIP_REVISION (0x00)
#define ADV7533_DSI_CEC_I2C_ADDR_REG (0xE1)
-#define ADV7533_RESET_DELAY (100)
+#define ADV7533_RESET_DELAY (10)
#define PINCTRL_STATE_ACTIVE "pmx_adv7533_active"
#define PINCTRL_STATE_SUSPEND "pmx_adv7533_suspend"
@@ -1539,14 +1539,14 @@ exit:
static int adv7533_video_on(void *client, bool on,
struct msm_dba_video_cfg *cfg, u32 flags)
{
- int ret = -EINVAL;
+ int ret = 0;
u8 lanes;
u8 reg_val = 0;
struct adv7533 *pdata = adv7533_get_platform_data(client);
if (!pdata || !cfg) {
pr_err("%s: invalid platform data\n", __func__);
- return ret;
+ return -EINVAL;
}
mutex_lock(&pdata->ops_mutex);
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c b/drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c
index f6128ae01a75..a0b45bfccb3c 100644
--- a/drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -123,7 +123,7 @@ int msm_dba_helper_i2c_write_byte(struct i2c_client *client,
return -EINVAL;
}
- pr_debug("%s: [%s:0x02%x] : W[0x%02x, 0x%02x]\n", __func__,
+ pr_debug("%s: [%s:0x%02x] : W[0x%02x, 0x%02x]\n", __func__,
client->name, addr, reg, val);
client->addr = addr;
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
index 87fe6d739acf..690d74fa5271 100644
--- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -2137,6 +2137,20 @@ error:
}
/**
+ * mdss_dsi_phy_idle_pc_exit() - Called after exiting Idle PC
+ * @ctrl: pointer to DSI controller structure
+ *
+ * Perform any programming needed after Idle PC exit.
+ */
+static int mdss_dsi_phy_idle_pc_exit(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ if (ctrl->shared_data->phy_rev == DSI_PHY_REV_30)
+ mdss_dsi_phy_v3_idle_pc_exit(ctrl);
+
+ return 0;
+}
+
+/**
* mdss_dsi_clamp_ctrl_default() - Program DSI clamps
* @ctrl: pointer to DSI controller structure
* @enable: true to enable clamps, false to disable clamps
@@ -2615,9 +2629,16 @@ int mdss_dsi_post_clkoff_cb(void *priv,
pdata = &ctrl->panel_data;
for (i = DSI_MAX_PM - 1; i >= DSI_CORE_PM; i--) {
- if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) &&
- (i != DSI_CORE_PM))
- continue;
+ /*
+ * If DSI_CTRL is active, only turn off supplies
+ * which are allowed to be disabled in the low
+ * power state
+ */
+ if (ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE)
+ if (!sdata->power_data[i].vreg_config
+ ->lp_disable_allowed)
+ continue;
+
rc = msm_dss_enable_vreg(
sdata->power_data[i].vreg_config,
sdata->power_data[i].num_vreg, 0);
@@ -2627,6 +2648,12 @@ int mdss_dsi_post_clkoff_cb(void *priv,
__mdss_dsi_pm_name(i));
rc = 0;
} else {
+ pr_debug("%s: disabled vreg for %s panel_state %d\n",
+ __func__,
+ __mdss_dsi_pm_name(i),
+ pdata->panel_info.panel_power_state);
+ sdata->power_data[i].vreg_config->disabled =
+ true;
ctrl->core_power = false;
}
}
@@ -2666,7 +2693,7 @@ int mdss_dsi_pre_clkon_cb(void *priv,
for (i = DSI_CORE_PM; i < DSI_MAX_PM; i++) {
if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) &&
(!pdata->panel_info.cont_splash_enabled) &&
- (i != DSI_CORE_PM))
+ (!sdata->power_data[i].vreg_config->disabled))
continue;
rc = msm_dss_enable_vreg(
sdata->power_data[i].vreg_config,
@@ -2676,11 +2703,21 @@ int mdss_dsi_pre_clkon_cb(void *priv,
__func__,
__mdss_dsi_pm_name(i));
} else {
+ pr_debug("%s: enabled vregs for %s\n",
+ __func__,
+ __mdss_dsi_pm_name(i));
+ sdata->power_data[i].vreg_config->disabled =
+ false;
ctrl->core_power = true;
}
}
}
+ if ((clk_type & MDSS_DSI_LINK_CLK) &&
+ (new_state == MDSS_DSI_CLK_ON) &&
+ !ctrl->panel_data.panel_info.cont_splash_enabled)
+ mdss_dsi_phy_idle_pc_exit(ctrl);
+
return rc;
}
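The post-clkoff/pre-clkon hunks above change the gating policy: while DSI_CTRL stays active, only supplies whose config allows low-power disable are switched off, and a per-supply disabled flag remembers what must be re-enabled on the next clock-on. Below is a userspace model of that decision with simplified types; the cont-splash special case is omitted.

/*
 * Userspace model of the gating decision: while DSI_CTRL stays active,
 * only supplies flagged lp_disable_allowed are turned off at clock-off,
 * and a per-supply 'disabled' flag records what must be re-enabled at
 * the next clock-on.
 */
#include <stdbool.h>
#include <stdio.h>

struct supply {
	const char *name;
	bool lp_disable_allowed;
	bool disabled;
};

static void post_clkoff(struct supply *s, int n, bool ctrl_active)
{
	int i;

	for (i = n - 1; i >= 0; i--) {
		if (ctrl_active && !s[i].lp_disable_allowed)
			continue;	/* must stay on in low power state */
		s[i].disabled = true;
		printf("clk off: disabled %s\n", s[i].name);
	}
}

static void pre_clkon(struct supply *s, int n, bool ctrl_active)
{
	int i;

	for (i = 0; i < n; i++) {
		if (ctrl_active && !s[i].disabled)
			continue;	/* was never turned off */
		s[i].disabled = false;
		printf("clk on:  enabled %s\n", s[i].name);
	}
}

int main(void)
{
	struct supply supplies[] = {
		{ "core",  true,  false },
		{ "ctrl",  false, false },
		{ "panel", true,  false },
	};

	post_clkoff(supplies, 3, true);
	pre_clkon(supplies, 3, true);
	return 0;
}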
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 0567d517eed3..ea2f19f5fbde 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -644,7 +644,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
break;
case XenbusStateInitWait:
-InitWait:
xenbus_switch_state(dev, XenbusStateConnected);
break;
@@ -655,7 +654,8 @@ InitWait:
* get Connected twice here.
*/
if (dev->state != XenbusStateConnected)
- goto InitWait; /* no InitWait seen yet, fudge it */
+ /* no InitWait seen yet, fudge it */
+ xenbus_switch_state(dev, XenbusStateConnected);
if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
"request-update", "%d", &val) < 0)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 8959b320d472..84c6add93f1f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -421,6 +421,8 @@ static int init_vqs(struct virtio_balloon *vb)
* Prime this virtqueue with one buffer so the hypervisor can
* use it to signal us later (it can't be broken yet!).
*/
+ update_balloon_stats(vb);
+
sg_init_one(&sg, vb->stats, sizeof vb->stats);
if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
< 0)
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 611f9c11da85..2e319d0c395d 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -27,10 +27,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/syscore_ops.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <xen/xen.h>
-#include <xen/xen-ops.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void)
return rc;
}
-static int xen_acpi_processor_resume(struct notifier_block *nb,
- unsigned long action, void *data)
+static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
{
+ int rc;
+
bitmap_zero(acpi_ids_done, nr_acpi_bits);
- return xen_upload_processor_pm_data();
+
+ rc = xen_upload_processor_pm_data();
+ if (rc != 0)
+ pr_info("ACPI data upload failed, error = %d\n", rc);
+}
+
+static void xen_acpi_processor_resume(void)
+{
+ static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
+
+ /*
+ * xen_upload_processor_pm_data() calls non-atomic code.
+ * However, the context for xen_acpi_processor_resume is syscore
+ * with only the boot CPU online and in an atomic context.
+ *
+ * So defer the upload to a safer point.
+ */
+ schedule_work(&wq);
}
-struct notifier_block xen_acpi_processor_resume_nb = {
- .notifier_call = xen_acpi_processor_resume,
+static struct syscore_ops xap_syscore_ops = {
+ .resume = xen_acpi_processor_resume,
};
static int __init xen_acpi_processor_init(void)
@@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void)
if (rc)
goto err_unregister;
- xen_resume_notifier_register(&xen_acpi_processor_resume_nb);
+ register_syscore_ops(&xap_syscore_ops);
return 0;
err_unregister:
@@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void)
{
int i;
- xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb);
+ unregister_syscore_ops(&xap_syscore_ops);
kfree(acpi_ids_done);
kfree(acpi_id_present);
kfree(acpi_id_cst_present);
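The xen-acpi-processor hunks replace the resume notifier with syscore_ops, and because a syscore .resume callback runs in atomic context with only the boot CPU online, the actual PM-data upload is deferred to the system workqueue. Below is a minimal module-style sketch of that pattern; names are illustrative and only the syscore/workqueue API usage mirrors the patch.

/*
 * Module-style sketch of the pattern: a syscore .resume callback runs
 * atomically with only the boot CPU online, so the sleeping work is
 * deferred to the system workqueue. Names are illustrative.
 */
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/syscore_ops.h>
#include <linux/workqueue.h>

static void resume_worker(struct work_struct *dummy)
{
	/* non-atomic work (e.g. re-uploading PM data) goes here */
	pr_info("deferred resume work running\n");
}

static DECLARE_WORK(resume_work, resume_worker);

static void sketch_syscore_resume(void)
{
	/* atomic context: only queue the real work */
	schedule_work(&resume_work);
}

static struct syscore_ops sketch_syscore_ops = {
	.resume = sketch_syscore_resume,
};

static int __init sketch_init(void)
{
	register_syscore_ops(&sketch_syscore_ops);
	return 0;
}

static void __exit sketch_exit(void)
{
	unregister_syscore_ops(&sketch_syscore_ops);
	flush_work(&resume_work);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");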